# zigpy_znp/types/nvids.py
import zigpy_znp.types as t
class BaseNvIds(t.enum_uint16):
pass
class ZclPortNvIds(BaseNvIds):
# ZCL Port NV IDs (Application Layer NV Items)
SCENE_TABLE = 0x0001
PROXY_TABLE = 0x0002
SINK_TABLE = 0x0003
class NvSysIds(t.enum_uint8):
NVDRVR = 0 # Refrain from use
ZSTACK = 1
TIMAC = 2
REMOTI = 3
BLE = 4
_6MESH = 5
TIOP = 6
APP = 7
class ExNvIds(BaseNvIds):
# OSAL NV Item IDs
LEGACY = 0x0000
ADDRMGR = 0x0001
BINDING_TABLE = 0x0002
DEVICE_LIST = 0x0003
TCLK_TABLE = 0x0004
TCLK_IC_TABLE = 0x0005
APS_KEY_DATA_TABLE = 0x0006
NWK_SEC_MATERIAL_TABLE = 0x0007
class OsalNvIds(BaseNvIds):
# Introduced by zigbeer/zigbee-shepherd and now used by Zigbee2MQTT
HAS_CONFIGURED_ZSTACK1 = 0x0F00
HAS_CONFIGURED_ZSTACK3 = 0x0060
# Although the docs say "IDs reserved for applications range from 0x0401 to 0x0FFF",
# no OSAL NVID beyond 0x03FF is writable with the MT interface when using Z-Stack 3.
ZIGPY_ZNP_MIGRATION_ID = 0x005F
# OSAL NV item IDs
EXTADDR = 0x0001
BOOTCOUNTER = 0x0002
STARTUP_OPTION = 0x0003
START_DELAY = 0x0004
# NWK Layer NV item IDs
NIB = 0x0021
DEVICE_LIST = 0x0022
ADDRMGR = 0x0023
POLL_RATE_OLD16 = 0x0024 # Deprecated when poll rate changed from 16 to 32 bits
QUEUED_POLL_RATE = 0x0025
RESPONSE_POLL_RATE = 0x0026
REJOIN_POLL_RATE = 0x0027
DATA_RETRIES = 0x0028
POLL_FAILURE_RETRIES = 0x0029
STACK_PROFILE = 0x002A
INDIRECT_MSG_TIMEOUT = 0x002B
ROUTE_EXPIRY_TIME = 0x002C
EXTENDED_PAN_ID = 0x002D
BCAST_RETRIES = 0x002E
PASSIVE_ACK_TIMEOUT = 0x002F
BCAST_DELIVERY_TIME = 0x0030
NWK_MODE = 0x0031 # Deprecated, as this will always be Mesh
CONCENTRATOR_ENABLE = 0x0032
CONCENTRATOR_DISCOVERY = 0x0033
CONCENTRATOR_RADIUS = 0x0034
POLL_RATE = 0x0035
CONCENTRATOR_RC = 0x0036
NWK_MGR_MODE = 0x0037
SRC_RTG_EXPIRY_TIME = 0x0038
ROUTE_DISCOVERY_TIME = 0x0039
NWK_ACTIVE_KEY_INFO = 0x003A
NWK_ALTERN_KEY_INFO = 0x003B
ROUTER_OFF_ASSOC_CLEANUP = 0x003C
NWK_LEAVE_REQ_ALLOWED = 0x003D
NWK_CHILD_AGE_ENABLE = 0x003E
DEVICE_LIST_KA_TIMEOUT = 0x003F
# APS Layer NV item IDs
BINDING_TABLE = 0x0041
GROUP_TABLE = 0x0042
APS_FRAME_RETRIES = 0x0043
APS_ACK_WAIT_DURATION = 0x0044
APS_ACK_WAIT_MULTIPLIER = 0x0045
BINDING_TIME = 0x0046
APS_USE_EXT_PANID = 0x0047
APS_USE_INSECURE_JOIN = 0x0048
COMMISSIONED_NWK_ADDR = 0x0049
APS_NONMEMBER_RADIUS = 0x004B # Multicast non_member radius
APS_LINK_KEY_TABLE = 0x004C
APS_DUPREJ_TIMEOUT_INC = 0x004D
APS_DUPREJ_TIMEOUT_COUNT = 0x004E
APS_DUPREJ_TABLE_SIZE = 0x004F
# System statistics and metrics NV ID
DIAGNOSTIC_STATS = 0x0050
# Additional NWK Layer NV item IDs
NWK_PARENT_INFO = 0x0051
NWK_ENDDEV_TIMEOUT_DEF = 0x0052
END_DEV_TIMEOUT_VALUE = 0x0053
END_DEV_CONFIGURATION = 0x0054
BDBNODEISONANETWORK = 0x0055 # bdbNodeIsOnANetwork attribute
BDBREPORTINGCONFIG = 0x0056
# Security NV Item IDs
SECURITY_LEVEL = 0x0061
PRECFGKEY = 0x0062
PRECFGKEYS_ENABLE = 0x0063
# Deprecated Item as there is only one security mode supported now Z3.0
SECURITY_MODE = 0x0064
SECURE_PERMIT_JOIN = 0x0065
APS_LINK_KEY_TYPE = 0x0066
APS_ALLOW_R19_SECURITY = 0x0067
DISTRIBUTED_KEY = 0x0068 # Default distributed nwk key Id. Nv ID not in use
IMPLICIT_CERTIFICATE = 0x0069
DEVICE_PRIVATE_KEY = 0x006A
CA_PUBLIC_KEY = 0x006B
KE_MAX_DEVICES = 0x006C
USE_DEFAULT_TCLK = 0x006D
# deprecated: TRUSTCENTER_ADDR (16-bit) 0x006E
RNG_COUNTER = 0x006F
RANDOM_SEED = 0x0070
TRUSTCENTER_ADDR = 0x0071
CERT_283 = 0x0072
PRIVATE_KEY_283 = 0x0073
PUBLIC_KEY_283 = 0x0074
LEGACY_NWK_SEC_MATERIAL_TABLE_START = 0x0075
LEGACY_NWK_SEC_MATERIAL_TABLE_END = 0x0080
# ZDO NV Item IDs
USERDESC = 0x0081
NWKKEY = 0x0082
PANID = 0x0083
CHANLIST = 0x0084
LEAVE_CTRL = 0x0085
SCAN_DURATION = 0x0086
LOGICAL_TYPE = 0x0087
NWKMGR_MIN_TX = 0x0088
NWKMGR_ADDR = 0x0089
ZDO_DIRECT_CB = 0x008F
# ZCL NV item IDs
SCENE_TABLE = 0x0091
MIN_FREE_NWK_ADDR = 0x0092
MAX_FREE_NWK_ADDR = 0x0093
MIN_FREE_GRP_ID = 0x0094
MAX_FREE_GRP_ID = 0x0095
MIN_GRP_IDS = 0x0096
MAX_GRP_IDS = 0x0097
OTA_BLOCK_REQ_DELAY = 0x0098
# Non-standard NV item IDs
SAPI_ENDPOINT = 0x00A1
# NV Items Reserved for Commissioning Cluster Startup Attribute Set (SAS):
# 0x00B1 - 0x00BF: Parameters related to APS and NWK layers
SAS_SHORT_ADDR = 0x00B1
SAS_EXT_PANID = 0x00B2
SAS_PANID = 0x00B3
SAS_CHANNEL_MASK = 0x00B4
SAS_PROTOCOL_VER = 0x00B5
SAS_STACK_PROFILE = 0x00B6
SAS_STARTUP_CTRL = 0x00B7
# 0x00C1 - 0x00CF: Parameters related to Security
SAS_TC_ADDR = 0x00C1
SAS_TC_MASTER_KEY = 0x00C2
SAS_NWK_KEY = 0x00C3
SAS_USE_INSEC_JOIN = 0x00C4
SAS_PRECFG_LINK_KEY = 0x00C5
SAS_NWK_KEY_SEQ_NUM = 0x00C6
SAS_NWK_KEY_TYPE = 0x00C7
SAS_NWK_MGR_ADDR = 0x00C8
# 0x00D1 - 0x00DF: Current key parameters
SAS_CURR_TC_MASTER_KEY = 0x00D1
SAS_CURR_NWK_KEY = 0x00D2
SAS_CURR_PRECFG_LINK_KEY = 0x00D3
USE_NVOCMP = 0x00FF
# NV Items Reserved for Trust Center Link Key Table entries
# 0x0101 - 0x01FF
TCLK_SEED = 0x0101 # Seed
    TCLK_JOIN_DEV = (
        0x0102  # NV ID where joining devices store their APS key. Key is in plain text.
    )
    TCLK_DEFAULT = 0x0103  # Not actually an NV item, but an ID used by SecMgr
LEGACY_TCLK_IC_TABLE_START = 0x0104 # Deprecated. Refer to EX_TCLK_IC_TABLE
LEGACY_TCLK_IC_TABLE_END = 0x0110 # IC keys, referred with shift byte
LEGACY_TCLK_TABLE_START = 0x0111 # Deprecated. Refer to EX_TCLK_TABLE
LEGACY_TCLK_TABLE_END = 0x01FF
# NV Items Reserved for APS Link Key Table entries
# 0x0201 - 0x02FF
LEGACY_APS_LINK_KEY_DATA_START = 0x0201 # Deprecated. Refer to EX_APS_KEY_TABLE
LEGACY_APS_LINK_KEY_DATA_END = 0x02FF
# NV items used to duplicate system elements
DUPLICATE_BINDING_TABLE = 0x0300
DUPLICATE_DEVICE_LIST = 0x0301
DUPLICATE_DEVICE_LIST_KA_TIMEOUT = 0x0302
# NV Items Reserved for Proxy Table entries
# 0x0310 - 0x031F
LEGACY_PROXY_TABLE_START = 0x0310 # Deprecated. Refer to EX_GP_PROXY_TABLE
LEGACY_PROXY_TABLE_END = 0x031F
# NV Items Reserved for Sink Table entries
# 0x0320 - 0x032F
LEGACY_SINK_TABLE_START = 0x0320 # Deprecated. Refer to EX_GP_SINK_TABLE
LEGACY_SINK_TABLE_END = 0x032F
APP_ITEM_1 = 0x0F01
APP_ITEM_2 = 0x0F02
APP_ITEM_3 = 0x0F03
APP_ITEM_4 = 0x0F04
APP_ITEM_5 = 0x0F05
APP_ITEM_6 = 0x0F06
RF_TEST_PARMS = 0x0F07
UNKNOWN = 0x0F08
INVALID_INDEX = 0xFFFF
NWK_NVID_TABLES = {
OsalNvIds.LEGACY_NWK_SEC_MATERIAL_TABLE_START: (
OsalNvIds.LEGACY_NWK_SEC_MATERIAL_TABLE_END
),
OsalNvIds.LEGACY_TCLK_IC_TABLE_START: OsalNvIds.LEGACY_TCLK_IC_TABLE_END,
OsalNvIds.LEGACY_TCLK_TABLE_START: OsalNvIds.LEGACY_TCLK_TABLE_END,
OsalNvIds.LEGACY_APS_LINK_KEY_DATA_START: OsalNvIds.LEGACY_APS_LINK_KEY_DATA_END,
OsalNvIds.LEGACY_PROXY_TABLE_START: OsalNvIds.LEGACY_PROXY_TABLE_END,
OsalNvIds.LEGACY_SINK_TABLE_START: OsalNvIds.LEGACY_SINK_TABLE_END,
}
NWK_NVID_TABLE_KEYS = set(NWK_NVID_TABLES.keys()) | set(NWK_NVID_TABLES.values())
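# Illustrative sketch (not part of the upstream module): legacy NVRAM tables
# are addressed by offsetting from their *_START NVID, which is why the
# mapping above pairs each start with its end. Whether the END NVID itself
# belongs to the table varies between the bundled tools; this sketch treats
# it as inclusive.
def _legacy_table_nvids(start: OsalNvIds) -> "list[int]":
    end = NWK_NVID_TABLES[start]
    # Enum members are integers, so plain arithmetic yields the raw NVIDs
    return [start + offset for offset in range(end - start + 1)]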
def is_secure_nvid(nvid: OsalNvIds) -> bool:
"""
Returns whether or not an nvid may be prevented from being read from NVRAM.
"""
if nvid in (
OsalNvIds.IMPLICIT_CERTIFICATE,
OsalNvIds.CA_PUBLIC_KEY,
OsalNvIds.DEVICE_PRIVATE_KEY,
OsalNvIds.NWK_ACTIVE_KEY_INFO,
OsalNvIds.NWK_ALTERN_KEY_INFO,
OsalNvIds.PRECFGKEY,
OsalNvIds.TCLK_SEED,
):
return True
if OsalNvIds.LEGACY_TCLK_TABLE_START <= nvid <= OsalNvIds.LEGACY_TCLK_TABLE_END:
return True
if (
OsalNvIds.LEGACY_APS_LINK_KEY_DATA_START
<= nvid
<= OsalNvIds.LEGACY_APS_LINK_KEY_DATA_END
):
return True
    return False
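# Hedged usage sketch (not upstream code): key material is flagged as secure
# and may be unreadable over the MT interface, while plain network
# parameters are not.
def _demo_is_secure_nvid() -> None:
    assert is_secure_nvid(OsalNvIds.PRECFGKEY)
    assert is_secure_nvid(OsalNvIds.TCLK_SEED)
    assert not is_secure_nvid(OsalNvIds.PANID)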
# zigpy_znp/types/cstruct.py
from __future__ import annotations
import typing
import inspect
import dataclasses
import zigpy.types as zigpy_t
import zigpy_znp.types as t
class ListSubclass(list):
# So we can call `setattr()` on it
pass
@dataclasses.dataclass(frozen=True)
class CStructField:
name: str
type: type
def __post_init__(self) -> None:
# Throw an error early
self.get_size_and_alignment()
def get_size_and_alignment(self, align=False) -> tuple[int, int]:
if issubclass(self.type, (zigpy_t.FixedIntType, t.FixedIntType)):
return self.type._size, (self.type._size if align else 1)
elif issubclass(self.type, zigpy_t.EUI64):
return 8, 1
elif issubclass(self.type, zigpy_t.KeyData):
return 16, 1
elif issubclass(self.type, CStruct):
return self.type.get_size(align=align), self.type.get_alignment(align=align)
elif issubclass(self.type, t.AddrModeAddress):
return 1 + 8, 1
else:
raise TypeError(f"Cannot get size of unknown type: {self.type!r}")
class CStruct:
_padding_byte = b"\xFF"
def __init_subclass__(cls):
super().__init_subclass__()
fields = ListSubclass()
for name, annotation in typing.get_type_hints(cls).items():
try:
field = CStructField(name=name, type=annotation)
except Exception as e:
raise TypeError(f"Invalid field {name}={annotation!r}") from e
fields.append(field)
setattr(fields, field.name, field)
cls.fields = fields
def __new__(cls, *args, **kwargs) -> CStruct:
# Like a copy constructor
if len(args) == 1 and isinstance(args[0], cls):
if kwargs:
raise ValueError(f"Cannot use copy constructor with kwargs: {kwargs!r}")
kwargs = args[0].as_dict()
args = ()
# Pretend our signature is `__new__(cls, p1: t1, p2: t2, ...)`
signature = inspect.Signature(
parameters=[
inspect.Parameter(
name=f.name,
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=None,
annotation=f.type,
)
for f in cls.fields
]
)
bound = signature.bind(*args, **kwargs)
bound.apply_defaults()
instance = super().__new__(cls)
# Set and convert the attributes to their respective types
for name, value in bound.arguments.items():
field = getattr(cls.fields, name)
if value is not None:
try:
value = field.type(value)
except Exception as e:
raise ValueError(
f"Failed to convert {name}={value!r} from type"
f" {type(value)} to {field.type}"
) from e
setattr(instance, name, value)
return instance
def as_dict(self) -> dict[str, typing.Any]:
return {f.name: getattr(self, f.name) for f in self.fields}
@classmethod
def get_padded_fields(
cls, *, align=False
) -> typing.Iterable[tuple[int, int, CStructField]]:
offset = 0
for field in cls.fields:
size, alignment = field.get_size_and_alignment(align=align)
padding = (-offset) % alignment
offset += padding + size
yield padding, size, field
@classmethod
def get_alignment(cls, *, align=False) -> int:
alignments = []
for field in cls.fields:
size, alignment = field.get_size_and_alignment(align=align)
alignments.append(alignment)
return max(alignments)
@classmethod
def get_size(cls, *, align=False) -> int:
total_size = 0
for padding, size, _field in cls.get_padded_fields(align=align):
total_size += padding + size
final_padding = (-total_size) % cls.get_alignment(align=align)
return total_size + final_padding
def serialize(self, *, align=False) -> bytes:
result = b""
for padding, _, field in self.get_padded_fields(align=align):
value = getattr(self, field.name)
if value is None:
raise ValueError(f"Field {field} cannot be empty")
try:
value = field.type(value)
except Exception as e:
raise ValueError(
f"Failed to convert {field.name}={value!r} from type"
f" {type(value)} to {field.type}"
) from e
result += self._padding_byte * padding
if isinstance(value, CStruct):
result += value.serialize(align=align)
else:
result += value.serialize()
# Pad the result to our final length
return result.ljust(self.get_size(align=align), self._padding_byte)
@classmethod
def deserialize(cls, data: bytes, *, align=False) -> tuple[CStruct, bytes]:
instance = cls()
orig_length = len(data)
expected_size = cls.get_size(align=align)
if orig_length < expected_size:
raise ValueError(
f"Data is too short, must be at least {expected_size} bytes: {data!r}"
)
for padding, _, field in cls.get_padded_fields(align=align):
data = data[padding:]
if issubclass(field.type, CStruct):
value, data = field.type.deserialize(data, align=align)
else:
value, data = field.type.deserialize(data)
setattr(instance, field.name, value)
# Strip off the final padding
data = data[expected_size - (orig_length - len(data)) :]
return instance, data
def replace(self, **kwargs) -> CStruct:
d = self.as_dict().copy()
d.update(kwargs)
return type(self)(**d)
def __eq__(self, other: object) -> bool:
if not isinstance(self, type(other)) and not isinstance(other, type(self)):
return NotImplemented
return self.as_dict() == other.as_dict()
def __repr__(self) -> str:
kwargs = ", ".join([f"{k}={v!r}" for k, v in self.as_dict().items()])
return f"{type(self).__name__}({kwargs})" | zigpy-znp | /zigpy-znp-0.11.4.tar.gz/zigpy-znp-0.11.4/zigpy_znp/types/cstruct.py | cstruct.py |
# zigpy_znp/types/named.py
from __future__ import annotations
import sys
import typing
import logging
import dataclasses
import zigpy.types
from zigpy.zdo.types import Status as ZDOStatus # noqa: F401
from . import basic, zigpy_types
LOGGER = logging.getLogger(__name__)
JSONType = typing.Dict[str, typing.Any]
class AddrMode(basic.enum_uint8):
"""Address mode."""
NOT_PRESENT = 0x00
Group = 0x01
NWK = 0x02
IEEE = 0x03
Broadcast = 0x0F
class AddrModeAddress:
def __new__(cls, mode=None, address=None):
if mode is not None and address is None and isinstance(mode, cls):
other = mode
return cls(mode=other.mode, address=other.address)
instance = super().__new__(cls)
if mode is not None and mode == AddrMode.NOT_PRESENT:
raise ValueError(f"Invalid address mode: {mode}")
instance.mode = None if mode is None else AddrMode(mode)
instance.address = (
None if address is None else instance._get_address_type()(address)
)
return instance
@classmethod
def from_zigpy_type(
cls, zigpy_addr: zigpy.types.AddrModeAddress
) -> AddrModeAddress:
return cls(
mode=AddrMode[zigpy_addr.addr_mode.name],
address=zigpy_addr.address,
)
def as_zigpy_type(self) -> zigpy.types.AddrModeAddress:
return zigpy.types.AddrModeAddress(
addr_mode=zigpy.types.AddrMode[self.mode.name],
address=self.address,
)
def _get_address_type(self):
return {
AddrMode.NWK: zigpy_types.NWK,
AddrMode.Group: zigpy_types.NWK,
AddrMode.Broadcast: zigpy_types.NWK,
AddrMode.IEEE: zigpy_types.EUI64,
}[self.mode]
@classmethod
def deserialize(cls, data: bytes) -> tuple[AddrModeAddress, bytes]:
mode, data = AddrMode.deserialize(data)
address, data = zigpy_types.EUI64.deserialize(data)
if mode != AddrMode.IEEE:
address, _ = zigpy_types.NWK.deserialize(address.serialize())
return cls(mode=mode, address=address), data
def serialize(self) -> bytes:
result = (
self.mode.serialize() + self._get_address_type()(self.address).serialize()
)
if self.mode != AddrMode.IEEE:
result += b"\x00\x00\x00\x00\x00\x00"
return result
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.mode == other.mode and self.address == other.address
def __repr__(self) -> str:
return f"{type(self).__name__}(mode={self.mode!r}, address={self.address!r})"
class GroupId(basic.uint16_t, hex_repr=True):
"""Group ID class"""
class ScanType(basic.enum_uint8):
EnergyDetect = 0x00
Active = 0x01
Passive = 0x02
Orphan = 0x03
@dataclasses.dataclass(frozen=True)
class Param:
"""Schema parameter"""
name: str
type: type = None
description: str = ""
optional: bool = False
class MissingEnumMixin:
@classmethod
def _missing_(cls, value):
if not isinstance(value, int):
raise ValueError(f"{value} is not a valid {cls.__name__}")
new_member = cls._member_type_.__new__(cls, value)
new_member._name_ = f"unknown_0x{value:02X}"
new_member._value_ = cls._member_type_(value)
if sys.version_info >= (3, 8):
# Show the warning in the calling code, not in this function
LOGGER.warning(
"Unhandled %s value: %s", cls.__name__, new_member, stacklevel=2
)
else:
LOGGER.warning("Unhandled %s value: %s", cls.__name__, new_member)
return new_member
class Status(MissingEnumMixin, basic.enum_uint8):
SUCCESS = 0x00
FAILURE = 0x01
INVALID_PARAMETER = 0x02
INVALID_TASK = 0x03
MSG_BUFFER_NOT_AVAIL = 0x04
INVALID_MSG_POINTER = 0x05
INVALID_EVENT_ID = 0x06
INVALID_INTERRUPT_ID = 0x07
NO_TIMER_AVAIL = 0x08
NV_ITEM_UNINIT = 0x09
NV_OPER_FAILED = 0x0A
INVALID_MEM_SIZE = 0x0B
NV_BAD_ITEM_LEN = 0x0C
MEM_ERROR = 0x10
BUFFER_FULL = 0x11
UNSUPPORTED_MODE = 0x12
MAC_MEM_ERROR = 0x13
SAPI_IN_PROGRESS = 0x20
SAPI_TIMEOUT = 0x21
SAPI_INIT = 0x22
NOT_AUTHORIZED = 0x7E
MALFORMED_CMD = 0x80
UNSUP_CLUSTER_CMD = 0x81
OTA_ABORT = 0x95
OTA_IMAGE_INVALID = 0x96
OTA_WAIT_FOR_DATA = 0x97
OTA_NO_IMAGE_AVAILABLE = 0x98
OTA_REQUIRE_MORE_IMAGE = 0x99
APS_FAIL = 0xB1
APS_TABLE_FULL = 0xB2
APS_ILLEGAL_REQUEST = 0xB3
APS_INVALID_BINDING = 0xB4
APS_UNSUPPORTED_ATTRIB = 0xB5
APS_NOT_SUPPORTED = 0xB6
APS_NO_ACK = 0xB7
APS_DUPLICATE_ENTRY = 0xB8
APS_NO_BOUND_DEVICE = 0xB9
APS_NOT_ALLOWED = 0xBA
APS_NOT_AUTHENTICATED = 0xBB
SEC_NO_KEY = 0xA1
SEC_OLD_FRM_COUNT = 0xA2
SEC_MAX_FRM_COUNT = 0xA3
SEC_CCM_FAIL = 0xA4
SEC_FAILURE = 0xAD
NWK_INVALID_PARAM = 0xC1
NWK_INVALID_REQUEST = 0xC2
NWK_NOT_PERMITTED = 0xC3
NWK_STARTUP_FAILURE = 0xC4
NWK_ALREADY_PRESENT = 0xC5
NWK_SYNC_FAILURE = 0xC6
NWK_TABLE_FULL = 0xC7
NWK_UNKNOWN_DEVICE = 0xC8
NWK_UNSUPPORTED_ATTRIBUTE = 0xC9
NWK_NO_NETWORKS = 0xCA
NWK_LEAVE_UNCONFIRMED = 0xCB
NWK_NO_ACK = 0xCC # not in spec
NWK_NO_ROUTE = 0xCD
# The operation is not supported in the current configuration
MAC_UNSUPPORTED = 0x18
# The operation could not be performed in the current state
MAC_BAD_STATE = 0x19
# The operation could not be completed because no memory resources were available
MAC_NO_RESOURCES = 0x1A
# For internal use only
MAC_ACK_PENDING = 0x1B
# For internal use only
MAC_NO_TIME = 0x1C
# For internal use only
MAC_TX_ABORTED = 0x1D
# For internal use only - A duplicated entry is added to the source matching table
MAC_DUPLICATED_ENTRY = 0x1E
    # The frame counter purportedly applied by the originator of the received frame
# is invalid
MAC_COUNTER_ERROR = 0xDB
# The key purportedly applied by the originator of the received frame is not allowed
MAC_IMPROPER_KEY_TYPE = 0xDC
# The security level purportedly applied by the originator of the received frame
# does not meet the minimum security level
MAC_IMPROPER_SECURITY_LEVEL = 0xDD
# The received frame was secured with legacy security which is not supported
MAC_UNSUPPORTED_LEGACY = 0xDE
# The security of the received frame is not supported
MAC_UNSUPPORTED_SECURITY = 0xDF
# The beacon was lost following a synchronization request
MAC_BEACON_LOSS = 0xE0
# The operation or data request failed because of activity on the channel
MAC_CHANNEL_ACCESS_FAILURE = 0xE1
# The MAC was not able to enter low power mode.
MAC_DENIED = 0xE2
# Unused
MAC_DISABLE_TRX_FAILURE = 0xE3
# Cryptographic processing of the secure frame failed
MAC_SECURITY_ERROR = 0xE4
# The received frame or frame resulting from an operation or data request is
# too long to be processed by the MAC
MAC_FRAME_TOO_LONG = 0xE5
# Unused
MAC_INVALID_GTS = 0xE6
# The purge request contained an invalid handle
MAC_INVALID_HANDLE = 0xE7
# The API function parameter is out of range
MAC_INVALID_PARAMETER = 0xE8
# The operation or data request failed because no acknowledgement was received
MAC_NO_ACK = 0xE9
# The scan request failed because no beacons were received or the orphan scan failed
# because no coordinator realignment was received
MAC_NO_BEACON = 0xEA
# The associate request failed because no associate response was received or the
# poll request did not return any data
MAC_NO_DATA = 0xEB
# The short address parameter of the start request was invalid
MAC_NO_SHORT_ADDRESS = 0xEC
# Unused
MAC_OUT_OF_CAP = 0xED
# A PAN identifier conflict has been detected and communicated to the PAN
# coordinator
MAC_PAN_ID_CONFLICT = 0xEE
# A coordinator realignment command has been received
MAC_REALIGNMENT = 0xEF
# The associate response, disassociate request, or indirect data transmission failed
# because the peer device did not respond before the transaction expired or was
# purged
MAC_TRANSACTION_EXPIRED = 0xF0
# The request failed because MAC data buffers are full
MAC_TRANSACTION_OVERFLOW = 0xF1
# Unused
MAC_TX_ACTIVE = 0xF2
# The operation or data request failed because the security key is not available
MAC_UNAVAILABLE_KEY = 0xF3
# The set or get request failed because the attribute is not supported
MAC_UNSUPPORTED_ATTRIBUTE = 0xF4
# The data request failed because neither the source address nor destination address
# parameters were present
MAC_INVALID_ADDRESS = 0xF5
# Unused
MAC_ON_TIME_TOO_LONG = 0xF6
# Unused
MAC_PAST_TIME = 0xF7
# The start request failed because the device is not tracking the beacon of its
# coordinator
MAC_TRACKING_OFF = 0xF8
# Unused
MAC_INVALID_INDEX = 0xF9
# The scan terminated because the PAN descriptor storage limit was reached
MAC_LIMIT_REACHED = 0xFA
# A set request was issued with a read-only identifier
MAC_READ_ONLY = 0xFB
# The scan request failed because a scan is already in progress
MAC_SCAN_IN_PROGRESS = 0xFC
# The beacon start time overlapped the coordinator transmission time
MAC_SUPERFRAME_OVERLAP = 0xFD
# The AUTOPEND pending all is turned on
MAC_AUTOACK_PENDING_ALL_ON = 0xFE
# The AUTOPEND pending all is turned off
MAC_AUTOACK_PENDING_ALL_OFF = 0xFF
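# Sketch (not upstream code): MissingEnumMixin wraps unknown status codes
# coming from the radio instead of raising ValueError, logging a warning.
# The value 0x3F is assumed not to be a defined member.
def _demo_unknown_status() -> None:
    status = Status(0x3F)
    assert status.name == "unknown_0x3F"
    assert status == 0x3F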
class ResetReason(basic.enum_uint8):
PowerUp = 0x00
External = 0x01
Watchdog = 0x02
class ResetType(basic.enum_uint8):
Hard = 0x00
Soft = 0x01
Shutdown = 0x02
class KeySource(basic.FixedList, item_type=basic.uint8_t, length=8):
pass
class StartupOptions(basic.enum_flag_uint8):
NONE = 0
ClearConfig = 1 << 0
ClearState = 1 << 1
AutoStart = 1 << 2
# FrameCounter should persist across factory resets.
# This should not be used as part of FN reset procedure.
# Set to reset the FrameCounter of all Nwk Security Material
ClearNwkFrameCounter = 1 << 7
class DeviceLogicalType(basic.enum_uint8):
Coordinator = 0
Router = 1
EndDevice = 2
class DeviceTypeCapabilities(basic.enum_flag_uint8):
Coordinator = 1 << 0
Router = 1 << 1
EndDevice = 1 << 2
class ClusterIdList(
basic.LVList, item_type=zigpy_types.ClusterId, length_type=basic.uint8_t
):
pass
class NWKList(basic.LVList, item_type=zigpy_types.NWK, length_type=basic.uint8_t):
pass
class NwkMode(basic.enum_uint8):
Star = 0
Tree = 1
    Mesh = 2
# zigpy_znp/tools/common.py
from __future__ import annotations
import sys
import typing
import logging
import argparse
import jsonschema
import coloredlogs
import zigpy_znp.types as t
import zigpy_znp.logger as log
LOG_LEVELS = [logging.INFO, logging.DEBUG, log._TRACE]
OPEN_COORDINATOR_BACKUP_SCHEMA = {
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "https://github.com/zigpy/open-coordinator-backup/schema.json",
"type": "object",
"properties": {
"metadata": {
"type": "object",
"properties": {
"format": {
"type": "string",
"pattern": "^zigpy/open-coordinator-backup$",
},
"version": {"type": "integer", "minimum": 1, "maximum": 1},
"source": {"type": "string", "pattern": "^(.*?)+@(.*?)$"},
"internal": {"type": "object"},
},
"required": ["version", "source"],
},
"stack_specific": {
"type": "object",
"properties": {
"zstack": {
"type": "object",
"properties": {
"tclk_seed": {"type": "string", "pattern": "[a-fA-F0-9]{32}"},
},
}
},
},
"coordinator_ieee": {"type": "string", "pattern": "[a-fA-F0-9]{16}"},
"pan_id": {"type": "string", "pattern": "[a-fA-F0-9]{4}"},
"extended_pan_id": {"type": "string", "pattern": "[a-fA-F0-9]{16}"},
"nwk_update_id": {"type": "integer", "minimum": 0, "maximum": 255},
"security_level": {"type": "integer", "minimum": 0, "maximum": 7},
"channel": {"type": "integer", "minimum": 11, "maximum": 26},
"channel_mask": {
"type": "array",
"items": {"type": "integer", "minimum": 11, "maximum": 26},
},
"network_key": {
"type": "object",
"properties": {
"key": {"type": "string", "pattern": "[a-fA-F0-9]{32}"},
"sequence_number": {"type": "integer", "minimum": 0, "maximum": 255},
"frame_counter": {
"type": "integer",
"minimum": 0,
"maximum": 4294967295,
},
},
"required": ["key", "sequence_number", "frame_counter"],
},
"devices": {
"type": "array",
"items": {
"type": "object",
"properties": {
"nwk_address": {
"type": ["string", "null"],
"pattern": "[a-fA-F0-9]{4}",
},
"ieee_address": {"type": "string", "pattern": "[a-fA-F0-9]{16}"},
"is_child": {"type": "boolean"},
"link_key": {
"type": "object",
"properties": {
"key": {"type": "string", "pattern": "[a-fA-F0-9]{16}"},
"tx_counter": {
"type": "integer",
"minimum": 0,
"maximum": 4294967295,
},
"rx_counter": {
"type": "integer",
"minimum": 0,
"maximum": 4294967295,
},
},
"required": ["key", "tx_counter", "rx_counter"],
},
},
"required": ["nwk_address", "ieee_address"],
},
},
},
"required": [
"metadata",
"coordinator_ieee",
"pan_id",
"extended_pan_id",
"nwk_update_id",
"security_level",
"channel",
"channel_mask",
"network_key",
"devices",
],
}
def validate_backup_json(backup: t.JSONType) -> None:
jsonschema.validate(backup, schema=OPEN_COORDINATOR_BACKUP_SCHEMA)
class CustomArgumentParser(argparse.ArgumentParser):
def parse_args(self, args: typing.Sequence[str] | None = None, namespace=None):
args = super().parse_args(args, namespace)
# Since we're running as a CLI tool, install our own log level and color logger
log.TRACE = log._TRACE
logging.addLevelName(log.TRACE, "TRACE")
# But still allow the user to configure verbosity
verbosity = args.verbosity
log_level = LOG_LEVELS[min(max(0, verbosity), len(LOG_LEVELS) - 1)]
# coloredlogs uses "spam" for level 5, not "trace"
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES.copy()
level_styles["trace"] = level_styles["spam"]
logging.getLogger().setLevel(log_level)
coloredlogs.install(
fmt=(
"%(asctime)s.%(msecs)03d"
" %(hostname)s"
" %(name)s"
" %(levelname)s %(message)s"
),
level=log_level,
level_styles=level_styles,
)
return args
class UnclosableFile:
"""
Wraps a file object so that every operation but "close" is proxied.
"""
def __init__(self, f):
self.f = f
def close(self):
return
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
return
def __getattr__(self, name):
return getattr(self.f, name)
class ClosableFileType(argparse.FileType):
"""
Allows `FileType` to always be closed properly, even with stdout and stdin.
"""
def __call__(self, string):
f = super().__call__(string)
if f not in (sys.stdin, sys.stdout, sys.stdin.buffer, sys.stdout.buffer):
return f
return UnclosableFile(f)
def setup_parser(description: str) -> argparse.ArgumentParser:
"""
Creates an ArgumentParser that sets up a logger with a configurable verbosity
and a positional serial port argument.
"""
parser = CustomArgumentParser(
description=description,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"-v",
"--verbose",
dest="verbosity",
action="count",
default=0,
help="increases verbosity",
)
parser.add_argument("serial", type=str, help="Serial port path")
    return parser
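# Hedged usage sketch (not upstream code; the serial path is an example).
# Every CLI tool below builds on this parser: repeated "-v" flags raise the
# verbosity and the positional argument is the radio's serial port.
def _demo_setup_parser() -> None:
    parser = setup_parser("Example tool")
    args = parser.parse_args(["-vv", "/dev/ttyUSB0"])
    assert args.serial == "/dev/ttyUSB0"
    assert args.verbosity == 2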
# zigpy_znp/tools/nvram_write.py
import sys
import json
import asyncio
import logging
from zigpy_znp.api import ZNP
from zigpy_znp.config import CONFIG_SCHEMA
from zigpy_znp.types.nvids import ExNvIds, OsalNvIds
from zigpy_znp.tools.common import ClosableFileType, setup_parser
LOGGER = logging.getLogger(__name__)
async def nvram_write(znp: ZNP, backup):
# First write the NVRAM items common to all radios
for nwk_nvid, value in backup["LEGACY"].items():
if "+" in nwk_nvid:
nwk_nvid, _, offset = nwk_nvid.partition("+")
offset = int(offset)
nvid = OsalNvIds[nwk_nvid] + offset
else:
nvid = OsalNvIds[nwk_nvid]
offset = None
if offset is not None:
LOGGER.info("%s+%s = %r", OsalNvIds[nwk_nvid].name, offset, value)
else:
LOGGER.info("%s = %r", OsalNvIds[nwk_nvid].name, value)
await znp.nvram.osal_write(nvid, bytes.fromhex(value), create=True)
for item_name, sub_ids in backup.items():
item_id = ExNvIds[item_name]
if item_id == ExNvIds.LEGACY:
continue
for sub_id, value in sub_ids.items():
sub_id = int(sub_id, 16)
LOGGER.info("%s[0x%04X] = %r", item_id.name, sub_id, value)
await znp.nvram.write(
item_id=item_id,
sub_id=sub_id,
value=bytes.fromhex(value),
create=True,
)
# Reset afterwards to have the new values take effect
await znp.reset()
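# Sketch (not upstream code): the minimal backup shape accepted above,
# reconstructed from the parsing logic. Hex payloads here are placeholders.
# Legacy table entries are keyed "<OsalNvIds name>+<offset>" and non-legacy
# items are grouped by ExNvIds name with hex sub-IDs.
_EXAMPLE_BACKUP = {
    "LEGACY": {
        "EXTADDR": "0011223344556677",
        "LEGACY_TCLK_TABLE_START+0": "00" * 19,
    },
    "ADDRMGR": {"0x0000": "00" * 8},
}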
async def main(argv):
parser = setup_parser("Restore a radio's NVRAM from a previous backup")
parser.add_argument(
"--input", "-i", type=ClosableFileType("r"), help="Input file", required=True
)
args = parser.parse_args(argv)
with args.input as f:
backup = json.load(f)
znp = ZNP(CONFIG_SCHEMA({"device": {"path": args.serial}}))
await znp.connect()
await nvram_write(znp, backup)
if __name__ == "__main__":
    asyncio.run(main(sys.argv[1:]))  # pragma: no cover
# zigpy_znp/tools/energy_scan.py
import sys
import asyncio
import logging
import itertools
from collections import deque, defaultdict
import zigpy.zdo.types as zdo_t
from zigpy.exceptions import NetworkNotFormed
import zigpy_znp.types as t
from zigpy_znp.tools.common import setup_parser
from zigpy_znp.zigbee.application import ControllerApplication
LOGGER = logging.getLogger(__name__)
async def perform_energy_scan(radio_path, num_scans=None):
LOGGER.info("Starting up zigpy-znp")
config = ControllerApplication.SCHEMA({"device": {"path": radio_path}})
app = ControllerApplication(config)
await app.connect()
try:
await app.start_network(read_only=True)
except NetworkNotFormed as e:
LOGGER.error("Could not start application: %s", e)
LOGGER.error("Form a network with `python -m zigpy_znp.tools.form_network`")
return
LOGGER.info("Running scan...")
# We compute an average over the last 5 scans
channel_energies = defaultdict(lambda: deque([], maxlen=5))
for i in itertools.count(start=1):
if num_scans is not None and i > num_scans:
break
rsp = await app.get_device(nwk=0x0000).zdo.Mgmt_NWK_Update_req(
zdo_t.NwkUpdate(
ScanChannels=t.Channels.ALL_CHANNELS,
ScanDuration=0x02,
ScanCount=1,
)
)
_, scanned_channels, _, _, energy_values = rsp
for channel, energy in zip(scanned_channels, energy_values):
energies = channel_energies[channel]
energies.append(energy)
total = 0xFF * len(energies)
print(f"Channel energy (mean of {len(energies)} / {energies.maxlen}):")
print("------------------------------------------------")
print(" + Lower energy is better")
print(" + Active Zigbee networks on a channel may still cause congestion")
print(" + TX on 26 in North America may be with lower power due to regulations")
print(" + Zigbee channels 15, 20, 25 fall between WiFi channels 1, 6, 11")
print(" + Some Zigbee devices only join networks on channels 15, 20, and 25")
print("------------------------------------------------")
for channel, energies in channel_energies.items():
count = sum(energies)
asterisk = "*" if channel == 26 else " "
print(
f" - {channel:>02}{asterisk} {count / total:>7.2%} "
+ "#" * int(100 * count / total)
)
print()
await app.shutdown()
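# Worked sketch (not upstream code): how the percentages above are derived.
# Each scan reports per-channel energy as a uint8 (0-255), so the running
# mean over the N retained scans is normalized by 0xFF * N.
def _demo_energy_math() -> None:
    energies = deque([51, 102, 153], maxlen=5)
    total = 0xFF * len(energies)
    assert sum(energies) / total == 0.4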
async def main(argv):
parser = setup_parser("Perform an energy scan")
parser.add_argument(
"-n",
"--num-scans",
dest="num_scans",
type=int,
default=None,
help="Number of scans to perform before exiting",
)
args = parser.parse_args(argv)
await perform_energy_scan(args.serial, num_scans=args.num_scans)
if __name__ == "__main__":
    asyncio.run(main(sys.argv[1:]))  # pragma: no cover
# zigpy_znp/tools/network_backup.py
from __future__ import annotations
import sys
import json
import asyncio
import logging
import datetime
import importlib.metadata
import zigpy.state
import zigpy_znp.types as t
from zigpy_znp.api import ZNP
from zigpy_znp.tools.common import ClosableFileType, setup_parser, validate_backup_json
from zigpy_znp.zigbee.application import ControllerApplication
LOGGER = logging.getLogger(__name__)
def zigpy_state_to_json_backup(
network_info: zigpy.state.NetworkInfo, node_info: zigpy.state.NodeInfo
) -> t.JSONType:
devices = {}
for ieee, nwk in network_info.nwk_addresses.items():
devices[ieee] = {
"ieee_address": ieee.serialize()[::-1].hex(),
"nwk_address": nwk.serialize()[::-1].hex(),
"is_child": False,
}
for ieee in network_info.children:
nwk = network_info.nwk_addresses.get(ieee, None)
devices[ieee] = {
"ieee_address": ieee.serialize()[::-1].hex(),
"nwk_address": nwk.serialize()[::-1].hex() if nwk is not None else None,
"is_child": True,
}
for key in network_info.key_table:
if key.partner_ieee not in devices:
devices[key.partner_ieee] = {
"ieee_address": key.partner_ieee.serialize()[::-1].hex(),
"nwk_address": None,
"is_child": False,
}
devices[key.partner_ieee]["link_key"] = {
"key": key.key.serialize().hex(),
"tx_counter": key.tx_counter,
"rx_counter": key.rx_counter,
}
return {
"metadata": {
"version": 1,
"format": "zigpy/open-coordinator-backup",
"source": None,
"internal": None,
},
"coordinator_ieee": node_info.ieee.serialize()[::-1].hex(),
"pan_id": network_info.pan_id.serialize()[::-1].hex(),
"extended_pan_id": network_info.extended_pan_id.serialize()[::-1].hex(),
"nwk_update_id": network_info.nwk_update_id,
"security_level": network_info.security_level,
"channel": network_info.channel,
"channel_mask": list(network_info.channel_mask),
"network_key": {
"key": network_info.network_key.key.serialize().hex(),
"sequence_number": network_info.network_key.seq,
"frame_counter": network_info.network_key.tx_counter,
},
"devices": sorted(devices.values(), key=lambda d: d["ieee_address"]),
}
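# Sketch (not upstream code): why every address above is serialized and then
# reversed. zigpy types serialize little-endian, while the open coordinator
# backup format stores big-endian hex strings.
def _demo_ieee_hex() -> None:
    ieee, _ = t.EUI64.deserialize(bytes.fromhex("04030201004b1200"))
    assert ieee.serialize()[::-1].hex() == "00124b0001020304"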
async def backup_network(znp: ZNP) -> t.JSONType:
await znp.load_network_info(load_devices=True)
obj = zigpy_state_to_json_backup(
network_info=znp.network_info,
node_info=znp.node_info,
)
now = datetime.datetime.now().astimezone()
obj["metadata"]["source"] = f"zigpy-znp@{importlib.metadata.version('zigpy-znp')}"
obj["metadata"]["internal"] = {
"creation_time": now.isoformat(timespec="seconds"),
"zstack": {
"version": znp.version,
},
}
if znp.network_info.stack_specific:
obj["stack_specific"] = znp.network_info.stack_specific
# Ensure our generated backup is valid
validate_backup_json(obj)
return obj
async def main(argv: list[str]) -> None:
parser = setup_parser("Backup adapter network settings")
parser.add_argument(
"--output", "-o", type=ClosableFileType("w"), help="Output file", default="-"
)
args = parser.parse_args(argv)
with args.output as f:
znp = ZNP(ControllerApplication.SCHEMA({"device": {"path": args.serial}}))
await znp.connect()
backup_obj = await backup_network(znp)
znp.close()
f.write(json.dumps(backup_obj, indent=4))
if __name__ == "__main__":
    asyncio.run(main(sys.argv[1:]))  # pragma: no cover
# zigpy_znp/tools/network_restore.py
from __future__ import annotations
import sys
import json
import asyncio
import zigpy.state
import zigpy.zdo.types as zdo_t
import zigpy_znp.const as const
import zigpy_znp.types as t
from zigpy_znp.api import ZNP
from zigpy_znp.tools.common import ClosableFileType, setup_parser, validate_backup_json
from zigpy_znp.zigbee.application import ControllerApplication
def json_backup_to_zigpy_state(
backup: t.JSONType,
) -> tuple[zigpy.state.NetworkInfo, zigpy.state.NodeInfo]:
"""
Converts a JSON backup into a zigpy network and node info tuple.
"""
node_info = zigpy.state.NodeInfo()
node_info.nwk = 0x0000
node_info.logical_type = zdo_t.LogicalType.Coordinator
node_info.ieee, _ = t.EUI64.deserialize(
bytes.fromhex(backup["coordinator_ieee"])[::-1]
)
network_info = zigpy.state.NetworkInfo()
network_info.pan_id, _ = t.NWK.deserialize(bytes.fromhex(backup["pan_id"])[::-1])
network_info.extended_pan_id, _ = t.EUI64.deserialize(
bytes.fromhex(backup["extended_pan_id"])[::-1]
)
network_info.nwk_update_id = backup["nwk_update_id"]
network_info.nwk_manager_id = 0x0000
network_info.channel = backup["channel"]
network_info.channel_mask = t.Channels.from_channel_list(backup["channel_mask"])
network_info.security_level = backup["security_level"]
network_info.stack_specific = backup.get("stack_specific")
network_info.tc_link_key = zigpy.state.Key()
network_info.tc_link_key.key = const.DEFAULT_TC_LINK_KEY
network_info.network_key = zigpy.state.Key()
network_info.network_key.key, _ = t.KeyData.deserialize(
bytes.fromhex(backup["network_key"]["key"])
)
network_info.network_key.tx_counter = backup["network_key"]["frame_counter"]
network_info.network_key.rx_counter = 0
network_info.network_key.partner_ieee = None
network_info.network_key.seq = backup["network_key"]["sequence_number"]
network_info.children = []
network_info.nwk_addresses = {}
for obj in backup["devices"]:
node = zigpy.state.NodeInfo()
if obj["nwk_address"] is not None:
node.nwk, _ = t.NWK.deserialize(bytes.fromhex(obj["nwk_address"])[::-1])
else:
node.nwk = None
node.ieee, _ = t.EUI64.deserialize(bytes.fromhex(obj["ieee_address"])[::-1])
node.logical_type = None
# The `is_child` key is currently optional
if obj.get("is_child", True):
network_info.children.append(node.ieee)
if node.nwk is not None:
network_info.nwk_addresses[node.ieee] = node.nwk
if "link_key" in obj:
key = zigpy.state.Key()
key.key, _ = t.KeyData.deserialize(bytes.fromhex(obj["link_key"]["key"]))
key.tx_counter = obj["link_key"]["tx_counter"]
key.rx_counter = obj["link_key"]["rx_counter"]
key.partner_ieee = node.ieee
key.seq = 0
network_info.key_table.append(key)
# XXX: Devices that are not children, have no NWK address, and have no link key
# are effectively ignored, since there is no place to write them
return network_info, node_info
async def restore_network(
radio_path: str,
backup: t.JSONType,
counter_increment: int,
) -> None:
network_info, node_info = json_backup_to_zigpy_state(backup)
network_info.network_key.tx_counter += counter_increment
znp = ZNP(ControllerApplication.SCHEMA({"device": {"path": radio_path}}))
await znp.connect()
await znp.write_network_info(network_info=network_info, node_info=node_info)
await znp.reset()
znp.close()
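# Usage sketch (not upstream code; the serial path and file name are
# examples). The counter increment exists because the radio may have sent
# frames since the backup was taken; restoring a frame counter that is too
# low would make devices reject the coordinator's traffic as replayed.
async def _demo_restore(radio_path: str = "/dev/ttyUSB0") -> None:
    with open("backup.json") as f:
        backup = json.load(f)
    validate_backup_json(backup)
    await restore_network(radio_path, backup, counter_increment=2500)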
async def main(argv: list[str]) -> None:
parser = setup_parser("Restore adapter network settings")
parser.add_argument(
"--input", "-i", type=ClosableFileType("r"), help="Input file", required=True
)
parser.add_argument(
"--counter-increment",
"-c",
type=t.uint32_t,
help="Counter increment",
default=2500,
)
args = parser.parse_args(argv)
with args.input as f:
backup = json.load(f)
validate_backup_json(backup)
await restore_network(
radio_path=args.serial,
backup=backup,
counter_increment=args.counter_increment,
)
if __name__ == "__main__":
    asyncio.run(main(sys.argv[1:]))  # pragma: no cover
# zigpy_znp/tools/network_scan.py
import sys
import time
import asyncio
import logging
import itertools
import zigpy_znp.types as t
import zigpy_znp.commands as c
from zigpy_znp.api import ZNP
from zigpy_znp.config import CONFIG_SCHEMA
from zigpy_znp.types.nvids import OsalNvIds
from zigpy_znp.tools.common import setup_parser
LOGGER = logging.getLogger(__name__)
async def scan_once(znp: ZNP, channels: t.Channels, duration_exp: int):
async with znp.capture_responses(
[
c.ZDO.BeaconNotifyInd.Callback(partial=True),
c.ZDO.NwkDiscoveryCnf.Callback(partial=True),
]
) as updates:
await znp.request(
c.ZDO.NetworkDiscoveryReq.Req(
Channels=channels,
ScanDuration=duration_exp,
),
RspStatus=t.Status.SUCCESS,
)
while True:
update = await updates.get()
if isinstance(update, c.ZDO.NwkDiscoveryCnf.Callback):
break
for beacon in update.Beacons:
yield beacon
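# Worked sketch (not upstream code): what the ScanDuration exponent means.
# Per IEEE 802.15.4, a scan dwells on each channel for
# aBaseSuperframeDuration * (2**n + 1) symbols, with 16 us symbols on
# 2.4 GHz, so the default n=2 dwells roughly 77 ms per channel.
def _scan_dwell_time_s(duration_exp: int) -> float:
    A_BASE_SUPERFRAME_DURATION = 960  # symbols
    SYMBOL_TIME_S = 16e-6
    return A_BASE_SUPERFRAME_DURATION * (2**duration_exp + 1) * SYMBOL_TIME_S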
async def network_scan(
znp: ZNP, channels: t.Channels, num_scans: int, duration_exp: int, duplicates: bool
) -> None:
# Network scanning only works if our device is not joined to a network.
# If we don't start Z-Stack 3 it will always work but Z-Stack 1 keeps the device
# state in the NIB, which we have to temporarily delete in order for the scan to be
# possible.
if znp.version == 1.2:
previous_nib = await znp.nvram.osal_read(OsalNvIds.NIB, item_type=t.NIB)
await znp.nvram.osal_delete(OsalNvIds.NIB)
else:
previous_nib = None
previous_channels = await znp.nvram.osal_read(
OsalNvIds.CHANLIST, item_type=t.Channels
)
await znp.nvram.osal_write(OsalNvIds.CHANLIST, t.Channels.ALL_CHANNELS)
try:
await znp.request_callback_rsp(
request=c.SYS.ResetReq.Req(Type=t.ResetType.Soft),
callback=c.SYS.ResetInd.Callback(partial=True),
)
seen_beacons = set()
for i in itertools.count(start=1):
if num_scans is not None and i > num_scans:
break
async for beacon in scan_once(znp, channels, duration_exp):
if not duplicates:
key = beacon.replace(Depth=0, LQI=0).serialize()
if key in seen_beacons:
continue
seen_beacons.add(key)
print(
f"{time.time():0.2f}"
f" [EPID: {beacon.ExtendedPanId}, PID: {beacon.PanId},"
f" from: {beacon.Src}]: Channel={beacon.Channel:2>}"
f" PermitJoins={beacon.PermitJoining}"
f" RtrCapacity={beacon.RouterCapacity}"
f" DevCapacity={beacon.DeviceCapacity}"
f" ProtoVer={beacon.ProtocolVersion}"
f" StackProf={beacon.StackProfile}"
f" Depth={beacon.Depth:>3}"
f" UpdateId={beacon.UpdateId:>2}"
f" LQI={beacon.LQI:>3}"
)
finally:
if previous_nib is not None:
await znp.nvram.osal_write(OsalNvIds.NIB, previous_nib, create=True)
await znp.nvram.osal_write(OsalNvIds.CHANLIST, previous_channels)
znp.close()
async def main(argv):
parser = setup_parser("Actively scan for Zigbee networks")
parser.add_argument(
"-c",
"--channels",
dest="channels",
type=lambda s: t.Channels.from_channel_list(map(int, s.split(","))),
default=t.Channels.ALL_CHANNELS,
help="Channels on which to scan for networks",
)
parser.add_argument(
"-n",
"--num_scans",
dest="num_scans",
type=int,
default=None,
help="Number of scans to perform. Default is to scan forever.",
)
parser.add_argument(
"-d",
"--duration-exp",
dest="duration_exp",
type=int,
default=2,
help="Scan duration exponent",
)
parser.add_argument(
"-a",
"--allow-duplicates",
dest="allow_duplicates",
action="store_true",
default=False,
help="Allow duplicate beacons that differ only by LQI and depth",
)
args = parser.parse_args(argv)
znp = ZNP(CONFIG_SCHEMA({"device": {"path": args.serial}}))
await znp.connect()
await network_scan(
znp=znp,
channels=args.channels,
num_scans=args.num_scans,
duration_exp=args.duration_exp,
duplicates=args.allow_duplicates,
)
znp.close()
if __name__ == "__main__":
    asyncio.run(main(sys.argv[1:]))  # pragma: no cover
# zigpy_znp/tools/nvram_read.py
import sys
import json
import asyncio
import logging
import zigpy_znp.types as t
from zigpy_znp.api import ZNP
from zigpy_znp.config import CONFIG_SCHEMA
from zigpy_znp.exceptions import SecurityError, CommandNotRecognized
from zigpy_znp.types.nvids import NWK_NVID_TABLES, ExNvIds, OsalNvIds
from zigpy_znp.tools.common import ClosableFileType, setup_parser
LOGGER = logging.getLogger(__name__)
async def nvram_read(znp: ZNP):
data = {}
data["LEGACY"] = {}
# Legacy items need to be handled first, since they are named
for nwk_nvid in OsalNvIds:
if nwk_nvid == OsalNvIds.INVALID_INDEX:
continue
# Tables span ranges of items. Naming them properly is useful.
if nwk_nvid in NWK_NVID_TABLES:
start = nwk_nvid
end = NWK_NVID_TABLES[nwk_nvid]
for offset in range(0, end - start):
key = f"{nwk_nvid.name}+{offset}"
try:
value = await znp.nvram.osal_read(
nwk_nvid + offset, item_type=t.Bytes
)
except SecurityError:
LOGGER.warning("Read disallowed for %s", key)
continue
except KeyError:
break
LOGGER.info("%s = %r", key, value.hex())
data["LEGACY"][key] = value.hex()
else:
try:
value = await znp.nvram.osal_read(nwk_nvid, item_type=t.Bytes)
except KeyError:
continue
except SecurityError:
LOGGER.warning("Read disallowed for %s", nwk_nvid)
continue
LOGGER.info("%s = %r", nwk_nvid, value.hex())
data["LEGACY"][nwk_nvid.name] = value.hex()
for nvid in ExNvIds:
# Skip the LEGACY items, we did them above
if nvid == ExNvIds.LEGACY:
continue
for sub_id in range(2**16):
try:
value = await znp.nvram.read(
item_id=nvid, sub_id=sub_id, item_type=t.Bytes
)
except CommandNotRecognized:
# CC2531 only supports the legacy NVRAM interface, even on Z-Stack 3
return data
except KeyError:
# Once a read fails, no later reads will succeed
break
LOGGER.info("%s[0x%04X] = %r", nvid.name, sub_id, value.hex())
data.setdefault(nvid.name, {})[f"0x{sub_id:04X}"] = value.hex()
return data
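# Hedged sketch (not upstream code): reading a single legacy item directly,
# here the adapter's IEEE address as raw bytes.
async def _demo_read_extaddr(znp: ZNP) -> t.Bytes:
    return await znp.nvram.osal_read(OsalNvIds.EXTADDR, item_type=t.Bytes)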
async def main(argv):
parser = setup_parser("Backup a radio's NVRAM")
parser.add_argument(
"--output", "-o", type=ClosableFileType("w"), help="Output file", default="-"
)
args = parser.parse_args(argv)
with args.output as f:
znp = ZNP(CONFIG_SCHEMA({"device": {"path": args.serial}}))
await znp.connect()
obj = await nvram_read(znp)
znp.close()
f.write(json.dumps(obj, indent=4) + "\n")
if __name__ == "__main__":
    asyncio.run(main(sys.argv[1:]))  # pragma: no cover
# zigpy_znp/tools/flash_write.py
from __future__ import annotations
import sys
import asyncio
import logging
import async_timeout
import zigpy_znp.types as t
import zigpy_znp.commands as c
from zigpy_znp.api import ZNP
from zigpy_znp.config import CONFIG_SCHEMA
from zigpy_znp.tools.common import ClosableFileType, setup_parser
from zigpy_znp.tools.nvram_reset import nvram_reset
LOGGER = logging.getLogger(__name__)
def compute_crc16(data: bytes) -> int:
poly = 0x1021
crc = 0x0000
for byte in data:
for _ in range(8):
msb = 1 if (crc & 0x8000) else 0
crc <<= 1
crc &= 0xFFFF
if byte & 0x80:
crc |= 0x0001
if msb:
crc ^= poly
byte <<= 1
return crc
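# Worked sketch (not upstream code): the bootloader CRC is a bit-serial
# CRC-16 with polynomial 0x1021 and a zero initial value. For these inputs
# the polynomial never triggers, so single set bits land where you would
# expect after the shifts.
def _demo_crc16() -> None:
    assert compute_crc16(b"") == 0x0000
    assert compute_crc16(b"\x01") == 0x0001
    assert compute_crc16(b"\x01\x00") == 0x0100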
def get_firmware_crcs(firmware: bytes) -> tuple[int, int]:
# There is room for *two* CRCs in the firmware file: the expected and the computed
firmware_without_crcs = (
firmware[: c.ubl.IMAGE_CRC_OFFSET]
+ firmware[c.ubl.IMAGE_CRC_OFFSET + 4 :]
+ b"\x00\x00"
)
# We only use the first one. The second one is written by the bootloader into flash.
real_crc = int.from_bytes(
firmware[c.ubl.IMAGE_CRC_OFFSET : c.ubl.IMAGE_CRC_OFFSET + 2], "little"
)
return real_crc, compute_crc16(firmware_without_crcs)
async def write_firmware(znp: ZNP, firmware: bytes, reset_nvram: bool):
if len(firmware) != c.ubl.IMAGE_SIZE:
raise ValueError(
f"Firmware is the wrong size."
f" Expected {c.ubl.IMAGE_SIZE}, got {len(firmware)}"
)
expected_crc, computed_crc = get_firmware_crcs(firmware)
if expected_crc != computed_crc:
raise ValueError(
f"Firmware CRC is incorrect."
f" Expected 0x{expected_crc:04X}, got 0x{computed_crc:04X}"
)
try:
async with async_timeout.timeout(5):
handshake_rsp = await znp.request_callback_rsp(
request=c.UBL.HandshakeReq.Req(),
callback=c.UBL.HandshakeRsp.Callback(partial=True),
)
except asyncio.TimeoutError:
raise RuntimeError(
"Did not receive a bootloader handshake response!"
" Make sure your adapter has just been plugged in and"
" nothing else has had a chance to communicate with it. Alternatively, "
" press the button furthest from the USB port. The LED should turn red."
)
if handshake_rsp.Status != c.ubl.BootloaderStatus.SUCCESS:
raise RuntimeError(f"Bad bootloader handshake response: {handshake_rsp}")
# All reads and writes are this size
buffer_size = handshake_rsp.BufferSize
for offset in range(0, c.ubl.IMAGE_SIZE, buffer_size):
address = offset // c.ubl.FLASH_WORD_SIZE
LOGGER.info("Write progress: %0.2f%%", (100.0 * offset) / c.ubl.IMAGE_SIZE)
write_rsp = await znp.request_callback_rsp(
request=c.UBL.WriteReq.Req(
FlashWordAddr=address,
Data=t.TrailingBytes(firmware[offset : offset + buffer_size]),
),
callback=c.UBL.WriteRsp.Callback(partial=True),
)
assert write_rsp.Status == c.ubl.BootloaderStatus.SUCCESS
# Now we have to read it all back
for offset in range(0, c.ubl.IMAGE_SIZE, buffer_size):
address = offset // c.ubl.FLASH_WORD_SIZE
LOGGER.info(
"Verification progress: %0.2f%%", (100.0 * offset) / c.ubl.IMAGE_SIZE
)
read_rsp = await znp.request_callback_rsp(
request=c.UBL.ReadReq.Req(
FlashWordAddr=address,
),
callback=c.UBL.ReadRsp.Callback(partial=True),
)
assert read_rsp.Status == c.ubl.BootloaderStatus.SUCCESS
assert read_rsp.FlashWordAddr == address
assert read_rsp.Data == firmware[offset : offset + buffer_size]
# This seems to cause the bootloader to compute and verify the CRC
enable_rsp = await znp.request_callback_rsp(
request=c.UBL.EnableReq.Req(),
callback=c.UBL.EnableRsp.Callback(partial=True),
)
assert enable_rsp.Status == c.ubl.BootloaderStatus.SUCCESS
if reset_nvram:
LOGGER.info("Success! Waiting for a few seconds to leave the bootloader...")
await asyncio.sleep(5)
await nvram_reset(znp)
else:
LOGGER.info("Unplug your adapter to leave bootloader mode!")
async def main(argv):
parser = setup_parser("Write firmware to a radio")
parser.add_argument(
"--input",
"-i",
type=ClosableFileType("rb"),
help="Input .bin file",
required=True,
)
parser.add_argument(
"--reset",
"-r",
action="store_true",
help="Resets the device's NVRAM after upgrade",
default=False,
)
args = parser.parse_args(argv)
with args.input as f:
firmware = f.read()
znp = ZNP(
CONFIG_SCHEMA(
{"znp_config": {"skip_bootloader": False}, "device": {"path": args.serial}}
)
)
# The bootloader handshake must be the very first command
await znp.connect(test_port=False)
await write_firmware(znp=znp, firmware=firmware, reset_nvram=args.reset)
znp.close()
if __name__ == "__main__":
    asyncio.run(main(sys.argv[1:]))  # pragma: no cover
# zigpy_znp/tools/nvram_reset.py
import sys
import asyncio
import logging
from zigpy_znp.api import ZNP
from zigpy_znp.config import CONFIG_SCHEMA
from zigpy_znp.types.nvids import (
NWK_NVID_TABLES,
NWK_NVID_TABLE_KEYS,
ExNvIds,
OsalNvIds,
)
from zigpy_znp.tools.common import setup_parser
LOGGER = logging.getLogger(__name__)
async def nvram_reset(znp: ZNP) -> None:
# The legacy items are shared by all Z-Stack versions
for nvid in OsalNvIds:
if nvid in NWK_NVID_TABLES:
start = nvid
end = NWK_NVID_TABLES[nvid]
for nvid in range(start, end + 1):
deleted = await znp.nvram.osal_delete(nvid)
if not deleted:
break
LOGGER.info("Cleared %s[%s]", start, nvid - start)
elif nvid in NWK_NVID_TABLE_KEYS:
continue
else:
if await znp.nvram.osal_delete(nvid):
LOGGER.info("Cleared %s", nvid)
else:
LOGGER.debug("Item does not exist: %s", nvid)
if znp.version >= 3.30:
for nvid in ExNvIds:
# Skip the LEGACY items, we did them above
if nvid == ExNvIds.LEGACY:
continue
for sub_id in range(2**16):
existed = await znp.nvram.delete(item_id=nvid, sub_id=sub_id)
LOGGER.info("Cleared %s[0x%04X]", nvid.name, sub_id)
if not existed:
# Once a delete fails, no later reads will succeed
break
LOGGER.info("Resetting...")
await znp.reset()
async def main(argv):
parser = setup_parser("Reset a radio's state")
parser.add_argument(
"-c",
"--clear",
action="store_true",
default=False,
help="Deprecated: tries to delete every NVRAM value.",
)
args = parser.parse_args(argv)
if args.clear:
LOGGER.warning(
"The -c/--clear command line argument now the default"
" and will be removed in a future release."
)
znp = ZNP(CONFIG_SCHEMA({"device": {"path": args.serial}}))
await znp.connect()
await nvram_reset(znp)
if __name__ == "__main__":
    asyncio.run(main(sys.argv[1:]))  # pragma: no cover
# zigpy_znp/tools/flash_read.py
import sys
import asyncio
import logging
import async_timeout
import zigpy_znp.commands as c
from zigpy_znp.api import ZNP
from zigpy_znp.config import CONFIG_SCHEMA
from zigpy_znp.tools.common import ClosableFileType, setup_parser
LOGGER = logging.getLogger(__name__)
async def read_firmware(znp: ZNP) -> bytearray:
try:
async with async_timeout.timeout(5):
handshake_rsp = await znp.request_callback_rsp(
request=c.UBL.HandshakeReq.Req(),
callback=c.UBL.HandshakeRsp.Callback(partial=True),
)
except asyncio.TimeoutError:
raise RuntimeError(
"Did not receive a bootloader handshake response!"
" Make sure your adapter has just been plugged in and"
" nothing else has had a chance to communicate with it. Alternatively,"
" press the button furthest from the USB port. The LED should turn red."
)
if handshake_rsp.Status != c.ubl.BootloaderStatus.SUCCESS:
raise RuntimeError(f"Bad bootloader handshake response: {handshake_rsp}")
# All reads and writes are this size
buffer_size = handshake_rsp.BufferSize
data = bytearray()
for offset in range(0, c.ubl.IMAGE_SIZE, buffer_size):
address = offset // c.ubl.FLASH_WORD_SIZE
LOGGER.info("Progress: %0.2f%%", (100.0 * offset) / c.ubl.IMAGE_SIZE)
read_rsp = await znp.request_callback_rsp(
request=c.UBL.ReadReq.Req(FlashWordAddr=address),
callback=c.UBL.ReadRsp.Callback(partial=True),
)
assert read_rsp.Status == c.ubl.BootloaderStatus.SUCCESS
assert read_rsp.FlashWordAddr == address
assert len(read_rsp.Data) == buffer_size
data.extend(read_rsp.Data)
return data
async def main(argv):
parser = setup_parser("Backup a radio's firmware")
parser.add_argument(
"--output",
"-o",
type=ClosableFileType("wb"),
help="Output .bin file",
required=True,
)
args = parser.parse_args(argv)
with args.output as f:
znp = ZNP(
CONFIG_SCHEMA(
{
"znp_config": {"skip_bootloader": False},
"device": {"path": args.serial},
}
)
)
# The bootloader handshake must be the very first command
await znp.connect(test_port=False)
data = await read_firmware(znp)
znp.close()
f.write(data)
LOGGER.info("Unplug your adapter to leave bootloader mode!")
if __name__ == "__main__":
    asyncio.run(main(sys.argv[1:]))  # pragma: no cover
# zigpy_znp/commands/util.py
import zigpy_znp.types as t
class NodeRelation(t.enum_uint8):
PARENT = 0
CHILD_RFD = 1
CHILD_RFD_RX_IDLE = 2
CHILD_FFD = 3
CHILD_FFD_RX_IDLE = 4
NEIGHBOR = 5
OTHER = 6
UNKNOWN_8 = 8
NOTUSED = 0xFF
class BindEntry(t.Struct):
srcEP: t.uint8_t
dstGroupMode: t.uint8_t # 0 - Normal address index, 1 - Group address
dstIdx: t.uint16_t
dstEP: t.uint8_t
clusterIdList: t.ClusterIdList
class AgingEndDevice(t.CStruct):
endDevCfg: t.uint8_t
deviceTimeout: t.uint32_t
class LinkInfo(t.CStruct):
txCounter: t.uint8_t # Counter of transmission success/failures
txCost: t.uint8_t # Average of sending rssi values if link status is enabled
# i.e. NWK_LINK_STATUS_PERIOD is defined as non zero
rxLqi: t.uint8_t # average of received rssi values
# needs to be converted to link cost (1-7) before used
inKeySeqNum: t.uint8_t # security key sequence number
    inFrmCntr: t.uint32_t  # security frame counter
txFailure: t.uint16_t # higher values indicate more failures
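# Sketch (not upstream code): struct alignment matters here because newer
# Z-Stack builds store these structs aligned in NVRAM. LinkInfo's uint32
# member gives it 4-byte alignment, so the trailing uint16 forces two bytes
# of tail padding:
def _demo_linkinfo_size() -> None:
    assert LinkInfo.get_size(align=False) == 10
    assert LinkInfo.get_size(align=True) == 12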
class Device(t.CStruct):
shortAddr: t.NWK
addrIdx: t.uint16_t
nodeRelation: NodeRelation
devStatus: t.uint8_t
assocCnt: t.uint8_t
age: t.uint8_t
linkInfo: LinkInfo
endDev: AgingEndDevice
timeoutCounter: t.uint32_t
keepaliveRcv: t.uint8_t # not a bool, can be 0xFF
ctrl: t.uint8_t # XXX: This field is only present in Z-Stack 3.30+ !!!
class Key(t.FixedList, item_type=t.uint8_t, length=42):
pass
class RandomNumbers(t.FixedList, item_type=t.uint8_t, length=100):
pass
class LEDMode(t.enum_uint8):
OFF = 0
ON = 1
BLINK = 2
FLASH = 3
TOGGLE = 4
class UTIL(t.CommandsBase, subsystem=t.Subsystem.UTIL):
# MAC Reset command to reset MAC state machine
GetDeviceInfo = t.CommandDef(
t.CommandType.SREQ,
0x00,
req_schema=(),
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("IEEE", t.EUI64, "Extended address of the device"),
t.Param("NWK", t.NWK, "Short address of the device"),
t.Param("DeviceType", t.DeviceTypeCapabilities, "Device type"),
t.Param("DeviceState", t.DeviceState, "Indicated the state of the device"),
t.Param(
"AssociatedDevices",
t.NWKList,
(
"Network addresses of Reduce Function Devices associated "
"to the local device."
),
),
),
)
# read a block of parameters from Non-Volatile storage of the target device
GetNVInfo = t.CommandDef(
t.CommandType.SREQ,
0x01,
req_schema=(),
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("IEEE", t.EUI64, "IEEE address of the device"),
t.Param(
"ScanChannels",
t.uint32_t,
"Channels to be scanned when starting the device. Big endian!",
),
t.Param(
"PanId",
t.PanId,
"The PAN Id to use. This parameter is ignored if Pan",
),
# ToDo: Make this an enum
t.Param("SecurityLevel", t.uint8_t, "Security level of this data frame"),
t.Param("PreConfigKey", t.KeyData, "Preconfigured network key"),
),
)
# Set PAN ID
SetPanId = t.CommandDef(
t.CommandType.SREQ,
0x02,
req_schema=(t.Param("PanId", t.PanId, "The PAN Id to set"),),
rsp_schema=t.STATUS_SCHEMA,
)
# store a channel select bit-mask into Non-Volatile memory to be used the next
# time the target device resets
SetChannels = t.CommandDef(
t.CommandType.SREQ,
0x03,
req_schema=(
t.Param(
"Channels", t.Channels, "Channels to scan when starting the device"
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# store a security level value into Non-Volatile memory to be used the next time
# the target device reset
SetSecurityLevel = t.CommandDef(
t.CommandType.SREQ,
0x04,
req_schema=(
# ToDo: Make this an enum
t.Param(
"SecurityLevel",
t.uint8_t,
"Specifies the messaging network security level",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# store a pre-configured key array into Non-Volatile memory to be used the next
# time the target device resets
SetPreConfigKey = t.CommandDef(
t.CommandType.SREQ,
0x05,
req_schema=(t.Param("PreConfigKey", t.KeyData, "Preconfigured network key"),),
rsp_schema=t.STATUS_SCHEMA,
)
    # subscribes/unsubscribes to layer callbacks. For particular subsystem callbacks
    # to work, the software must be compiled with a special flag that is unique to
    # that subsystem to enable the callback mechanism. For example, to enable ZDO
    # callbacks, the MT_ZDO_CB_FUNC flag must be defined when the software is built
CallbackSubCmd = t.CommandDef(
t.CommandType.SREQ,
0x06,
req_schema=(
t.Param(
"SubsystemId",
t.CallbackSubsystem,
"Subsystem id to subscribe/unsubscribe",
),
t.Param("Action", t.Bool, "True -- enable, False -- Disable"),
),
rsp_schema=t.STATUS_SCHEMA,
)
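    # Hedged sketch of enabling all ZDO callbacks through this command; the
    # `znp.request()` helper and the `t.CallbackSubsystem.ZDO` member are
    # assumptions about the surrounding library:
    #     await znp.request(
    #         UTIL.CallbackSubCmd.Req(
    #             SubsystemId=t.CallbackSubsystem.ZDO, Action=t.Bool(True)
    #         )
    #     )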
# Send a key event to the device registered application
KeyEvent = t.CommandDef(
t.CommandType.SREQ,
0x07,
req_schema=(
t.Param("Keys", t.uint8_t, "Key code bitmask"),
t.Param("Shift", t.Bool, "True -- shift, False -- no shift"),
),
rsp_schema=t.STATUS_SCHEMA,
)
    # get the board's uptime
TimeAlive = t.CommandDef(
t.CommandType.SREQ,
0x09,
req_schema=(),
rsp_schema=(
t.Param("Seconds", t.uint32_t, "The time of the board's uptime in seconds"),
),
)
# control the LEDs on the board
LEDControl = t.CommandDef(
t.CommandType.SREQ,
0x0A,
req_schema=(
t.Param("LED", t.uint8_t, "The LED number. 0xFF for all."),
t.Param("Mode", LEDMode, "LED mode. ON/OFF are static."),
),
rsp_schema=t.STATUS_SCHEMA,
)
# test data buffer loopback
Loopback = t.CommandDef(
t.CommandType.SREQ,
0x10,
req_schema=(t.Param("Data", t.Bytes, "The data bytes to loop back"),),
rsp_schema=(t.Param("Data", t.Bytes, "The looped back data"),),
)
# effect a MAC MLME Poll Request
DataReq = t.CommandDef(
t.CommandType.SREQ,
0x11,
req_schema=(
t.Param(
"SecurityUse",
t.Bool,
"True -- to request MAC security, bun not used for now",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# enable AUTOPEND and source address matching
SrcMatchEnable = t.CommandDef(
t.CommandType.SREQ, 0x20, req_schema=(), rsp_schema=t.STATUS_SCHEMA
)
    # add a short or extended address to the source address table
SrcMatchAddEntry = t.CommandDef(
t.CommandType.SREQ,
0x21,
req_schema=(
t.Param("AddrModeAddress", t.AddrModeAddress, "Address mode and address"),
t.Param(
"PanId",
t.PanId,
"PAN Id of the device. Only use with a short address",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
    # delete a short or extended address from the source address table
SrcMatchDelEntry = t.CommandDef(
t.CommandType.SREQ,
0x22,
req_schema=(
t.Param("AddrModeAddress", t.AddrModeAddress, "Address mode and address"),
t.Param(
"PanId",
t.PanId,
"PAN Id of the device. Only use with a short address",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# check if a short or extended address is in the source address table
SrcMatchCheckSrcAddr = t.CommandDef(
t.CommandType.SREQ,
0x23,
req_schema=(
t.Param("AddrModeAddress", t.AddrModeAddress, "Address mode and address"),
t.Param(
"PanId",
t.PanId,
"PAN Id of the device. Only use with a short address",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# enable/disable acknowledging all packets with pending bit set
SrcMatchAckAllPending = t.CommandDef(
t.CommandType.SREQ,
0x24,
req_schema=(
t.Param(
"Enabled",
t.Bool,
(
"True - acknowledging all packets with pending field set, "
"False - Otherwise"
),
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# check if acknowledging all packets with pending bit set is enabled
SrcMatchCheckAllPending = t.CommandDef(
t.CommandType.SREQ,
0x25,
req_schema=(),
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param(
"Enabled",
t.Bool,
(
"True - acknowledging all packets with pending field set, "
"False - Otherwise"
),
),
),
)
# proxy call to the AddrMgrEntryLookupExt() function
AddrMgrExtAddrLookup = t.CommandDef(
t.CommandType.SREQ,
0x40,
req_schema=(
t.Param(
"IEEE", t.EUI64, "Extended address of the device to lookup the NWK"
),
),
rsp_schema=(t.Param("NWK", t.NWK, "NWK address of the device"),),
)
# a proxy call to the AddrMgrEntryLookupNwk() function
AddrMgwNwkAddrLookUp = t.CommandDef(
t.CommandType.SREQ,
0x41,
req_schema=(
t.Param("NWK", t.NWK, "Short address of the device to lookup IEEE"),
),
rsp_schema=(t.Param("IEEE", t.EUI64, "Extended address of the device"),),
)
# retrieve APS link key data, Tx and Rx frame counters
APSMELinkKeyDataGet = t.CommandDef(
t.CommandType.SREQ,
0x44,
req_schema=(
t.Param("IEEE", t.EUI64, "Extended address of the device to get link data"),
),
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("SecKey", t.KeyData, "Security Key"),
t.Param("TxFrmCntr", t.uint32_t, "On success, the TX frame counter"),
t.Param("RxFrmCntr", t.uint32_t, "On success, the RX frame counter"),
),
)
# a proxy call to the APSME_LinkKeyNvIdGet() function
APSMELinkKeyNvIdGet = t.CommandDef(
t.CommandType.SREQ,
0x45,
req_schema=(
t.Param("IEEE", t.EUI64, "Extended address of the device to get link data"),
),
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param(
"LinkKeyNvId",
t.uint16_t,
"On success, link key NV ID, otherwise 0xFFFF",
),
),
)
# a proxy call to the AssocCount() function
AssocCount = t.CommandDef(
t.CommandType.SREQ,
0x48,
req_schema=(
t.Param(
"StartRelation", NodeRelation, "A valid node relation from AssocList.h"
),
t.Param(
"EndRelation",
NodeRelation,
"Same as StartRelation, but the node relation to stop counting",
),
),
rsp_schema=(
t.Param("Count", t.uint16_t, "The count returned by the proxy call"),
),
)
# a proxy call to the AssocFindDevice() function
AssocFindDevice = t.CommandDef(
t.CommandType.SREQ,
0x49,
req_schema=(
t.Param("Index", t.uint8_t, "Nth active entry in the device list"),
),
# XXX: The struct is not packed when sent: `write(&struct, sizeof(struct))`
rsp_schema=(t.Param("Device", t.Bytes, "associated_devices_t structure"),),
)
# a proxy call to the AssocGetWithAddress() function
AssocGetWithAddress = t.CommandDef(
t.CommandType.SREQ,
0x4A,
req_schema=(
t.Param(
"IEEE",
t.EUI64,
(
"Extended address for the lookup or all zeroes to use the NWK "
"addr for the lookup"
),
),
t.Param(
"NWK", t.NWK, "NWK address to use for lookup if IEEE is all zeroes"
),
),
rsp_schema=(t.Param("Device", Device, "associated_devices_t structure"),),
)
# send a request key to the Trust Center from an originator device who wants to
# exchange messages with a partner device
APSMERequestKeyCmd = t.CommandDef(
t.CommandType.SREQ,
0x4B,
req_schema=(
t.Param(
"IEEE",
t.EUI64,
(
"Specifies the extended address of the partner device the "
"originator wants to exchange messages with"
),
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# a proxy call to the bindAddEntry() function
BindAddEntry = t.CommandDef(
t.CommandType.SREQ,
0x4D,
req_schema=(
t.Param(
"DstAddrModeAddr",
t.AddrModeAddress,
"Address mode address of the partner",
),
t.Param("DstEndpoint", t.uint8_t, "Binding entry destination endpoint"),
t.Param("ClusterIdList", t.ClusterIdList, "List of the cluster IDs"),
),
rsp_schema=(
t.Param(
"BindEntry",
BindEntry,
(
"Bind Entry. The dstIdx in the BindEntry is set to "
"INVALID_NODE_ADDR to indicate failure"
),
),
),
)
# Z2M firmware: proxy call to AssocRemove
AssocRemove = t.CommandDef(
t.CommandType.SREQ,
0x63,
req_schema=(
t.Param("IEEE", t.EUI64, "Extended address of the device to remove"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# Z2M firmware: proxy call to AssocAddNew
AssocAdd = t.CommandDef(
t.CommandType.SREQ,
0x64,
req_schema=(
t.Param("NWK", t.NWK, "Short address of the device"),
t.Param("IEEE", t.EUI64, "Extended address of the device to add"),
t.Param(
"NodeRelation",
NodeRelation,
"Relation of the device to the coordinator",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# a proxy call to zclGeneral_KeyEstablish_InitiateKeyEstablishment()
ZCLKeyEstInitEst = t.CommandDef(
t.CommandType.SREQ,
0x80,
req_schema=(
t.Param("TaskId", t.uint8_t, "The OSAL Task Id making the request"),
t.Param("SeqNum", t.uint8_t, "The sequence number of the request"),
t.Param("EndPoint", t.uint8_t, "The endpoint of the partner"),
t.Param(
"AddrModeAddr",
t.AddrModeAddress,
"Address mode address of the partner",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# a proxy call to zclGeneral_KeyEstablishment_ECDSASign()
ZCLKeyEstSign = t.CommandDef(
t.CommandType.SREQ,
0x81,
req_schema=(t.Param("Input", t.ShortBytes, "The input data"),),
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("Key", Key, "The output key on success"),
),
)
    # generate secure random numbers: 100 bytes are returned per SRSP until
    # 1,000,000 bits in total have been generated, i.e. 1,000,000 / (100 * 8) = 1250
    # SRSPs. MT_SRNG has to be defined to include this API
SRngGen = t.CommandDef(
t.CommandType.SREQ,
0x4C,
req_schema=(),
rsp_schema=(
t.Param("RandomNumbers", RandomNumbers, "Secure random numbers list"),
),
)
# UTIL Callbacks
# asynchronous request/response handshake
# XXX: This command's request is completely identical to its response.
# SyncReq = t.CommandDef(t.CommandType.AREQ, 0xE0)
# RPC proxy indication for a ZCL_KEY_ESTABLISH_IND
ZCLKeyEstInd = t.CommandDef(
t.CommandType.AREQ,
0xE1,
rsp_schema=(
t.Param(
"TaskId",
t.uint8_t,
"The OSAL Task id registered to receive the indication",
),
t.Param("Event", t.uint8_t, "The OSAL message event"),
t.Param("Status", t.Status, "The OSAL message status"),
t.Param("WaitTime", t.uint8_t, "The wait time"),
t.Param("Suite", t.uint16_t, "The key establishment suite"),
),
) | zigpy-znp | /zigpy-znp-0.11.4.tar.gz/zigpy-znp-0.11.4/zigpy_znp/commands/util.py | util.py |
import zigpy_znp.types as t
# Size of internal flash less 4 pages for boot loader,
# 6 pages for NV, & 1 page for lock bits.
IMAGE_SIZE = 0x40000 - 0x2000 - 0x3000 - 0x0800
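# i.e. 0x3A800 == 239,616 bytes (234 KiB) of flash left for the image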
IMAGE_CRC_OFFSET = 0x90
FLASH_WORD_SIZE = 4
class BootloaderStatus(t.enum_uint8):
SUCCESS = 0
FAILURE = 1
INVALID_FCS = 2
INVALID_FILE = 3
FILESYSTEM_ERROR = 4
ALREADY_STARTED = 5
    NO_RESPONSE = 6
VALIDATE_FAILED = 7
CANCELED = 8
class BootloaderDeviceType(t.enum_uint8):
CC2538 = 1
CC2530 = 2
class BootloaderRunMode(t.enum_uint8):
# Read the code, not the spec
FORCE_BOOT = 0x10
FORCE_RUN = FORCE_BOOT ^ 0xFF
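# FORCE_RUN is the bitwise complement of FORCE_BOOT within one byte:
# 0x10 ^ 0xFF == 0xEF
assert BootloaderRunMode.FORCE_RUN == 0xEF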
class UBL(t.CommandsBase, subsystem=t.Subsystem.UBL_FUNC):
WriteReq = t.CommandDef(
t.CommandType.AREQ,
0x01,
        req_schema=(
            t.Param("FlashWordAddr", t.uint16_t, "Write address, in flash words"),
            t.Param(
                "Data", t.TrailingBytes, "HandshakeRsp.BufferSize bytes of data"
            ),
        ),
)
WriteRsp = t.CommandDef(
t.CommandType.AREQ,
0x81,
        rsp_schema=(t.Param("Status", BootloaderStatus, "Write status"),),
)
ReadReq = t.CommandDef(
t.CommandType.AREQ,
0x02,
req_schema=(
t.Param("FlashWordAddr", t.uint16_t, "Read address, in flash words"),
),
)
ReadRsp = t.CommandDef(
t.CommandType.AREQ,
0x82,
rsp_schema=(
t.Param("Status", BootloaderStatus, "Read status"),
# These are missing if the request is bad
t.Param(
"FlashWordAddr",
t.uint16_t,
"Address read from, in flash words",
optional=True,
),
t.Param(
"Data",
t.TrailingBytes,
"HandshakeRsp.BufferSize bytes of data",
optional=True,
),
),
)
EnableReq = t.CommandDef(t.CommandType.AREQ, 0x03, req_schema=())
EnableRsp = t.CommandDef(
t.CommandType.AREQ,
0x83,
rsp_schema=(t.Param("Status", BootloaderStatus, "Enable status"),),
)
HandshakeReq = t.CommandDef(t.CommandType.AREQ, 0x04, req_schema=())
HandshakeRsp = t.CommandDef(
t.CommandType.AREQ,
0x84,
rsp_schema=(
t.Param("Status", BootloaderStatus, "Handshake status"),
t.Param("BootloaderRevision", t.uint32_t, "Bootloader revision"),
t.Param("DeviceType", BootloaderDeviceType, "Device type"),
t.Param("BufferSize", t.uint32_t, "Read/write buffer size"),
t.Param("PageSize", t.uint32_t, "Device page size"),
t.Param("BootloaderCodeRevision", t.uint32_t, "Bootloader code revision"),
),
) | zigpy-znp | /zigpy-znp-0.11.4.tar.gz/zigpy-znp-0.11.4/zigpy_znp/commands/ubl.py | ubl.py |
from __future__ import annotations
import zigpy.zdo.types
import zigpy_znp.types as t
class SecurityEntry(t.FixedList, item_type=t.uint8_t, length=5):
pass
class StartupState(t.enum_uint8):
RestoredNetworkState = 0x00
NewNetworkState = 0x01
NotStarted = 0x02
class RouteDiscoveryOptions(t.enum_flag_uint8):
UNICAST = 0x00
MTO_WITH_ROUTE_CACHE = 0x01
MTO_WITHOUT_ROUTE_CACHE = 0x03
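    # Note: MTO_WITHOUT_ROUTE_CACHE == MTO_WITH_ROUTE_CACHE | 0x02, mirroring
    # RouteOptions.MTO_ROUTE | RouteOptions.NO_ROUTE_CACHE defined below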
class RouteStatus(t.enum_uint8):
INIT = 0
ACTIVE = 1
DISC = 2
LINK_FAIL = 3
REPAIR = 4
class RouteOptions(t.enum_flag_uint8):
# Used in option of NLME_RouteDiscoveryRequest() and rtgTable[]
MTO_ROUTE = 0x01
# Used in option of NLME_RouteDiscoveryRequest() and rtgTable[]
NO_ROUTE_CACHE = 0x02
# Used in option of rtgTable[]
RTG_RECORD = 0x04
# Sender has route cache. Used in option of rtgTable[]
MTO_ROUTE_RC = 0x08
# Sender doesn't have route cache. Used in option of rtgTable[]
MTO_ROUTE_NRC = 0x10
# Used in option of route request command frame
DEST_IEEE_ADDR = 0x20
# Used in all three places
MULTICAST_ROUTE = 0x40
class RoutingStatus(t.enum_uint8):
SUCCESS = 0
FAIL = 1
TBL_FULL = 2
HIGHER_COST = 3
NO_ENTRY = 4
INVALID_PATH = 5
INVALID_PARAM = 6
SRC_TBL_FULL = 7
class MACCapabilities(t.enum_flag_uint8):
PANCoordinator = 1 << 0
Router = 1 << 1
MainsPowered = 1 << 2
RXWhenIdle = 1 << 3
Reserved5 = 1 << 4
Reserved6 = 1 << 5
SecurityCapable = 1 << 6
AllocateShortAddrDuringAssocNeeded = 1 << 7
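    # e.g. a typical always-on Zigbee router advertises capabilities 0x8E ==
    # Router | MainsPowered | RXWhenIdle | AllocateShortAddrDuringAssocNeeded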
class LeaveOptions(t.enum_flag_uint8):
NONE = 0
Rejoin = 1 << 0
RemoveChildren = 1 << 1
class NetworkList(t.LVList, item_type=t.Network, length_type=t.uint8_t):
pass
class EndpointList(t.LVList, item_type=t.uint8_t, length_type=t.uint8_t):
pass
class GroupIdList(t.LVList, item_type=t.GroupId, length_type=t.uint8_t):
pass
class BindEntryList(t.LVList, item_type=zigpy.zdo.types.Binding, length_type=t.uint8_t):
pass
class BeaconList(t.LVList, item_type=t.Beacon, length_type=t.uint8_t):
pass
class EnergyValues(t.LVList, item_type=t.uint8_t, length_type=t.uint8_t):
pass
class ChildInfoList(t.LVList, item_type=t.EUI64, length_type=t.uint8_t):
pass
class NWKArray(t.CompleteList, item_type=t.NWK):
pass
class NullableNodeDescriptor(zigpy.zdo.types.NodeDescriptor):
@classmethod
def deserialize(cls, data: bytes) -> tuple[NullableNodeDescriptor, bytes]:
if data == b"\x00":
return cls(), b""
return super().deserialize(data)
def serialize(self) -> bytes:
# Special case when the node descriptor is completely empty
if not self.assigned_fields():
return b"\x00"
return super().serialize()
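    # Round-trip illustration of the empty-descriptor special case above:
    #     NullableNodeDescriptor().serialize()        -> b"\x00"
    #     NullableNodeDescriptor.deserialize(b"\x00") -> (NullableNodeDescriptor(), b"")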
class AddrRequestType(t.enum_uint8):
SINGLE = 0x00
EXTENDED = 0x01
class ZDO(t.CommandsBase, subsystem=t.Subsystem.ZDO):
# send a "Network Address Request". This message sends a broadcast message looking
# for a 16 bit address with a known 64 bit IEEE address. You must subscribe to
# "ZDO Network Address Response" to receive the response to this message
NwkAddrReq = t.CommandDef(
t.CommandType.SREQ,
0x00,
req_schema=(
t.Param(
"IEEE",
t.EUI64,
"Extended address of the device requesting association",
),
t.Param(
"RequestType",
AddrRequestType,
"0x00 -- single device request, 0x01 -- Extended",
),
t.Param(
"StartIndex", t.uint8_t, "Starting index into the list of children"
),
),
rsp_schema=t.STATUS_SCHEMA,
)
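    # Hedged usage sketch: the SREQ above only returns a status, and the
    # addresses arrive asynchronously in a NwkAddrRsp AREQ. The `znp` transport
    # helpers are assumptions about the surrounding library, not defined here:
    #     await znp.request(
    #         ZDO.NwkAddrReq.Req(
    #             IEEE=ieee, RequestType=AddrRequestType.SINGLE, StartIndex=0
    #         )
    #     )
    #     rsp = await znp.wait_for_response(ZDO.NwkAddrRsp.Callback(partial=True))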
# request a device's IEEE 64-bit address
IEEEAddrReq = t.CommandDef(
t.CommandType.SREQ,
0x01,
req_schema=(
t.Param("NWK", t.NWK, "Short address of the device"),
t.Param(
"RequestType",
AddrRequestType,
"0x00 -- single device request, 0x01 -- Extended",
),
t.Param(
"StartIndex", t.uint8_t, "Starting index into the list of children"
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# inquire about the Node Descriptor information of the destination device
NodeDescReq = t.CommandDef(
t.CommandType.SREQ,
0x02,
req_schema=(
t.Param(
"DstAddr",
t.NWK,
"Short address of the device generating the inquiry",
),
t.Param(
"NWKAddrOfInterest",
t.NWK,
"Short address of the device being queried",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# inquire about the Power Descriptor information of the destination device
PowerDescReq = t.CommandDef(
t.CommandType.SREQ,
0x03,
req_schema=(
t.Param(
"DstAddr",
t.NWK,
"Short address of the device generating the inquiry",
),
t.Param(
"NWKAddrOfInterest",
t.NWK,
"Short address of the device being queried",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# inquire as to the Simple Descriptor of the destination device's Endpoint
SimpleDescReq = t.CommandDef(
t.CommandType.SREQ,
0x04,
req_schema=(
t.Param(
"DstAddr",
t.NWK,
"Short address of the device generating the inquiry",
),
t.Param(
"NWKAddrOfInterest",
t.NWK,
"Short address of the device being queried",
),
t.Param("Endpoint", t.uint8_t, "application endpoint the data is from"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request a list of active endpoint from the destination device
ActiveEpReq = t.CommandDef(
t.CommandType.SREQ,
0x05,
req_schema=(
t.Param(
"DstAddr",
t.NWK,
"Short address of the device generating the inquiry",
),
t.Param(
"NWKAddrOfInterest",
t.NWK,
"Short address of the device being queried",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request the device match descriptor
MatchDescReq = t.CommandDef(
t.CommandType.SREQ,
0x06,
req_schema=(
t.Param(
"DstAddr",
t.NWK,
"Short address of the device generating the inquiry",
),
t.Param(
"NWKAddrOfInterest",
t.NWK,
"Short address of the device being queried",
),
t.Param("ProfileId", t.uint16_t, "profile id of the device"),
t.Param("InputClusters", t.ClusterIdList, "Input cluster id list"),
t.Param("OutputClusters", t.ClusterIdList, "Output cluster id list"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request for the destination device's complex descriptor
ComplexDescReq = t.CommandDef(
t.CommandType.SREQ,
0x07,
req_schema=(
t.Param(
"DstAddr",
t.NWK,
"Short address of the device generating the inquiry",
),
t.Param(
"NWKAddrOfInterest",
t.NWK,
"Short address of the device being queried",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request for the destination device's user descriptor
UserDescReq = t.CommandDef(
t.CommandType.SREQ,
0x08,
req_schema=(
t.Param(
"DstAddr",
t.NWK,
"Short address of the device generating the inquiry",
),
t.Param(
"NWKAddrOfInterest",
t.NWK,
"Short address of the device being queried",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# This command will cause the CC2480 device to issue an "End device announce"
# broadcast packet to the network. This is typically used by an end-device to
# announce itself to the network
EndDeviceAnnce = t.CommandDef(
t.CommandType.SREQ,
0x0A,
req_schema=(
t.Param("NWK", t.NWK, "Short address of the device"),
t.Param(
"IEEE",
t.EUI64,
"Extended address of the device generating the request",
),
t.Param("Capabilities", t.uint8_t, "MAC Capabilities"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# write a User Descriptor value to the targeted device
UserDescSet = t.CommandDef(
t.CommandType.SREQ,
0x0B,
req_schema=(
t.Param(
"DstAddr",
t.NWK,
"network address of the device generating the set request",
),
t.Param(
"NWK", t.NWK, "NWK address of the destination device being queried"
),
t.Param("UserDescriptor", t.ShortBytes, "User descriptor array"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# discover the location of a particular system server or servers as indicated by
# the ServerMask parameter. The destination addressing on this request is
# 'broadcast to all RxOnWhenIdle devices'
ServerDiscReq = t.CommandDef(
t.CommandType.SREQ,
0x0C,
req_schema=(
t.Param(
"ServerMask", t.uint16_t, "system server capabilities of the device"
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request an End Device Bind with the destination device
EndDeviceBindReq = t.CommandDef(
t.CommandType.SREQ,
0x20,
req_schema=(
t.Param(
"DstAddr",
t.NWK,
"Short address of the device generating the request",
),
t.Param(
"LocalCoordinator",
t.NWK,
(
"local coordinator's short address. In the case of source "
"binding, it's the short address of the source address"
),
),
t.Param("IEEE", t.EUI64, "Local coordinator's IEEE address"),
t.Param("Endpoint", t.uint8_t, "device's endpoint"),
t.Param("ProfileId", t.uint16_t, "profile id of the device"),
t.Param("InputClusters", t.ClusterIdList, "Input cluster id list"),
t.Param("OutputClusters", t.ClusterIdList, "Output cluster id list"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request a Bind
BindReq = t.CommandDef(
t.CommandType.SREQ,
0x21,
req_schema=(
t.Param("Dst", t.NWK, "Short address of the destination device"),
t.Param("Src", t.EUI64, "Binding source IEEE address"),
t.Param("SrcEndpoint", t.uint8_t, "binding source endpoint"),
t.Param("ClusterId", t.ClusterId, "Cluster id to match in messages"),
t.Param(
"Address", zigpy.zdo.types.MultiAddress, "Binding address/endpoint"
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request a UnBind
UnBindReq = t.CommandDef(
t.CommandType.SREQ,
0x22,
req_schema=(
t.Param("Dst", t.NWK, "Short address of the destination device"),
t.Param("Src", t.EUI64, "Binding source IEEE address"),
t.Param("SrcEndpoint", t.uint8_t, "binding source endpoint"),
t.Param("ClusterId", t.ClusterId, "Cluster id to match in messages"),
t.Param(
"Address", zigpy.zdo.types.MultiAddress, "Unbinding address/endpoint"
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request the destination device to perform a network discovery
MgmtNwkDiscReq = t.CommandDef(
t.CommandType.SREQ,
0x30,
req_schema=(
t.Param("Dst", t.NWK, "Short address of the destination device"),
t.Param("Channels", t.Channels, "Bitmask of channels to scan"),
t.Param("ScanDuration", t.uint8_t, "Scanning time"),
t.Param(
"StartIndex",
t.uint8_t,
"Specifies where to start in the response array",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request the destination device to perform a LQI query of other devices
# in the network
MgmtLqiReq = t.CommandDef(
t.CommandType.SREQ,
0x31,
req_schema=(
t.Param("Dst", t.NWK, "Short address of the destination device"),
t.Param(
"StartIndex",
t.uint8_t,
"Specifies where to start in the response array",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request the Routing Table of the destination device
MgmtRtgReq = t.CommandDef(
t.CommandType.SREQ,
0x32,
req_schema=(
t.Param("Dst", t.NWK, "Short address of the destination device"),
t.Param(
"StartIndex",
t.uint8_t,
"Specifies where to start in the response array",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request the Binding Table of the destination device
MgmtBindReq = t.CommandDef(
t.CommandType.SREQ,
0x33,
req_schema=(
t.Param("Dst", t.NWK, "Short address of the destination device"),
t.Param(
"StartIndex",
t.uint8_t,
"Specifies where to start in the response array",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request a Management Leave Request for the target device
MgmtLeaveReq = t.CommandDef(
t.CommandType.SREQ,
0x34,
req_schema=(
t.Param(
"DstAddr",
t.NWK,
"Short address of the device that will process the "
"mgmt leave (remote or self)",
),
t.Param(
"IEEE",
t.EUI64,
(
"The 64-bit IEEE address of the entity to be removed from the "
"network or 0x0000000000000000 if the device removes itself "
"from the network."
),
),
t.Param(
"RemoveChildren_Rejoin",
LeaveOptions,
"Specifies actions to be performed by "
"device when leaving the network.",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request the Management Direct Join Request of a designated device
MgmtDirectJoinReq = t.CommandDef(
t.CommandType.SREQ,
0x35,
req_schema=(
t.Param("Dst", t.NWK, "Short address of the device to join"),
t.Param("IEEE", t.EUI64, "IEEE address of the device to join"),
t.Param("Capabilities", t.uint8_t, "MAC Capabilities"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# set the Permit Join for the destination device
MgmtPermitJoinReq = t.CommandDef(
t.CommandType.SREQ,
0x36,
req_schema=(
t.Param("AddrMode", t.AddrMode, "Address mode of DST: short or broadcast"),
t.Param("Dst", t.NWK, "Short address of the device to join"),
t.Param("Duration", t.uint8_t, "Specifies the duration to permit joining"),
t.Param(
"TCSignificance",
t.uint8_t,
"Trust Center Significance -- unused in the code!",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# allow updating of network configuration parameters or to request information
# from devices on network conditions in the local operating environment
MgmtNWKUpdateReq = t.CommandDef(
t.CommandType.SREQ,
0x37,
req_schema=(
t.Param("Dst", t.NWK, "Short address of the destination device"),
t.Param("DstAddrMode", t.AddrMode, "Destination Address mode"),
t.Param("Channels", t.Channels, "Bitmask of channels to scan"),
t.Param(
"ScanDuration",
t.uint8_t,
" - 0x00-0x05: Scanning time\n"
" - 0xFE: Command to switch channels\n"
" - 0xFF: Set a new channel mask and NWK manager addr",
),
t.Param(
"ScanCount",
t.uint8_t,
"The number of energy scans to be conducted and reported",
),
t.Param(
"NwkManagerAddr",
t.NWK,
"NWK address for the device with the Network Manager bit set",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# register for a ZDO callback
MsgCallbackRegister = t.CommandDef(
t.CommandType.SREQ,
0x3E,
req_schema=(
t.Param(
"ClusterId",
t.ClusterId,
"Cluster id for which to receive ZDO callback",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# de-register for a ZDO callback
MsgCallbackRemove = t.CommandDef(
t.CommandType.SREQ,
0x3F,
req_schema=(
t.Param(
"ClusterId",
t.ClusterId,
"Cluster id for which to receive ZDO callback",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# starts the device in the network
# XXX: In Z-Stack 3, this actually just calls `bdb_StartCommissioning()` and returns
# ZSuccess. It just happens that ZSuccess == StartupState.RestoredNetworkState == 0
StartupFromApp = t.CommandDef(
t.CommandType.SREQ,
0x40,
req_schema=(t.Param("StartDelay", t.uint16_t, "Startup delay"),),
rsp_schema=(t.Param("State", StartupState, "State after startup"),),
)
# Extended version of ZDO to indicate to router devices to create
# a distributed network
StartupFromAppExt = t.CommandDef(
t.CommandType.SREQ,
0x54,
req_schema=(
t.Param("StartDelay", t.uint16_t, "Startup delay"),
t.Param(
"Mode", t.Bool, "True -- ZR devices to create a distributed network"
),
),
rsp_schema=(t.Param("State", StartupState, "State after startup"),),
)
# set the application link key for a given device
SetLinkKey = t.CommandDef(
t.CommandType.SREQ,
0x23,
req_schema=(
t.Param("NWK", t.NWK, "Short address of the device"),
t.Param("IEEE", t.EUI64, "Extended address of the device"),
t.Param("LinkKeyData", t.KeyData, "128bit link key"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# remove the application link key for a given device
RemoveLinkKey = t.CommandDef(
t.CommandType.SREQ,
0x24,
req_schema=(t.Param("IEEE", t.EUI64, "Extended address of the device"),),
rsp_schema=t.STATUS_SCHEMA,
)
# get the application link key for a given device
GetLinkKey = t.CommandDef(
t.CommandType.SREQ,
0x25,
req_schema=(t.Param("IEEE", t.EUI64, "Extended address of the device"),),
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("IEEE", t.EUI64, "Extended address of the device"),
t.Param("LinkKeyData", t.KeyData, "128bit link key"),
),
)
# initiate a network discovery (active scan)
NetworkDiscoveryReq = t.CommandDef(
t.CommandType.SREQ,
0x26,
req_schema=(
t.Param("Channels", t.Channels, "Bitmask of channels to scan"),
t.Param("ScanDuration", t.uint8_t, "Scanning time"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request the device to join itself to a parent device on a network
JoinReq = t.CommandDef(
t.CommandType.SREQ,
0x27,
req_schema=(
t.Param("LogicalChannel", t.uint8_t, "Channel where the PAN is located"),
t.Param("PanId", t.PanId, "The PAN Id to join."),
t.Param("ExtendedPanId", t.ExtendedPanId, "64-bit extended PAN ID"),
t.Param(
"ChosenParent",
t.NWK,
"Short address of the parent device chosen to join",
),
t.Param("Depth", t.uint8_t, "Depth of the parent"),
t.Param("StackProfile", t.uint8_t, "Stack profile of the network to use"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# XXX: Undocumented
SendData = t.CommandDef(
t.CommandType.SREQ,
0x28,
req_schema=(
t.Param("Dst", t.NWK, "Short address of the destination"),
t.Param("TSN", t.uint8_t, "Transaction sequence number"),
t.Param("CommandId", t.uint16_t, "ZDO Command ID"),
t.Param(
"Data",
t.Bytes,
"Data to send",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# handles the ZDO security add link key extension message
SecAddLinkKey = t.CommandDef(
t.CommandType.SREQ,
0x42,
req_schema=(
t.Param("NWK", t.NWK, "Short address of the device"),
t.Param("IEEE", t.EUI64, "Extended address of the device"),
t.Param("LinkKeyData", t.KeyData, "128bit link key"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# handle the ZDO security entry lookup extended extension message
SecEntryLookupExt = t.CommandDef(
t.CommandType.SREQ,
0x43,
req_schema=(
t.Param("IEEE", t.EUI64, "Extended address of the device"),
t.Param("Entry", SecurityEntry, "Valid entry"),
),
rsp_schema=(
t.Param("AMI", t.uint16_t, "Address manager index"),
t.Param("KeyNVID", t.uint16_t, "Index to link key table in NV"),
t.Param("Option", t.uint8_t, "Authentication option for device"),
),
)
# handle the ZDO security remove device extended extension message
SecDeviceRemove = t.CommandDef(
t.CommandType.SREQ,
0x44,
req_schema=(t.Param("IEEE", t.EUI64, "Extended address of the device"),),
rsp_schema=t.STATUS_SCHEMA,
)
# handle the ZDO route discovery extension message
ExtRouteDisc = t.CommandDef(
t.CommandType.SREQ,
0x45,
req_schema=(
t.Param("Dst", t.NWK, "Short address of the destination"),
t.Param("Options", RouteDiscoveryOptions, "Route options"),
t.Param("Radius", t.uint8_t, "Broadcast radius"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# handle the ZDO route check extension messages
ExtRouteChk = t.CommandDef(
t.CommandType.SREQ,
0x46,
req_schema=(
t.Param("Dst", t.NWK, "Short address of the destination"),
t.Param("RtStatus", RouteStatus, "Status value for routing entries"),
t.Param("Options", RouteOptions, "Route options"),
),
rsp_schema=(t.Param("Status", RoutingStatus, "Route status"),),
)
# handle the ZDO extended remove group extension message
ExtRemoveGroup = t.CommandDef(
t.CommandType.SREQ,
0x47,
req_schema=(
t.Param("Endpoint", t.uint8_t, "Endpoint to look for"),
t.Param("GroupId", t.GroupId, "ID to look for group"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# handle the ZDO extended remove all group extension message
ExtRemoveAllGroups = t.CommandDef(
t.CommandType.SREQ,
0x48,
req_schema=(t.Param("Endpoint", t.uint8_t, "Endpoint to look for"),),
rsp_schema=t.STATUS_SCHEMA,
)
# handle the ZDO extension find all groups for endpoint message
ExtFindAllGroupsEndpoint = t.CommandDef(
t.CommandType.SREQ,
0x49,
req_schema=(
t.Param("Endpoint", t.uint8_t, "Endpoint to look for"),
# this parameter does not make sense
t.Param("Groups", t.uint16_t, "List to hold group IDs"),
),
rsp_schema=(t.Param("Groups", GroupIdList, "List of Group IDs"),),
)
# handle the ZDO extension find group message
ExtFindGroup = t.CommandDef(
t.CommandType.SREQ,
0x4A,
req_schema=(
t.Param("Endpoint", t.uint8_t, "Endpoint to look for"),
t.Param("GroupId", t.GroupId, "ID to look for group"),
),
rsp_schema=(t.Param("Group", t.Bytes, "Group information"),),
)
# handle the ZDO extension add group message
ExtAddGroup = t.CommandDef(
t.CommandType.SREQ,
0x4B,
req_schema=(
t.Param("Endpoint", t.uint8_t, "Endpoint to look for"),
t.Param("GroupId", t.GroupId, "ID to look for group"),
t.Param("GroupName", t.CharacterString, "Group name"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# handle the ZDO extension count all groups message
ExtCountAllGroups = t.CommandDef(
t.CommandType.SREQ,
0x4C,
req_schema=(),
rsp_schema=(t.Param("GroupCount", t.uint8_t, "Total number of groups"),),
)
# handle the ZDO extension Get/Set RxOnIdle to ZMac message
ExtRxIdle = t.CommandDef(
t.CommandType.SREQ,
0x4D,
req_schema=(
t.Param("SetFlag", t.uint8_t, "Set or get value"),
t.Param("SetValue", t.uint8_t, "Value to be set to ZMac message"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# handle the ZDO security update network key extension message
ExtUpdateNwkKey = t.CommandDef(
t.CommandType.SREQ,
0x4E,
req_schema=(
t.Param("Dst", t.NWK, "Short address of the destination"),
t.Param("KeySeqNum", t.uint8_t, "Key sequence number"),
t.Param("Key", t.KeyData, "Network key"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# handle the ZDO security switch network key extension message
ExtSwitchNwkKey = t.CommandDef(
t.CommandType.SREQ,
0x4F,
req_schema=(
t.Param("Dst", t.NWK, "Short address of the destination"),
t.Param("KeySeqNum", t.uint8_t, "Key sequence number"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# handle the ZDO extension network message
ExtNwkInfo = t.CommandDef(
t.CommandType.SREQ,
0x50,
req_schema=(),
rsp_schema=(
t.Param("Dst", t.NWK, "Short address of the destination"),
t.Param("PanId", t.PanId, "The PAN Id to join."),
t.Param("ParentNWK", t.NWK, "Short address of the parent"),
t.Param("ExtendedPanId", t.ExtendedPanId, "64-bit extended PAN ID"),
t.Param("ParentIEEE", t.EUI64, "IEEE address of the parent"),
t.Param("Channel", t.Channels, "Current Channel"),
),
)
# handle the ZDO extension Security Manager APS Remove Request message
ExtSecApsRemoveReq = t.CommandDef(
t.CommandType.SREQ,
0x51,
req_schema=(
t.Param("NWK", t.NWK, "Short address of the device"),
t.Param("IEEE", t.EUI64, "IEEE address of the device"),
t.Param("ParentNWK", t.NWK, "Short address of the parent"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# forces a network concentrator change by resetting zgConcentratorEnable and
# zgConcentratorDiscoveryTime from NV and set nwk event
ForceConcentratorChange = t.CommandDef(
t.CommandType.SREQ, 0x52, req_schema=(), rsp_schema=()
)
# set parameters not settable through NV
ExtSetParams = t.CommandDef(
t.CommandType.SREQ,
0x53,
req_schema=(t.Param("UseMulticast", t.Bool, "Set or reset of multicast"),),
rsp_schema=t.STATUS_SCHEMA,
)
# handle ZDO network address of interest request
NwkAddrOfInterestReq = t.CommandDef(
t.CommandType.SREQ,
0x29,
req_schema=(
t.Param("NWK", t.NWK, "Short address of the destination"),
t.Param(
"NWKAddrOfInterest",
t.NWK,
"Short address of the device being queried",
),
t.Param(
"Cmd",
t.uint8_t,
"A valid Cluster ID command as specified by profile",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# ZDO Callbacks
# return the results to the NwkAddrReq
NwkAddrRsp = t.CommandDef(
t.CommandType.AREQ,
0x80,
rsp_schema=(
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param("IEEE", t.EUI64, "Extended address of the source device"),
t.Param("NWK", t.NWK, "Short address of the source device"),
t.Param("NumAssoc", t.uint8_t, "Number of associated devices"),
t.Param(
"Index",
t.uint8_t,
"Starting index into the list of associated devices",
),
t.Param("Devices", NWKArray, "List of the associated devices"),
),
)
# return the results to the IEEEAddrReq
IEEEAddrRsp = t.CommandDef(
t.CommandType.AREQ,
0x81,
rsp_schema=(
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param("IEEE", t.EUI64, "Extended address of the source device"),
t.Param("NWK", t.NWK, "Short address of the source device"),
t.Param("NumAssoc", t.uint8_t, "Number of associated devices"),
t.Param(
"Index",
t.uint8_t,
"Starting index into the list of associated devices",
),
t.Param("Devices", NWKArray, "List of the associated devices"),
),
)
# return the results to the NodeDescReq
NodeDescRsp = t.CommandDef(
t.CommandType.AREQ,
0x82,
rsp_schema=(
t.Param("Src", t.NWK, "The message's source network address."),
t.Param(
"Status",
t.ZDOStatus,
"This field indicates either SUCCESS or FAILURE.",
),
t.Param("NWK", t.NWK, "Device's short address of this Node descriptor"),
t.Param(
"NodeDescriptor",
NullableNodeDescriptor,
"Node descriptor",
optional=True,
),
),
)
# return the results to the PowerDescReq
PowerDescRsp = t.CommandDef(
t.CommandType.AREQ,
0x83,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param("NWK", t.NWK, "Short address of the device response describes"),
t.Param(
"PowerDescriptor",
zigpy.zdo.types.PowerDescriptor,
"Power descriptor response",
),
),
)
# return the results to the SimpleDescReq
SimpleDescRsp = t.CommandDef(
t.CommandType.AREQ,
0x84,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param("NWK", t.NWK, "Short address of the device response describes"),
t.Param(
"SimpleDescriptor",
zigpy.zdo.types.SizePrefixedSimpleDescriptor,
"Simple descriptor",
),
),
)
# return the results to the ActiveEpReq
ActiveEpRsp = t.CommandDef(
t.CommandType.AREQ,
0x85,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param("NWK", t.NWK, "Short address of the device response describes"),
t.Param("ActiveEndpoints", EndpointList, "Active endpoints list"),
),
)
# return the results to the MatchDescReq
MatchDescRsp = t.CommandDef(
t.CommandType.AREQ,
0x86,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param("NWK", t.NWK, "Short address of the device response describes"),
t.Param("MatchList", EndpointList, "Endpoints list"),
),
)
# return the results to the ComplexDescReq
ComplexDescRsp = t.CommandDef(
t.CommandType.AREQ,
0x87,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param("NWK", t.NWK, "Short address of the device response describes"),
t.Param("ComplexDesc", t.ShortBytes, "Complex descriptor"),
),
)
# return the results to the UserDescReq
UserDescRsp = t.CommandDef(
t.CommandType.AREQ,
0x88,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param("NWK", t.NWK, "Short address of the device response describes"),
t.Param("UserDesc", t.ShortBytes, "User descriptor"),
),
)
# notify the user when the device receives a user descriptor
UserDescCnf = t.CommandDef(
t.CommandType.AREQ,
0x89,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param("NWK", t.NWK, "Short address of the device response describes"),
),
)
# return the results to the ServerDiscReq
ServerDiscRsp = t.CommandDef(
t.CommandType.AREQ,
0x8A,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param("ServerMask", t.ZDOStatus, "Server mask response"),
),
)
ParentAnnceRsp = t.CommandDef(
t.CommandType.AREQ,
0x9F,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param("ChildInfo", ChildInfoList),
),
)
# return the results to the EndDeviceBindReq
EndDeviceBindRsp = t.CommandDef(
t.CommandType.AREQ,
0xA0,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
),
)
# return the results to the BindReq
BindRsp = t.CommandDef(
t.CommandType.AREQ,
0xA1,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
),
)
# return the results to the UnBindReq
UnBindRsp = t.CommandDef(
t.CommandType.AREQ,
0xA2,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
),
)
# return the results to the MgmtNwkDiscReq
MgmtNwkDiscRsp = t.CommandDef(
t.CommandType.AREQ,
0xB0,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param("NetworkCount", t.uint8_t, "Total number of entries available"),
t.Param("Index", t.uint8_t, "Where the response starts"),
t.Param("Networks", NetworkList, "Discovered networks list"),
),
)
# return the results to the MgmtLqiReq
MgmtLqiRsp = t.CommandDef(
t.CommandType.AREQ,
0xB1,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param("Neighbors", zigpy.zdo.types.Neighbors, "Neighbors"),
),
)
# return the results to the MgmtRtgReq
MgmtRtgRsp = t.CommandDef(
t.CommandType.AREQ,
0xB2,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param("Routes", zigpy.zdo.types.Routes, "Routes"),
),
)
    # return the results to the MgmtBindReq
MgmtBindRsp = t.CommandDef(
t.CommandType.AREQ,
0xB3,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param(
"BindTableEntries",
t.uint8_t,
"Total number of entries available on the device",
),
t.Param("StartIndex", t.uint8_t, "Index where the response starts"),
t.Param("BindTableList", BindEntryList, "list of BindEntries"),
),
)
# return the results to the MgmtLeaveReq
MgmtLeaveRsp = t.CommandDef(
t.CommandType.AREQ,
0xB4,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
),
)
# return the results to the MgmtDirectJoinReq
MgmtDirectJoinRsp = t.CommandDef(
t.CommandType.AREQ,
0xB5,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
),
)
# return the results to the MgmtPermitJoinReq
MgmtPermitJoinRsp = t.CommandDef(
t.CommandType.AREQ,
0xB6,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
),
)
    # return the results to the MgmtNWKUpdateReq
MgmtNWKUpdateNotify = t.CommandDef(
t.CommandType.AREQ,
0xB8,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param("Status", t.ZDOStatus, "Status"),
t.Param("ScannedChannels", t.Channels, "Scanned channels"),
t.Param("TotalTransmissions", t.uint16_t, "Total transmissions"),
t.Param("TransmissionFailures", t.uint16_t, "Transmission failures"),
t.Param(
"EnergyValues",
EnergyValues,
"The result of an energy measurement made on this channel",
),
),
)
# indicates ZDO state change
StateChangeInd = t.CommandDef(
t.CommandType.AREQ,
0xC0,
rsp_schema=(t.Param("State", t.DeviceState, "New ZDO state"),),
)
# indicates the ZDO End Device Announce
EndDeviceAnnceInd = t.CommandDef(
t.CommandType.AREQ,
0xC1,
rsp_schema=(
t.Param("Src", t.NWK, "Source address of the message."),
t.Param("NWK", t.NWK, "Specifies the device's short address"),
t.Param(
"IEEE",
t.EUI64,
"Extended address of the device generating the request",
),
t.Param("Capabilities", MACCapabilities, "MAC Capabilities"),
),
)
# indicates that Match Descriptor Response has been sent
MatchDescRspSent = t.CommandDef(
t.CommandType.AREQ,
0xC2,
rsp_schema=(
t.Param("NWK", t.NWK, "Device's network address"),
t.Param("InputClusters", t.ClusterIdList, "Input cluster id list"),
t.Param("OutputClusters", t.ClusterIdList, "Output cluster id list"),
),
)
# default message for error status
StatusErrorRsp = t.CommandDef(
t.CommandType.AREQ,
0xC3,
rsp_schema=(
t.Param("Src", t.NWK, "message's source network address"),
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
),
)
# indication to inform host device the receipt of a source route to a given device
SrcRtgInd = t.CommandDef(
t.CommandType.AREQ,
0xC4,
rsp_schema=(
t.Param(
"DstAddr",
t.NWK,
"Network address of the destination of the source route",
),
t.Param("Relays", t.NWKList, "List of relay devices"),
),
)
# indication to inform host device the receipt of a beacon notification
BeaconNotifyInd = t.CommandDef(
t.CommandType.AREQ,
0xC5,
rsp_schema=(t.Param("Beacons", BeaconList, "Beacons list"),),
)
# inform the host device of a ZDO join request result
JoinCnf = t.CommandDef(
t.CommandType.AREQ,
0xC6,
rsp_schema=(
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
t.Param("Nwk", t.NWK, "device's network address"),
t.Param("ParentNwk", t.NWK, "Parent's network address"),
),
)
# indication to inform host device the completion of network discovery scan
NwkDiscoveryCnf = t.CommandDef(
t.CommandType.AREQ,
0xC7,
rsp_schema=(
t.Param(
"Status", t.ZDOStatus, "Status is either Success (0) or Failure (1)"
),
),
)
    # undocumented concentrator indication
ConcentratorInd = t.CommandDef(
t.CommandType.AREQ,
0xC8,
rsp_schema=(
t.Param("NWK", t.NWK, "Short address"),
t.Param("IEEE", t.EUI64, "IEEE address"),
t.Param("PktCost", t.uint8_t, "Packet cost"),
),
)
# an indication to inform the host of a device leaving the network
LeaveInd = t.CommandDef(
t.CommandType.AREQ,
0xC9,
rsp_schema=(
t.Param(
"NWK", t.NWK, "Short address of the source of the leave indication"
),
t.Param(
"IEEE",
t.EUI64,
"IEEE address of the source of the leave indication",
),
t.Param("Request", t.Bool, "True -- request, False -- indication"),
t.Param("Remove", t.Bool, "True -- Remove children"),
t.Param("Rejoin", t.Bool, "True -- Rejoin"),
),
)
# ZDO callback for a Cluster Id that the host requested to receive
# with a MsgCallbackRegister request
MsgCbIncoming = t.CommandDef(
t.CommandType.AREQ,
0xFF,
rsp_schema=(
t.Param("Src", t.NWK, "Source address of the ZDO message"),
t.Param(
"IsBroadcast",
t.Bool,
"Indicates whether the message was a broadcast",
),
t.Param("ClusterId", t.ClusterId, "Cluster Id of this ZDO message"),
t.Param("SecurityUse", t.uint8_t, "Not used"),
t.Param("TSN", t.uint8_t, "Transaction sequence number"),
t.Param(
"MacDst", t.NWK, "Mac destination short address of the ZDO message"
),
t.Param(
"Data",
t.Bytes,
"Data that corresponds to the cluster ID of the message",
),
),
)
# a ZDO callback for TC Device Indication
TCDevInd = t.CommandDef(
t.CommandType.AREQ,
0xCA,
rsp_schema=(
t.Param("SrcNwk", t.NWK, "device's network address"),
t.Param("SrcIEEE", t.EUI64, "IEEE address of the source"),
t.Param("ParentNwk", t.NWK, "Parent's network address"),
),
)
# a ZDO callback for Permit Join Indication
PermitJoinInd = t.CommandDef(
t.CommandType.AREQ,
0xCB,
rsp_schema=(t.Param("Duration", t.uint8_t, "Permit join duration"),),
)
# set rejoin backoff duration and rejoin scan duration for an end device
SetRejoinParams = t.CommandDef(
t.CommandType.SREQ,
        # in the documentation CmdId=0x26, which conflicts with the discovery request
0xCC,
req_schema=(
t.Param(
"BackoffDuraation",
t.uint32_t,
"Rejoin backoff duration for end device",
),
t.Param("ScanDuration", t.uint32_t, "Rejoin scan duration for end device"),
),
rsp_schema=t.STATUS_SCHEMA,
) | zigpy-znp | /zigpy-znp-0.11.4.tar.gz/zigpy-znp-0.11.4/zigpy_znp/commands/zdo.py | zdo.py |
import zigpy_znp.types as t
class AttributeValue(t.FixedList, item_type=t.uint8_t, length=16):
pass
class MAC(t.CommandsBase, subsystem=t.Subsystem.MAC):
# MAC Reset command to reset MAC state machine
ResetReq = t.CommandDef(
t.CommandType.SREQ,
0x01,
req_schema=(
t.Param(
"SetDefault",
t.Bool,
"TRUE - Set the MAC pib values to default values",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# initialize the MAC
Init = t.CommandDef(
t.CommandType.SREQ, 0x02, req_schema=(), rsp_schema=t.STATUS_SCHEMA
)
# start the MAC as a coordinator or end device
StartReq = t.CommandDef(
t.CommandType.SREQ,
0x03,
req_schema=(
t.Param(
"StartTime",
t.uint32_t,
(
"The time to begin transmitting beacons relative to "
"the received beacon"
),
),
t.Param(
"PanId",
t.PanId,
(
"The PAN Id to use. This parameter is ignored if Pan "
"Coordinator is FALSE"
),
),
t.Param("LogicalChannel", t.uint8_t, "The logical channel to use"),
t.Param("ChannelPage", t.uint8_t, "The channel page to use"),
t.Param(
"BeaconOrder",
t.uint8_t,
"The exponent used to calculate the beacon interval",
),
t.Param(
"SuperFrameOrder",
t.uint8_t,
"The exponent used to calculate the superframe duration",
),
t.Param(
"PanCoordinator",
t.Bool,
"Set to TRUE to start a network as PAN coordinator",
),
t.Param(
"BatteryLifeExt",
t.uint8_t,
"full backoff periods following the interframe spacing",
),
t.Param("CoordRealignment", t.uint8_t, "Coordinator realignment"),
t.Param("RealignKeySource", t.KeySource, "Key Source of this data frame"),
# ToDo: Enum for for RealignSecurityLevel
t.Param(
"RealignSecurityLevel",
t.uint8_t,
"Security level of this data frame",
),
# ToDo: Make this an enum
t.Param("RealignKeyIdMode", t.uint8_t, "Key Id Mode of this frame"),
t.Param("RealignKeyIndex", t.uint8_t, "Key index of this frame"),
t.Param("BeaconKeySource", t.KeySource, "Key source of this data frame"),
# ToDo: Make this an enum
t.Param(
"BeaconSecurityLevel",
t.uint8_t,
"Security Level of this data frame",
),
t.Param("BeaconKeyIdMode", t.uint8_t, "Key Id Mode of this data frame"),
t.Param("BeaconKeyIndex", t.uint8_t, "Key index of this data frame"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request synchronization to the current network beacon
SyncReq = t.CommandDef(
t.CommandType.SREQ,
0x04,
req_schema=(
t.Param("LogicalChannel", t.uint8_t, "The logical channel to use"),
t.Param("ChannelPage", t.uint8_t, "The channel page to use"),
t.Param(
"TrackBeacon",
t.Bool,
(
"Set to TRUE to continue tracking beacons after synchronizing "
"with the first beacon. Set to FALSE to only synchronize with "
"the first beacon"
),
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# send (on behalf of the next higher layer) MAC Data Frame packet
DataReq = t.CommandDef(
t.CommandType.SREQ,
0x05,
req_schema=(
t.Param(
"DstAddrModeAddress",
t.AddrModeAddress,
"Destination address mode and address",
),
t.Param("DstPanId", t.PanId, "The PAN Id of destination"),
t.Param("SrcAddrMode", t.AddrMode, "Format of the source address"),
t.Param("Handle", t.uint8_t, "Handle of the packet"),
# ToDo: Make this a proper Flags Enum
t.Param("TxOption", t.uint8_t, "Transmitting options"),
t.Param(
"LogicalChannel",
t.uint8_t,
"Channel that data frame will be transmitted",
),
t.Param("Power", t.uint8_t, "Power level to use for transmission"),
t.Param("KeySource", t.KeySource, "Key Source of this data frame"),
# ToDo: Make this an enum
t.Param("SecurityLevel", t.uint8_t, "Security level of this data frame"),
# ToDo: Make this an enum
t.Param("KeyIdMode", t.uint8_t, "Key Id Mode of this frame"),
t.Param("KeyIndex", t.uint8_t, "Key index of this frame"),
t.Param("MSDU", t.ShortBytes, "Actual data that will be sent"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request (on behalf of the next higher layer) an association with a coordinator
AssociateReq = t.CommandDef(
t.CommandType.SREQ,
0x06,
req_schema=(
t.Param("LogicalChannel", t.uint8_t, "The logical channel to use"),
t.Param("ChannelPage", t.uint8_t, "The channel page to use"),
t.Param(
"CoordAddrModeAddress",
t.AddrModeAddress,
"Coordinator address mode and address",
),
t.Param("CoordPanId", t.PanId, "The PAN Id of the coordinator"),
# ToDo: make this a bitflag enum
t.Param("CapabilityInformation", t.uint8_t, "BitFlag Coordinator"),
t.Param("KeySource", t.KeySource, "Key Source of this data frame"),
# ToDo: Make this an enum
t.Param("SecurityLevel", t.uint8_t, "Security level of this data frame"),
# ToDo: Make this an enum
t.Param("KeyIdMode", t.uint8_t, "Key Id Mode of this frame"),
t.Param("KeyIndex", t.uint8_t, "Key index of this frame"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# This command is sent by the host to response to the MAC_ASSOCIATE_IND
AssociateRsp = t.CommandDef(
t.CommandType.SREQ,
0x50,
req_schema=(
t.Param(
"IEEE",
t.EUI64,
"Extended address of the device requesting association",
),
t.Param("NWK", t.NWK, "Short address of the associated device"),
# ToDo: make this an enum
t.Param("AssocStatus", t.uint8_t, "Status of the associaiton"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# request (on behalf of the next higher layer) a disassociation of the device
# from the coordinator
DisAssociateReq = t.CommandDef(
t.CommandType.SREQ,
0x07,
req_schema=(
t.Param(
"DeviceAddrModeAddress",
t.AddrModeAddress,
"Device address mode and address",
),
t.Param("DevicePanId", t.PanId, "Device's PAN Id"),
# ToDo: Make this an enum
t.Param("DisassociateReason", t.uint8_t, "Reason for disassociation"),
t.Param("TxIndirect", t.Bool, "Indirect Transmission"),
t.Param("KeySource", t.KeySource, "Key Source of this data frame"),
# ToDo: Make this an enum
t.Param("SecurityLevel", t.uint8_t, "Security level of this data frame"),
# ToDo: Make this an enum
t.Param("KeyIdMode", t.uint8_t, "Key Id Mode of this frame"),
t.Param("KeyIndex", t.uint8_t, "Key index of this frame"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# read (on behalf of the next higher layer) a MAC PIB attribute
GetReq = t.CommandDef(
t.CommandType.SREQ,
0x08,
req_schema=(
# ToDo: Make this an enum
t.Param("Attribute", t.uint8_t, "MAC PIB Attribute to get"),
),
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("Value", AttributeValue, "Value of the attribute"),
),
)
# request the device to write a MAC PIB value
SetReq = t.CommandDef(
t.CommandType.SREQ,
0x09,
req_schema=(
# ToDo: Make this an enum
t.Param("Attribute", t.uint8_t, "MAC PIB Attribute to set"),
t.Param("Value", AttributeValue, "Value of the attribute"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# send a request to the device to perform a network scan
ScanReq = t.CommandDef(
t.CommandType.SREQ,
0x0C,
req_schema=(
t.Param(
"ScanChannels",
t.Channels,
"Bitmask of channels to scan when starting the device",
),
# ToDo: Make this an enum
t.Param("ScanType", t.uint8_t, "Specifies the scan type"),
t.Param(
"ScanDuration", t.uint8_t, "The exponent used in the scan duration"
),
t.Param("ChannelPage", t.uint8_t, "The channel page to use"),
t.Param("MaxResults", t.uint8_t, "Max results (UNDOCUMENTED)"),
t.Param("KeySource", t.KeySource, "Key Source of this data frame"),
# ToDo: Make this an enum
t.Param("SecurityLevel", t.uint8_t, "Security level of this data frame"),
# ToDo: Make this an enum
t.Param("KeyIdMode", t.uint8_t, "Key Id Mode of this frame"),
t.Param("KeyIndex", t.uint8_t, "Key index of this frame"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# This command is sent by the host to response to the ORPHAN_IND
OrphanRsp = t.CommandDef(
t.CommandType.SREQ,
0x51,
req_schema=(
t.Param(
"IEEE",
t.EUI64,
"Extended address of the device requesting association",
),
t.Param("NWK", t.NWK, "Short address of the associated device"),
t.Param(
"AssociatedMember",
t.Bool,
"True is the orphan is a associated member",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# send a MAC data request poll
PollReq = t.CommandDef(
t.CommandType.SREQ,
0x0D,
req_schema=(
t.Param(
"CoordAddrModeAddress",
t.AddrModeAddress,
"Coordinator address mode and address",
),
t.Param("CoordPanId", t.PanId, "The PAN Id of the coordinator"),
t.Param("KeySource", t.KeySource, "Key Source of this data frame"),
# ToDo: Make this an enum
t.Param("SecurityLevel", t.uint8_t, "Security level of this data frame"),
# ToDo: Make this an enum
t.Param("KeyIdMode", t.uint8_t, "Key Id Mode of this frame"),
t.Param("KeyIndex", t.uint8_t, "Key index of this frame"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# send a request to the device to purge a data frame
PurgeReq = t.CommandDef(
t.CommandType.SREQ,
0x0E,
req_schema=(t.Param("MsduHandle", t.uint8_t, "MSDU handle"),),
rsp_schema=t.STATUS_SCHEMA,
)
# send a request to the device to set Rx gain
SetRxGainReq = t.CommandDef(
t.CommandType.SREQ,
0x0F,
req_schema=(t.Param("Mode", t.Bool, "PA/PNA mode - True/False"),),
rsp_schema=t.STATUS_SCHEMA,
)
# MAC Callbacks
# send (on behalf of the next higher layer) an indication of the synchronization
# loss
SyncLossInd = t.CommandDef(
t.CommandType.AREQ,
0x80,
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param(
"PanId",
t.PanId,
"The PAN Id to use. This parameter is ignored if Pan",
),
t.Param("LogicalChannel", t.uint8_t, "The logical channel to use"),
t.Param("ChannelPage", t.uint8_t, "The channel page to use"),
t.Param("KeySource", t.KeySource, "Key Source of this data frame"),
# ToDo: Make this an enum
t.Param("SecurityLevel", t.uint8_t, "Security level of this data frame"),
# ToDo: Make this an enum
t.Param("KeyIdMode", t.uint8_t, "Key Id Mode of this frame"),
t.Param("KeyIndex", t.uint8_t, "Key index of this frame"),
),
)
# send (on behalf of the next higher layer) an association indication message
AssociateInd = t.CommandDef(
t.CommandType.AREQ,
0x81,
rsp_schema=(
t.Param("IEEE", t.EUI64, "Extended address of the device"),
t.Param("Capabilities", t.uint8_t, "Operating capabilities of the device"),
t.Param("KeySource", t.KeySource, "Key Source of this data frame"),
# ToDo: Make this an enum
t.Param("SecurityLevel", t.uint8_t, "Security level of this data frame"),
# ToDo: Make this an enum
t.Param("KeyIdMode", t.uint8_t, "Key Id Mode of this frame"),
t.Param("KeyIndex", t.uint8_t, "Key index of this frame"),
),
)
# send (on behalf of the next higher layer) an association confirmation message
AssociateCnf = t.CommandDef(
t.CommandType.AREQ,
0x82,
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("NWK", t.NWK, "Short address of the device"),
t.Param("KeySource", t.KeySource, "Key Source of this data frame"),
# ToDo: Make this an enum
t.Param("SecurityLevel", t.uint8_t, "Security level of this data frame"),
# ToDo: Make this an enum
t.Param("KeyIdMode", t.uint8_t, "Key Id Mode of this frame"),
t.Param("KeyIndex", t.uint8_t, "Key index of this frame"),
),
)
# send (on behalf of the next higher layer) a MAC beacon notify indication
BeaconNotifyInd = t.CommandDef(
t.CommandType.AREQ,
0x83,
rsp_schema=(
t.Param("BSN", t.uint8_t, "BSN"),
t.Param("TimeStamp", t.uint32_t, "The timestamp of the message"),
t.Param(
"CoordinatorExtendedAddress",
t.AddrModeAddress,
"Extended address of coordinator",
),
t.Param(
"PanId",
t.PanId,
"The PAN Id to use. This parameter is ignored if Pan",
),
t.Param("Superframe", t.uint16_t, "Superframe specification"),
t.Param("LogicalChannel", t.uint8_t, "The logical channel to use"),
t.Param("GTSPermit", t.Bool, "True/False - Permit/Not permit GTS"),
t.Param("LQI", t.uint8_t, "Link quality of the message"),
t.Param("SecurityFailure", t.uint8_t, "Security failure???"),
t.Param("KeySource", t.KeySource, "Key Source of this data frame"),
# ToDo: Make this an enum
t.Param("SecurityLevel", t.uint8_t, "Security level of this data frame"),
# ToDo: Make this an enum
t.Param("KeyIdMode", t.uint8_t, "Key Id Mode of this frame"),
t.Param("KeyIndex", t.uint8_t, "Key index of this frame"),
t.Param("PendingAddrSpec", t.uint8_t, "Pending address spec"),
t.Param(
"AddressList",
t.uint8_t,
"List of address associate with the device",
),
t.Param("NSDU", t.ShortBytes, "Beacon payload"),
),
)
# send (on behalf of the next higher layer) a MAC data confirmation
DataCnf = t.CommandDef(
t.CommandType.AREQ,
0x84,
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("Handle", t.uint8_t, "Handle of the message"),
t.Param("TimeStamp", t.uint32_t, "The timestamp of the message"),
t.Param("TimeStamp2", t.uint16_t, "16 bit timestamp of the message"),
),
)
# send (on behalf of the next higher layer) a MAC data indication
DataInd = t.CommandDef(
t.CommandType.AREQ,
0x85,
rsp_schema=(
t.Param("SrcAddrModeAddr", t.AddrModeAddress, "Source address"),
t.Param("DstAddrModeAddr", t.AddrModeAddress, "Destination address"),
t.Param("TimeStamp", t.uint32_t, "The timestamp of the message"),
t.Param("TimeStamp2", t.uint16_t, "16 bit timestamp of the message"),
t.Param("SrcPanId", t.PanId, "PAN Id of the source address"),
t.Param("DstPanId", t.PanId, "PAN Id of the destination address"),
t.Param("LQI", t.uint8_t, "Link quality of the message"),
t.Param("Correlation", t.uint8_t, "Correlation"),
t.Param("RSSI", t.int8s, "RSSI"),
t.Param("DSN", t.uint8_t, "DSN"),
t.Param("KeySource", t.KeySource, "Key Source of this data frame"),
# ToDo: Make this an enum
t.Param("SecurityLevel", t.uint8_t, "Security level of this data frame"),
# ToDo: Make this an enum
t.Param("KeyIdMode", t.uint8_t, "Key Id Mode of this frame"),
t.Param("KeyIndex", t.uint8_t, "Key index of this frame"),
t.Param("Data", t.ShortBytes, "Actual data that will be sent"),
),
)
# send (on behalf of the next higher layer) a MAC disassociation indication
DisassociateReq = t.CommandDef(
t.CommandType.AREQ,
0x86,
rsp_schema=(
t.Param("IEEE", t.EUI64, "EUI64 address of the device leaving the network"),
# ToDo: Make this an enum
t.Param("DisassociateReason", t.uint8_t, "Reason for disassociation"),
t.Param("KeySource", t.KeySource, "Key Source of this data frame"),
# ToDo: Make this an enum
t.Param("SecurityLevel", t.uint8_t, "Security level of this data frame"),
# ToDo: Make this an enum
t.Param("KeyIdMode", t.uint8_t, "Key Id Mode of this frame"),
t.Param("KeyIndex", t.uint8_t, "Key index of this frame"),
),
)
# send (on behalf of the next higher layer) a MAC disassociate confirm
DisassociateCnf = t.CommandDef(
t.CommandType.AREQ,
0x87,
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param(
"DeviceAddrModeAddr",
t.AddrModeAddress,
"Address mode address of the device",
),
t.Param("PanId", t.PanId, "The PAN Id of the device"),
),
)
# send (on behalf of the next higher layer) a MAC orphan indication
OrphanInd = t.CommandDef(
t.CommandType.AREQ,
0x8A,
rsp_schema=(
t.Param("IEEE", t.EUI64, "Extended address of the orphan device"),
t.Param("KeySource", t.KeySource, "Key Source of this data frame"),
# ToDo: Make this an enum
t.Param("SecurityLevel", t.uint8_t, "Security level of this data frame"),
# ToDo: Make this an enum
t.Param("KeyIdMode", t.uint8_t, "Key Id Mode of this frame"),
t.Param("KeyIndex", t.uint8_t, "Key index of this frame"),
),
)
# send (on behalf of the next higher layer) a MAC poll confirmation
PollCnf = t.CommandDef(t.CommandType.AREQ, 0x8B, rsp_schema=t.STATUS_SCHEMA)
# TODO: investigate the actual structure of this command. The source code indicates
# that the response type differs heavily based on the ScanType.
# Also, ResultListMaxLength does not appear to be present.
"""
# send (on behalf of the next higher layer) a MAC scan confirmation
ScanCnf = t.CommandDef(
t.CommandType.AREQ,
0x8C,
rsp_schema=(
t.Param(
"Status", t.Status, "Status is either Success (0) or Failure (1)"
),
t.Param("ED", t.uint8_t, "ED max energy"),
t.Param("ScanType", t.ScanType, "Specifies the scan type"),
t.Param("ChannelPage", t.uint8_t, "The channel page to use"),
t.Param(
"UnScannedChannelList",
t.Channels,
"List of the un-scanned channels",
),
t.Param(
"ResultListCount", t.uint8_t, "Number of items in the result list"
),
t.Param(
"ResultListMaxLength",
t.uint8_t,
"Max length of the result list in bytes"
),
t.Param("ResultList", t.LVList(t.uint8_t), "Result list"),
),
)
"""
    # send (on behalf of the next higher layer) a MAC communication status indication
CommStatusInd = t.CommandDef(
t.CommandType.AREQ,
0x8D,
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("DstAddrMode", t.AddrMode, "Destination address mode"),
t.Param("SrcIEEE", t.EUI64, "Source address"),
t.Param("DstIEEE", t.EUI64, "Destination address"),
t.Param("TimeStamp", t.uint32_t, "The timestamp of the message"),
t.Param("DevicePanId", t.PanId, "PAN Id of the device"),
t.Param("Reason", t.uint8_t, "Reason of communication indication"),
t.Param("KeySource", t.KeySource, "Key Source of this data frame"),
# ToDo: Make this an enum
t.Param("SecurityLevel", t.uint8_t, "Security level of this data frame"),
# ToDo: Make this an enum
t.Param("KeyIdMode", t.uint8_t, "Key Id Mode of this frame"),
t.Param("KeyIndex", t.uint8_t, "Key index of this frame"),
),
)
# send (on behalf of the next higher layer) a MAC start confirmation
StartCnf = t.CommandDef(t.CommandType.AREQ, 0x8E, rsp_schema=t.STATUS_SCHEMA)
# send (on behalf of the next higher layer) a MAC Rx enable confirmation
RxEnableCnf = t.CommandDef(t.CommandType.AREQ, 0x8F, rsp_schema=t.STATUS_SCHEMA)
# send (on behalf of the next higher layer) a MAC purge confirmation
PurgeCnf = t.CommandDef(
t.CommandType.AREQ,
0x9A,
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("Handle", t.uint8_t, "Handle of the message"),
),
) | zigpy-znp | /zigpy-znp-0.11.4.tar.gz/zigpy-znp-0.11.4/zigpy_znp/commands/mac.py | mac.py |
import zigpy_znp.types as t
class SAPI(t.CommandsBase, subsystem=t.Subsystem.SAPI):
    # reset the device by using a soft reset (i.e. a jump to the reset vector)
    # instead of a hardware reset (i.e. watchdog reset)
ZBSystemReset = t.CommandDef(t.CommandType.AREQ, 0x09, req_schema=())
# start the ZigBee stack
ZBStartReq = t.CommandDef(t.CommandType.SREQ, 0x00, req_schema=(), rsp_schema=())
# control the joining permissions and thus allows or disallows new devices
# from joining the network
ZBPermitJoiningReq = t.CommandDef(
t.CommandType.SREQ,
0x08,
req_schema=(
t.Param(
"NWK",
t.NWK,
(
"Short address of the device for which the joining "
"permissions should be set"
),
),
t.Param(
"duration",
t.uint8_t,
"amount of time in seconds to allow new device to join",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# establishes or removes a 'binding' between two devices. Once bound, an
# application can send messages to a device by referencing the commandId
# for the binding
ZBBindDevice = t.CommandDef(
t.CommandType.SREQ,
0x01,
req_schema=(
t.Param("Create", t.Bool, "True to create binding, False to remove"),
t.Param("CommandId", t.uint16_t, "The identifier of the binding"),
t.Param("IEEE", t.EUI64, "IEEE address of the device to bind to"),
),
rsp_schema=(),
)
# puts the device into the Allow Binding Mode for a given period of time. A peer
# device can establish a binding to a device in the Allow Binding Mode by calling
# zb_BindDevice with a destination address of NULL
ZBAllowBind = t.CommandDef(
t.CommandType.SREQ,
0x02,
req_schema=(
t.Param(
"duration",
t.uint8_t,
"amount of time in seconds to allow new device to join",
),
),
rsp_schema=(),
)
# initiates transmission of data to a peer device
ZBSendDataRequest = t.CommandDef(
t.CommandType.SREQ,
0x03,
req_schema=(
t.Param("Destination", t.NWK, "Short address of the destination"),
t.Param("CommandId", t.uint16_t, "The command id to send with the message"),
t.Param(
"Handle",
t.uint8_t,
"A handle used to Identify the send data request",
),
t.Param("Ack", t.Bool, "True if requesting ACK from the destination"),
t.Param(
"Radius", t.uint8_t, "The max number of hops the packet can travel"
),
t.Param("Data", t.ShortBytes, "Data"),
),
rsp_schema=(),
)
# get a configuration property from nonvolatile memory
ZBReadConfiguration = t.CommandDef(
t.CommandType.SREQ,
0x04,
req_schema=(
t.Param("ConfigId", t.uint8_t, "ID of the configuration property to read"),
),
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("ConfigId", t.uint8_t, "ID of the configuration property to read"),
t.Param("Value", t.ShortBytes, "Value"),
),
)
    # write a configuration property to nonvolatile memory
ZBWriteConfiguration = t.CommandDef(
t.CommandType.SREQ,
0x05,
req_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("ConfigId", t.uint8_t, "ID of the configuration property to read"),
t.Param("Value", t.ShortBytes, "Value"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# retrieves a Device Information Property
ZBGetDeviceInfo = t.CommandDef(
t.CommandType.SREQ,
0x06,
req_schema=(
t.Param("Param", t.uint8_t, "The identifier for deice information"),
),
rsp_schema=(
t.Param("Param", t.uint8_t, "The identifier for deice information"),
t.Param("Value", t.uint16_t, "Value"),
),
)
# determine the short address for a device in the network. The device
    # initiating a call to zb_FindDeviceRequest and the device being discovered must
    # both be members of the same network. When the search is complete,
    # the zb_FindDeviceConfirm callback function is called
ZBFindDeviceReq = t.CommandDef(
t.CommandType.SREQ,
0x07,
req_schema=(t.Param("SearchKey", t.EUI64, "IEEE address of the device"),),
rsp_schema=(),
)
# SAPI CallBacks
# this callback is called by the ZigBee stack after a start request
# operation completes
    ZBStartConfirm = t.CommandDef(t.CommandType.AREQ, 0x80, rsp_schema=t.STATUS_SCHEMA)
# This callback is called by the ZigBee stack after a bind operation completes
ZBBindConfirm = t.CommandDef(
t.CommandType.AREQ,
0x81,
rsp_schema=(
t.Param("CommandId", t.uint16_t, "The command id to send with the message"),
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
),
)
# This callback indicates another device attempted to bind to this device
ZBAllowBindConfirm = t.CommandDef(
t.CommandType.AREQ,
0x82,
rsp_schema=(
t.Param("Source", t.NWK, "Source Nwk of the device attempted to bind"),
),
)
# This callback indicates the data has been sent
ZBSendConfirm = t.CommandDef(
t.CommandType.AREQ,
0x83,
rsp_schema=(
t.Param(
"Handle",
t.uint8_t,
"A handle used to Identify the send data request",
),
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
),
)
# This callback is called asynchronously by the ZigBee stack to notify the
# application when data is received from a peer device
    ZBReceiveDataInd = t.CommandDef(
t.CommandType.AREQ,
0x87,
rsp_schema=(
t.Param("Source", t.NWK, "NWK address of the source device"),
t.Param("CommandId", t.uint16_t, "The command id associated with the data"),
t.Param("Data", t.LongBytes, "Data"),
),
)
# This callback is called by the ZigBee stack when a find device operation
# completes
ZBFindDeviceConfirm = t.CommandDef(
t.CommandType.AREQ,
0x85,
rsp_schema=(
t.Param("SearchType", t.uint8_t, "The type of search that was performed"),
t.Param("SearchKey", t.uint16_t, "Value that the search was executed on"),
t.Param("Result", t.EUI64, "The result of the search"),
),
) | zigpy-znp | /zigpy-znp-0.11.4.tar.gz/zigpy-znp-0.11.4/zigpy_znp/commands/sapi.py | sapi.py |
import zigpy_znp.types as t
class ZGP(t.CommandsBase, subsystem=t.Subsystem.ZGP):
DataReq = t.CommandDef(
t.CommandType.SREQ,
0x01,
req_schema=(
t.Param("Action", t.Bool, "True/False -- add/remove GPDF into the queue"),
t.Param("TXOptions", t.uint8_t, "ZGP TX Options"),
t.Param(
"ApplicationId",
t.uint8_t,
(
"ApplicationID of the GPD to which the ASDU will be sent; "
"ApplicationID 0x00 indicates the usage of the SrcID; "
"ApplicationID 0x02 indicates the usage of the GPD IEEE"
),
),
t.Param(
"SrcId",
t.uint32_t,
"The identifier of the GPD entity to which the ASDU will be sent",
),
t.Param("IEEE", t.EUI64, "IEEE address"),
t.Param(
"Endpoint",
t.uint8_t,
"GPD ep used in combination with GPD IEEE for APPid of 0b010",
),
t.Param("CommandId", t.uint8_t, "GPD command id"),
t.Param("ASDU", t.ShortBytes, "GPD ASDU"),
t.Param("Handle", t.uint8_t, "GPEP handle to match req to confirmation"),
t.Param("LifeTime", t.uint24_t, "The lifetime of the packet"),
),
rsp_schema=t.STATUS_SCHEMA,
)
    # send security data into the dGP stub
SecRsp = t.CommandDef(
t.CommandType.SREQ,
0x02,
req_schema=(
t.Param(
"Status",
t.uint8_t,
"The status code as returned by the GP endpoint",
),
t.Param("Handle", t.uint8_t, "GPEP handle to match req to confirmation"),
t.Param(
"ApplicationId",
t.uint8_t,
(
"ApplicationID of the GPD to which the ASDU will be sent; "
"ApplicationID 0x00 indicates the usage of the SrcID; "
"ApplicationID 0x02 indicates the usage of the GPD IEEE"
),
),
t.Param(
"SrcId",
t.uint32_t,
"The identifier of the GPD entity to which the ASDU will be sent",
),
t.Param("IEEE", t.EUI64, "IEEE address"),
t.Param(
"Endpoint",
t.uint8_t,
"GPD ep used in combination with GPD IEEE for APPid of 0b010",
),
t.Param("SecurityLevel", t.uint8_t, "Security level for GPDF processing"),
t.Param("KeyType", t.uint8_t, "The security key type"),
t.Param("KeyData", t.KeyData, "Security key"),
t.Param("FrameCounter", t.uint32_t, "The security frame counter value"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# ZGP Callbacks
# Green power confirm is a message that provides a mechanism for the Green Power
# EndPoint in the host processor to understand the status of a previous request
# to send a GPDF
DataCnf = t.CommandDef(
t.CommandType.AREQ,
0x05,
req_schema=(
t.Param(
"Status",
t.uint8_t,
"The status code as returned by the GP endpoint",
),
t.Param("Handle", t.uint8_t, "handle to match req to confirmation"),
),
)
# This message provides a mechanism for dGP stub to request security data from the
# Green Power EndPoint in the host processor
SecReq = t.CommandDef(
t.CommandType.AREQ,
0x03,
req_schema=(
t.Param(
"ApplicationId",
t.uint8_t,
(
"ApplicationID of the GPD to which the ASDU will be sent; "
"ApplicationID 0x00 indicates the usage of the SrcID; "
"ApplicationID 0x02 indicates the usage of the GPD IEEE"
),
),
t.Param(
"SrcId",
t.uint32_t,
"The identifier of the GPD entity to which the ASDU will be sent",
),
t.Param("IEEE", t.EUI64, "IEEE address"),
t.Param(
"Endpoint",
t.uint8_t,
"GPD ep used in combination with GPD IEEE for APPid of 0b010",
),
t.Param("SecurityLevel", t.uint8_t, "Security level for GPDF processing"),
t.Param("KeyType", t.uint8_t, "The security key type"),
t.Param("FrameCounter", t.uint32_t, "The security frame counter value"),
t.Param(
"Handle", t.uint8_t, "dGP stub handle to match req to confirmation"
),
),
)
# This message provides a mechanism for identifying and conveying a received
# GPDF to the Green Power EndPoint in the host processor
DataInd = t.CommandDef(
t.CommandType.AREQ,
0x04,
req_schema=(
t.Param("Status", t.uint8_t, "The status code as returned by the dGP stub"),
t.Param(
"RSSI",
t.int8s,
"The RSSI delivered by the MAC on receipt of this frame",
),
t.Param("LQI", t.uint8_t, "Link quality measured during reception"),
t.Param("SeqNum", t.uint8_t, "The sequence number from MAC header of MPDU"),
t.Param("SrcAddrMode", t.AddrMode, "The source addressing mode of MPDU"),
t.Param("SrcPanId", t.PanId, "The source PAN Id"),
# ToDo: ugh, this could be ieee depending on addressing mode
t.Param("SrcNWK", t.NWK, "The source address of the GPD entity"),
t.Param(
"DstAddrMode", t.AddrMode, "The destination addressing mode of MPDU"
),
t.Param("DstPanId", t.PanId, "The destination PAN Id"),
# ToDo: ugh, this could be ieee depending on addressing mode
t.Param("DstNWK", t.NWK, "The destination address of the GPD entity"),
t.Param("MPDU", t.ShortBytes, "GP MPDU"),
),
) | zigpy-znp | /zigpy-znp-0.11.4.tar.gz/zigpy-znp-0.11.4/zigpy_znp/commands/zgp.py | zgp.py |
import zigpy_znp.types as t
class BootloaderBuildType(t.enum_uint8):
NON_BOOTLOADER_BUILD = 0
BUILT_AS_BIN = 1
BUILT_AS_HEX = 2
class ADCChannel(t.enum_uint8):
"""The ADC channel."""
AIN0 = 0x00
AIN1 = 0x01
AIN2 = 0x02
AIN3 = 0x03
AIN4 = 0x04
AIN5 = 0x05
AIN6 = 0x06
AIN7 = 0x07
Temperature = 0x0E
Voltage = 0x0F
class ADCResolution(t.enum_uint8):
"""Resolution of the ADC channel."""
bits_8 = 0x00
bits_10 = 0x01
bits_12 = 0x02
bits_14 = 0x03
class GPIOPinMode(t.enum_flag_uint8):
"""Pin state. Any pin with an unspecified state bit is pulled up."""
Tristate0 = 0b0000_0001
Tristate1 = 0b0000_0010
Tristate2 = 0b0000_0100
Tristate3 = 0b0000_1000
PullDown0 = 0b0001_0000
PullDown1 = 0b0010_0000
PullDown2 = 0b0100_0000
PullDown3 = 0b1000_0000
class GPIOPinDirection(t.enum_flag_uint8):
"""Pin direction. Any pin with an unspecified direction bit is an input pin."""
Output0 = 0b0000_0001
Output1 = 0b0000_0010
Output2 = 0b0000_0100
Output3 = 0b0000_1000
class GPIOOperation(t.enum_uint8):
"""Specifies the type of operation to perform on the GPIO pins."""
SetDirection = 0x00
SetTristateMode = 0x01
Set = 0x02
Clear = 0x03
Toggle = 0x04
Read = 0x05
HiD = 0x12 # ???
class StackTuneOperation(t.enum_uint8):
"""The tuning operation to be executed."""
# XXX: [Value] should correspond to the valid values specified by the
# ZMacTransmitPower_t enumeration (0xFD - 0x16)
PowerLevel = 0x00
# Set RxOnWhenIdle off/on if the value of Value is 0/1;
# otherwise return the 0x01 current setting of RxOnWhenIdle.
SetRxOnWhenIdle = 0x01
class SYS(t.CommandsBase, subsystem=t.Subsystem.SYS):
# reset the target device
ResetReq = t.CommandDef(
t.CommandType.AREQ,
0x00,
req_schema=(
t.Param(
"Type",
t.ResetType,
(
"This command will reset the device by using a hardware reset "
"(i.e. watchdog reset) if 'Type' is zero. Otherwise a soft "
"reset (i.e. a jump to the reset vector) is done. This "
"is especially useful in the CC2531, for instance, so that the "
"USB host does not have to contend with the USB H/W resetting "
"(and thus causing the USB host to reenumerate the device "
"which can cause an open virtual serial port to hang.)"
),
),
),
)
# issue PING requests to verify if a device is active and check the capability of
# the device
Ping = t.CommandDef(
t.CommandType.SREQ,
0x01,
req_schema=(),
rsp_schema=(
t.Param(
"Capabilities",
t.MTCapabilities,
"Represents the intefaces this device can handle",
),
),
)
# request for the device's version string
Version = t.CommandDef(
t.CommandType.SREQ,
0x02,
req_schema=(),
rsp_schema=(
t.Param("TransportRev", t.uint8_t, "Transport protocol revision"),
t.Param("ProductId", t.uint8_t, "Product ID"),
t.Param("MajorRel", t.uint8_t, "Software major release number"),
t.Param("MinorRel", t.uint8_t, "Software minor release number"),
t.Param("MaintRel", t.uint8_t, "Software maintenance release number"),
# Optional stuff
t.Param(
"CodeRevision",
t.uint32_t,
"User-supplied code revision number",
optional=True,
),
t.Param(
"BootloaderBuildType",
BootloaderBuildType,
"Bootloader build type",
optional=True,
),
t.Param(
"BootloaderRevision",
t.uint32_t,
"Bootloader revision. 0 - not provided, 0xFFFFFFFF - not supported",
optional=True,
),
),
)
# set the extended address of the device
SetExtAddr = t.CommandDef(
t.CommandType.SREQ,
0x03,
req_schema=(t.Param("ExtAddr", t.EUI64, "The device's extended address"),),
rsp_schema=t.STATUS_SCHEMA,
)
# get the extended address of the device
GetExtAddr = t.CommandDef(
t.CommandType.SREQ,
0x04,
req_schema=(),
rsp_schema=(t.Param("ExtAddr", t.EUI64, "The device's extended address"),),
)
# read a single memory location in the target RAM. The command accepts an address
# value and returns the memory value present in the target RAM at that address
RamRead = t.CommandDef(
t.CommandType.SREQ,
0x05,
req_schema=(
t.Param("Address", t.uint16_t, "Address of the memory to read"),
t.Param("Len", t.uint8_t, "The number of bytes to read"),
),
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("Value", t.ShortBytes, "The value read from memory address"),
),
)
# write to a particular location in the target RAM. The command accepts an
# address location and a memory value. The memory value is written to the address
# location in the target RAM
RamWrite = t.CommandDef(
t.CommandType.SREQ,
0x06,
req_schema=(
t.Param("Address", t.uint16_t, "Address of the memory to read"),
t.Param("Value", t.ShortBytes, "The value read from memory address"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# create and initialize an item in non-volatile memory. The NV item will be created
# if it does not already exist. The data for the new NV item will be left
# uninitialized if the InitLen parameter is zero. When InitLen is non-zero, the
# data for the NV item will be initialized (starting at offset of zero) with the
# values from InitData. Note that it is not necessary to initialize the entire NV
# item (InitLen < ItemLen). It is also possible to create an NV item that is larger
# than the maximum length InitData - use the SYS_OSAL_NV_WRITE command to finish
# the initialization
OSALNVItemInit = t.CommandDef(
t.CommandType.SREQ,
0x07,
req_schema=(
t.Param("Id", t.uint16_t, "The Id of the NV Item"),
t.Param("ItemLen", t.uint16_t, "Number of bytes in the NV item"),
t.Param("Value", t.ShortBytes, "The value of the NV item"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# read a single memory item in the target non-volatile memory. The command accepts
# an attribute Id value and returns the memory value present in the target for the
# specified attribute Id
OSALNVRead = t.CommandDef(
t.CommandType.SREQ,
0x08,
req_schema=(
t.Param("Id", t.uint16_t, "The Id of the NV item"),
t.Param(
"Offset",
t.uint8_t,
"Number of bytes offset from the beginning of the NV value",
),
),
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("Value", t.ShortBytes, "The value of the NV item"),
),
)
# write to a particular item in non-volatile memory. The command accepts an
# attribute Id and an attribute value. The attribute value is written to the
# location specified for the attribute Id in the target
OSALNVWrite = t.CommandDef(
t.CommandType.SREQ,
0x09,
req_schema=(
t.Param("Id", t.uint16_t, "The Id of the NV item"),
t.Param(
"Offset",
t.uint8_t,
"Number of bytes offset from the beginning of the NV value",
),
t.Param("Value", t.ShortBytes, "The value of the NV item"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# delete an item from the non-volatile memory. The ItemLen parameter must match
# the length of the NV item or the command will fail
OSALNVDelete = t.CommandDef(
t.CommandType.SREQ,
0x12,
req_schema=(
t.Param("Id", t.uint16_t, "The Id of the NV item"),
t.Param("ItemLen", t.uint16_t, "Number of bytes in the NV item"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# get the length of an item in non-volatile memory. A returned length of zero
# indicates that the NV item does not exist
OSALNVLength = t.CommandDef(
t.CommandType.SREQ,
0x13,
req_schema=(t.Param("Id", t.uint16_t, "The Id of the NV item"),),
rsp_schema=(t.Param("ItemLen", t.uint16_t, "Number of bytes in the NV item"),),
)
SetJammerParameters = t.CommandDef(
t.CommandType.SREQ,
0x15,
req_schema=(
t.Param(
"ContinuousEvents",
t.uint16_t,
"Number of continuous events needed to detect Jamming",
),
t.Param("HighNoiseLevel", t.uint8_t, "Noise Level needed to be a Jam"),
t.Param(
"DetectPeriodTime",
t.uint32_t,
"The time between each noise level reading",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
    # start a timer event. The event will expire after the indicated amount of time
# and a notification will be sent back to the tester
OSALStartTimer = t.CommandDef(
t.CommandType.SREQ,
0x0A,
req_schema=(
t.Param("Id", t.uint8_t, "The Id of the timer event (0-3)"),
t.Param("Timeout", t.uint16_t, "Timer timeout in millliseconds"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# stop a timer event
OSALStopTimer = t.CommandDef(
t.CommandType.SREQ,
0x0B,
req_schema=(t.Param("Id", t.uint8_t, "The Id of the timer event (0-3)"),),
rsp_schema=t.STATUS_SCHEMA,
)
# get a random 16-bit number
Random = t.CommandDef(
t.CommandType.SREQ,
0x0C,
req_schema=(),
rsp_schema=(t.Param("Value", t.uint16_t, "The random value"),),
)
# read a value from the ADC based on specified channel and resolution
ADCRead = t.CommandDef(
t.CommandType.SREQ,
0x0D,
req_schema=(
t.Param("Channel", ADCChannel, "The channel of ADC to read"),
t.Param(
"Resolution",
ADCResolution,
"Resolution of the reading: 8/10/12/14 bits",
),
),
rsp_schema=(t.Param("Value", t.uint16_t, "Value of ADC channel"),),
)
# control the 4 GPIO pins on the CC2530-ZNP build
GPIO = t.CommandDef(
t.CommandType.SREQ,
0x0E,
req_schema=(
t.Param(
"Operation",
GPIOOperation,
"Specifies type of operation on GPIO pins",
),
t.Param("Value", t.uint8_t, "GPIO value for specified operation"),
),
rsp_schema=(t.Param("Value", t.uint8_t, "GPIO value"),),
)
# tune intricate or arcane settings at runtime
StackTune = t.CommandDef(
t.CommandType.SREQ,
0x0F,
req_schema=(
t.Param(
"Operation",
StackTuneOperation,
"Specifies type of operation on GPIO pins",
),
t.Param("Value", t.uint8_t, "Tuning value"),
),
rsp_schema=(
(t.Param("Value", t.uint8_t, "Applicable status of the tuning operation"),)
),
)
# set the target system date and time. The time can be specified in
# "seconds since 00:00:00 on January 1, 2000"
# or in parsed date/time components
SetTime = t.CommandDef(
t.CommandType.SREQ,
0x10,
req_schema=(
t.Param(
"UTCTime",
t.uint32_t,
"Number of seconds since 00:00:00 on Jan 2000",
),
t.Param("Hour", t.uint8_t, "Hour of the day (0 -- 23)"),
t.Param("Minute", t.uint8_t, "Minute of the hour (0 -- 59)"),
t.Param("Second", t.uint8_t, "Seconds of the minute (0 -- 59)"),
t.Param("Month", t.uint8_t, "Month of the year (1 -- 12)"),
t.Param("Day", t.uint8_t, "Day of the month (1 -- 31)"),
t.Param("Year", t.uint16_t, "Year (2000 -- )"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# get the target system date and time. The time is returned in
# "seconds since 00:00:00 on January 1, 2000" and parsed date/time components
GetTime = t.CommandDef(
t.CommandType.SREQ,
0x11,
req_schema=(),
rsp_schema=(
t.Param(
"UTCTime",
t.uint32_t,
"Number of seconds since 00:00:00 on Jan 2000",
),
t.Param("Hour", t.uint8_t, "Hour of the day (0 -- 23)"),
t.Param("Minute", t.uint8_t, "Minute of the hour (0 -- 59)"),
t.Param("Second", t.uint8_t, "Seconds of the minute (0 -- 59)"),
t.Param("Month", t.uint8_t, "Month of the year (1 -- 12)"),
t.Param("Day", t.uint8_t, "Day of the month (1 -- 31)"),
t.Param("Year", t.uint16_t, "Year (2000 -- )"),
),
)
# set the target system radio transmit power. The returned TX power is the actual
# setting applied to the radio - nearest characterized value for the specific
# radio
SetTxPower = t.CommandDef(
t.CommandType.SREQ,
0x14,
req_schema=(t.Param("TXPower", t.int8s, "Requested TX power setting, in dBm"),),
# XXX: Z-Stack 3.30+ returns SUCCESS or INVALID_PARAMETER.
        # Z-Stack 1.2 and 3.0 return the closest TX power setting.
rsp_schema=(
t.Param("StatusOrPower", t.int8s, "Status code or applied power setting"),
),
)
# initialize the statistics table in NV memory
ZDiagsInitStats = t.CommandDef(
t.CommandType.SREQ, 0x17, req_schema=(), rsp_schema=t.STATUS_SCHEMA
)
# clear the statistics table. To clear data in NV (including the Boot
# Counter) the clearNV flag shall be set to TRUE
ZDiagsClearStats = t.CommandDef(
t.CommandType.SREQ,
0x18,
req_schema=(
t.Param("ClearNV", t.Bool, "True -- clear statistics in NV memory"),
),
rsp_schema=(t.Param("SycClock", t.uint32_t, "Milliseconds since last reset"),),
)
# read a specific system (attribute) ID statistics and/or metrics value
ZDiagsGetStats = t.CommandDef(
t.CommandType.SREQ,
0x19,
req_schema=(
# as defined in ZDiags.h
t.Param("AttributeId", t.uint16_t, "System diagnostics attribute ID"),
),
rsp_schema=(t.Param("Value", t.uint32_t, "Value of the requested attribute"),),
)
# restore the statistics table from NV into the RAM table
ZDiagsRestoreStatsNV = t.CommandDef(
t.CommandType.SREQ, 0x1A, req_schema=(), rsp_schema=t.STATUS_SCHEMA
)
# save the statistics table from RAM to NV
ZDiagsSaveStatsToNV = t.CommandDef(
t.CommandType.SREQ,
0x1B,
req_schema=(),
rsp_schema=(t.Param("SycClock", t.uint32_t, "Milliseconds since last reset"),),
)
# Same as OSALNVRead but with a 16-bit offset
OSALNVReadExt = t.CommandDef(
t.CommandType.SREQ,
0x1C,
req_schema=(
t.Param("Id", t.uint16_t, "The Id of the NV item"),
t.Param(
"Offset",
t.uint16_t,
"Number of bytes offset from the beginning of the NV value",
),
),
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("Value", t.ShortBytes, "The value of the NV item"),
),
)
# Same as OSALNVWrite but with a 16-bit offset
OSALNVWriteExt = t.CommandDef(
t.CommandType.SREQ,
0x1D,
req_schema=(
t.Param("Id", t.uint16_t, "The Id of the NV item"),
t.Param(
"Offset",
                t.uint16_t,  # XXX: don't trust the docs! This is *not* 8 bits.
"Number of bytes offset from the beginning of the NV value",
),
t.Param("Value", t.LongBytes, "The value of the NV item"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# attempt to create an item in non-volatile memory
NVCreate = t.CommandDef(
t.CommandType.SREQ,
0x30,
req_schema=(
t.Param("SysId", t.uint8_t, "System ID of the NV item"),
t.Param("ItemId", t.uint16_t, "Item ID of the NV item"),
t.Param("SubId", t.uint16_t, "Sub ID of the NV item"),
t.Param("Length", t.uint32_t, "Number of bytes in the NV item"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# attempt to delete an item in non-volatile memory
NVDelete = t.CommandDef(
t.CommandType.SREQ,
0x31,
req_schema=(
t.Param("SysId", t.uint8_t, "System ID of the NV item"),
t.Param("ItemId", t.uint16_t, "Item ID of the NV item"),
t.Param("SubId", t.uint16_t, "Sub ID of the NV item"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# get the length of an item in non-volatile memory
NVLength = t.CommandDef(
t.CommandType.SREQ,
0x32,
req_schema=(
t.Param("SysId", t.uint8_t, "System ID of the NV item"),
t.Param("ItemId", t.uint16_t, "Item ID of the NV item"),
t.Param("SubId", t.uint16_t, "Sub ID of the NV item"),
),
rsp_schema=(t.Param("Length", t.uint32_t, "Length of NV item"),),
)
# read an item in non-volatile memory
NVRead = t.CommandDef(
t.CommandType.SREQ,
0x33,
req_schema=(
t.Param("SysId", t.uint8_t, "System ID of the NV item"),
t.Param("ItemId", t.uint16_t, "Item ID of the NV item"),
t.Param("SubId", t.uint16_t, "Sub ID of the NV item"),
t.Param(
"Offset",
t.uint16_t,
"Number of bytes offset from the beginning of the NV value",
),
t.Param("Length", t.uint8_t, "Length of data to read"),
),
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("Value", t.ShortBytes, "Value of the NV item read"),
),
)
# write an item in non-volatile memory
NVWrite = t.CommandDef(
t.CommandType.SREQ,
0x34,
req_schema=(
t.Param("SysId", t.uint8_t, "System ID of the NV item"),
t.Param("ItemId", t.uint16_t, "Item ID of the NV item"),
t.Param("SubId", t.uint16_t, "Sub ID of the NV item"),
t.Param(
"Offset",
t.uint16_t,
"Number of bytes offset from the beginning of the NV value",
),
# XXX: the spec has length as a 16-bit integer but then shows it as
# an 8-bit integer in the table below, which matches the code
t.Param("Value", t.ShortBytes, "Value of the NV item to write"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# update an item in non-volatile memory
NVUpdate = t.CommandDef(
t.CommandType.SREQ,
0x35,
req_schema=(
t.Param("SysId", t.uint8_t, "System ID of the NV item"),
t.Param("ItemId", t.uint16_t, "Item ID of the NV item"),
t.Param("SubId", t.uint16_t, "Sub ID of the NV item"),
t.Param("Value", t.ShortBytes, "Value of the NV item to update"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# compact the active page in non-volatile memory
NVCompact = t.CommandDef(
t.CommandType.SREQ,
0x36,
req_schema=(
t.Param(
"Threshold",
t.uint16_t,
"Compaction occurs when NV bytes are less than this value",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# MT SYS Callbacks
# This command is sent by the device to indicate the reset
ResetInd = t.CommandDef(
t.CommandType.AREQ,
0x80,
rsp_schema=(
t.Param("Reason", t.ResetReason, "Reason for the reset"),
t.Param("TransportRev", t.uint8_t, "Transport protocol revision"),
t.Param("ProductId", t.uint8_t, "Product ID"),
t.Param("MajorRel", t.uint8_t, "Software major release number"),
t.Param("MinorRel", t.uint8_t, "Software minor release number"),
t.Param("MaintRel", t.uint8_t, "Software maintenance release number"),
),
)
# This command is sent by the device to indicate a specific time has been expired
OSALTimerExpired = t.CommandDef(
t.CommandType.AREQ,
0x81,
rsp_schema=(t.Param("Id", t.uint8_t, "The Id of the timer event (0-3)"),),
)
JammerInd = t.CommandDef(
t.CommandType.AREQ,
0x82,
rsp_schema=(
t.Param(
"JammerInd",
t.Bool,
"TRUE if jammer detected, FALSE if changed to undetected",
),
),
) | zigpy-znp | /zigpy-znp-0.11.4.tar.gz/zigpy-znp-0.11.4/zigpy_znp/commands/sys.py | sys.py |
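# Illustrative sketch, not part of the MT API surface: SYS.SetTime/GetTime
# express UTCTime as "seconds since 00:00:00 on January 1, 2000". A helper
# along these lines could compute that value; the name is hypothetical.
def _zigbee_utc_time_example(when: "datetime.datetime") -> int:
    import datetime
    # The Zigbee epoch per the SetTime/GetTime descriptions above
    return int((when - datetime.datetime(2000, 1, 1)).total_seconds())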
import zigpy_znp.types as t
class TransmitOptions(t.enum_flag_uint8):
NONE = 0
# Will force the message to use Wildcard ProfileID
WILDCARD_PROFILEID = 0x02
# Will force APS to callback to preprocess before calling NWK layer
APS_PREPROCESS = 0x04
LIMIT_CONCENTRATOR = 0x08
ACK_REQUEST = 0x10
# Suppress Route Discovery for intermediate routes (route discovery performed for
# initiating device)
SUPPRESS_ROUTE_DISC_NETWORK = 0x20
ENABLE_SECURITY = 0x40
SKIP_ROUTING = 0x80
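# Illustrative only: these transmit options are bit flags and combine bitwise,
# e.g. a unicast requesting an APS ACK while suppressing intermediate route
# discovery. This constant is not referenced elsewhere in this module.
_EXAMPLE_TX_OPTIONS = (
    TransmitOptions.ACK_REQUEST | TransmitOptions.SUPPRESS_ROUTE_DISC_NETWORK
)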
class LatencyReq(t.enum_uint8):
NoLatencyReqs = 0x00
FastBeacons = 0x01
SlowBeacons = 0x02
class AF(t.CommandsBase, subsystem=t.Subsystem.AF):
# This command enables the tester to register an application's endpoint description
Register = t.CommandDef(
t.CommandType.SREQ,
0x00,
req_schema=(
t.Param("Endpoint", t.uint8_t, "Endpoint Id of the device"),
t.Param("ProfileId", t.uint16_t, "Application Profile ID"),
t.Param("DeviceId", t.uint16_t, "Device Description ID"),
t.Param("DeviceVersion", t.uint8_t, "Device version number"),
t.Param(
"LatencyReq",
LatencyReq,
(
"Specifies latency reqs: 0x00 - None, "
"0x01 -- fast beacons, "
"0x02 -- slow beacons"
),
),
t.Param("InputClusters", t.ClusterIdList, "Input cluster list"),
t.Param("OutputClusters", t.ClusterIdList, "Output cluster list"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# This command is used by the tester to build and send a message through AF layer
DataRequest = t.CommandDef(
t.CommandType.SREQ,
0x01,
req_schema=(
t.Param("DstAddr", t.NWK, "Short address of the destination device"),
t.Param("DstEndpoint", t.uint8_t, "Endpoint of the destination device"),
t.Param("SrcEndpoint", t.uint8_t, "Endpoint of the source device"),
t.Param("ClusterId", t.ClusterId, "Cluster ID"),
t.Param("TSN", t.uint8_t, "Transaction Sequence Number"),
t.Param(
"Options",
TransmitOptions,
(
"Transmit options bitmask: bit 4 -- APS Ack, "
"bit 5 -- Route Discovery, "
"bit 6 -- APS security, "
"bit 7 -- Skip routing"
),
),
t.Param(
"Radius",
t.uint8_t,
"Specifies the number of hops allowed delivering the message",
),
t.Param("Data", t.ShortBytes, "Data request"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# This extended form of the AF_DATA_REQUEST must be used to send an
# inter-pan message
DataRequestExt = t.CommandDef(
t.CommandType.SREQ,
0x02,
req_schema=(
t.Param(
"DstAddrModeAddress",
t.AddrModeAddress,
"Destination address mode and address",
),
t.Param("DstEndpoint", t.uint8_t, "Endpoint of the destination device"),
t.Param(
"DstPanId",
t.PanId,
(
"PanId of the destination device: 0x0000==Intra-Pan, "
"otherwise Inter-Pan"
),
),
t.Param("SrcEndpoint", t.uint8_t, "Endpoint of the source device"),
t.Param("ClusterId", t.ClusterId, "Cluster ID"),
t.Param("TSN", t.uint8_t, "Transaction Sequence Number"),
t.Param(
"Options",
TransmitOptions,
(
"Transmit options bitmask: bit 4 -- APS Ack, "
"bit 5 -- Route Discovery, "
"bit 6 -- APS security, "
"bit 7 -- Skip routing"
),
),
t.Param(
"Radius",
t.uint8_t,
"Specifies the number of hops allowed delivering the message",
),
t.Param("Data", t.LongBytes, "Data request"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# This command is used by the tester to build and send a message through AF layer
# using source routing
DataRequestSrcRtg = t.CommandDef(
t.CommandType.SREQ,
0x03,
req_schema=(
t.Param("DstAddr", t.NWK, "Short address of the destination device"),
t.Param("DstEndpoint", t.uint8_t, "Endpoint of the destination device"),
t.Param("SrcEndpoint", t.uint8_t, "Endpoint of the source device"),
t.Param("ClusterId", t.ClusterId, "Cluster ID"),
t.Param("TSN", t.uint8_t, "Transaction Sequence Number"),
t.Param(
"Options",
TransmitOptions,
(
"Transmit options bitmask: bit 4 -- APS Ack, "
"bit 5 -- Route Discovery, "
"bit 6 -- APS security, "
"bit 7 -- Skip routing"
),
),
t.Param(
"Radius",
t.uint8_t,
"Specifies the number of hops allowed delivering the message",
),
t.Param("SourceRoute", t.NWKList, "Relay list"),
t.Param("Data", t.ShortBytes, "Data request"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# XXX: UNDOCUMENTED
Delete = t.CommandDef(
t.CommandType.SREQ,
0x04,
req_schema=(t.Param("Endpoint", t.uint8_t, "Application Endpoint to delete"),),
rsp_schema=t.STATUS_SCHEMA,
)
# Inter-Pan control command and data
InterPanCtl = t.CommandDef(
t.CommandType.SREQ,
0x10,
req_schema=(
t.Param(
"Command",
t.InterPanCommand,
(
"0x00 InterPanClr Proxy call to StubAPS_SetIntraPanChannel() to"
" switch channel back to the NIB-specified channel. "
"0x01 InterPanSet Proxy call to StubAPS_SetInterPanChannel() "
"with the 1-byte channel specified. "
"0x02 InterPanReg If the 1-byte Endpoint specified by the data "
"argument is found by invoking afFindEndPointDesc(), then proxy"
" a call to StubAPS_RegisterApp() with the pointer to the "
"endPointDesc_t found (i.e. the Endpoint must already be "
"registered with AF)"
),
),
t.Param("Data", t.Bytes, "Data"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# Huge AF data request data buffer store command and data
DataStore = t.CommandDef(
t.CommandType.SREQ,
0x11,
req_schema=(
t.Param(
"Index",
t.uint16_t,
(
"Specifies the index into the outgoing data request data buffer"
"to start the storing of this chunk of data"
),
),
t.Param("Data", t.ShortBytes, "Data"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# Huge AF incoming message data buffer retrieve command
DataRetrieve = t.CommandDef(
t.CommandType.SREQ,
0x12,
req_schema=(
t.Param("TimeStamp", t.uint32_t, "The timestamp of the message"),
t.Param(
"Index",
t.uint16_t,
(
"Specifies the index into the outgoing data request data buffer"
"to start the storing of this chunk of data"
),
),
t.Param(
"Length",
t.uint8_t,
(
"A length of zero is special and triggers the freeing of the "
"corresponding incoming message"
),
),
),
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("Data", t.ShortBytes, "Data"),
),
)
# proxy for afAPSF_ConfigSet()
APSFConfigSet = t.CommandDef(
t.CommandType.SREQ,
0x13,
req_schema=(
t.Param("Endpoint", t.uint8_t, "Endpoint for which to set fragmentation"),
t.Param(
"FrameDelay",
t.uint8_t,
"APS Fragmentation inter-frame delay in milliseconds",
),
t.Param("WindowSize", t.uint8_t, "APS Fragmentation window size"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# AF Callbacks
# This command is sent by the device to the user after it receives a data request
DataConfirm = t.CommandDef(
t.CommandType.AREQ,
0x80,
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("Endpoint", t.uint8_t, "Endpoint of the device"),
t.Param("TSN", t.uint8_t, "Transaction Sequence Number"),
),
)
# This callback message is in response to incoming data to any of the registered
# endpoints on this device
IncomingMsg = t.CommandDef(
t.CommandType.AREQ,
0x81,
rsp_schema=(
t.Param("GroupId", t.GroupId, "The group ID of the device"),
t.Param("ClusterId", t.ClusterId, "Cluster ID"),
t.Param(
"SrcAddr", t.NWK, "Short address of the device sending the message"
),
t.Param("SrcEndpoint", t.uint8_t, "Endpoint of the source device"),
t.Param("DstEndpoint", t.uint8_t, "Endpoint of the destination device"),
t.Param(
"WasBroadcast", t.Bool, "Was the incoming message broadcast or not"
),
t.Param("LQI", t.uint8_t, "Link quality measured during reception"),
t.Param("SecurityUse", t.Bool, "Is security in use or not"),
t.Param("TimeStamp", t.uint32_t, "The timestamp of the message"),
t.Param("TSN", t.uint8_t, "Transaction Sequence Number"),
t.Param("Data", t.ShortBytes, "Data"),
# https://e2e.ti.com/support/wireless-connectivity/zigbee-and-thread/f/158/t/455787
t.Param("MacSrcAddr", t.NWK, "UNDOCUMENTED: MAC Source address"),
t.Param(
"MsgResultRadius", t.uint8_t, "UNDOCUMENTED: Messages result radius"
),
),
)
# This callback message is in response to incoming data to any of the registered
# endpoints on this device when the code is compiled with the INTER_PAN
# flag defined
IncomingMsgExt = t.CommandDef(
t.CommandType.AREQ,
0x82,
rsp_schema=(
t.Param("GroupId", t.GroupId, "The group ID of the device"),
t.Param("ClusterId", t.ClusterId, "Cluster ID"),
t.Param(
"SrcAddrModeAddress",
t.AddrModeAddress,
"Address of the device sending the message",
),
t.Param("SrcEndpoint", t.uint8_t, "Endpoint of the source device"),
t.Param("SrcPanId", t.PanId, "Source PanId of the message"),
t.Param("DstEndpoint", t.uint8_t, "Endpoint of the destination device"),
t.Param(
"WasBroadcast", t.Bool, "Was the incoming message broadcast or not"
),
t.Param("LQI", t.uint8_t, "Link quality measured during reception"),
t.Param("SecurityUse", t.uint8_t, "Is security in use or not"),
t.Param("TimeStamp", t.uint32_t, "The timestamp of the message"),
t.Param("TSN", t.uint8_t, "Transaction Sequence Number"),
t.Param("Data", t.LongBytes, "Data"),
# https://e2e.ti.com/support/wireless-connectivity/zigbee-and-thread/f/158/t/455787
t.Param("MacSrcAddr", t.NWK, "UNDOCUMENTED: MAC Source address"),
t.Param(
"MsgResultRadius", t.uint8_t, "UNDOCUMENTED: Messages result radius"
),
),
)
# sent by the device to the user when it determines that an error occurred during
# a reflected message
ReflectError = t.CommandDef(
t.CommandType.AREQ,
0x83,
rsp_schema=(
t.Param("Status", t.Status, "Status is either Success (0) or Failure (1)"),
t.Param("Endpoint", t.uint8_t, "Endpoint of the device"),
t.Param("TSN", t.uint8_t, "Transaction Sequence Number"),
t.Param("AddrMode", t.AddrMode, "Format of the address"),
t.Param("Dst", t.NWK, "Destination address -- depends on AddrMode"),
),
) | zigpy-znp | /zigpy-znp-0.11.4.tar.gz/zigpy-znp-0.11.4/zigpy_znp/commands/af.py | af.py |
import zigpy_znp.types as t
class TimeoutIndex(t.enum_uint8):
Seconds_10 = 0x00
Minutes_2 = 0x01
Minutes_4 = 0x02
Minutes_8 = 0x03
Minutes_16 = 0x04
Minutes_32 = 0x05
Minutes_64 = 0x06
Minutes_128 = 0x07
Minutes_256 = 0x08
Minutes_512 = 0x09
Minutes_1024 = 0x0A
Minutes_2048 = 0x0B
Minutes_4096 = 0x0C
Minutes_8192 = 0x0D
Minutes_16384 = 0x0E
class CentralizedLinkKeyMode(t.enum_uint8):
UseDefault = 0x00
UseProvidedInstallCode = 0x01
UseProvidedInstallCodeAndFallbackToDefault = 0x02
UseProvidedAPSLinkKey = 0x03
UseProvidedAPSLinkKeyAndFallbackToDefault = 0x04
class BDBCommissioningStatus(t.enum_uint8):
Success = 0x00
InProgress = 0x01
NoNetwork = 0x02
TLTargetFailure = 0x03
TLNotAaCapable = 0x04
TLNoScanResponse = 0x05
TLNotPermitted = 0x06
TCLKExFailure = 0x07
FormationFailure = 0x08
FBTargetInProgress = 0x09
FBInitiatorInProgress = 0x0A
FBNoIdentifyQueryResponse = 0x0B
FBBindingTableFull = 0x0C
NetworkRestored = 0x0D
Failure = 0x0E
class BDBCommissioningMode(t.enum_flag_uint8):
NONE = 0
InitiatorTouchLink = 1 << 0
NwkSteering = 1 << 1
NwkFormation = 1 << 2
FindingBinding = 1 << 3
Touchlink = 1 << 4
ParentLost = 1 << 5
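# Illustrative only: commissioning modes are bit flags and may be combined in a
# single BDBStartCommissioning request, e.g. forming a network and then opening
# it for steering. This constant is not referenced elsewhere in this module.
_EXAMPLE_COMMISSIONING_MODES = (
    BDBCommissioningMode.NwkFormation | BDBCommissioningMode.NwkSteering
)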
class InstallCodeFormat(t.enum_uint8):
InstallCodeAndCRC = 0x01
KeyDerivedFromInstallCode = 0x02
class AppConfig(t.CommandsBase, subsystem=t.Subsystem.APPConfig):
    # sets the network frame counter to the specified value. For projects with
    # multiple frame counter instances, this sets the frame counter of the
    # current network
SetNwkFrameCounter = t.CommandDef(
t.CommandType.SREQ,
0xFF,
req_schema=(t.Param("FrameCounterValue", t.uint32_t, "network frame counter"),),
rsp_schema=t.STATUS_SCHEMA,
)
# Set the default value used by parent device to expire legacy child devices
SetDefaultRemoteEndDeviceTimeout = t.CommandDef(
t.CommandType.SREQ,
0x01,
req_schema=(
t.Param("TimeoutIndex", TimeoutIndex, "0x00 -- 10s otherwise 2^N minutes"),
),
rsp_schema=t.STATUS_SCHEMA,
)
    # Sets, on an end device (ZED), the timeout value to be sent to the parent
    # device for child expiry
SetEndDeviceTimeout = t.CommandDef(
t.CommandType.SREQ,
0x02,
req_schema=(
t.Param("TimeoutIndex", TimeoutIndex, "0x00 -- 10s otherwise 2^N minutes"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# Set the AllowRejoin TC policy
SetAllowRejoinTCPolicy = t.CommandDef(
t.CommandType.SREQ,
0x03,
req_schema=(
t.Param(
"AllowRejoin",
t.Bool,
"whether or not the Trust center allows rejoins with well-known key",
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# Set the commissioning methods to be executed. Initialization of BDB is executed
# with this call, regardless of its parameters
BDBStartCommissioning = t.CommandDef(
t.CommandType.SREQ,
0x05,
req_schema=(t.Param("Mode", BDBCommissioningMode, "Commissioning mode"),),
rsp_schema=t.STATUS_SCHEMA,
)
# Set BDB primary or secondary channel masks
BDBSetChannel = t.CommandDef(
t.CommandType.SREQ,
0x08,
req_schema=(
t.Param("IsPrimary", t.Bool, "True -- is primary channel"),
t.Param("Channel", t.Channels, "Channel set mask"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# Add a preconfigured key (plain key or IC) to Trust Center device
BDBAddInstallCode = t.CommandDef(
t.CommandType.SREQ,
0x04,
req_schema=(
t.Param(
"InstallCodeFormat",
InstallCodeFormat,
("0x01 -- Install code + CRC 0x02 -- Key derived from install code"),
),
t.Param("IEEE", t.EUI64, "IEEE address of the joining device"),
t.Param(
"InstallCode", t.Bytes, "16 bytes for derived key, 18 for IC + CRC"
),
),
rsp_schema=t.STATUS_SCHEMA,
)
# Set the policy flag on Trust Center device to mandate or not the TCLK
# exchange procedure
BDBSetTcRequireKeyExchange = t.CommandDef(
t.CommandType.SREQ,
0x09,
req_schema=(
t.Param("BdbTrustCenterRequireKeyExchange", t.Bool, "Require key exchange"),
),
rsp_schema=t.STATUS_SCHEMA,
)
# Sets the policy to mandate or not the usage of an Install Code upon joining
BDBSetJoinUsesInstallCodeKey = t.CommandDef(
t.CommandType.SREQ,
0x06,
req_schema=(t.Param("BdbJoinUsesInstallCodeKey", t.Bool, "Use install code"),),
rsp_schema=t.STATUS_SCHEMA,
)
# On joining devices, set the default key or an install code to attempt
# to join the network
BDBSetActiveDefaultCentralizedKey = t.CommandDef(
t.CommandType.SREQ,
0x07,
req_schema=(
t.Param(
"CentralizedLinkKeyModes",
CentralizedLinkKeyMode,
(
"which key will be used when performing association "
"to a centralized network"
),
),
t.Param("InstallCode", t.Bytes, "key in any of its formats"),
),
rsp_schema=t.STATUS_SCHEMA,
)
    # Instruct the ZED to try to rejoin its previous network. Use only in ZED devices
BDBZedAttemptRecoverNWK = t.CommandDef(
t.CommandType.SREQ, 0x0A, req_schema=(), rsp_schema=t.STATUS_SCHEMA
)
# MT_APP_CONFIG Callbacks
# Callback to receive notifications from BDB process
BDBCommissioningNotification = t.CommandDef(
t.CommandType.AREQ,
0x80,
rsp_schema=(
t.Param(
"Status",
BDBCommissioningStatus,
"Status of the commissioning mode notified",
),
t.Param(
"Mode",
BDBCommissioningMode,
"Commissioning mode to which status is related",
),
t.Param(
"RemainingModes",
BDBCommissioningMode,
(
"Bitmask of the remaining commissioning modes after "
"this notification"
),
),
),
) | zigpy-znp | /zigpy-znp-0.11.4.tar.gz/zigpy-znp-0.11.4/zigpy_znp/commands/app_config.py | app_config.py |
from __future__ import annotations
import typing
import logging
import dataclasses
import zigpy.state
import zigpy.zdo.types as zdo_t
import zigpy_znp.const as const
import zigpy_znp.types as t
from zigpy_znp.api import ZNP
from zigpy_znp.types.nvids import ExNvIds, OsalNvIds
LOGGER = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class StoredDevice:
node_info: zigpy.state.NodeInfo
key: zigpy.state.Key | None
is_child: bool = False
def replace(self, **kwargs) -> StoredDevice:
return dataclasses.replace(self, **kwargs)
def rotate(lst: list, n: int) -> list:
return lst[n:] + lst[:n]
def compute_key(ieee: t.EUI64, tclk_seed: t.KeyData, shift: int) -> t.KeyData:
rotated_tclk_seed = rotate(tclk_seed, n=shift)
return t.KeyData([a ^ b for a, b in zip(rotated_tclk_seed, 2 * ieee.serialize())])
def compute_tclk_seed(ieee: t.EUI64, key: t.KeyData, shift: int) -> t.KeyData:
rotated_tclk_seed = bytes(a ^ b for a, b in zip(key, 2 * ieee.serialize()))
return t.KeyData(rotate(rotated_tclk_seed, n=-shift))
def find_key_shift(ieee: t.EUI64, key: t.KeyData, tclk_seed: t.KeyData) -> int | None:
for shift in range(0x00, 0x0F + 1):
if tclk_seed == compute_tclk_seed(ieee, key, shift):
return shift
return None
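# A minimal sketch (not used by the library) showing that compute_key() and
# compute_tclk_seed() invert each other for a given shift. The address, seed,
# and shift below are made up.
def _tclk_roundtrip_example() -> None:
    ieee = t.EUI64.convert("00:11:22:33:44:55:66:77")
    seed = t.KeyData(list(range(16)))
    key = compute_key(ieee, seed, shift=3)
    # Hashing the seed into a key and then un-hashing the key recovers the seed
    assert compute_tclk_seed(ieee, key, shift=3) == seed
    # And brute-forcing the shift finds the one used above
    assert find_key_shift(ieee, key, seed) == 3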
def count_seed_matches(
keys: typing.Sequence[zigpy.state.Key], tclk_seed: t.KeyData
) -> int:
count = 0
for key in keys:
if find_key_shift(key.partner_ieee, key.key, tclk_seed) is not None:
count += 1
return count
def iter_seed_candidates(
keys: typing.Sequence[zigpy.state.Key],
) -> typing.Iterable[tuple[int, t.KeyData]]:
for key in keys:
        # Derive a seed candidate from each key; all rotations of a seed are
        # equivalent.
tclk_seed = compute_tclk_seed(key.partner_ieee, key.key, 0)
# And see how many other keys share this same seed
count = count_seed_matches(keys, tclk_seed)
yield count, tclk_seed
async def read_nwk_frame_counter(
    znp: ZNP, *, ext_pan_id: t.EUI64 | None = None
) -> t.uint32_t:
if ext_pan_id is None and znp.network_info is not None:
ext_pan_id = znp.network_info.extended_pan_id
if znp.version == 1.2:
key_info = await znp.nvram.osal_read(
OsalNvIds.NWKKEY, item_type=t.NwkActiveKeyItems
)
return key_info.FrameCounter
global_entry = None
if znp.version == 3.0:
entries = znp.nvram.osal_read_table(
OsalNvIds.LEGACY_NWK_SEC_MATERIAL_TABLE_START,
OsalNvIds.LEGACY_NWK_SEC_MATERIAL_TABLE_END,
item_type=t.NwkSecMaterialDesc,
)
else:
entries = znp.nvram.read_table(
item_id=ExNvIds.NWK_SEC_MATERIAL_TABLE,
item_type=t.NwkSecMaterialDesc,
)
async for entry in entries:
if entry.ExtendedPanID == ext_pan_id:
# Always prefer the entry for our current network
return entry.FrameCounter
elif entry.ExtendedPanID == t.EUI64.convert("FF:FF:FF:FF:FF:FF:FF:FF"):
# But keep track of the global entry if it already exists
global_entry = entry
if global_entry is None:
raise KeyError("No security material entry was found for this network")
return global_entry.FrameCounter
async def write_nwk_frame_counter(
    znp: ZNP, counter: t.uint32_t, *, ext_pan_id: t.EUI64 | None = None
) -> None:
if znp.version == 1.2:
key_info = await znp.nvram.osal_read(
OsalNvIds.NWKKEY, item_type=t.NwkActiveKeyItems
)
key_info.FrameCounter = counter
await znp.nvram.osal_write(OsalNvIds.NWKKEY, key_info)
return
if ext_pan_id is None:
ext_pan_id = znp.network_info.extended_pan_id
entry = t.NwkSecMaterialDesc(
FrameCounter=counter,
ExtendedPanID=ext_pan_id,
)
fill_entry = t.NwkSecMaterialDesc(
FrameCounter=0x00000000,
ExtendedPanID=t.EUI64.convert("00:00:00:00:00:00:00:00"),
)
# The security material tables are quite small (4 values) so it's simpler to just
# write them completely when updating the frame counter.
if znp.version == 3.0:
await znp.nvram.osal_write_table(
start_nvid=OsalNvIds.LEGACY_NWK_SEC_MATERIAL_TABLE_START,
end_nvid=OsalNvIds.LEGACY_NWK_SEC_MATERIAL_TABLE_END,
values=[entry],
fill_value=fill_entry,
)
else:
await znp.nvram.write_table(
item_id=ExNvIds.NWK_SEC_MATERIAL_TABLE,
values=[entry],
fill_value=fill_entry,
)
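# Hedged usage sketch, not a library code path: after restoring a backup it is
# typical to bump the frame counter well past its last-seen value so existing
# devices will accept our traffic. The increment below is arbitrary.
async def _bump_frame_counter_example(znp: ZNP) -> None:
    counter = await read_nwk_frame_counter(znp)
    await write_nwk_frame_counter(znp, t.uint32_t(counter + 2500))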
async def read_addr_manager_entries(znp: ZNP) -> typing.Sequence[t.AddrMgrEntry]:
if znp.version >= 3.30:
entries = [
entry
async for entry in znp.nvram.read_table(
item_id=ExNvIds.ADDRMGR,
item_type=t.AddrMgrEntry,
)
]
else:
entries = list(
await znp.nvram.osal_read(
OsalNvIds.ADDRMGR, item_type=t.AddressManagerTable
)
)
return entries
async def read_hashed_link_keys( # type:ignore[misc]
znp: ZNP, tclk_seed: t.KeyData
) -> typing.AsyncGenerator[zigpy.state.Key, None]:
if znp.version >= 3.30:
entries = znp.nvram.read_table(
item_id=ExNvIds.TCLK_TABLE,
item_type=t.TCLKDevEntry,
)
else:
entries = znp.nvram.osal_read_table(
start_nvid=OsalNvIds.LEGACY_TCLK_TABLE_START,
end_nvid=OsalNvIds.LEGACY_TCLK_TABLE_END,
item_type=t.TCLKDevEntry,
)
async for entry in entries:
if entry.extAddr == t.EUI64.convert("00:00:00:00:00:00:00:00"):
continue
# XXX: why do both of these types appear?
# assert entry.keyType == t.KeyType.NWK
# assert entry.keyType == t.KeyType.NONE
yield zigpy.state.Key(
key=compute_key(entry.extAddr, tclk_seed, entry.SeedShift_IcIndex),
tx_counter=entry.txFrmCntr,
rx_counter=entry.rxFrmCntr,
partner_ieee=entry.extAddr,
seq=0,
)
async def read_unhashed_link_keys(
znp: ZNP, addr_mgr_entries: typing.Sequence[t.AddrMgrEntry]
) -> typing.AsyncGenerator[zigpy.state.Key, None]:
if znp.version == 3.30:
link_key_offset_base = 0x0000
table = znp.nvram.read_table(
item_id=ExNvIds.APS_KEY_DATA_TABLE,
item_type=t.APSKeyDataTableEntry,
)
elif znp.version == 3.0:
link_key_offset_base = OsalNvIds.LEGACY_APS_LINK_KEY_DATA_START
table = znp.nvram.osal_read_table(
start_nvid=OsalNvIds.LEGACY_APS_LINK_KEY_DATA_START,
end_nvid=OsalNvIds.LEGACY_APS_LINK_KEY_DATA_END,
item_type=t.APSKeyDataTableEntry,
)
else:
return
aps_key_data_table = [entry async for entry in table]
# The link key table's size is dynamic so it has junk at the end
link_key_table_raw = await znp.nvram.osal_read(
OsalNvIds.APS_LINK_KEY_TABLE, item_type=t.Bytes
)
link_key_table = znp.nvram.deserialize(
link_key_table_raw, item_type=t.APSLinkKeyTable, allow_trailing=True
)
LOGGER.debug("Read APS link key table: %s", link_key_table)
for entry in link_key_table:
if entry.AuthenticationState != t.AuthenticationOption.AuthenticatedCBCK:
continue
key_table_entry = aps_key_data_table[entry.LinkKeyNvId - link_key_offset_base]
addr_mgr_entry = addr_mgr_entries[entry.AddressManagerIndex]
assert addr_mgr_entry.type & t.AddrMgrUserType.Security
yield zigpy.state.Key(
partner_ieee=addr_mgr_entry.extAddr,
key=key_table_entry.Key,
tx_counter=key_table_entry.TxFrameCounter,
rx_counter=key_table_entry.RxFrameCounter,
seq=0,
)
async def read_devices(
znp: ZNP, *, tclk_seed: t.KeyData | None
) -> typing.Sequence[StoredDevice]:
addr_mgr = await read_addr_manager_entries(znp)
devices = {}
for entry in addr_mgr:
if entry.extAddr in (
t.EUI64.convert("00:00:00:00:00:00:00:00"),
t.EUI64.convert("FF:FF:FF:FF:FF:FF:FF:FF"),
):
continue
elif entry.type == t.AddrMgrUserType.Default:
continue
elif entry.type in (
t.AddrMgrUserType.Assoc,
t.AddrMgrUserType.Assoc | t.AddrMgrUserType.Security,
t.AddrMgrUserType.Security,
):
is_child = bool(entry.type & t.AddrMgrUserType.Assoc)
devices[entry.extAddr] = StoredDevice(
node_info=zigpy.state.NodeInfo(
nwk=entry.nwkAddr,
ieee=entry.extAddr,
logical_type=(
zdo_t.LogicalType.EndDevice
if is_child
else zdo_t.LogicalType.Router
),
),
key=None,
is_child=is_child,
)
else:
raise ValueError(f"Unexpected entry type: {entry.type}")
async for key in read_hashed_link_keys(znp, tclk_seed):
if key.partner_ieee not in devices:
LOGGER.warning(
"Skipping hashed link key %s (tx: %s, rx: %s) for unknown device %s",
":".join(f"{b:02x}" for b in key.key),
key.tx_counter,
key.rx_counter,
key.partner_ieee,
)
continue
devices[key.partner_ieee] = devices[key.partner_ieee].replace(key=key)
async for key in read_unhashed_link_keys(znp, addr_mgr):
if key.partner_ieee not in devices:
LOGGER.warning(
"Skipping unhashed link key %s (tx: %s, rx: %s) for unknown device %s",
":".join(f"{b:02x}" for b in key.key),
key.tx_counter,
key.rx_counter,
key.partner_ieee,
)
continue
devices[key.partner_ieee] = devices[key.partner_ieee].replace(key=key)
return list(devices.values())
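# Illustrative sketch: log every stored device. Reading the TCLK seed from the
# TCLK_SEED NVID is an assumption about where Z-Stack 3 keeps it; this is not
# the library's backup routine.
async def _dump_stored_devices_example(znp: ZNP) -> None:
    tclk_seed = await znp.nvram.osal_read(OsalNvIds.TCLK_SEED, item_type=t.KeyData)
    for device in await read_devices(znp, tclk_seed=tclk_seed):
        LOGGER.info(
            "%s (NWK: 0x%04X, child: %s, has key: %s)",
            device.node_info.ieee,
            device.node_info.nwk,
            device.is_child,
            device.key is not None,
        )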
async def write_addr_manager_entries(
znp: ZNP, entries: typing.Sequence[t.AddrMgrEntry]
) -> None:
if znp.version >= 3.30:
await znp.nvram.write_table(
item_id=ExNvIds.ADDRMGR,
values=entries,
fill_value=const.EMPTY_ADDR_MGR_ENTRY_ZSTACK3,
)
return
# On older devices this "table" is a single array in NVRAM whose size is dependent
# on compile-time constants
old_entries = await znp.nvram.osal_read(
OsalNvIds.ADDRMGR, item_type=t.AddressManagerTable
)
    # The `znp.version >= 3.30` case already returned above, so only Z-Stack 1
    # style entries remain here
    new_entries = len(old_entries) * [const.EMPTY_ADDR_MGR_ENTRY_ZSTACK1]
# Purposefully throw an `IndexError` if we are trying to write too many entries
for index, entry in enumerate(entries):
new_entries[index] = entry
await znp.nvram.osal_write(OsalNvIds.ADDRMGR, t.AddressManagerTable(new_entries))
def find_optimal_tclk_seed(
devices: typing.Sequence[StoredDevice], tclk_seed: t.KeyData
) -> t.KeyData:
keys = [d.key for d in devices if d.key]
if not keys:
return tclk_seed
best_count, best_seed = max(sorted(iter_seed_candidates(keys)))
tclk_count = count_seed_matches(keys, tclk_seed)
assert tclk_count <= best_count
# Prefer the existing TCLK seed if it's as good as the others
if tclk_count == best_count:
return tclk_seed
return best_seed
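# Sketch of the intended flow, using only functions defined in this module
# (assumes `znp` is a connected ZNP instance and `old_seed` its current TCLK seed):
#   devices = await read_devices(znp, tclk_seed=old_seed)
#   new_seed = find_optimal_tclk_seed(devices, old_seed)
#   await write_devices(znp, devices, tclk_seed=new_seed)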
async def write_devices(
znp: ZNP,
devices: typing.Sequence[StoredDevice],
counter_increment: t.uint32_t = 2500,
    tclk_seed: t.KeyData | None = None,
) -> None:
hashed_link_key_table = []
aps_key_data_table = []
link_key_table = t.APSLinkKeyTable()
for index, dev in enumerate(devices):
if dev.key is None:
continue
shift = find_key_shift(dev.node_info.ieee, dev.key.key, tclk_seed)
if shift is not None:
# Hashed link keys can be written into the TCLK table
hashed_link_key_table.append(
t.TCLKDevEntry(
txFrmCntr=dev.key.tx_counter + counter_increment,
rxFrmCntr=dev.key.rx_counter,
extAddr=dev.node_info.ieee,
keyAttributes=t.KeyAttributes.VERIFIED_KEY,
keyType=t.KeyType.NONE,
SeedShift_IcIndex=shift,
)
)
else:
# Unhashed link keys are written to another table
aps_key_data_table.append(
t.APSKeyDataTableEntry(
Key=dev.key.key,
TxFrameCounter=dev.key.tx_counter + counter_increment,
RxFrameCounter=dev.key.rx_counter,
)
)
if znp.version >= 3.30:
start = 0x0000
else:
start = OsalNvIds.LEGACY_APS_LINK_KEY_DATA_START
offset = len(aps_key_data_table) - 1
# And their position within the above table is stored in this table
link_key_table.append(
t.APSLinkKeyTableEntry(
AddressManagerIndex=index,
LinkKeyNvId=start + offset,
AuthenticationState=t.AuthenticationOption.AuthenticatedCBCK,
)
)
addr_mgr_entries = []
for dev in devices:
entry = t.AddrMgrEntry(
type=t.AddrMgrUserType.Default,
nwkAddr=dev.node_info.nwk,
extAddr=dev.node_info.ieee,
)
if dev.key is not None:
entry.type |= t.AddrMgrUserType.Security
if dev.is_child:
entry.type |= t.AddrMgrUserType.Assoc
addr_mgr_entries.append(entry)
await write_addr_manager_entries(znp, addr_mgr_entries)
# Z-Stack Home 1.2 does not store keys
if znp.version < 3.0:
return
# Make sure the new table is the same size as the old table. Because this type is
# prefixed by the number of entries, the trailing table bytes are not kept track of
# but still necessary, as the table has a static maximum capacity.
old_link_key_table = await znp.nvram.osal_read(
OsalNvIds.APS_LINK_KEY_TABLE, item_type=t.Bytes
)
unpadded_link_key_table = znp.nvram.serialize(link_key_table)
new_link_key_table_value = unpadded_link_key_table.ljust(
len(old_link_key_table), b"\x00"
)
if len(new_link_key_table_value) > len(old_link_key_table):
raise RuntimeError("New link key table is larger than the current one")
await znp.nvram.osal_write(OsalNvIds.APS_LINK_KEY_TABLE, new_link_key_table_value)
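    # Illustrative (hypothetical) numbers: if the old table blob is 247 bytes and
    # the new table serializes to 41 bytes, 206 zero bytes of padding are appended
    # so the NVRAM item keeps its original size.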
tclk_fill_value = t.TCLKDevEntry(
txFrmCntr=0,
rxFrmCntr=0,
extAddr=t.EUI64.convert("00:00:00:00:00:00:00:00"),
keyAttributes=t.KeyAttributes.DEFAULT_KEY,
keyType=t.KeyType.NONE,
SeedShift_IcIndex=0,
)
aps_key_data_fill_value = t.APSKeyDataTableEntry(
Key=t.KeyData.convert("00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00"),
TxFrameCounter=0,
RxFrameCounter=0,
)
if znp.version > 3.0:
await znp.nvram.write_table(
item_id=ExNvIds.TCLK_TABLE,
values=hashed_link_key_table,
fill_value=tclk_fill_value,
)
await znp.nvram.write_table(
item_id=ExNvIds.APS_KEY_DATA_TABLE,
values=aps_key_data_table,
fill_value=aps_key_data_fill_value,
)
else:
await znp.nvram.osal_write_table(
start_nvid=OsalNvIds.LEGACY_TCLK_TABLE_START,
end_nvid=OsalNvIds.LEGACY_TCLK_TABLE_END,
values=hashed_link_key_table,
fill_value=tclk_fill_value,
)
await znp.nvram.osal_write_table(
start_nvid=OsalNvIds.LEGACY_APS_LINK_KEY_DATA_START,
end_nvid=OsalNvIds.LEGACY_APS_LINK_KEY_DATA_END,
values=aps_key_data_table,
fill_value=aps_key_data_fill_value,
        )
# zigpy
![CI](https://github.com/zigpy/zigpy/workflows/CI/badge.svg?branch=dev)
[codecov](https://codecov.io/gh/zigpy/zigpy)
**[zigpy](https://github.com/zigpy/zigpy)** is a hardware independent **[Zigbee protocol stack](https://en.wikipedia.org/wiki/Zigbee)** integration project to implement **[Zigbee](https://www.zigbee.org/)** standard specifications as a Python 3 library.
Zigbee integration via zigpy allows you to connect one of many off-the-shelf Zigbee Coordinator adapters, using one of the available zigpy-compatible Zigbee radio libraries, to control Zigbee based devices. There is currently support for controlling Zigbee device types such as binary sensors (e.g., motion and door sensors), sensors (e.g., temperature sensors), lights, switches, buttons, covers, fans, climate control equipment, locks, and intruder alarm system devices. Note that Zigbee Green Power devices are [currently unsupported](https://github.com/zigpy/zigpy/issues/341).
Zigbee stacks and hardware from many different hardware chip manufacturers are supported via radio libraries which translate their proprietary communication protocols into a common API shared among all radio libraries for zigpy. If some Zigbee stack or Zigbee Coordinator hardware from another manufacturer is not yet supported by zigpy, it is possible for any independent developer to step up and develop a new radio library for zigpy which translates its proprietary communication protocol into the common API that zigpy can understand.
zigpy contains common code implementing ZCL (Zigbee Cluster Library) and ZDO (Zigbee Device Object) application state management which is being used by various radio libraries implementing the actual interface with the radio modules from different manufacturers. The separate radio libraries interface with radio hardware adapters/modules over USB and GPIO using different native UART serial protocols.
The **[ZHA integration component for Home Assistant](https://www.home-assistant.io/integrations/zha/)**, the [Zigbee Plugin for Domoticz](https://www.domoticz.com/wiki/ZigbeeForDomoticz), and the [Zigbee Plugin for Jeedom](https://doc.jeedom.com/en_US/plugins/automation%20protocol/zigbee/) (competing open-source home automation software) all use [zigpy libraries](https://github.com/zigpy/) as dependencies; as such, they can serve as references for different implementations if you are looking to integrate a Zigbee solution into your application.
### Zigbee device OTA updates
zigpy has the ability to download and perform Zigbee OTAU (Over-The-Air Updates) of Zigbee device firmware. The Zigbee OTA update firmware image files should conform to the standard Zigbee OTA format, and OTA provider source URLs need to be published for public availability. Updates from a local OTA update directory are also supported and can be used as an option for offline firmware updates, if users provide correctly formatted Zigbee OTA firmware files themselves.
Support for automatic download from existing online OTA providers in zigpy's OTA provider code is currently only available for IKEA, Inovelli, LEDVANCE/OSRAM, SALUS/Computime, and SONOFF/ITEAD devices. Support for additional OTA providers for other manufacturers' devices could be added to zigpy in the future, if device manufacturers publish their firmware images publicly and developers contribute the needed download code for them.
## How to install and test, report bugs, or contribute to this project
For specific instructions on how to install and test zigpy, or to contribute bug reports and code to this project, please see the guidelines in the CONTRIBUTING.md file:
- [Guidelines in CONTRIBUTING.md](./CONTRIBUTING.md)
This CONTRIBUTING.md file contains information about using zigpy, testing new releases, troubleshooting and bug reporting, as well as library and code instructions for developers, and more. It also contains short summaries of, and links to, other related projects that directly or indirectly depend on zigpy libraries.
You can contribute to this project as an end-user, as a tester (an advanced user contributing constructive issue/bug reports), or as a developer contributing code.
## Compatible Zigbee coordinator hardware
Radio libraries for zigpy are separate projects with their own repositories and include **[bellows](https://github.com/zigpy/bellows)** (for communicating with Silicon Labs EmberZNet based radios), **[zigpy-deconz](https://github.com/zigpy/zigpy-deconz)** (for communicating with deCONZ based radios from Dresden Elektronik), **[zigpy-xbee](https://github.com/zigpy/zigpy-xbee)** (for communicating with XBee based Zigbee radios), **[zigpy-zigate](https://github.com/zigpy/zigpy-zigate)** (for communicating with ZiGate based radios), and **[zigpy-znp](https://github.com/zha-ng/zigpy-znp)** or **[zigpy-cc](https://github.com/zigpy/zigpy-cc)** (for communicating with Texas Instruments based radios that have Z-Stack ZNP coordinator firmware).
Note! Whether zigpy offers Zigbee 3.0 support depends primarily on your Zigbee coordinator hardware and its firmware. Some Zigbee coordinator hardware supports Zigbee 3.0 but might be shipped with an older firmware which does not, in which case you may want to upgrade the firmware manually yourself. Other Zigbee coordinator hardware may not support any firmware capable of Zigbee 3.0 at all, but can still be fully functional and feature complete for your needs (this is very common, as many if not most Zigbee devices do not yet use Zigbee 3.0 or are backwards-compatible with a Zigbee profile that is supported by your Zigbee coordinator hardware and its firmware). As a general rule, newer Zigbee coordinator hardware can normally support Zigbee 3.0 firmware, and it is up to its manufacturer to make such firmware available.
### Compatible zigpy radio libraries
- **Digi XBee** based Zigbee radios via the [zigpy-xbee](https://github.com/zigpy/zigpy-xbee) library for zigpy.
- **dresden elektronik** deCONZ based Zigbee radios via the [zigpy-deconz](https://github.com/zigpy/zigpy-deconz) library for zigpy.
- **Silicon Labs** (EmberZNet) based Zigbee radios using the EZSP protocol via the [bellows](https://github.com/zigpy/bellows) library for zigpy.
- **Texas Instruments** based Zigbee radios with all compatible Z-Stack firmware via the [zigpy-znp](https://github.com/zha-ng/zigpy-znp) library for zigpy.
- **ZiGate** based ZigBee radios via the [zigpy-zigate](https://github.com/zigpy/zigpy-zigate) library for zigpy.
### Legacy or obsolete zigpy radio libraries
- Texas Instruments with Z-Stack legacy firmware via the [zigpy-cc](https://github.com/zigpy/zigpy-cc) library for zigpy.
## Release packages available via PyPI
New packages of tagged versions are also released via the "zigpy" project on PyPI:
- https://pypi.org/project/zigpy/
- https://pypi.org/project/zigpy/#history
- https://pypi.org/project/zigpy/#files
Older packages of tagged versions are still available on the "zigpy-homeassistant" project on PyPI:
- https://pypi.org/project/zigpy-homeassistant/
Packages of tagged versions of the radio libraries are released via separate projects on PyPI:
- https://pypi.org/project/bellows/
- https://pypi.org/project/zigpy-cc/
- https://pypi.org/project/zigpy-deconz/
- https://pypi.org/project/zigpy-xbee/
- https://pypi.org/project/zigpy-zigate/
- https://pypi.org/project/zigpy-znp/
from zigzag.classes.stages import *
import argparse
def main():
# Get the onnx model, the mapping and accelerator arguments
parser = argparse.ArgumentParser(description="Setup zigzag inputs")
parser.add_argument('--model', metavar='path', required=True, help='path to onnx model, e.g. inputs/examples/my_onnx_model.onnx')
parser.add_argument('--mapping', metavar='path', required=True, help='path to mapping file, e.g., inputs.examples.my_mapping')
parser.add_argument('--accelerator', metavar='path', required=True, help='module path to the accelerator, e.g. inputs.examples.accelerator1')
args = parser.parse_args()
# Initialize the logger
import logging as _logging
_logging_level = _logging.INFO
# _logging_format = '%(asctime)s - %(name)s.%(funcName)s +%(lineno)s - %(levelname)s - %(message)s'
_logging_format = '%(asctime)s - %(funcName)s +%(lineno)s - %(levelname)s - %(message)s'
_logging.basicConfig(level=_logging_level,
format=_logging_format)
# Initialize the MainStage which will start execution.
# The first argument of this init is the list of stages that will be executed in sequence.
# The second argument of this init are the arguments required for these different stages.
mainstage = MainStage([ # Initializes the MainStage as entry point
ONNXModelParserStage, # Parses the ONNX Model into the workload
AcceleratorParserStage, # Parses the accelerator
        SimpleSaveStage, # Saves all received CME information to a json
WorkloadStage, # Iterates through the different layers in the workload
SpatialMappingConversionStage, # Generates multiple spatial mappings (SM)
MinimalLatencyStage, # Reduces all CMEs, returning minimal latency one
LomaStage, # Generates multiple temporal mappings (TM)
CostModelStage # Evaluates generated SM and TM through cost model
],
accelerator_path=args.accelerator, # required by AcceleratorParserStage
onnx_model_path=args.model, # required by ONNXModelParserStage
mapping_path=args.mapping, # required by ONNXModelParserStage
dump_filename_pattern="outputs/{datetime}.json", # output file save pattern
loma_lpf_limit=6, # required by LomaStage
loma_show_progress_bar=True, # shows a progress bar while iterating over temporal mappings
)
# Launch the MainStage
mainstage.run()
if __name__ == "__main__":
    main()
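# Example invocation (illustrative paths, mirroring the argparse help strings above):
#   python -m zigzag --model inputs/examples/my_onnx_model.onnx \
#       --mapping inputs.examples.my_mapping --accelerator inputs.examples.accelerator1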
from zigzag.classes.stages import *
import re
def get_hardware_performance_zigzag(
workload,
accelerator,
mapping,
opt="latency",
dump_filename_pattern="outputs/{datetime}.json",
pickle_filename="outputs/list_of_cmes.pickle",
):
# Initialize the logger
import logging as _logging
_logging_level = _logging.INFO
_logging_format = (
"%(asctime)s - %(funcName)s +%(lineno)s - %(levelname)s - %(message)s"
)
_logging.basicConfig(level=_logging_level, format=_logging_format)
# Sanity check on the optimization criterion
if opt == "energy":
opt_stage = MinimalEnergyStage
elif opt == "latency":
opt_stage = MinimalLatencyStage
elif opt == "EDP":
opt_stage = MinimalEDPStage
else:
raise NotImplementedError(
"Optimization criterion 'opt' should be either 'energy' or 'latency' or 'EDP'."
)
# Check workload format and based on it select the correct workload parser stage
try:
if workload.split(".")[-1] == "onnx":
workload_parser_stage = ONNXModelParserStage
else:
workload_parser_stage = WorkloadParserStage
    except AttributeError:  # the workload is not a path string (e.g. a workload dict)
        workload_parser_stage = WorkloadParserStage
mainstage = MainStage(
[ # Initialize the MainStage as entry point
workload_parser_stage, # Parse the ONNX Model into the workload
            AcceleratorParserStage,  # Parse the accelerator module (or pass through an already-built accelerator)
SimpleSaveStage, # Save the summed CME energy and latency to a json
PickleSaveStage, # Save all received CMEs in a list to a pickle file
SumStage, # Sum up the received best CME across all layers of the workload
WorkloadStage, # Iterate through the different layers in the workload
CompleteSaveStage, # Save each processed layer to a json
opt_stage, # Reduce all CMEs, returning minimal energy/latency one
SpatialMappingGeneratorStage, # Generate multiple spatial mappings (SM)
opt_stage, # Reduce all CMEs, returning minimal energy/latency one
LomaStage, # Generate multiple temporal mappings (TM)
# TemporalOrderingConversionStage, # Based on the fixed temporal mapping order, generate one temporal mapping (TM)
CostModelStage, # Evaluate generated SM and TM through cost model
],
accelerator=accelerator, # required by AcceleratorParserStage
workload=workload, # required by workload_parser_stage
mapping=mapping, # required by workload_parser_stage
dump_filename_pattern=dump_filename_pattern, # output file save pattern
pickle_filename=pickle_filename, # filename for pickled list of cmes
loma_lpf_limit=6, # required by LomaStage
loma_show_progress_bar=True,
# If we need access the same input data multiple times from the innermost memory level and the data size is smaller than the memory read bw,
# take into account only one-time access cost (assume the data can stay at the output pins of the memory as long as it is needed).
# By default, if the parameter is not defined, it will be set as False internally.
access_same_data_considered_as_no_access=True,
)
# Launch the MainStage
answers = mainstage.run()
# Get CME from answer
cmes = answers
return cmes[0][0].energy_total, cmes[0][0].latency_total2, cmes
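# Example use, with the same example inputs as the __main__ block below:
#   energy, latency, cmes = get_hardware_performance_zigzag(
#       workload="zigzag/inputs/examples/workload/mobilenetv2.onnx",
#       accelerator="zigzag.inputs.examples.hardware.TPU_like",
#       mapping="zigzag.inputs.examples.mapping.tpu_like",
#   )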
def get_hardware_performance_zigzag_pe_array_scaling(
workload,
accelerator,
mapping,
pe_array_scaling,
opt="latency",
dump_filename_pattern="outputs/{datetime}.json",
pickle_filename="outputs/list_of_cmes.pickle",
):
# Initialize the logger
import logging as _logging
_logging_level = _logging.INFO
_logging_format = (
"%(asctime)s - %(funcName)s +%(lineno)s - %(levelname)s - %(message)s"
)
_logging.basicConfig(level=_logging_level, format=_logging_format)
# Sanity check on the optimization criterion
if opt == "energy":
opt_stage = MinimalEnergyStage
elif opt == "latency":
opt_stage = MinimalLatencyStage
elif opt == "EDP":
opt_stage = MinimalEDPStage
else:
raise NotImplementedError(
"Optimization criterion 'opt' should be either 'energy' or 'latency' or 'EDP'."
)
# Check workload format and based on it select the correct workload parser stage
try:
if workload.split(".")[-1] == "onnx":
workload_parser_stage = ONNXModelParserStage
else:
workload_parser_stage = WorkloadParserStage
    except AttributeError:  # the workload is not a path string (e.g. a workload dict)
        workload_parser_stage = WorkloadParserStage
mainstage = MainStage(
[ # Initialize the MainStage as entry point
workload_parser_stage, # Parse the ONNX Model into the workload
            AcceleratorParserStage,  # Parse the accelerator module (or pass through an already-built accelerator)
PEArrayScalingStage, # Scale the PE array of the given accelerator
SimpleSaveStage, # Save the summed CME energy and latency to a json
PickleSaveStage, # Save all received CMEs in a list to a pickle file
SumStage, # Sum up the received best CME across all layers of the workload
WorkloadStage, # Iterate through the different layers in the workload
CompleteSaveStage, # Save each processed layer to a json
opt_stage, # Reduce all CMEs, returning minimal energy/latency one
SpatialMappingGeneratorStage, # Generate multiple spatial mappings (SM)
opt_stage, # Reduce all CMEs, returning minimal energy/latency one
LomaStage, # Generate multiple temporal mappings (TM)
# TemporalOrderingConversionStage, # Based on the fixed temporal mapping order, generate one temporal mapping (TM)
CostModelStage, # Evaluate generated SM and TM through cost model
],
accelerator=accelerator, # required by AcceleratorParserStage
workload=workload, # required by workload_parser_stage
mapping=mapping, # required by workload_parser_stage
dump_filename_pattern=dump_filename_pattern, # output file save pattern
pickle_filename=pickle_filename, # filename for pickled list of cmes
loma_lpf_limit=6, # required by LomaStage
loma_show_progress_bar=True,
# If we need access the same input data multiple times from the innermost memory level and the data size is smaller than the memory read bw,
# take into account only one-time access cost (assume the data can stay at the output pins of the memory as long as it is needed).
# By default, if the parameter is not defined, it will be set as False internally.
access_same_data_considered_as_no_access=True,
pe_array_scaling=pe_array_scaling,
)
# Launch the MainStage
answers = mainstage.run()
# Get CME from answer
cmes = answers
return cmes[0][0].energy_total, cmes[0][0].latency_total2, cmes
if __name__ == "__main__":
workload = "zigzag/inputs/examples/workload/mobilenetv2.onnx"
# workload = 'inputs.examples.workload.resnet18'
accelerator = "zigzag.inputs.examples.hardware.TPU_like"
mapping = "zigzag.inputs.examples.mapping.tpu_like"
hw_name = accelerator.split(".")[-1]
wl_name = re.split(r"/|\.", workload)[-1]
if wl_name == "onnx":
wl_name = re.split(r"/|\.", workload)[-2]
experiment_id = f"{hw_name}-{wl_name}"
pkl_name = f"{experiment_id}-saved_list_of_cmes"
answer = get_hardware_performance_zigzag_pe_array_scaling(
workload,
accelerator,
mapping,
pe_array_scaling=2,
opt="EDP",
dump_filename_pattern=f"outputs/{experiment_id}-layer_?.json",
pickle_filename=f"outputs/{pkl_name}.pickle",
)
# print(f'Answer = {answer}')
    # import pickle
    # path = f"outputs/{pkl_name}.pickle"
    # with open(path, "rb") as f:  # the "with" block closes the file automatically
    #     data = pickle.load(f)
workload = {
0: { # conv1, stride 2
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=2*ox+1*fx", "iy=2*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 64,
"C": 3,
"OY": 112,
"OX": 112,
"FY": 7,
"FX": 7,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": []},
"constant_operands": ["I", "W"],
},
1: { # max pool, stride 2
"operator_type": "Pooling",
"equation": "O[b][g][oy][ox]+=W[fx][fy]*I[b][g][iy][ix]",
"dimension_relations": ["ix=2*ox+1*fx", "iy=2*oy+1*fy"],
"loop_dim_size": {"B": 1, "G": 64, "OY": 56, "OX": 56, "FX": 3, "FY": 3},
"operand_precision": {"O": 16, "O_final": 8, "I": 8, "W": 0},
"operand_source": {"W": [], "I": [0]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "G": "K"}},
},
2: { # conv2_1
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=1*ox+1*fx", "iy=1*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 64,
"C": 64,
"OY": 56,
"OX": 56,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [1]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "G"}},
},
3: { # conv2_2
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=1*ox+1*fx", "iy=1*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 64,
"C": 64,
"OY": 56,
"OX": 56,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [2]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "K"}},
},
4: { # Addition of layer 1 (residual path) and layer 3 (main path)
"operator_type": "Add",
"equation": "O[b][g][oy][ox]=X[b][g][oy][ox]+Y[b][g][oy][ox]",
"dimension_relations": [],
"loop_dim_size": {"B": 1, "G": 64, "OY": 56, "OX": 56},
"operand_precision": {"O": 16, "O_final": 8, "X": 8, "Y": 8},
"operand_source": {"X": [1], "Y": [3]},
"constant_operands": [],
"operand_source_dimension_mapping": {
"X": {"OX": "OX", "OY": "OY", "G": "K"},
"Y": {"OX": "OX", "OY": "OY", "G": "K"},
},
},
5: { # conv2_3
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=1*ox+1*fx", "iy=1*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 64,
"C": 64,
"OY": 56,
"OX": 56,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [4]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "G"}},
},
6: { # conv2_4
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=1*ox+1*fx", "iy=1*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 64,
"C": 64,
"OY": 56,
"OX": 56,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [5]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "K"}},
},
7: { # Addition of layer 4 (residual connection) and layer 6 (main path)
"operator_type": "Add",
"equation": "O[b][g][oy][ox]=X[b][g][oy][ox]+Y[b][g][oy][ox]",
"dimension_relations": [],
"loop_dim_size": {"B": 1, "G": 64, "OY": 56, "OX": 56},
"operand_precision": {"O": 16, "O_final": 8, "X": 8, "Y": 8},
"operand_source": {"X": [4], "Y": [6]},
"constant_operands": [],
"operand_source_dimension_mapping": {
"X": {"OX": "OX", "OY": "OY", "G": "G"},
"Y": {"OX": "OX", "OY": "OY", "G": "K"},
},
},
8: { # conv3_1, stride 2
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=2*ox+1*fx", "iy=2*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 128,
"C": 64,
"OY": 28,
"OX": 28,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [7]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "G"}},
},
9: { # conv3_2
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=1*ox+1*fx", "iy=1*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 128,
"C": 128,
"OY": 28,
"OX": 28,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [8]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "K"}},
},
10: { # conv downsample of layer 7
"operator_type": "Conv_downsample",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=2*ox+1*fx", "iy=2*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 128,
"C": 64,
"OY": 28,
"OX": 28,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [7]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "G"}},
},
11: { # Addition of layer 10 (residual connection) and layer 9 (main path)
"operator_type": "Add",
"equation": "O[b][g][oy][ox]=X[b][g][oy][ox]+Y[b][g][oy][ox]",
"dimension_relations": [],
"loop_dim_size": {"B": 1, "G": 128, "OY": 28, "OX": 28},
"operand_precision": {"O": 16, "O_final": 8, "X": 8, "Y": 8},
"operand_source": {"X": [10], "Y": [9]},
"constant_operands": [],
"operand_source_dimension_mapping": {
"X": {"OX": "OX", "OY": "OY", "G": "K"},
"Y": {"OX": "OX", "OY": "OY", "G": "K"},
},
},
12: { # conv3_3
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=1*ox+1*fx", "iy=1*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 128,
"C": 128,
"OY": 28,
"OX": 28,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [11]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "G"}},
},
13: { # conv3_4
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=1*ox+1*fx", "iy=1*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 128,
"C": 128,
"OY": 28,
"OX": 28,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [12]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "K"}},
},
14: { # Addition of layer 11 (residual connection) and layer 13 (main path)
"operator_type": "Add",
"equation": "O[b][g][oy][ox]=X[b][g][oy][ox]+Y[b][g][oy][ox]",
"dimension_relations": [],
"loop_dim_size": {"B": 1, "G": 128, "OY": 28, "OX": 28},
"operand_precision": {"O": 16, "O_final": 8, "X": 8, "Y": 8},
"operand_source": {"X": [11], "Y": [13]},
"constant_operands": [],
"operand_source_dimension_mapping": {
"X": {"OX": "OX", "OY": "OY", "G": "G"},
"Y": {"OX": "OX", "OY": "OY", "G": "K"},
},
},
15: { # conv4_1, stride 2
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=2*ox+1*fx", "iy=2*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 256,
"C": 128,
"OY": 14,
"OX": 14,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [14]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "G"}},
},
16: { # conv4_2
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=1*ox+1*fx", "iy=1*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 256,
"C": 256,
"OY": 14,
"OX": 14,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [15]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "K"}},
},
17: { # conv downsample of layer 14
"operator_type": "Conv_downsample",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=2*ox+1*fx", "iy=2*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 256,
"C": 128,
"OY": 14,
"OX": 14,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [14]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "G"}},
},
18: { # Addition of layer 17 (residual connection) and layer 16 (main path)
"operator_type": "Add",
"equation": "O[b][g][oy][ox]=X[b][g][oy][ox]+Y[b][g][oy][ox]",
"dimension_relations": [],
"loop_dim_size": {"B": 1, "G": 256, "OY": 14, "OX": 14},
"operand_precision": {"O": 16, "O_final": 8, "X": 8, "Y": 8},
"operand_source": {"X": [17], "Y": [16]},
"constant_operands": [],
"operand_source_dimension_mapping": {
"X": {"OX": "OX", "OY": "OY", "G": "K"},
"Y": {"OX": "OX", "OY": "OY", "G": "K"},
},
},
19: { # conv4_3
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=1*ox+1*fx", "iy=1*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 256,
"C": 256,
"OY": 14,
"OX": 14,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [18]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "G"}},
},
20: { # conv4_4
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=1*ox+1*fx", "iy=1*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 256,
"C": 256,
"OY": 14,
"OX": 14,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [19]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "K"}},
},
21: { # Addition of layer 18 (residual connection) and layer 20 (main path)
"operator_type": "Add",
"equation": "O[b][g][oy][ox]=X[b][g][oy][ox]+Y[b][g][oy][ox]",
"dimension_relations": [],
"loop_dim_size": {"B": 1, "G": 256, "OY": 14, "OX": 14},
"operand_precision": {"O": 16, "O_final": 8, "X": 8, "Y": 8},
"operand_source": {"X": [18], "Y": [20]},
"constant_operands": [],
"operand_source_dimension_mapping": {
"X": {"OX": "OX", "OY": "OY", "G": "G"},
"Y": {"OX": "OX", "OY": "OY", "G": "K"},
},
},
22: { # conv5_1, stride 2
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=2*ox+1*fx", "iy=2*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 512,
"C": 256,
"OY": 7,
"OX": 7,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [21]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "G"}},
},
23: { # conv5_2
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=1*ox+1*fx", "iy=1*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 512,
"C": 512,
"OY": 7,
"OX": 7,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [22]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "K"}},
},
24: { # conv downsample of layer 21
"operator_type": "Conv_downsample",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=2*ox+1*fx", "iy=2*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 512,
"C": 256,
"OY": 7,
"OX": 7,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [21]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "G"}},
},
25: { # Addition of layer 24 (residual connection) and layer 23 (main path)
"operator_type": "Add",
"equation": "O[b][g][oy][ox]=X[b][g][oy][ox]+Y[b][g][oy][ox]",
"dimension_relations": [],
"loop_dim_size": {"B": 1, "G": 512, "OY": 7, "OX": 7},
"operand_precision": {"O": 16, "O_final": 8, "X": 8, "Y": 8},
"operand_source": {"X": [24], "Y": [23]},
"constant_operands": [],
"operand_source_dimension_mapping": {
"X": {"OX": "OX", "OY": "OY", "G": "G"},
"Y": {"OX": "OX", "OY": "OY", "G": "K"},
},
},
26: { # conv5_3
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=1*ox+1*fx", "iy=1*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 512,
"C": 512,
"OY": 7,
"OX": 7,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [25]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "G"}},
},
    27: { # conv5_4
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=1*ox+1*fx", "iy=1*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 512,
"C": 512,
"OY": 7,
"OX": 7,
"FY": 3,
"FX": 3,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [26]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "K"}},
},
28: { # Addition of layer 25 (residual connection) and layer 27 (main path)
"operator_type": "Add",
"equation": "O[b][g][oy][ox]=X[b][g][oy][ox]+Y[b][g][oy][ox]",
"dimension_relations": [],
"loop_dim_size": {"B": 1, "G": 512, "OY": 7, "OX": 7},
"operand_precision": {"O": 16, "O_final": 8, "X": 8, "Y": 8},
"operand_source": {"X": [25], "Y": [27]},
"constant_operands": [],
"operand_source_dimension_mapping": {
"X": {"OX": "OX", "OY": "OY", "G": "G"},
"Y": {"OX": "OX", "OY": "OY", "G": "K"},
},
},
    29: { # average pool
"operator_type": "Pooling",
"equation": "O[b][g][oy][ox]+=W[fx][fy]*I[b][g][iy][ix]",
"dimension_relations": ["ix=1*ox+1*fx", "iy=1*oy+1*fy"],
"loop_dim_size": {"B": 1, "G": 512, "OY": 1, "OX": 1, "FX": 7, "FY": 7},
"operand_precision": {"O": 16, "O_final": 8, "I": 8, "W": 0},
"operand_source": {"W": [], "I": [28]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "G": "K"}},
},
30: { # fc
"operator_type": "Conv",
"equation": "O[b][k][oy][ox]+=W[k][c][fy][fx]*I[b][c][iy][ix]",
"dimension_relations": ["ix=1*ox+1*fx", "iy=1*oy+1*fy"],
"loop_dim_size": {
"B": 1,
"K": 1000,
"C": 512,
"OY": 1,
"OX": 1,
"FY": 1,
"FX": 1,
},
"operand_precision": {"O": 16, "O_final": 8, "W": 8, "I": 8},
"operand_source": {"W": [], "I": [29]},
"constant_operands": ["W"],
"operand_source_dimension_mapping": {"I": {"IX": "OX", "IY": "OY", "C": "G"}},
},
}
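# Each layer entry above follows zigzag's workload format: "equation" expresses the
# computation as a nested-loop expression, "dimension_relations" ties input indices
# to output indices (e.g. "ix=2*ox+1*fx" encodes a stride of 2 along OX),
# "loop_dim_size" fixes the loop bounds, "operand_source" lists the id(s) of the
# layer(s) producing each operand (an empty list marks an external input), and
# "operand_source_dimension_mapping" aligns dimension names between producer and
# consumer layers.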
import os
from zigzag.classes.hardware.architecture.memory_hierarchy import MemoryHierarchy
from zigzag.classes.hardware.architecture.memory_level import MemoryLevel
from zigzag.classes.hardware.architecture.operational_unit import Multiplier
from zigzag.classes.hardware.architecture.operational_array import MultiplierArray
from zigzag.classes.hardware.architecture.memory_instance import MemoryInstance
from zigzag.classes.hardware.architecture.accelerator import Accelerator
from zigzag.classes.hardware.architecture.core import Core
def memory_hierarchy_dut(multiplier_array):
"""Memory hierarchy variables"""
""" size=#bit, bw=(read bw, write bw), cost=(read word energy, write work energy) """
reg_IW1 = MemoryInstance(
name="rf_1B",
size=8,
r_bw=8,
w_bw=8,
r_cost=0.01,
w_cost=0.01,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
)
reg_O1 = MemoryInstance(
name="rf_2B",
size=16,
r_bw=16,
w_bw=16,
r_cost=0.02,
w_cost=0.02,
area=0,
r_port=2,
w_port=2,
rw_port=0,
latency=1,
)
##################################### on-chip memory hierarchy building blocks #####################################
sram_32KB_512_1r_1w = MemoryInstance(
name="sram_32KB",
size=32768 * 8,
r_bw=512,
w_bw=512,
r_cost=22.9,
w_cost=52.01,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
sram_2M_with_16_128K_bank_128_1r_1w = MemoryInstance(
name="sram_2MB",
size=131072 * 16 * 8,
r_bw=128 * 16,
w_bw=128 * 16,
r_cost=26.01 * 16,
w_cost=23.65 * 16,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
#######################################################################################################################
dram = MemoryInstance(
name="dram",
size=10000000000,
r_bw=64,
w_bw=64,
r_cost=700,
w_cost=750,
area=0,
r_port=0,
w_port=0,
rw_port=1,
latency=1,
)
memory_hierarchy_graph = MemoryHierarchy(operational_array=multiplier_array)
"""
fh: from high = wr_in_by_high
fl: from low = wr_in_by_low
th: to high = rd_out_to_high
tl: to low = rd_out_to_low
"""
# we don't have unrolled I-Reg to better support G unrolling
# memory_hierarchy_graph.add_memory(memory_instance=reg_IW1, operands=('I1',),
# port_alloc=({'fh': 'w_port_1', 'tl': 'r_port_1', 'fl': None, 'th': None},),
# served_dimensions={(0, 0, 0, 0)})
memory_hierarchy_graph.add_memory(
memory_instance=reg_IW1,
operands=("I2",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions={(0, 0, 1, 0), (0, 0, 0, 1)},
)
memory_hierarchy_graph.add_memory(
memory_instance=reg_O1,
operands=("O",),
port_alloc=(
{"fh": "w_port_1", "tl": "r_port_1", "fl": "w_port_2", "th": "r_port_2"},
),
served_dimensions={(0, 1, 0, 0)},
)
##################################### on-chip highest memory hierarchy initialization #####################################
memory_hierarchy_graph.add_memory(
memory_instance=sram_32KB_512_1r_1w,
operands=("I2",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions="all",
)
memory_hierarchy_graph.add_memory(
memory_instance=sram_2M_with_16_128K_bank_128_1r_1w,
operands=("I1", "O"),
port_alloc=(
{"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},
{"fh": "w_port_1", "tl": "r_port_1", "fl": "w_port_1", "th": "r_port_1"},
),
served_dimensions="all",
)
####################################################################################################################
memory_hierarchy_graph.add_memory(
memory_instance=dram,
operands=("I1", "I2", "O"),
port_alloc=(
{"fh": "rw_port_1", "tl": "rw_port_1", "fl": None, "th": None},
{"fh": "rw_port_1", "tl": "rw_port_1", "fl": None, "th": None},
{
"fh": "rw_port_1",
"tl": "rw_port_1",
"fl": "rw_port_1",
"th": "rw_port_1",
},
),
served_dimensions="all",
)
from zigzag.visualization.graph.memory_hierarchy import (
visualize_memory_hierarchy_graph,
)
# visualize_memory_hierarchy_graph(memory_hierarchy_graph)
return memory_hierarchy_graph
def multiplier_array_dut():
"""Multiplier array variables"""
multiplier_input_precision = [8, 8]
multiplier_energy = 0.04
multiplier_area = 1
dimensions = {
"D1": 8,
"D2": 8,
"D3": 4,
"D4": 4,
} # {'D1': ('K', 8), 'D2': ('C', 8), 'D3': ('OX', 4), 'D4': ('OY', 4),}
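    # Total PE count: 8 * 8 * 4 * 4 = 1024 multipliers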
multiplier = Multiplier(
multiplier_input_precision, multiplier_energy, multiplier_area
)
multiplier_array = MultiplierArray(multiplier, dimensions)
return multiplier_array
def cores_dut():
multiplier_array1 = multiplier_array_dut()
memory_hierarchy1 = memory_hierarchy_dut(multiplier_array1)
core1 = Core(1, multiplier_array1, memory_hierarchy1)
return {core1}
cores = cores_dut()
acc_name = os.path.basename(__file__)[:-3]
accelerator = Accelerator(acc_name, cores)
import os
from zigzag.classes.hardware.architecture.memory_hierarchy import MemoryHierarchy
from zigzag.classes.hardware.architecture.memory_level import MemoryLevel
from zigzag.classes.hardware.architecture.operational_unit import Multiplier
from zigzag.classes.hardware.architecture.operational_array import MultiplierArray
from zigzag.classes.hardware.architecture.memory_instance import MemoryInstance
from zigzag.classes.hardware.architecture.accelerator import Accelerator
from zigzag.classes.hardware.architecture.core import Core
def memory_hierarchy_dut(multiplier_array):
"""Memory hierarchy variables"""
""" size=#bit, bw=(read bw, write bw), cost=(read word energy, write work energy) """
reg_IW1 = MemoryInstance(
name="rf_1B",
size=8,
r_bw=8,
w_bw=8,
r_cost=0.01,
w_cost=0.01,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
)
reg_O1 = MemoryInstance(
name="rf_2B",
size=16,
r_bw=16,
w_bw=16,
r_cost=0.02,
w_cost=0.02,
area=0,
r_port=2,
w_port=2,
rw_port=0,
latency=1,
)
##################################### on-chip memory hierarchy building blocks #####################################
sram_64KB_with_8_8K_64_1r_1w = MemoryInstance(
name="sram_64KB",
size=8192 * 8 * 8,
r_bw=64 * 8,
w_bw=64 * 8,
r_cost=3.32 * 8,
w_cost=3.85 * 8,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
sram_32KB_with_4_8K_64_1r_1w = MemoryInstance(
name="sram_32KB",
size=8192 * 4 * 8,
r_bw=64 * 4,
w_bw=64 * 4,
r_cost=3.32 * 4,
w_cost=3.85 * 4,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
sram_1M_with_8_128K_bank_128_1r_1w_A = MemoryInstance(
name="sram_1MB_A",
size=131072 * 8 * 8,
r_bw=128 * 8,
w_bw=128 * 8,
r_cost=26.01 * 8,
w_cost=23.65 * 8,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
sram_1M_with_8_128K_bank_128_1r_1w_W = MemoryInstance(
name="sram_1MB_W",
size=131072 * 8 * 8,
r_bw=128 * 8,
w_bw=128 * 8,
r_cost=26.01 * 8,
w_cost=23.65 * 8,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
#######################################################################################################################
dram = MemoryInstance(
name="dram",
size=10000000000,
r_bw=64,
w_bw=64,
r_cost=700,
w_cost=750,
area=0,
r_port=0,
w_port=0,
rw_port=1,
latency=1,
)
memory_hierarchy_graph = MemoryHierarchy(operational_array=multiplier_array)
"""
fh: from high = wr_in_by_high
fl: from low = wr_in_by_low
th: to high = rd_out_to_high
tl: to low = rd_out_to_low
"""
# we don't have unrolled I-Reg to better support G unrolling
# memory_hierarchy_graph.add_memory(memory_instance=reg_IW1, operands=('I1',),
# port_alloc=({'fh': 'w_port_1', 'tl': 'r_port_1', 'fl': None, 'th': None},),
# served_dimensions={(0, 0, 0, 0)})
memory_hierarchy_graph.add_memory(
memory_instance=reg_IW1,
operands=("I2",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions={(0, 0, 1, 0), (0, 0, 0, 1)},
)
memory_hierarchy_graph.add_memory(
memory_instance=reg_O1,
operands=("O",),
port_alloc=(
{"fh": "w_port_1", "tl": "r_port_1", "fl": "w_port_2", "th": "r_port_2"},
),
served_dimensions={(0, 1, 0, 0)},
)
##################################### on-chip highest memory hierarchy initialization #####################################
memory_hierarchy_graph.add_memory(
memory_instance=sram_64KB_with_8_8K_64_1r_1w,
operands=("I2",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions="all",
)
memory_hierarchy_graph.add_memory(
memory_instance=sram_1M_with_8_128K_bank_128_1r_1w_W,
operands=("I2",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions="all",
)
memory_hierarchy_graph.add_memory(
memory_instance=sram_32KB_with_4_8K_64_1r_1w,
operands=("I1",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions="all",
)
memory_hierarchy_graph.add_memory(
memory_instance=sram_1M_with_8_128K_bank_128_1r_1w_A,
operands=("I1", "O"),
port_alloc=(
{"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},
{"fh": "w_port_1", "tl": "r_port_1", "fl": "w_port_1", "th": "r_port_1"},
),
served_dimensions="all",
)
####################################################################################################################
memory_hierarchy_graph.add_memory(
memory_instance=dram,
operands=("I1", "I2", "O"),
port_alloc=(
{"fh": "rw_port_1", "tl": "rw_port_1", "fl": None, "th": None},
{"fh": "rw_port_1", "tl": "rw_port_1", "fl": None, "th": None},
{
"fh": "rw_port_1",
"tl": "rw_port_1",
"fl": "rw_port_1",
"th": "rw_port_1",
},
),
served_dimensions="all",
)
from zigzag.visualization.graph.memory_hierarchy import (
visualize_memory_hierarchy_graph,
)
# visualize_memory_hierarchy_graph(memory_hierarchy_graph)
return memory_hierarchy_graph
def multiplier_array_dut():
"""Multiplier array variables"""
multiplier_input_precision = [8, 8]
multiplier_energy = 0.04
multiplier_area = 1
dimensions = {
"D1": 32,
"D2": 2,
"D3": 4,
"D4": 4,
} # {'D1': ('K', 32), 'D2': ('C', 2), 'D3': ('OX', 4), 'D4': ('OY', 4),}
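    # Total PE count: 32 * 2 * 4 * 4 = 1024 multipliers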
multiplier = Multiplier(
multiplier_input_precision, multiplier_energy, multiplier_area
)
multiplier_array = MultiplierArray(multiplier, dimensions)
return multiplier_array
def cores_dut():
multiplier_array1 = multiplier_array_dut()
memory_hierarchy1 = memory_hierarchy_dut(multiplier_array1)
core1 = Core(1, multiplier_array1, memory_hierarchy1)
return {core1}
cores = cores_dut()
acc_name = os.path.basename(__file__)[:-3]
accelerator = Accelerator(acc_name, cores)
import os
from zigzag.classes.hardware.architecture.memory_hierarchy import MemoryHierarchy
from zigzag.classes.hardware.architecture.memory_level import MemoryLevel
from zigzag.classes.hardware.architecture.operational_unit import Multiplier
from zigzag.classes.hardware.architecture.operational_array import MultiplierArray
from zigzag.classes.hardware.architecture.memory_instance import MemoryInstance
from zigzag.classes.hardware.architecture.accelerator import Accelerator
from zigzag.classes.hardware.architecture.core import Core
def memory_hierarchy_dut(multiplier_array):
"""Memory hierarchy variables"""
""" size=#bit, bw=(read bw, write bw), cost=(read word energy, write work energy) """
reg_W_128B = MemoryInstance(
name="rf_128B",
size=128 * 8,
r_bw=8,
w_bw=8,
r_cost=0.095,
w_cost=0.095,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
)
reg_O_2B = MemoryInstance(
name="rf_2B",
size=16,
r_bw=16,
w_bw=16,
r_cost=0.021,
w_cost=0.021,
area=0,
r_port=2,
w_port=2,
rw_port=0,
latency=1,
)
##################################### on-chip memory hierarchy building blocks #####################################
# sram_32KB_512_1r_1w = \
# MemoryInstance(name="sram_32KB", size=32768 * 8, r_bw=512, w_bw=512, r_cost=22.9, w_cost=52.01, area=0,
# r_port=1, w_port=1, rw_port=0, latency=1, min_r_granularity=64, min_w_granularity=64)
sram_2M_with_16_128K_bank_128_1r_1w = MemoryInstance(
name="sram_2MB",
size=131072 * 16 * 8,
r_bw=128 * 16,
w_bw=128 * 16,
r_cost=26.01 * 16,
w_cost=23.65 * 16,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
#######################################################################################################################
dram = MemoryInstance(
name="dram",
size=10000000000,
r_bw=64,
w_bw=64,
r_cost=700,
w_cost=750,
area=0,
r_port=0,
w_port=0,
rw_port=1,
latency=1,
)
memory_hierarchy_graph = MemoryHierarchy(operational_array=multiplier_array)
"""
fh: from high = wr_in_by_high
fl: from low = wr_in_by_low
th: to high = rd_out_to_high
tl: to low = rd_out_to_low
"""
memory_hierarchy_graph.add_memory(
memory_instance=reg_W_128B,
operands=("I2",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions={(0, 0)},
)
memory_hierarchy_graph.add_memory(
memory_instance=reg_O_2B,
operands=("O",),
port_alloc=(
{"fh": "w_port_1", "tl": "r_port_1", "fl": "w_port_2", "th": "r_port_2"},
),
served_dimensions={(0, 1)},
)
##################################### on-chip highest memory hierarchy initialization #####################################
memory_hierarchy_graph.add_memory(
memory_instance=sram_2M_with_16_128K_bank_128_1r_1w,
operands=("I1", "O"),
port_alloc=(
{"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},
{"fh": "w_port_1", "tl": "r_port_1", "fl": "w_port_1", "th": "r_port_1"},
),
served_dimensions="all",
)
####################################################################################################################
memory_hierarchy_graph.add_memory(
memory_instance=dram,
operands=("I1", "I2", "O"),
port_alloc=(
{"fh": "rw_port_1", "tl": "rw_port_1", "fl": None, "th": None},
{"fh": "rw_port_1", "tl": "rw_port_1", "fl": None, "th": None},
{
"fh": "rw_port_1",
"tl": "rw_port_1",
"fl": "rw_port_1",
"th": "rw_port_1",
},
),
served_dimensions="all",
)
from zigzag.visualization.graph.memory_hierarchy import (
visualize_memory_hierarchy_graph,
)
    # visualize_memory_hierarchy_graph(memory_hierarchy_graph)
return memory_hierarchy_graph
def multiplier_array_dut():
"""Multiplier array variables"""
multiplier_input_precision = [8, 8]
multiplier_energy = 0.04
multiplier_area = 1
dimensions = {"D1": 32, "D2": 32} # {'D1': ('K', 32), 'D2': ('C', 32)}
multiplier = Multiplier(
multiplier_input_precision, multiplier_energy, multiplier_area
)
multiplier_array = MultiplierArray(multiplier, dimensions)
return multiplier_array
def cores_dut():
multiplier_array1 = multiplier_array_dut()
memory_hierarchy1 = memory_hierarchy_dut(multiplier_array1)
core1 = Core(1, multiplier_array1, memory_hierarchy1)
return {core1}
cores = cores_dut()
acc_name = os.path.basename(__file__)[:-3]
accelerator = Accelerator(acc_name, cores)
from zigzag.classes.hardware.architecture.memory_hierarchy import MemoryHierarchy
from zigzag.classes.hardware.architecture.memory_level import MemoryLevel
from zigzag.classes.hardware.architecture.operational_unit import Multiplier
from zigzag.classes.hardware.architecture.operational_array import MultiplierArray
from zigzag.classes.hardware.architecture.memory_instance import MemoryInstance
from zigzag.classes.hardware.architecture.accelerator import Accelerator
from zigzag.classes.hardware.architecture.core import Core
def memory_hierarchy_latency_test1(multiplier_array):
"""Memory hierarchy variables"""
""" size=#bit, bw=(read bw, write bw), cost=(read word energy, write work energy) """
rf1 = MemoryInstance(
name="rf_64B",
size=512,
r_bw=8,
w_bw=8,
r_cost=1.0,
w_cost=1.5,
area=0.3,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
) # rd E per bit 0.125
rf2 = MemoryInstance(
name="rf_16B",
size=128,
r_bw=24,
w_bw=24,
r_cost=1.5,
w_cost=2,
area=0.95,
r_port=1,
w_port=1,
rw_port=1,
latency=1,
) # rd E per bit 0.0625
# lb1 = MemoryInstance(name="sram_64KB", size=524288, r_bw=128, w_bw=128, r_cost=20, w_cost=25, area=6, r_port=1, w_port=1, rw_port=0, latency=1) # rd E per bit 0.16
    lb2 = MemoryInstance(
        name="sram_8KB",
        size=65536,
        r_bw=128,
        w_bw=128,
        r_cost=10,
        w_cost=15,
        area=3,
        r_port=0,
        w_port=0,
        rw_port=2,
        latency=1,
    ) # rd E per bit 0.08
lb2_64KB = MemoryInstance(
name="sram_64KB",
size=524288,
r_bw=128,
w_bw=128,
r_cost=20,
w_cost=25,
area=6,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
    ) # rd E per bit 0.16 (= 20/128)
gb = MemoryInstance(
name="sram_1M",
size=8388608,
r_bw=384,
w_bw=384,
r_cost=100,
w_cost=130,
area=25,
r_port=0,
w_port=0,
rw_port=2,
latency=1,
) # rd E per bit 0.26
dram = MemoryInstance(
name="dram",
size=10000000000,
r_bw=64,
w_bw=64,
r_cost=1000,
w_cost=1000,
area=0,
r_port=0,
w_port=0,
rw_port=1,
latency=1,
) # rd E per bit 16
memory_hierarchy_graph = MemoryHierarchy(operational_array=multiplier_array)
"""
fh: from high = wr_in_by_high
fl: from low = wr_in_by_low
th: to high = rd_out_to_high
tl: to low = rd_out_to_low
"""
memory_hierarchy_graph.add_memory(
memory_instance=rf1,
operands=("I1",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions=set(),
)
memory_hierarchy_graph.add_memory(
memory_instance=rf1,
operands=("I2",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions=set(),
)
memory_hierarchy_graph.add_memory(
memory_instance=rf2,
operands=("O",),
port_alloc=(
{"fh": "rw_port_1", "tl": "r_port_1", "fl": "w_port_1", "th": "rw_port_1"},
),
served_dimensions=set(),
)
memory_hierarchy_graph.add_memory(
memory_instance=lb2,
operands=("O",),
port_alloc=(
{
"fh": "rw_port_1",
"tl": "rw_port_2",
"fl": "rw_port_2",
"th": "rw_port_1",
},
),
served_dimensions="all",
)
memory_hierarchy_graph.add_memory(
memory_instance=lb2_64KB,
operands=("I2",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions="all",
)
memory_hierarchy_graph.add_memory(
memory_instance=gb,
operands=("I1", "O"),
port_alloc=(
{"fh": "rw_port_1", "tl": "rw_port_2", "fl": None, "th": None},
{
"fh": "rw_port_1",
"tl": "rw_port_2",
"fl": "rw_port_2",
"th": "rw_port_1",
},
),
served_dimensions="all",
)
memory_hierarchy_graph.add_memory(
memory_instance=dram,
operands=("I1", "I2", "O"),
port_alloc=(
{"fh": "rw_port_1", "tl": "rw_port_1", "fl": None, "th": None},
{"fh": "rw_port_1", "tl": "rw_port_1", "fl": None, "th": None},
{
"fh": "rw_port_1",
"tl": "rw_port_1",
"fl": "rw_port_1",
"th": "rw_port_1",
},
),
served_dimensions="all",
)
# from visualization.graph.memory_hierarchy import visualize_memory_hierarchy_graph
# visualize_memory_hierarchy_graph(memory_hierarchy_graph)
return memory_hierarchy_graph
def multiplier_array_latency_test1():
"""Multiplier array variables"""
multiplier_input_precision = [8, 8]
multiplier_energy = 0.5
multiplier_area = 0.1
dimensions = {"D1": 14, "D2": 12}
multiplier = Multiplier(
multiplier_input_precision, multiplier_energy, multiplier_area
)
multiplier_array = MultiplierArray(multiplier, dimensions)
return multiplier_array
def cores_dut():
multiplier_array1 = multiplier_array_latency_test1()
memory_hierarchy1 = memory_hierarchy_latency_test1(multiplier_array1)
core1 = Core(1, multiplier_array1, memory_hierarchy1)
return {core1}
cores = cores_dut()
accelerator = Accelerator("Eyeriss-like-simple", cores)
import os
from zigzag.classes.hardware.architecture.memory_hierarchy import MemoryHierarchy
from zigzag.classes.hardware.architecture.memory_level import MemoryLevel
from zigzag.classes.hardware.architecture.operational_unit import Multiplier
from zigzag.classes.hardware.architecture.operational_array import MultiplierArray
from zigzag.classes.hardware.architecture.memory_instance import MemoryInstance
from zigzag.classes.hardware.architecture.accelerator import Accelerator
from zigzag.classes.hardware.architecture.core import Core
def memory_hierarchy_dut(multiplier_array):
"""Memory hierarchy variables"""
""" size=#bit, bw=(read bw, write bw), cost=(read word energy, write work energy) """
reg_W1 = MemoryInstance(
name="rf_1B",
size=8,
r_bw=8,
w_bw=8,
r_cost=0.01,
w_cost=0.01,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
)
reg_O4 = MemoryInstance(
name="rf_4B",
size=32,
r_bw=16,
w_bw=16,
r_cost=0.022,
w_cost=0.022,
area=0,
r_port=2,
w_port=2,
rw_port=0,
latency=1,
)
##################################### on-chip memory hierarchy building blocks #####################################
sram_1KB_256_1r_1w_I = MemoryInstance(
name="sram_1KB_I",
size=1024 * 8,
r_bw=256,
w_bw=256,
r_cost=4.78,
w_cost=5.59,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
sram_1KB_256_1r_1w_W = MemoryInstance(
name="sram_1KB_W",
size=1024 * 8,
r_bw=256,
w_bw=256,
r_cost=4.78,
w_cost=5.59,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
sram_1M_with_8_128K_bank_128_1r_1w_A = MemoryInstance(
name="sram_1MB_A",
size=131072 * 8 * 8,
r_bw=128 * 8,
w_bw=128 * 8,
r_cost=26.01 * 8,
w_cost=23.65 * 8,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
sram_1M_with_8_128K_bank_128_1r_1w_W = MemoryInstance(
name="sram_1MB_W",
size=131072 * 8 * 8,
r_bw=128 * 8,
w_bw=128 * 8,
r_cost=26.01 * 8,
w_cost=23.65 * 8,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
#######################################################################################################################
dram = MemoryInstance(
name="dram",
size=10000000000,
r_bw=64,
w_bw=64,
r_cost=700,
w_cost=750,
area=0,
r_port=0,
w_port=0,
rw_port=1,
latency=1,
)
memory_hierarchy_graph = MemoryHierarchy(operational_array=multiplier_array)
"""
fh: from high = wr_in_by_high
fl: from low = wr_in_by_low
th: to high = rd_out_to_high
tl: to low = rd_out_to_low
"""
    # We don't use an unrolled I-Reg here, to better support G (group) unrolling
# memory_hierarchy_graph.add_memory(memory_instance=reg_IW1, operands=('I1',),
# port_alloc=({'fh': 'w_port_1', 'tl': 'r_port_1', 'fl': None, 'th': None},),
# served_dimensions={(0, 0, 0, 0)})
memory_hierarchy_graph.add_memory(
memory_instance=reg_W1,
operands=("I2",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions={(0, 1, 0), (0, 0, 1)},
)
memory_hierarchy_graph.add_memory(
memory_instance=reg_O4,
operands=("O",),
port_alloc=(
{"fh": "w_port_1", "tl": "r_port_1", "fl": "w_port_2", "th": "r_port_2"},
),
served_dimensions={(0, 0, 0)},
)
##################################### on-chip highest memory hierarchy initialization #####################################
memory_hierarchy_graph.add_memory(
memory_instance=sram_1KB_256_1r_1w_I,
operands=("I1",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions="all",
)
memory_hierarchy_graph.add_memory(
memory_instance=sram_1KB_256_1r_1w_W,
operands=("I2",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions="all",
)
# memory_hierarchy_graph.add_memory(memory_instance=sram_2KB_with_2_1KB_256_1r_1w, operands=('O',),
# port_alloc=({'fh': 'w_port_1', 'tl': 'r_port_1', 'fl': 'w_port_1', 'th': 'r_port_1'},),
# served_dimensions='all')
memory_hierarchy_graph.add_memory(
memory_instance=sram_1M_with_8_128K_bank_128_1r_1w_W,
operands=("I2",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions="all",
)
memory_hierarchy_graph.add_memory(
memory_instance=sram_1M_with_8_128K_bank_128_1r_1w_A,
operands=("I1", "O"),
port_alloc=(
{"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},
{"fh": "w_port_1", "tl": "r_port_1", "fl": "w_port_1", "th": "r_port_1"},
),
served_dimensions="all",
)
####################################################################################################################
memory_hierarchy_graph.add_memory(
memory_instance=dram,
operands=("I1", "I2", "O"),
port_alloc=(
{"fh": "rw_port_1", "tl": "rw_port_1", "fl": None, "th": None},
{"fh": "rw_port_1", "tl": "rw_port_1", "fl": None, "th": None},
{
"fh": "rw_port_1",
"tl": "rw_port_1",
"fl": "rw_port_1",
"th": "rw_port_1",
},
),
served_dimensions="all",
)
from zigzag.visualization.graph.memory_hierarchy import (
visualize_memory_hierarchy_graph,
)
# visualize_memory_hierarchy_graph(memory_hierarchy_graph)
return memory_hierarchy_graph
def multiplier_array_dut():
"""Multiplier array variables"""
multiplier_input_precision = [8, 8]
multiplier_energy = 0.04
multiplier_area = 1
dimensions = {
"D1": 32,
"D2": 8,
"D3": 4,
} # {'D1': ('K', 32), 'D2': ('OX', 8), 'D3': ('OY', 4),}
multiplier = Multiplier(
multiplier_input_precision, multiplier_energy, multiplier_area
)
multiplier_array = MultiplierArray(multiplier, dimensions)
return multiplier_array
def cores_dut():
multiplier_array1 = multiplier_array_dut()
memory_hierarchy1 = memory_hierarchy_dut(multiplier_array1)
core1 = Core(1, multiplier_array1, memory_hierarchy1)
return {core1}
cores = cores_dut()
acc_name = os.path.basename(__file__)[:-3]
accelerator = Accelerator(acc_name, cores) | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/inputs/examples/hardware/Tesla_NPU_like.py | Tesla_NPU_like.py |
import os
from zigzag.classes.hardware.architecture.memory_hierarchy import MemoryHierarchy
from zigzag.classes.hardware.architecture.memory_level import MemoryLevel
from zigzag.classes.hardware.architecture.operational_unit import Multiplier
from zigzag.classes.hardware.architecture.operational_array import MultiplierArray
from zigzag.classes.hardware.architecture.memory_instance import MemoryInstance
from zigzag.classes.hardware.architecture.accelerator import Accelerator
from zigzag.classes.hardware.architecture.core import Core
def memory_hierarchy_dut(multiplier_array):
"""Memory hierarchy variables"""
""" size=#bit, bw=(read bw, write bw), cost=(read word energy, write work energy) """
reg_W1 = MemoryInstance(
name="rf_1B",
size=8,
r_bw=8,
w_bw=8,
r_cost=0.01,
w_cost=0.01,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
)
reg_O1 = MemoryInstance(
name="rf_2B",
size=16,
r_bw=16,
w_bw=16,
r_cost=0.02,
w_cost=0.02,
area=0,
r_port=2,
w_port=2,
rw_port=0,
latency=1,
)
##################################### on-chip memory hierarchy building blocks #####################################
sram_64KB_with_8_8K_64_1r_1w_I = MemoryInstance(
name="sram_64KB_I",
size=8192 * 8,
r_bw=64 * 8,
w_bw=64 * 8,
r_cost=3.32 * 8,
w_cost=3.84 * 8,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
sram_64KB_with_8_8K_256_1r_1w_W = MemoryInstance(
name="sram_64KB_W",
size=8192 * 8,
r_bw=256 * 8,
w_bw=256 * 8,
r_cost=6.27 * 8,
w_cost=13.5 * 8,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
sram_256KB_with_8_32KB_256_1r_1w_O = MemoryInstance(
name="sram_256KB_O",
size=32768 * 8 * 8,
r_bw=256 * 8,
w_bw=256 * 8,
r_cost=15.4 * 8,
w_cost=26.6 * 8,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
sram_256KB_with_8_32KB_256_1r_1w_O_staging = MemoryInstance(
name="sram_256KB_O_staging",
size=32768 * 8 * 8 + 1,
r_bw=256 * 8,
w_bw=256 * 8,
r_cost=15.4 * 8,
w_cost=26.6 * 8,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
sram_1M_with_8_128K_bank_128_1r_1w_A = MemoryInstance(
name="sram_1MB_A",
size=131072 * 8 * 8,
r_bw=512 * 8,
w_bw=512 * 8,
r_cost=58.2 * 8,
w_cost=103.2 * 8,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
sram_1M_with_8_128K_bank_128_1r_1w_W = MemoryInstance(
name="sram_1MB_W",
size=131072 * 8 * 8,
r_bw=512 * 8,
w_bw=512 * 8,
r_cost=58.2 * 8,
w_cost=103.2 * 8,
area=0,
r_port=1,
w_port=1,
rw_port=0,
latency=1,
min_r_granularity=64,
min_w_granularity=64,
)
#######################################################################################################################
dram = MemoryInstance(
name="dram",
size=10000000000,
r_bw=64,
w_bw=64,
r_cost=700,
w_cost=750,
area=0,
r_port=0,
w_port=0,
rw_port=1,
latency=1,
)
memory_hierarchy_graph = MemoryHierarchy(operational_array=multiplier_array)
"""
fh: from high = wr_in_by_high
fl: from low = wr_in_by_low
th: to high = rd_out_to_high
tl: to low = rd_out_to_low
"""
    # We don't use an unrolled I-Reg here, to better support G (group) unrolling
# memory_hierarchy_graph.add_memory(memory_instance=reg_IW1, operands=('I1',),
# port_alloc=({'fh': 'w_port_1', 'tl': 'r_port_1', 'fl': None, 'th': None},),
# served_dimensions={(0, 0, 0, 0)})
memory_hierarchy_graph.add_memory(
memory_instance=reg_W1,
operands=("I2",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions={(0, 0, 1, 0), (0, 0, 0, 1)},
)
memory_hierarchy_graph.add_memory(
memory_instance=reg_O1,
operands=("O",),
port_alloc=(
{"fh": "w_port_1", "tl": "r_port_1", "fl": "w_port_2", "th": "r_port_2"},
),
served_dimensions={(0, 1, 0, 0)},
)
##################################### on-chip highest memory hierarchy initialization #####################################
memory_hierarchy_graph.add_memory(
memory_instance=sram_64KB_with_8_8K_256_1r_1w_W,
operands=("I2",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions="all",
)
memory_hierarchy_graph.add_memory(
memory_instance=sram_64KB_with_8_8K_64_1r_1w_I,
operands=("I1",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions="all",
)
memory_hierarchy_graph.add_memory(
memory_instance=sram_256KB_with_8_32KB_256_1r_1w_O,
operands=("O",),
port_alloc=(
{"fh": "w_port_1", "tl": "r_port_1", "fl": "w_port_1", "th": "r_port_1"},
),
served_dimensions="all",
)
# memory_hierarchy_graph.add_memory(memory_instance=sram_256KB_with_8_32KB_256_1r_1w_O_staging, operands=('O',),
# port_alloc=({'fh': 'w_port_1', 'tl': 'r_port_1', 'fl': 'w_port_1', 'th': 'r_port_1'},),
# served_dimensions='all')
memory_hierarchy_graph.add_memory(
memory_instance=sram_1M_with_8_128K_bank_128_1r_1w_W,
operands=("I2",),
port_alloc=({"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},),
served_dimensions="all",
)
memory_hierarchy_graph.add_memory(
memory_instance=sram_1M_with_8_128K_bank_128_1r_1w_A,
operands=("I1", "O"),
port_alloc=(
{"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None},
{"fh": "w_port_1", "tl": "r_port_1", "fl": "w_port_1", "th": "r_port_1"},
),
served_dimensions="all",
)
####################################################################################################################
memory_hierarchy_graph.add_memory(
memory_instance=dram,
operands=("I1", "I2", "O"),
port_alloc=(
{"fh": "rw_port_1", "tl": "rw_port_1", "fl": None, "th": None},
{"fh": "rw_port_1", "tl": "rw_port_1", "fl": None, "th": None},
{
"fh": "rw_port_1",
"tl": "rw_port_1",
"fl": "rw_port_1",
"th": "rw_port_1",
},
),
served_dimensions="all",
)
from zigzag.visualization.graph.memory_hierarchy import (
visualize_memory_hierarchy_graph,
)
# visualize_memory_hierarchy_graph(memory_hierarchy_graph)
return memory_hierarchy_graph
def multiplier_array_dut():
"""Multiplier array variables"""
multiplier_input_precision = [8, 8]
multiplier_energy = 0.04
multiplier_area = 1
dimensions = {
"D1": 16,
"D2": 16,
"D3": 2,
"D4": 2,
} # {'D1': ('K', 16), 'D2': ('C', 16), 'D3': ('OX', 2), 'D4': ('OY', 2),}
multiplier = Multiplier(
multiplier_input_precision, multiplier_energy, multiplier_area
)
multiplier_array = MultiplierArray(multiplier, dimensions)
return multiplier_array
def cores_dut():
multiplier_array1 = multiplier_array_dut()
memory_hierarchy1 = memory_hierarchy_dut(multiplier_array1)
core1 = Core(1, multiplier_array1, memory_hierarchy1)
return {core1}
cores = cores_dut()
acc_name = os.path.basename(__file__)[:-3]
accelerator = Accelerator(acc_name, cores) | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/inputs/examples/hardware/Ascend_like.py | Ascend_like.py |
from typing import Set
import itertools
from zigzag.classes.hardware.architecture.core import Core
from zigzag.classes.hardware.architecture.dimension import Dimension
from zigzag.classes.hardware.architecture.memory_hierarchy import MemoryHierarchy
from zigzag.classes.hardware.architecture.operational_array import OperationalArray
## Class that generates valid user-format spatial mappings.
class UserSpatialMappingGenerator:
## The class constructor
# @param layer
# @param accelerator
def __init__(self, layer, accelerator) -> None:
self.layer = layer
self.accelerator = accelerator
def run(self):
return self.generate_user_spatial_mappings()
## Generator that yields user-defined spatial mappings.
# User-defined means across operational array dimensions.
    # For example, this might yield {'D1': (C, 16), 'D2': (K, 16)}
# In essence it works as follows:
# \code{.py}
# for each operational array dimension oa_dim (D1, D2, ...):
# for each layer operand layer_op (W, I, O, ...):
# if oa_dim not in served_dimensions(layer_op):
# continue
# else:
# for layer dimensions layer_dim (B, K, ...) in the layer:
# if layer_dim is irrelevant for layer_op:
# layer_dim can be unrolled maximally
# if layer_dim is not irrelevant for layer_op:
# layer_dim can be unrolled if the BW allows it (assumes flexible "bus" reads)
# \endcode
def generate_user_spatial_mappings(self):
core_id = self.layer.core_allocation
core: Core = self.accelerator.get_core(core_id=core_id)
operational_array: OperationalArray = core.operational_array
oa_dims = operational_array.dimensions
memory_hierarchy: MemoryHierarchy = core.memory_hierarchy
innermost_levels = memory_hierarchy.get_inner_memories()
# For every operational array dimension, we initialize it by maximally unrolling all layer dimensions.
# Later these will be restricted if the memory structure doesn't allow for this unrolling
oa_dim_unrolling = {
oa_dim: {
layer_dim: int(min(layer_size, oa_dim.size))
for layer_dim, layer_size in self.layer.loop_dim_size.items()
}
for oa_dim in oa_dims
}
for memory_level in innermost_levels:
served_dimensions: Set[Dimension] = memory_level.served_dimensions
mem_ops = memory_level.operands
for mem_op in mem_ops:
layer_op = self.layer.get_layer_operand(
mem_op=mem_op
) # get the layer operand
if layer_op == "O":
mem_bandwidth = (
memory_level.write_bw
) # partial outputs are written to the memory
else:
mem_bandwidth = (
memory_level.read_bw
) # inputs are read from the memory
precision = self.layer.operand_precision[
layer_op
] # bit precision of layer operand
irrelevant_dimensions = self.layer.get_operand_irrelevant_dimensions(
layer_op
)
for oa_dim in oa_dims:
if oa_dim not in served_dimensions:
continue
# If the operational array dimension is a served dimension of the lowest memory level,
# we ought to limit the unrolling for the relevant and partially relevant loop dimensions
for (layer_dim, unrolling_size) in oa_dim_unrolling[oa_dim].items():
if layer_dim in irrelevant_dimensions:
continue
# If not irrelevant, it is (partially) relevant. Limit based on BW and operand precision.
try:
max_multicast_elements = mem_bandwidth // precision
except ZeroDivisionError:
max_multicast_elements = unrolling_size
oa_dim_unrolling[oa_dim][layer_dim] = min(
max_multicast_elements, unrolling_size
)
# At this point the unrolled layer dimensions are maximal (wrt the served dimensions and bandwidth of the lowest memory level).
# The unrolling size might not be a factor of the layer dimension size, which is required (for non greedy mapping).
# Convert the unrolling size to be a factor of the layer dimension size. At the same time convert them to a list.
unrollings = []
for oa_dim in oa_dims:
oa_dim_unrollings = []
for (layer_dim, unrolling_size) in oa_dim_unrolling[oa_dim].items():
layer_dim_size = self.layer.loop_dim_size[layer_dim]
# If e.g. the unrolling size is 10 (because operational array dimension size is 10)
# but the layer dimension size is 14, this would result in a temporal remainder of 14/10.
# In that case we change the unrolling size to 7 (to be a factor of 14).
# We have to make sure the unrolling size is a divisor of the layer dimension size:
# Jan 18 2023: Commented this out as LomaStage allows greedy mapping by adding one more temporal iteration
# while layer_dim_size % unrolling_size != 0:
# unrolling_size -= 1 # decrement the unrolling by 1
# If the unrolling_size is not 1, add it to the unrollings for this oa_dim
if unrolling_size != 1:
oa_dim_unrollings.append((layer_dim, unrolling_size))
            # In case there are no unrollings (of size > 1) possible, add a single None element.
            # This None is filtered out below when the user spatial mapping dict is assembled,
            # so this operational array dimension is simply left without an unrolling.
if len(oa_dim_unrollings) == 0:
oa_dim_unrollings.append(None)
unrollings.append(oa_dim_unrollings)
# Now we have for each operational array dimension the layer dimensions and size they can be unrolled without fractional remainder.
# Now we have to combine them into user-defined spatial mappings.
for combination in itertools.product(*unrollings):
            # Zip the combination (which is a (layer_dim, layer_size) for each oa_dim) with the oa_dim names.
oa_dim_names = [oa_dim.name for oa_dim in oa_dims]
user_spatial_mapping = {
oa_dim_name: unrolling
for (oa_dim_name, unrolling) in zip(oa_dim_names, combination)
if unrolling is not None
}
yield user_spatial_mapping
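    # A minimal usage sketch (illustrative only; assumes `layer` and
    # `accelerator` objects have been constructed elsewhere):
    #
    #   generator = UserSpatialMappingGenerator(layer, accelerator)
    #   for user_spatial_mapping in generator.run():
    #       print(user_spatial_mapping)  # e.g. {'D1': ('K', 16), 'D2': ('C', 16)}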
@staticmethod
def all_unique(items):
return len(set(items)) == len(items) | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/opt/spatial/generator.py | generator.py |
from copy import deepcopy
from sympy.ntheory import factorint
import numpy as np
import logging
import random
from zigzag.classes.hardware.architecture.accelerator import Accelerator
from zigzag.classes.workload.layer_node import LayerNode
from zigzag.classes.mapping.spatial.spatial_mapping import SpatialMapping
from zigzag.classes.hardware.architecture.memory_hierarchy import MemoryHierarchy
from zigzag.classes.opt.temporal.loma.multipermute import permutations
from zigzag.classes.opt.temporal.loma.memory_allocator import MemoryAllocator
from zigzag.classes.opt.temporal.salsa.state import SalsaState
logger = logging.getLogger(__name__)
## Class that handles optimization of temporal mapping given a:
# - layer
# - spatial mapping
# - memory hierarchy
# - number of iterations
# - start temperature
# This optimization is carried out through loop-order-based simulated annealing.
# Each loop is broken down to the smallest possible part (prime factors), then a runtime
# estimation is performed to choose the fastest engine to use (LOMA or SALSA).
class SalsaEngine:
## The class constructor
    # @param accelerator
# @param layer
    # @param spatial_mapping
# @param kwargs
def __init__(
self,
*,
accelerator: Accelerator,
layer: LayerNode,
spatial_mapping: SpatialMapping,
**kwargs,
):
# iteration_number, start_temperature, opt_criterion_name
# Initialize the engine with the given:
# - LayerNode
# - SpatialMapping
# - Accelerator
# - Number of iterations
# - Start temperature
# The memory hierarchy from the correct core is extracted from the accelerator.
# Hardware and mapping related inputs
self.accelerator = accelerator
self.layer = layer
self.spatial_mapping = spatial_mapping
# self.memory_hierarchy: MemoryHierarchy = self.accelerator.get_core(layer.core_allocation).memory_hierarchy
# Algorithm related inputs
self.iteration_number = kwargs.get("salsa_iteration_number", 1000)
self.start_temperature = kwargs.get("salsa_start_temperature", 0.05)
self.opt_criterion_name = kwargs.get("salsa_opt_criterion", "energy")
self.lpf_limit = kwargs.get("loma_lpf_limit", 4)
## Call the necessary methods, start the processes and collect the best temporal mapping found during the run.
def run(self, cme_queue):
self.cme_queue = cme_queue
self.get_temporal_loops()
self.get_prime_factors()
self.run_simulated_annealing_opt(self.cme_queue)
    ## Run a simulated annealing optimization on the loop ordering using a loma memory allocation strategy.
def run_simulated_annealing_opt(self, cme_queue):
temperature = self.start_temperature
        start_ordering = self.temporal_mapping_lpf  # use the lpf list as the initial ordering
# Initialize the algorithm with a random starting point
random.shuffle(start_ordering)
# Initialize variables to store current, next and best state
best_state = SalsaState(
self.accelerator,
self.layer,
self.spatial_mapping,
start_ordering,
self.opt_criterion_name,
)
current_state = SalsaState(
self.accelerator,
self.layer,
self.spatial_mapping,
start_ordering,
self.opt_criterion_name,
)
next_state = SalsaState(
self.accelerator,
self.layer,
self.spatial_mapping,
start_ordering,
self.opt_criterion_name,
)
for it in range(self.iteration_number):
temperature = self.start_temperature * (0.995**it)
# Get the index of the loop to swap
i = np.random.randint(0, len(current_state.ordering))
j = np.random.randint(0, len(current_state.ordering))
# Swap the loops
next_state = current_state.swap(i, j)
x = np.random.rand() # x belongs to [0, 1]
p = np.exp(
((current_state.opt_criterion / next_state.opt_criterion) - 1)
/ temperature
) # probability of accepting the next state
if x < p:
# Replace the current state by the next state and compare the energy with the best state
current_state = deepcopy(next_state)
if current_state.opt_criterion < best_state.opt_criterion:
best_state = deepcopy(current_state)
cme_queue.put(best_state.cme)
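    # A minimal usage sketch (illustrative only; assumes `accelerator`, `layer`
    # and `spatial_mapping` objects exist, and that a multiprocessing.Queue is
    # used to collect the best cost model evaluation found during the run):
    #
    #   from multiprocessing import Queue
    #   cme_queue = Queue()
    #   engine = SalsaEngine(accelerator=accelerator, layer=layer,
    #                        spatial_mapping=spatial_mapping)
    #   engine.run(cme_queue)
    #   best_cme = cme_queue.get()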
## Get all loops that have to be temporally scheduled given layer and spatial mapping.
def get_temporal_loops(self):
temporal_loop_dim_size = (
self.layer.loop_dim_size.copy()
) # init with all loop sizes
for spatial_loop in self.spatial_mapping.spatial_loop_dim_size:
(spatial_loop_dim, spatial_loop_size) = spatial_loop
# Allow greedy mapping. If the spatial unrolling is not a multiple of the layer dimension size,
# we take the ceil of the division, so there can be one extra temporal iteration.
q = int(
np.ceil(temporal_loop_dim_size[spatial_loop_dim] / spatial_loop_size)
)
# q, rem = divmod(temporal_loop_dim_size[spatial_loop_dim], spatial_loop_size)
# assert rem == 0, "Division of dimension size by spatial unrolling size is not an integer"
if q == 1:
del temporal_loop_dim_size[spatial_loop_dim]
else:
temporal_loop_dim_size[spatial_loop_dim] = q
# Remove all dimensions with a temporal loop size of 1
temporal_loop_dim_size_no_1s = {
key: val for (key, val) in temporal_loop_dim_size.items() if val > 1
}
self.temporal_loop_dim_size = temporal_loop_dim_size_no_1s
min_nb_temporal_loops = len(self.temporal_loop_dim_size)
if self.lpf_limit < min_nb_temporal_loops:
logger.debug(
f"Updated layer {self.layer}'s lpf limit from {self.lpf_limit} to {min_nb_temporal_loops} lpfs."
)
self.lpf_limit = min_nb_temporal_loops
## Get the prime factors for all temporal loops in the following format:
# [('C', 2), ('OY', 2), ('OX', 2), ('K', 7), ...]
def get_prime_factors(self):
temporal_loop_pfs = {}
temporal_loop_pf_counts = {}
temporal_loop_pf_count_sums = {}
lpfs = []
self.temporal_mapping_lpf = []
for (
tl_dim,
tl_size,
) in self.temporal_loop_dim_size.items(): # tl = temporal loop
factors = factorint(tl_size)
pfs = []
counts = []
for pf, multiplicity in factors.items():
pfs.append(pf)
counts.append(multiplicity)
for i in range(multiplicity):
lpfs.append((tl_dim, pf))
temporal_loop_pfs[tl_dim] = tuple(pfs)
temporal_loop_pf_counts[tl_dim] = tuple(counts)
temporal_loop_pf_count_sums[tl_dim] = sum(counts)
# logger.info(f"Generated {len(lpfs)} LPFs for layer {self.layer}.")
for loop_type in list(temporal_loop_pfs.keys()):
for i in range(len(temporal_loop_pfs[loop_type])):
loop_size = temporal_loop_pfs[loop_type]
for number_of_loop in range(temporal_loop_pf_counts[loop_type][i]):
self.temporal_mapping_lpf.append((loop_type, loop_size[i])) | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/opt/temporal/salsa/engine.py | engine.py |
from copy import deepcopy
from zigzag.classes.opt.temporal.loma.memory_allocator import MemoryAllocator
from zigzag.classes.cost_model.cost_model import CostModelEvaluation
from zigzag.classes.hardware.architecture.accelerator import Accelerator
from zigzag.classes.workload.layer_node import LayerNode
from zigzag.classes.mapping.spatial.spatial_mapping import SpatialMapping
from zigzag.classes.hardware.architecture.memory_hierarchy import MemoryHierarchy
## State of SALSA, storing an ordering, its temporal mapping and its energy value.
class SalsaState:
def __init__(
self,
accelerator: Accelerator,
layer: LayerNode,
spatial_mapping: SpatialMapping,
ordering,
opt_criterion_name,
):
self.ordering = ordering
self.accelerator = accelerator
self.layer = layer
self.spatial_mapping = spatial_mapping
self.memory_hierarchy: MemoryHierarchy = self.accelerator.get_core(
layer.core_allocation
).memory_hierarchy
self.opt_criterion_name = opt_criterion_name
allocator = MemoryAllocator(
self.accelerator, self.layer, self.spatial_mapping, ordering
)
# allocator = MemoryAllocator(layer=self.layer,
# ordering=ordering,
# spatial_mapping=self.spatial_mapping)
self.temporal_mapping = (
allocator.run()
) # allocate this ordering to the memories
self.cme = CostModelEvaluation(
accelerator=self.accelerator,
layer=self.layer,
spatial_mapping=self.spatial_mapping,
temporal_mapping=self.temporal_mapping,
)
# The optimization criterion will be minimized
if self.opt_criterion_name == "energy":
self.opt_criterion = self.cme.energy_total
elif self.opt_criterion_name == "latency":
self.opt_criterion = self.cme.latency_total0
else:
self.opt_criterion = None
    ## Swap the elements at positions i and j in the ordering
    # and return the new resulting state.
def swap(self, i, j):
swapped_ordering = deepcopy(self.ordering)
temp = swapped_ordering[i]
swapped_ordering[i] = swapped_ordering[j]
swapped_ordering[j] = temp
return SalsaState(
self.accelerator,
self.layer,
self.spatial_mapping,
swapped_ordering,
self.opt_criterion_name,
) | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/opt/temporal/salsa/state.py | state.py |
from math import factorial
import operator
from tqdm import tqdm
import numpy as np
from sympy.ntheory import factorint
import logging
from zigzag.classes.hardware.architecture.memory_hierarchy import MemoryHierarchy
from zigzag.classes.opt.temporal.loma.multipermute import permutations
from zigzag.classes.opt.temporal.loma.memory_allocator import (
MemoryHierarchyTooSmallException,
MemoryTooSmallException,
MemoryAllocator,
)
logger = logging.getLogger(__name__)
## Exception raised when no valid loop ordering is found for a layer.
class NoValidLoopOrderingFoundException(Exception):
pass
## Class that handles optimization of temporal mapping given a:
# - layer
# - spatial mapping
# - a memory hierarchy
#
# This optimization is carried out through loop order based memory allocation.
# For each ordering of the temporal loops, they are allocated bottom-up to the
# levels in the memory hierarchy.
#
# See https://ieeexplore.ieee.org/document/9458493 for more details.
class LomaEngine:
## The class constructor
# Initialize the engine with the given:
# - Accelerator
# - LayerNode
# - SpatialMapping
#
# The memory hierarchy from the correct core is extracted from the accelerator.
#
    # @param accelerator: accelerator to use the memory hierarchy of
    # @param layer: layer to generate temporal mappings for
    # @param spatial_mapping: SpatialMapping to use
    # @param loma_lpf_limit: maximum number of loop prime factors to consider
    # @param kwargs: further unused, for ease of calling only
def __init__(
self, *, accelerator, layer, spatial_mapping, loma_lpf_limit=np.inf, **kwargs
):
self.lpf_limit = loma_lpf_limit
self.accelerator = accelerator
self.layer = layer
self.spatial_mapping = spatial_mapping
# Extract the memory hierarchy from the accelerator
# TODO: Take into account that data might be stored in lower level,
# TODO: thus adapt the memory hierarchy.
# TODO: The fact that there is a global buffer above the cores requires attention.
core_id = layer.core_allocation
self.memory_hierarchy: MemoryHierarchy = accelerator.get_core(
core_id
).memory_hierarchy
self.show_progress_bar = kwargs.get("loma_show_progress_bar", False)
## Runs the LomaEngine
# @return Generator that yields all temporal mappings
def run(self):
# TODO: add the criterion(s) as inputs to this function.
logger.info("Running temporal mapping search engine...")
self.get_temporal_loops() # get all the temporal loops
self.get_prime_factors() # convert these to LPFs (loop prime factors)
if self.show_progress_bar:
pbar = tqdm(total=self.nb_permutations)
else:
pbar = None
yielded = False
for ordering in self.og(): # ordering generator
allocator = MemoryAllocator(
self.accelerator, self.layer, self.spatial_mapping, ordering
)
# using try catch here because in the depth-first mode the highest level might not be big enough
try:
temporal_mapping = (
allocator.run()
) # allocate this ordering to the memories
yielded = True
yield temporal_mapping
except MemoryHierarchyTooSmallException:
pass
except MemoryTooSmallException:
pass # Skip the ordering that crashed due to ordering (+su) not fitting in memory
if self.show_progress_bar:
pbar.update(1)
if self.show_progress_bar:
pbar.close()
if not yielded:
raise NoValidLoopOrderingFoundException(
f"No valid loop ordering was found for layer {self.layer}. Please make sure the spatial mapping is compatible with the architecture."
)
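    # A minimal usage sketch (illustrative only; assumes `accelerator`, `layer`
    # and `spatial_mapping` objects exist):
    #
    #   engine = LomaEngine(accelerator=accelerator, layer=layer,
    #                       spatial_mapping=spatial_mapping, loma_lpf_limit=6)
    #   for temporal_mapping in engine.run():
    #       ...  # evaluate each TemporalMapping with the cost model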
## Get all loops that have to be temporally scheduled given layer and spatial mapping.
def get_temporal_loops(self):
temporal_loop_dim_size = (
self.layer.loop_dim_size.copy()
) # init with all loop sizes
for spatial_loop in self.spatial_mapping.spatial_loop_dim_size:
(spatial_loop_dim, spatial_loop_size) = spatial_loop
# Allow greedy mapping. If the spatial unrolling is not a multiple of the layer dimension size,
# we take the ceil of the division, so there can be one extra temporal iteration.
q = int(
np.ceil(temporal_loop_dim_size[spatial_loop_dim] / spatial_loop_size)
)
# q, rem = divmod(temporal_loop_dim_size[spatial_loop_dim], spatial_loop_size)
# assert rem == 0, "Division of dimension size by spatial unrolling size is not an integer"
if q == 1:
del temporal_loop_dim_size[spatial_loop_dim]
else:
temporal_loop_dim_size[spatial_loop_dim] = q
# Remove all dimensions with a temporal loop size of 1
temporal_loop_dim_size_no_1s = {
key: val for (key, val) in temporal_loop_dim_size.items() if val > 1
}
self.temporal_loop_dim_size = temporal_loop_dim_size_no_1s
min_nb_temporal_loops = len(self.temporal_loop_dim_size)
if self.lpf_limit < min_nb_temporal_loops:
logger.debug(
f"Updated layer {self.layer}'s lpf limit from {self.lpf_limit} to {min_nb_temporal_loops} lpfs."
)
self.lpf_limit = min_nb_temporal_loops
## Get the prime factors for all temporal loops.
# This is saved in three separate class attributes (temporal_loop_pfs, temporal_loop_pf_counts, temporal_loop_pf_count_sums)
def get_prime_factors(self):
# temporal_loop_pfs: a dict that for each temporal loop dimension contains the prime factors
# temporal_loop_pf_counts: a dict that for each temporal loop dimension contains the prime factor multiplicities
# temporal_loop_pf_count_sums: a dict that for each temporal loop dimension contains the total amount of prime factors
temporal_loop_pfs = {}
temporal_loop_pf_counts = {}
temporal_loop_pf_count_sums = {}
lpfs = []
for (
tl_dim,
tl_size,
) in self.temporal_loop_dim_size.items(): # tl = temporal loop
factors = factorint(tl_size)
pfs = []
counts = []
for pf, multiplicity in factors.items():
pfs.append(pf)
counts.append(multiplicity)
for i in range(multiplicity):
lpfs.append((tl_dim, pf))
temporal_loop_pfs[tl_dim] = tuple(pfs)
temporal_loop_pf_counts[tl_dim] = tuple(counts)
temporal_loop_pf_count_sums[tl_dim] = sum(counts)
# If there are no temporal LPFs generated, i.e. all loops are unrolled spatially,
# we manually insert a loop of size 1
if lpfs == []:
loop_dim = self.layer.loop_dim_list[0]
temporal_loop_pfs = {loop_dim: (1,)}
temporal_loop_pf_counts = {loop_dim: (1,)}
temporal_loop_pf_count_sums = {loop_dim: 1}
lpfs = [(loop_dim, 1)]
logger.debug(f"Generated {len(lpfs)} LPFs for layer {self.layer}.")
self.temporal_loop_pfs = temporal_loop_pfs
self.temporal_loop_pf_counts = temporal_loop_pf_counts
self.temporal_loop_pf_count_sums = temporal_loop_pf_count_sums
self.lpfs = lpfs
# Limit the number of lpfs (if this is set in the settings)
self.limit_lpfs()
        # Compute how many total permutations we will have to consider
self.compute_nb_permutations()
## Compute the number of permutations that will have to be considered given the LPF distribution
def compute_nb_permutations(self):
nb_permutations = factorial(sum(self.temporal_loop_pf_count_sums.values()))
for nb_duplicated_pfs in self.temporal_loop_pf_counts.values():
for nb_duplicated_pf in nb_duplicated_pfs:
nb_permutations = int(nb_permutations / factorial(nb_duplicated_pf))
self.nb_permutations = nb_permutations
logger.debug(
f"Launching {self.nb_permutations:,} temporal loop order permutations."
)
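    # Worked example: for lpfs [('K', 2), ('K', 2), ('C', 3)] this gives
    # 3! / 2! = 3 distinct orderings, since the two identical ('K', 2) factors
    # are interchangeable.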
## Function to limit the total number of loop prime factors present in this instance.
# This function scans the lpfs and while the number of lpfs is greater than self.lpf_limit it:
# - picks the loop dimension that has the most lpfs
# - merges the smallest two lpfs of that loop dimension (multiplying their values)
def limit_lpfs(self):
n_pf = sum(self.temporal_loop_pf_count_sums.values())
if n_pf <= self.lpf_limit:
logger.debug(f"No lpf limiting performed for layer {self.layer}")
return
while n_pf > self.lpf_limit:
# Find the loop dimension with the most lpfs
max_ld = max(
self.temporal_loop_pf_count_sums.items(), key=operator.itemgetter(1)
)[0]
# Get the prime factors of this loop dimension
max_pfs = list(self.temporal_loop_pfs[max_ld])
# Get the multiplicity of these prime factors
max_counts = list(self.temporal_loop_pf_counts[max_ld])
if max_counts[0] == 1: # multiplicity of smallest pf is 1
new_factor = max_pfs[0] * max_pfs[1]
max_counts[0] -= 1
max_counts[1] -= 1
            else:  # multiplicity of smallest pf is > 1
new_factor = max_pfs[0] * max_pfs[0]
max_counts[0] -= 2
if new_factor in max_pfs: # possible if not first iteration of while loop
new_factor_idx = max_pfs.index(new_factor)
max_counts[new_factor_idx] += 1
else: # the new factor is not yet present in the factors, insert so list remains sorted
new_factor_idx = len([pf for pf in max_pfs if pf < new_factor])
max_pfs.insert(new_factor_idx, new_factor)
max_counts.insert(
new_factor_idx, 1
            )  # first time this factor occurred, count = 1
# Sanitize max_pfs and max_counts to remove all elements with multiplicity 0
non_zero_idxs = [idx for idx, count in enumerate(max_counts) if count != 0]
max_pfs = [max_pfs[non_zero_idx] for non_zero_idx in non_zero_idxs]
max_counts = [max_counts[non_zero_idx] for non_zero_idx in non_zero_idxs]
# Update the appropriate variables with these new factors and multiplicities
self.temporal_loop_pfs[max_ld] = tuple(max_pfs)
self.temporal_loop_pf_counts[max_ld] = tuple(max_counts)
self.temporal_loop_pf_count_sums[max_ld] -= 1
# Decrease the total number of factors by 1
n_pf -= 1
# Update self.lpfs for these new factors
lpfs = []
for dim in self.temporal_loop_pfs.keys():
for (pf, count) in zip(
self.temporal_loop_pfs[dim], self.temporal_loop_pf_counts[dim]
):
lpfs += list(((dim, pf),) * count)
self.lpfs = lpfs
logger.debug(f"Limited layer {self.layer} to {len(self.lpfs)} lpfs.")
return
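    # Worked example: with lpfs [('K', 2), ('K', 2), ('K', 2), ('C', 3)] and an
    # lpf limit of 3, 'K' holds the most factors, so its two smallest factors
    # are merged (2 * 2 = 4), yielding [('K', 2), ('K', 4), ('C', 3)].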
## Generator that yields all orderings of the temporal loops.
def og(self):
# The lpfs are stored in self.lpfs
return permutations(self.lpfs) | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/opt/temporal/loma/engine.py | engine.py |
from typing import Dict, List
import numpy as np
from math import prod
from zigzag.classes.hardware.architecture.memory_hierarchy import MemoryHierarchy
from zigzag.classes.hardware.architecture.memory_level import MemoryLevel
from zigzag.classes.mapping.temporal.temporal_mapping import TemporalMapping
from zigzag.classes.opt.temporal.loma.loop import Loop
## Exception raised when the highest memory level in the hierarchy is too small to store all remaining loops of an operand.
class MemoryHierarchyTooSmallException(Exception):
pass
## Exception raised when a memory level is too small to store even the loops already allocated to the levels below it.
class MemoryTooSmallException(Exception):
pass
## Class that handles allocation of a loop ordering to the memories in the hierarchy.
class MemoryAllocator:
## The class constructor
#
# Initialize the class with:
# - the layer
# - the memory hierarchy parameters
# - the spatial mapping
#
# through main_inputs and
# - ordering
#
# @param accelerator
# @param layer
# @param spatial_mapping
# @param ordering
def __init__(self, accelerator, layer, spatial_mapping, ordering: List):
self.accelerator = accelerator
self.layer = layer
self.spatial_mapping = spatial_mapping
self.ordering = ordering
# Initialize the different operands
# On the HW side, these are always called 'I1' 'I2' and 'O',
# but on the layer side, they can be anything.
layer_ops = self.layer.operand_list
self.mem_ops = [
self.layer.memory_operand_links[layer_op] for layer_op in layer_ops
]
# Translation dict from layer op to mem op
self.layer_to_mem_op = self.layer.memory_operand_links.copy()
# Translation dict from mem op to layer op
self.mem_to_layer_op = {
mem_op: layer_op for (layer_op, mem_op) in self.layer_to_mem_op.items()
}
# Bit precision for the different mem ops
self.precision = {
mem_op: self.layer.operand_precision[layer_op]
for (layer_op, mem_op) in self.layer_to_mem_op.items()
}
self.precision["O_final"] = self.layer.operand_precision.get(
"O_final", self.precision["O"]
) # Final output precision
# Initialize the unallocated loops with the ordering for each operand
self.unallocated = {}
for mem_op in self.mem_ops:
self.unallocated[mem_op] = [
Loop(dim, size) for (dim, size) in self.ordering
]
# Initialize the allocated loops with the spatial mapping at the operand level for each operand
self.allocated = {}
for (layer_op, mem_op) in self.layer_to_mem_op.items():
self.allocated[mem_op] = [
Loop(dim, size, "spatial")
for (dim, size) in self.spatial_mapping.get_unrolling(
op=layer_op, level=0
)
]
# Initialize the level of memory hierarchy for each layer operand at 1 (first memory level).
# This information is required to fetch the correct spatial loops after we have allocated temporal loops.
self.mem_level = {layer_op: 1 for layer_op in layer_ops}
# Initialize the temporal mapping dict, which is appended to throughout the allocation process.
# It is a dictionary with keys the different layer operands and values a list of lists.
# The sublists represent the memory levels for that operand and contain the loops allocated to that level.
self.temporal_mapping_dict = {layer_op: [] for layer_op in layer_ops}
## Run the memory allocation process.
# Start by the lowest memory hierarchy level and allocate as much loops as possible
# for the different operands. The spatial unrolling has to be taken into account at
# each memory level in the hierarchy.
def run(self):
# self.nodes contains the different memory nodes in bottom-up fashion
core_id = self.layer.core_allocation
memory_hierarchy: MemoryHierarchy = self.accelerator.get_core(
core_id
).memory_hierarchy
top_levels = {
mem_op: memory_hierarchy.get_operand_top_level(mem_op)
for mem_op in self.mem_ops
}
nodes = memory_hierarchy.nodes
for node in nodes:
self.allocate_node(node, top_levels)
        # After all the nodes have been allocated, we can create the TemporalMapping
# object from the dictionary we have built
temporal_mapping = TemporalMapping(self.temporal_mapping_dict, self.layer)
return temporal_mapping
## Allocate a single memory node with the best loops that remain in the unallocated loop ordering.
# @param node: The MemoryLevel to which we will allocate loops.
    # @param top_levels: A dict mapping each mem_op to the highest MemoryLevel that stores that mem_op.
    def allocate_node(self, node: MemoryLevel, top_levels: Dict[str, MemoryLevel]):
# Find out which mem operands this node stores
mem_ops = node.operands
# Then select only the mem operands that are required for this layer (e.g. pooling has no weights so one mem op less)
mem_ops = [mem_op for mem_op in mem_ops if mem_op in self.mem_ops]
# Get the capacity of this memory node (in bits)
mem_capacity = node.memory_instance.size
# For all the mem_ops, find the max amount of unallocated loops we could allocate
all_sizes = {}
for mem_op in mem_ops:
sizes = self.calc_size_slices(mem_op, mem_capacity)
all_sizes[mem_op] = sizes
# Now that we have this for all the mem_ops, call function that finds the best
# combination of loops to minimize the number of accesses to the level above
best_loop_idxs = self.find_best_loop_combination(
mem_ops, all_sizes, node, top_levels
)
for (best_loop_idx, mem_op) in zip(best_loop_idxs, mem_ops):
# Now that we have the combination of loop_idx for each mem_op, add them
# to the allocated loops and remove them from the unallocated loops
loops_to_allocate = self.unallocated[mem_op][:best_loop_idx].copy()
self.allocated[mem_op] += loops_to_allocate
del self.unallocated[mem_op][:best_loop_idx]
# Add the loops to allocate to the level-by-level temporal_mapping_dict
# The key of this dict is the layer_op and not the mem_op
layer_op = self.mem_to_layer_op[mem_op]
self.temporal_mapping_dict[layer_op].append(
[(loop.dimension, loop.size) for loop in loops_to_allocate]
)
# This memory node that stores one or more mem_ops might be
# spatially unrolled, add these spatially unrolled loops to
# the list of allocated loops now, so that the next memory nodes
# correctly see this spatial unrolling.
# For this we require the level of memory we are evaluating for this op.
mem_level_op = self.mem_level[layer_op]
spatial_loops = self.spatial_mapping.get_unrolling(
op=layer_op, level=mem_level_op
)
for (loop_dim, loop_size) in spatial_loops:
spatial_loop = Loop(dimension=loop_dim, size=loop_size, type="spatial")
self.allocated[mem_op].append(spatial_loop)
# Check if this node (i.e. MemoryLevel) is the highest level of memory hierarchy.
# If this is the case and we haven't allocated all loops, raise an exception.
if (
node == top_levels[mem_op] and self.unallocated[mem_op]
): # if top level and unallocated not empty
raise MemoryHierarchyTooSmallException(
f"Highest MemoryLevel for {mem_op} = {node} too small to store all loops."
)
# Increment the mem_level we are currently at for this layer_op by 1
self.mem_level[layer_op] += 1
## Calculate the required memory size to store different
# slices of the unallocated loops, with 'mem_capacity' as an upper bound.
# @param mem_op
# @param mem_capacity Capacity of the memory node in bits.
def calc_size_slices(self, mem_op: str, mem_capacity: int):
# Already allocated loops for this mem_op
allocated_loops = self.allocated[mem_op]
# Unallocated loops for this mem_op
unallocated_loops = self.unallocated[mem_op]
sizes = []
for i in range(
len(unallocated_loops) + 1
): # Go through all slices (includes empty slice)
unallocated_slice = unallocated_loops[
:i
] # Grab a slice of the unallocated loops
loops = (
allocated_loops + unallocated_slice
) # Join them with already allocated loops
size = self.calc_loops_size(loops, mem_op, unallocated_loops)
if size <= mem_capacity:
sizes.append(size)
else:
if i == 0: # This means we can't even store the already allocated loops
raise MemoryTooSmallException(
f"Memory capacity overflow for mem_op {mem_op}. loops={loops} size={size} mem_capacity={mem_capacity}"
)
break # Stop as soon as we have added a loop that overflows the memory
return sizes
## Calculate the 'mem_op' tensor size required for all the loops in 'loops'.
# @param loops: The loops we want to calculate the size for.
    # @param mem_op: The memory operand we are calculating the size for.
# @param all_unallocated_loops: All unallocated loops for this MemoryLevel node. Needed for output precision calculation.
def calc_loops_size(self, loops: List, mem_op: str, all_unallocated_loops: List):
# First we compute the size of all loop dimensions present in this layer given the loops in 'loops'.
all_dimensions = self.layer.loop_dim_list
all_dim_sizes = {dim: 1 for dim in all_dimensions}
for loop in loops:
all_dim_sizes[loop.dimension] *= loop.size
op_dimensions = self.layer.operand_loop_dim[self.mem_to_layer_op[mem_op]]
layer_op = self.mem_to_layer_op[mem_op]
tensor_size = self.layer.calc_tensor_size(layer_op, all_dim_sizes)
# Get the precision at which this tensor will have to be stored in the MemoryLevel node.
# For output it can be either the partial sum precision, or the final sum precision.
# This depends on if all the irrelevant loops were allocated in a previous MemoryLevel.
# Which in turn means all remaining unallocated loops for this MemoryLevel must not contain any ir loops.
if mem_op == "O":
ir_dims = op_dimensions["ir"] # Irrelevant dimensions
unallocated_dims = [
unallocated_loop.dimension for unallocated_loop in all_unallocated_loops
]
# If there is still an irrelevant unallocated loop dimension, pick the full precision
if any([dim in ir_dims for dim in unallocated_dims]):
precision = self.precision["O"]
else:
precision = self.precision["O_final"]
else:
precision = self.precision[mem_op]
tensor_size_bits = tensor_size * precision
return tensor_size_bits
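    # Illustrative example (hypothetical layer in which K and C are both
    # relevant dimensions of the weight operand): for loops
    # [('K', 4), ('C', 8)] at 8-bit precision, the tensor size is 4 * 8 = 32
    # elements, i.e. 256 bits, which calc_size_slices then compares against the
    # memory node's capacity.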
## Find the best combination of loops from different mem_ops.
# Best is defined as the combination that minimizes the number of accesses
# to the memory level above.
# @param mem_ops
# @param all_sizes
# @param node
# @param top_levels
def find_best_loop_combination(self, mem_ops, all_sizes, node, top_levels):
# TODO: Take into account the operand precision which can change based on the loops picked
mem_capacity = node.memory_instance.size
# nb_operations = self.__main_inputs.layer.total_MAC_count
# all_accesses = {mem_op: [nb_operations//size for size in all_sizes[mem_op]] for mem_op in mem_ops}
# If for one of the mem_ops this is the top level memory, we have to enforce that all unallocated loops
# will be allocated for this operand. We do this by changing the sizes for this mem_op to only include
# the last number (which represents the size to store all the ops).
# Because we modify the sizes, we add an offset to the loop_idx for this mem_op.
loop_idx_offsets = {mem_op: 0 for mem_op in mem_ops}
for mem_op, sizes in all_sizes.items():
if node == top_levels[mem_op]:
loop_idx_offsets[mem_op] = (
len(sizes) - 1
) # offset is number of original sizes - 1
all_sizes[mem_op] = [sizes[-1]]
all_accesses = {mem_op: [] for mem_op in mem_ops}
for mem_op in mem_ops:
# The number of accesses to the level above is determined through the reuse we have for the different
# choices of temporal loops. accesses = # total temporal loop iterations / temporal reuse
# The temporal reuse = # allocated loop iterations / operand size
# Thus: accesses = # total iterations / (# allocated iterations / size)
# Thus: accesses = (# total iterations / # allocated iterations) * size
# Thus: accesses = # unallocated iterations * size
for i, size in enumerate(all_sizes[mem_op]):
unallocated_loops = self.unallocated[mem_op][
(i + loop_idx_offsets[mem_op]) :
] # slice of unallocated loops for this operand size
unallocated_iterations = prod(
(unallocated_loop.size for unallocated_loop in unallocated_loops)
)
if node == top_levels[mem_op]:
accesses = 0
else:
accesses = unallocated_iterations * size
all_accesses[mem_op].append(accesses)
all_max_nb_loops = {mem_op: len(all_sizes[mem_op]) for mem_op in mem_ops}
all_max_nb_loops_list = list(all_max_nb_loops.values())
best_loop_idxs = [0 for mem_op in mem_ops]
best_accesses = np.inf
nb_combinations = prod(len(sizes) for sizes in all_sizes.values())
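        # Exhaustively enumerate every combination by decoding the flat index i
        # as a mixed-radix number: each mem_op contributes one digit whose
        # radix is its number of candidate loop slices.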
for i in range(nb_combinations):
size_comb = 0
accesses_comb = 0
current_loop_idxs = []
for mem_op_idx, mem_op in enumerate(mem_ops):
this_max_nb_loops = all_max_nb_loops_list[mem_op_idx]
current_loop_idx = (
i // prod(all_max_nb_loops_list[mem_op_idx + 1 :])
) % this_max_nb_loops
current_loop_idxs.append(current_loop_idx + loop_idx_offsets[mem_op])
size_comb += all_sizes[mem_op][current_loop_idx]
accesses_comb += all_accesses[mem_op][current_loop_idx]
if size_comb > mem_capacity:
if i == 0:
raise MemoryTooSmallException(
"The memory can't store all loops assigned to lower level memories. Likely due to spatial unrolling."
)
continue
if accesses_comb <= best_accesses:
best_accesses = accesses_comb
best_loop_idxs = current_loop_idxs
return best_loop_idxs | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/opt/temporal/loma/memory_allocator.py | memory_allocator.py |
from typing import Dict, List
from typing import TYPE_CHECKING
from math import prod
from copy import deepcopy
from zigzag.utils import pickle_deepcopy
if TYPE_CHECKING:
from zigzag.classes.workload.layer_node import LayerNode
## Collect information for each single loop tuple in the mapping.
# Applied range: from the lowest architectural level to the current level.
class Loop:
## The class constructor
# @param loop
# @param MAC_op
# @param data_elem
def __init__(self, loop: tuple, MAC_op: int, data_elem: int):
self.loop = loop
self.MAC_op = MAC_op
self.data_elem = data_elem
self.reuse = MAC_op / data_elem
def __str__(self):
return str(self.loop)
def __repr__(self):
return str(self.loop)
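# For example, a loop tuple ('K', 4) that covers 64 MAC operations on 16
# distinct data elements has a data reuse factor of 64 / 16 = 4.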
## This function decouples the pr loops into data size (r loops) and data reuse (ir loops).
# It also provides a transformed mapping dictionary in which the pr loops are replaced by r and ir loops.
def decouple_pr_loop(mapping_dict: Dict, layer_node: "LayerNode"):
operand_loop_dim = {
op: layer_node.operand_loop_dim[op] for op in mapping_dict.keys()
}
r_ir_operand_loop_LUT = {
op: relevance["r"] + relevance["ir"]
for (op, relevance) in operand_loop_dim.items()
}
pr_operand_loop_LUT = {
op: relevance["pr"]
for (op, relevance) in operand_loop_dim.items()
if relevance["pr"] != {}
}
pr_operand_list = list(pr_operand_loop_LUT.keys())
mapping_dict_reform = pickle_deepcopy(mapping_dict)
# current and below level pr data size
cabl_pr_data_size = {}
# current and below level pr data reuse
cabl_pr_data_reuse = {}
# each single pr loop data size
per_pr_data_size = {}
# each single pr loop data reuse
per_pr_data_reuse = {}
for operand in pr_operand_list:
# initialize current and below level pr loop size
cabl_pr_lp_size = {
pr_data_dim: {
pr_loop_dim: 1
for pr_loop_dim in pr_operand_loop_LUT[operand][pr_data_dim]
}
for pr_data_dim in pr_operand_loop_LUT[operand].keys()
}
# initialize current and below level pr data size
cabl_pr_data_size[operand] = {
pr_data_dim: [[] for _ in range(len(mapping_dict[operand]))]
for pr_data_dim in pr_operand_loop_LUT[operand].keys()
}
# initialize current and below level pr data reuse
cabl_pr_data_reuse[operand] = {
pr_data_dim: [[] for _ in range(len(mapping_dict[operand]))]
for pr_data_dim in pr_operand_loop_LUT[operand].keys()
}
# initialize per pr loop data size
per_pr_data_size[operand] = {
pr_data_dim: [[] for _ in range(len(mapping_dict[operand]))]
for pr_data_dim in pr_operand_loop_LUT[operand].keys()
}
# initialize per pr loop data reuse
per_pr_data_reuse[operand] = {
pr_data_dim: [[] for _ in range(len(mapping_dict[operand]))]
for pr_data_dim in pr_operand_loop_LUT[operand].keys()
}
# update the cabl_pr_lp_size by multiply pr loop size across architectural level
for level, loop_list in enumerate(mapping_dict[operand]):
for loop_type, loop_size in loop_list:
if loop_type in r_ir_operand_loop_LUT[operand]:
continue
for pr_data_dim in pr_operand_loop_LUT[operand].keys():
if any(
lp_type == loop_type
for lp_type in pr_operand_loop_LUT[operand][pr_data_dim]
):
cabl_pr_lp_size[pr_data_dim][loop_type] *= loop_size
# compute pr related data dimension size and data dimension reuse at current and below joint levels
# based on pr_funcs (dynamic functions extracted in LayerNode). Each pr loop is decoupled into r and ir loops.
pr_loop_combined_to_r = layer_node.calc_tensor_dim(
cabl_pr_lp_size[pr_data_dim], pr_data_dim
)
pr_loop_combined_to_ir = (
prod(cabl_pr_lp_size[pr_data_dim].values())
/ pr_loop_combined_to_r
)
cabl_pr_data_size[operand][pr_data_dim][level].append(
pr_loop_combined_to_r
)
cabl_pr_data_reuse[operand][pr_data_dim][level].append(
pr_loop_combined_to_ir
)
# compute pr related data dimension size and data dimension reuse at each level for each pr loop
        # based on cabl_pr_data_size/cabl_pr_data_reuse
for pr_data_dim in cabl_pr_data_size[operand].keys():
data_size_list = cabl_pr_data_size[operand][pr_data_dim]
data_reuse_list = cabl_pr_data_reuse[operand][pr_data_dim]
previous_data_size = 1
previous_data_data_reuse = 1
for level, va_list in enumerate(data_size_list):
for idx in range(len(va_list)):
per_pr_data_size[operand][pr_data_dim][level].append(
data_size_list[level][idx] / previous_data_size
)
per_pr_data_reuse[operand][pr_data_dim][level].append(
data_reuse_list[level][idx] / previous_data_data_reuse
)
previous_data_size = data_size_list[level][idx]
previous_data_data_reuse = data_reuse_list[level][idx]
mapping_dict_reform[operand] = replace_pr_loop_in_mapping(
mapping_dict[operand],
per_pr_data_size[operand],
per_pr_data_reuse[operand],
pr_operand_loop_LUT[operand],
r_ir_operand_loop_LUT[operand],
)
# return mapping_dict_reform, cabl_pr_data_size, cabl_pr_data_reuse, per_pr_data_size, per_pr_data_reuse
return mapping_dict_reform
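# Illustrative note (assuming a convolution-style relation such as
# IX = OX + FX - 1): an OX loop only partially determines the input's IX
# dimension. Decoupling replaces each pr loop by an ('IX_r', r) loop carrying
# the data-size part and an ('IX_ir', ir) loop carrying the data-reuse part,
# with r * ir equal to the combined pr loop size at that point in the
# hierarchy.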
## This function replaces all pr loops in a mapping of a single operand with r and ir loops.
# @param single_operand_mapping
# @param per_pr_data_size
# @param per_pr_data_reuse
# @param pr_operand_loop_LUT
# @param r_ir_operand_loop_LUT
def replace_pr_loop_in_mapping(
single_operand_mapping: Dict,
per_pr_data_size: Dict,
per_pr_data_reuse: Dict,
pr_operand_loop_LUT: Dict,
r_ir_operand_loop_LUT: List,
):
mapping_new = pickle_deepcopy(single_operand_mapping)
for level, loop_list in enumerate(single_operand_mapping):
# Introduce the current level pr loop index to distinguish different pr loops at the same architectural level
cl_pr_lp_idx_local = {
pr_data_dim: 0 for pr_data_dim in pr_operand_loop_LUT.keys()
}
cl_pr_lp_idx_global = 0
for idx, (loop_type, loop_size) in enumerate(loop_list):
if loop_type in r_ir_operand_loop_LUT:
continue
for pr_data_dim in pr_operand_loop_LUT.keys():
if any(
lp_type == loop_type for lp_type in pr_operand_loop_LUT[pr_data_dim]
):
# replace the pr loop in the mapping by r loop
pr_idx_local = cl_pr_lp_idx_local[pr_data_dim]
pr_idx_global = cl_pr_lp_idx_global
mapping_new[level][idx + pr_idx_global] = (
pr_data_dim + "_r",
per_pr_data_size[pr_data_dim][level][pr_idx_local],
)
# insert ir loop after the r loop
                    # NOTE: Here we insert the ir loop after/above the r loop, which means we ignore the input FIFO effect
                    # while the current level feeds data to the level below. We could also insert the ir loop before/below the r loop,
                    # which leads to a more energy-efficient mapping if the innermost ir loop merging down is enabled.
mapping_new[level].insert(
idx + pr_idx_global + 1,
(
pr_data_dim + "_ir",
per_pr_data_reuse[pr_data_dim][level][pr_idx_local],
),
)
# update the pr loop index
cl_pr_lp_idx_local[pr_data_dim] += 1
cl_pr_lp_idx_global += 1
return mapping_new
## This function generates detailed information for each single loop item for each operand.
def calc_data_size_MAC_count_per_loop(
mapping_dict_reform: Dict, operand_loop_dim_reform: Dict
):
detailed_mapping_dict = deepcopy(mapping_dict_reform)
for operand, mapping_list in mapping_dict_reform.items():
MAC_count = 1
data_elem = 1
for level, loop_list in enumerate(mapping_dict_reform[operand]):
for idx, (loop_type, loop_size) in enumerate(loop_list):
MAC_count *= loop_size
if loop_type in operand_loop_dim_reform[operand]["r"]:
data_elem *= loop_size
detailed_mapping_dict[operand][level][idx] = Loop(
(loop_type, loop_size), round(MAC_count), round(data_elem)
)
return detailed_mapping_dict | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/mapping/mapping_assist_funcs.py | mapping_assist_funcs.py |
from typing import Dict
from math import prod
from zigzag.classes.workload.layer_node import LayerNode
from zigzag.classes.mapping.spatial.spatial_mapping import SpatialMapping
from zigzag.classes.mapping.temporal.temporal_mapping import TemporalMapping
from zigzag.classes.hardware.architecture.accelerator import Accelerator
import zigzag.classes.mapping.mapping_assist_funcs as mapping_assist_funcs
## The standard four-way data moving attribute of a memory interface.
class FourWayDataMoving:
## The class constructor
# @param rd_out_to_low
# @param wr_in_by_low
# @param rd_out_to_high
# @param wr_in_by_high
def __init__(self, rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high):
self.rd_out_to_low = rd_out_to_low
self.wr_in_by_low = wr_in_by_low
self.rd_out_to_high = rd_out_to_high
self.wr_in_by_high = wr_in_by_high
""" format used in the original ZigZag version """
self.info_list = [
(self.rd_out_to_low, self.wr_in_by_low),
(self.rd_out_to_high, self.wr_in_by_high),
]
def update_single_dir_data(self, dir: str, new_value):
setattr(self, dir, new_value)
self.info_list = [
(self.rd_out_to_low, self.wr_in_by_low),
(self.rd_out_to_high, self.wr_in_by_high),
]
## Return the total amount of times this memory interface is read from to the level above.
# If scaling is the energy cost per read, this returns the total read energy.
def get_total_read_outs_to_above(self, scaling: float = 1):
return scaling * self.rd_out_to_high
## Return the total amount of times this memory interface is read from to the level below.
# If scaling is the energy cost per read, this returns the total read energy.
def get_total_read_outs_to_below(self, scaling: float = 1):
return scaling * self.rd_out_to_low
## Return the total amount of times this memory interface is written to from the level above.
    # If scaling is the energy cost per write, this returns the total write energy.
def get_total_write_ins_from_above(self, scaling: float = 1):
return scaling * self.wr_in_by_high
    ## Return the total number of times this memory interface is written to, with the data coming from the level below.
    # If scaling is the energy cost per write, this returns the total write energy.
def get_total_write_ins_from_below(self, scaling: float = 1):
return scaling * self.wr_in_by_low
def __add__(self, other):
return FourWayDataMoving(
self.rd_out_to_low + other.rd_out_to_low,
self.wr_in_by_low + other.wr_in_by_low,
self.rd_out_to_high + other.rd_out_to_high,
self.wr_in_by_high + other.wr_in_by_high,
)
def __mul__(self, other):
return FourWayDataMoving(
self.rd_out_to_low * other,
self.wr_in_by_low * other,
self.rd_out_to_high * other,
self.wr_in_by_high * other,
)
def __iter__(self):
for e in ["rd_out_to_low", "wr_in_by_low", "rd_out_to_high", "wr_in_by_high"]:
yield e
def __repr__(self):
return f"4waydatamoving (rd /\\: {self.rd_out_to_high}, wr V: {self.wr_in_by_high}, rd V: {self.rd_out_to_low}, wr /\\: {self.wr_in_by_low})"
def __jsonrepr__(self):
return repr(self)
def __getitem__(self, item):
if hasattr(self, item):
return getattr(self, item)
else:
raise KeyError()
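    # Example usage (illustrative sketch; the numbers are hypothetical):
    #   mov = FourWayDataMoving(rd_out_to_low=100, wr_in_by_low=0, rd_out_to_high=0, wr_in_by_high=100)
    #   scaled = mov * 2                                  # scales all four directions
    #   combined = mov + scaled                           # element-wise sum per direction
    #   energy = mov.get_total_read_outs_to_below(0.5)    # 100 reads * 0.5 (assumed energy/read) = 50.0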
## Collect the memory access pattern for each unit memory (a memory that only holds one operand at one level).
class DataMovePattern:
    ## The class constructor
# @param operand
# @param mem_level
def __init__(self, operand, mem_level):
self.name = operand + str(mem_level)
self.data_elem_move_count = FourWayDataMoving(0, 0, 0, 0)
self.data_precision = FourWayDataMoving(0, 0, 0, 0)
self.req_mem_bw_aver = FourWayDataMoving(0, 0, 0, 0)
self.req_mem_bw_inst = FourWayDataMoving(0, 0, 0, 0)
self.data_trans_period = FourWayDataMoving(0, 0, 0, 0)
self.data_trans_period_count = FourWayDataMoving(0, 0, 0, 0)
self.data_trans_amount_per_period = FourWayDataMoving(0, 0, 0, 0)
self.inst_data_trans_window = FourWayDataMoving(0, 0, 0, 0)
    # For every memory, there are 4 data transfer links in the hierarchy:
    # rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
def set_data_elem_move_count(
self, rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
):
self.data_elem_move_count = FourWayDataMoving(
rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
)
def set_data_precision(
self, rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
):
self.data_precision = FourWayDataMoving(
rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
)
def set_req_mem_bw_aver(
self, rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
):
self.req_mem_bw_aver = FourWayDataMoving(
rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
)
def set_req_mem_bw_inst(
self, rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
):
self.req_mem_bw_inst = FourWayDataMoving(
rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
)
def set_data_trans_period(
self, rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
):
        # data_trans_period: every how many cycles the memory link needs to be activated for a certain duration
self.data_trans_period = FourWayDataMoving(
rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
)
def set_data_trans_period_count(
self, rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
):
        # data_trans_period_count: how many such periods are required to finish the complete for-loop computation
self.data_trans_period_count = FourWayDataMoving(
rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
)
def set_data_trans_amount_per_period(
self, rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
):
        # data_trans_amount_per_period: the amount of data transferred within each single period
self.data_trans_amount_per_period = FourWayDataMoving(
rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
)
def set_inst_data_trans_window(
self, rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
):
        # inst_data_trans_window: the allowed memory updating window, assuming the served memory level
        # is not double-buffered (and thus data overwriting must be avoided)
self.inst_data_trans_window = FourWayDataMoving(
rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
)
## update a single direction value for all data move attributes
def update_single_dir_data(self, direction, new_value):
self.data_elem_move_count.update_single_dir_data(direction, new_value)
self.data_precision.update_single_dir_data(direction, new_value)
self.req_mem_bw_aver.update_single_dir_data(direction, new_value)
self.req_mem_bw_inst.update_single_dir_data(direction, new_value)
self.data_trans_period.update_single_dir_data(direction, new_value)
self.data_trans_period_count.update_single_dir_data(direction, new_value)
self.data_trans_amount_per_period.update_single_dir_data(direction, new_value)
self.inst_data_trans_window.update_single_dir_data(direction, new_value)
def __str__(self):
return self.name
def __repr__(self):
return str(self)
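    # Example usage (illustrative sketch; the numbers are hypothetical):
    #   pattern = DataMovePattern("I", 0)                    # unit memory of operand I at memory level 0
    #   pattern.set_data_elem_move_count(1024, 0, 0, 2048)   # reads to below, writes from above
    #   pattern.set_data_precision(8, 0, 0, 8)               # bit precision per direction
    #   pattern.update_single_dir_data("rd_out_to_high", 0)  # e.g. cut the traffic to the level above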
## Collect information of a complete mapping (spatial and temporal)
#
# NOTE: Mapping is HW-unaware, i.e. Mapping doesn't take in HW information
# like memory bw, access cost, size and so on.
class Mapping:
    ## The class constructor
# @param accelerator
# @param spatial mapping
# @param temporal mapping
# @param layer_node
# @param access_same_data_considered_as_no_access
def __init__(
self,
accelerator: Accelerator,
        spatial_mapping: Union[Dict, SpatialMapping],
        temporal_mapping: Union[Dict, TemporalMapping],
layer_node: LayerNode,
access_same_data_considered_as_no_access: bool = False,
):
# Mapping object can be initialized with separate spatial and temporal mappings
self.accelerator = accelerator
if type(spatial_mapping) is SpatialMapping:
self.spatial_mapping = spatial_mapping
else:
self.spatial_mapping = SpatialMapping(spatial_mapping, layer_node)
if type(temporal_mapping) is TemporalMapping:
self.temporal_mapping = temporal_mapping
else:
self.temporal_mapping = TemporalMapping(temporal_mapping, layer_node)
self.layer_node = layer_node
self.operand_list = layer_node.operand_list
self.access_same_data_considered_as_no_access = (
access_same_data_considered_as_no_access
)
self.mem_level = self.temporal_mapping.mem_level
# Initialize unit_mem_data_movement, which collects all the important data movement info
# related to each unit memory, such as data access count, data precision, required memory BW to
# prevent stall, data transfer rate, etc.
self.unit_mem_data_movement = {
op: [[] for _ in range(self.mem_level[op])] for op in self.operand_list
}
# Combine spatial and temporal mapping dictionary into "joint_mapping_dict" in order to
# enable decoupling pr loops into r and ir loops in one go
self.combine_spatial_temporal_mapping_dict()
# Decouple pr loops into r and ir loops, preparing for later mapping info extraction
self.combined_mapping_dict_1s1t_reform = mapping_assist_funcs.decouple_pr_loop(
self.combined_mapping_dict_1s1t, layer_node
)
self.combined_mapping_dict_1s2t_reform = mapping_assist_funcs.decouple_pr_loop(
self.combined_mapping_dict_1s2t, layer_node
)
# Distinguish final output from partial output: "psum_flag"
self.distinguish_output()
# Generate a dictionary that collect data precision for each operand at each arch level
self.gen_data_precision_dict()
# Generate r/ir loop size list at each level for each operand
self.gen_r_ir_loop_list()
# Calculate data size at each memory level, including total data size and
# unrolled data size (data at each unrolled memory unit). Unit used: # element
self.calc_data_size()
# Calculate the effective data size at each unrolled memory unit.
# Effective data size: the unrolled data size divided by all top r loops at that level.
# Unit used: # element
self.calc_effective_data_size()
# Calculate data access count at each memory level. Unit used: # element
# NOTE: this data access is not memory word access!
self.calc_data_access()
# Calculate required memory bandwidth and the periodic data transfer pattern
self.calc_req_mem_bw_and_data_transfer_rate()
# Ignore the data traffic between the top level memory and the external world
self.disable_data_traffic_external()
## Combine spatial and temporal mapping dictionary into combined_mapping_dict by
# inserting spatial loops above temporal loops at each level.
#
# - combined_mapping_dict_1s1t: corresponding level's smap and tmap are merged together.
# Each level's data size is the total data size.
# - combined_mapping_dict_1s2t: each level's smap is merged to level+1's tmap.
# Each level's data size is the unrolled data size.
def combine_spatial_temporal_mapping_dict(self):
# Initialization
combined_mapping_dict_1s1t = {
op: [[] for _ in range(self.spatial_mapping.arch_level[op])]
for op in self.operand_list
}
combined_mapping_dict_1s2t = {
op: [[] for _ in range(self.spatial_mapping.arch_level[op] + 1)]
for op in self.operand_list
}
su_dict_seed = self.spatial_mapping.mapping_dict_origin
# Add an empty innermost level and an empty outermost level
tm_dict_seed = {
op: [[]] + tm_list + [[]]
for op, tm_list in self.temporal_mapping.mapping_dic_stationary.items()
}
# Combining
for operand in self.operand_list:
for level, current_level_su_loops in enumerate(su_dict_seed[operand]):
current_level_tm_loops = tm_dict_seed[operand][level]
above_level_tm_loops = tm_dict_seed[operand][level + 1]
combined_mapping_dict_1s1t[operand][level] = (
current_level_tm_loops + current_level_su_loops
)
combined_mapping_dict_1s2t[operand][level + 1] = (
above_level_tm_loops + current_level_su_loops
)
self.combined_mapping_dict_1s1t = combined_mapping_dict_1s1t
self.combined_mapping_dict_1s2t = combined_mapping_dict_1s2t
    ## This function generates a list "psum_flag" that identifies whether each output memory
    # level holds partial or final output.
    # E.g., psum_flag = [True, True, False] means there are 3 memory levels for output and only the outermost
    # memory level holds the final output; the 1st and 2nd memory levels need to store partial output for some time.
    # For indexing convenience, we add an extra False to the end of the psum_flag list.
def distinguish_output(self):
output_operand = self.layer_node.output_operand
output_loop_dim_relevancy = self.layer_node.operand_loop_dim_reform[
output_operand
]
        # output_ir_flag indicates, for each architectural level, whether that level contains
        # an ir loop of the output operand (True if it does).
output_arch_level = self.spatial_mapping.arch_level[output_operand]
output_ir_flag = [False] * output_arch_level
for level, current_level_loops in enumerate(
self.combined_mapping_dict_1s1t_reform[output_operand]
):
for loop_type, loop_dim in current_level_loops:
if loop_type in output_loop_dim_relevancy["ir"] and loop_dim > 1:
output_ir_flag[level] = True
break
        # Check in reverse, from the current level up to the top level, whether an ir loop shows up in between;
        # False means the final output is present at the current level.
psum_flag_H2L = [
any(output_ir_flag[lv:output_arch_level])
for lv in reversed(range(output_arch_level))
]
psum_flag_L2H = list(reversed(psum_flag_H2L))
self.psum_flag = psum_flag_L2H[1:] + [
False
] # add an extra False on top for later indexing convenience
## This function generates a dictionary that collect data precision for each operand at each arch level
def gen_data_precision_dict(self):
input_operands = self.layer_node.input_operands
output_operand = self.layer_node.output_operand
data_precision_dict = {
op: [self.layer_node.operand_precision[op]] * (self.mem_level[op] + 1)
for op in input_operands
}
data_precision_dict[output_operand] = []
for i in range(self.mem_level[output_operand] + 1):
if self.psum_flag[i]:
data_precision_dict[output_operand].append(
self.layer_node.operand_precision[output_operand]
)
else:
data_precision_dict[output_operand].append(
self.layer_node.operand_precision[output_operand + "_final"]
)
self.data_precision_dict = data_precision_dict
## Given the combined mapping, generate r/ir loop size list at each level for each operand
def gen_r_ir_loop_list(self):
combined_mapping = self.combined_mapping_dict_1s1t_reform
combined_mapping2 = self.combined_mapping_dict_1s2t_reform
relevancy_table = self.layer_node.operand_loop_dim_reform
r_loop_size_per_level = {
op: [
prod(
[
lp_dim
for lp_type, lp_dim in combined_mapping[op][lv]
if lp_type in relevancy_table[op]["r"]
]
)
for lv in range(self.spatial_mapping.arch_level[op])
]
for op in self.operand_list
}
r_loop_size_per_level2 = {
op: [
prod(
[
lp_dim
for lp_type, lp_dim in combined_mapping2[op][lv]
if lp_type in relevancy_table[op]["r"]
]
)
for lv in range(self.spatial_mapping.arch_level[op])
]
for op in self.operand_list
}
ir_loop_size_per_level = {
op: [
prod(
[
lp_dim
for lp_type, lp_dim in combined_mapping[op][lv]
if lp_type in relevancy_table[op]["ir"]
]
)
for lv in range(self.spatial_mapping.arch_level[op])
]
for op in self.operand_list
}
ir_loop_size_per_level2 = {
op: [
prod(
[
lp_dim
for lp_type, lp_dim in combined_mapping2[op][lv]
if lp_type in relevancy_table[op]["ir"]
]
)
for lv in range(self.spatial_mapping.arch_level[op])
]
for op in self.operand_list
}
# current and below levels (cabl) r loop size
r_loop_size_cabl = {
op: [
round(prod(r_loop_size_per_level[op][0 : lv + 1]))
for lv in range(self.spatial_mapping.arch_level[op])
]
for op in self.operand_list
}
r_loop_size_cabl2 = {
op: [
round(prod(r_loop_size_per_level2[op][0 : lv + 1]))
for lv in range(self.spatial_mapping.arch_level[op])
]
for op in self.operand_list
}
# current and below levels (cabl) ir loop size
ir_loop_size_cabl = {
op: [
prod(ir_loop_size_per_level[op][0 : lv + 1])
for lv in range(self.spatial_mapping.arch_level[op])
]
for op in self.operand_list
}
        # current and below levels (cabl) ir loop size (based on the 1s2t mapping)
ir_loop_size_cabl2 = {
op: [
prod(ir_loop_size_per_level2[op][0 : lv + 1])
for lv in range(self.spatial_mapping.arch_level[op])
]
for op in self.operand_list
}
# current and above levels (caal) ir loop size, only for output operand for calculating psum backflow access count
output_operand = self.layer_node.output_operand
O_ir_loop_size_caal = [
prod(
ir_loop_size_per_level[output_operand][
lv : self.spatial_mapping.arch_level[output_operand]
]
)
for lv in range(self.spatial_mapping.arch_level[output_operand])
]
        # Append two extra 1s to the list to facilitate the later psum backflow access calculation.
        # We can see it as adding two output memory levels on top with no data reuse.
O_ir_loop_size_caal.extend([1, 1])
self.r_loop_size_per_level = r_loop_size_per_level
self.r_loop_size_per_level2 = r_loop_size_per_level2
self.ir_loop_size_per_level = ir_loop_size_per_level
self.r_loop_size_cabl = r_loop_size_cabl
self.r_loop_size_cabl2 = r_loop_size_cabl2
self.ir_loop_size_cabl = ir_loop_size_cabl
self.ir_loop_size_cabl2 = ir_loop_size_cabl2
self.O_ir_loop_size_caal = O_ir_loop_size_caal
## Based on the r loop size list, calculate the data size held by each architectural level.
def calc_data_size(self):
# data_elem_per_level_unrolled: data size held inside of each unrolled unit at each architectural level
# data_elem_per_level: total data size at each architectural level (= data_elem_per_level_unrolled * unique unit count)
data_elem_per_level_unrolled = {
op: [
round(self.r_loop_size_cabl2[op][lv])
for lv in range(self.spatial_mapping.arch_level[op])
]
for op in self.operand_list
}
data_bit_per_level_unrolled = {
op: [
round(self.r_loop_size_cabl2[op][lv]) * self.data_precision_dict[op][lv]
for lv in range(self.spatial_mapping.arch_level[op])
]
for op in self.operand_list
}
data_elem_per_level = {
op: [
round(data_elem_unrolled * self.spatial_mapping.unit_unique[op][lv])
for lv, data_elem_unrolled in enumerate(
data_elem_per_level_unrolled[op]
)
]
for op in self.operand_list
}
data_bit_per_level = {
op: [
round(data_elem_unrolled * self.spatial_mapping.unit_unique[op][lv])
* self.data_precision_dict[op][lv]
for lv, data_elem_unrolled in enumerate(
data_elem_per_level_unrolled[op]
)
]
for op in self.operand_list
}
self.data_elem_per_level_unrolled = data_elem_per_level_unrolled
self.data_bit_per_level_unrolled = data_bit_per_level_unrolled
self.data_elem_per_level = data_elem_per_level
self.data_bit_per_level = data_bit_per_level
## Calculate the effective data size for getting the allowed memory updating window in latency calculation.
# The effective data size is calculated by using data_elem_per_level_unrolled divided by the top r loops.
def calc_effective_data_size(self):
effective_data_elem = {
op: [
data_elem_unrolled // self.temporal_mapping.top_r_loop_size[op][lv]
for lv, data_elem_unrolled in enumerate(
self.data_elem_per_level_unrolled[op]
)
]
for op in self.operand_list
}
effective_data_bit = {
op: [
data_bit_unrolled // self.temporal_mapping.top_r_loop_size[op][lv]
for lv, data_bit_unrolled in enumerate(
self.data_bit_per_level_unrolled[op]
)
]
for op in self.operand_list
}
self.effective_data_elem = effective_data_elem
self.effective_data_bit = effective_data_bit
## Based on the ir loop size list and the total MAC Op count, calculate the data access
# at each memory level in a bottom-up way.
def calc_data_access(self):
total_MAC_count = self.layer_node.total_MAC_count
# data_access_raw doesn't distinguish read and write, doesn't distinguish input operands from output operand
# data_access_raw: each memory levels' spatial loops and temporal loops are put together (combined_mapping_dict_1s1t)
# data_access_raw2: each memory levels' spatial loops are put to memory level + 1s' temporal loops location (combined_mapping_dict_1s2t)
data_access_raw = {
op: [
round(total_MAC_count / self.ir_loop_size_cabl[op][lv])
for lv in range(self.spatial_mapping.arch_level[op])
]
for op in self.operand_list
}
data_access_raw2 = {
op: [
round(total_MAC_count / self.ir_loop_size_cabl2[op][lv])
for lv in range(self.spatial_mapping.arch_level[op])
]
for op in self.operand_list
}
self.data_access_raw = data_access_raw
self.data_access_raw2 = data_access_raw2
# Distinguish read and write, unify input operands and output operand
# For input operands
for operand in self.layer_node.input_operands:
for mem_level in range(self.mem_level[operand]):
unit_mem_data_movement = DataMovePattern(operand, mem_level)
# data access count
if (
self.access_same_data_considered_as_no_access
and mem_level == 0
and self.accelerator.get_core(
self.layer_node.get_core_allocation()
).mem_r_bw_dict[self.layer_node.memory_operand_links[operand]][
mem_level
]
>= self.data_bit_per_level[operand][mem_level]
// self.spatial_mapping.unit_unique[operand][mem_level + 1]
):
                    # If we need to access the same input data multiple times from the innermost memory level
                    # and the data size is smaller than the memory read bw, take into account only a one-time
                    # access cost (assume the data can stay at the output pins of the memory as long as it is needed).
rd_out_to_low = (
data_access_raw[operand][mem_level]
// self.temporal_mapping.MAC_level_data_stationary_cycle[
operand
]
)
else:
rd_out_to_low = data_access_raw[operand][mem_level]
wr_in_by_low = 0
rd_out_to_high = 0
wr_in_by_high = data_access_raw2[operand][mem_level + 1]
unit_mem_data_movement.set_data_elem_move_count(
rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
)
# data precision
rd_out_to_low_pre = self.layer_node.operand_precision[operand]
wr_in_by_low_pre = 0
rd_out_to_high_pre = 0
wr_in_by_high_pre = self.layer_node.operand_precision[operand]
unit_mem_data_movement.set_data_precision(
rd_out_to_low_pre,
wr_in_by_low_pre,
rd_out_to_high_pre,
wr_in_by_high_pre,
)
self.unit_mem_data_movement[operand][mem_level] = unit_mem_data_movement
# For output operand
output_operand = self.layer_node.output_operand
for mem_level in range(self.mem_level[output_operand]):
unit_mem_data_movement = DataMovePattern(output_operand, mem_level)
            # Note that the index for data_access_raw is arch_level, which is one level more than mem_level:
            # the first arch_level is the operational array level (e.g. the MAC array level),
            # while the first mem_level is the innermost memory level (e.g. the register file level).
# data access count
wr_in_by_low = data_access_raw[output_operand][mem_level]
rd_out_to_low = self.layer_node.operand_size_elem[output_operand] * (
self.O_ir_loop_size_caal[mem_level + 1] - 1
)
rd_out_to_high = data_access_raw2[output_operand][mem_level + 1]
wr_in_by_high = self.layer_node.operand_size_elem[output_operand] * (
self.O_ir_loop_size_caal[mem_level + 2] - 1
)
unit_mem_data_movement.set_data_elem_move_count(
rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
)
# data precision
if rd_out_to_low != 0:
# partial output data precision
wr_in_by_low_pre = self.layer_node.operand_precision[output_operand]
rd_out_to_low_pre = self.layer_node.operand_precision[output_operand]
else:
# final output data precision
wr_in_by_low_pre = self.layer_node.operand_precision[
output_operand + "_final"
]
rd_out_to_low_pre = 0
if wr_in_by_high != 0:
# partial output data precision
wr_in_by_high_pre = self.layer_node.operand_precision[output_operand]
rd_out_to_high_pre = self.layer_node.operand_precision[output_operand]
else:
# final output data precision
wr_in_by_high_pre = 0
rd_out_to_high_pre = self.layer_node.operand_precision[
output_operand + "_final"
]
unit_mem_data_movement.set_data_precision(
rd_out_to_low_pre,
wr_in_by_low_pre,
rd_out_to_high_pre,
wr_in_by_high_pre,
)
self.unit_mem_data_movement[output_operand][
mem_level
] = unit_mem_data_movement
## This function calculates the average & instant required memory bw and the periodic data transfer pattern.
def calc_req_mem_bw_and_data_transfer_rate(self):
if self.access_same_data_considered_as_no_access:
            # For input operands, prepend the operational array level's 'MAC_level_data_stationary_cycle'
            # at the bottom to align with the list length of data_each_level
cycle_each_level = {
op: [self.temporal_mapping.MAC_level_data_stationary_cycle[op]]
+ self.temporal_mapping.cycle_cabl_level[op]
for op in self.layer_node.input_operands
}
            # For the output operand, prepend 1 cycle for the operational array level at the bottom
            # to align with the list length of data_each_level
cycle_each_level[self.layer_node.output_operand] = [
1
] + self.temporal_mapping.cycle_cabl_level[self.layer_node.output_operand]
else:
cycle_each_level = {
op: [1] + self.temporal_mapping.cycle_cabl_level[op]
for op in self.operand_list
}
data_each_level_unrolled = self.data_elem_per_level_unrolled
# Add the mem BW boost factor 1 on the top (the memory BW boost factor from outside to top memory)
# to align with the list length of data_each_level
mem_bw_boost_factor = {
op: self.spatial_mapping.mem_bw_boost[op] + [1] for op in self.operand_list
}
# req_mem_bw_raw doesn't distinguish read and write, doesn't distinguish input operands from output operand
# "_L/_H" indicates for each data transfer link (DTL), the lower/higher memory level's required BW,
# e.g. for the DTL of Global Buffer (Weight) talking to spatially unrolled Weight Reg File,
# each Weight Reg File's required write BW is indicated by "_L",
        # while Global Buffer (Weight)'s required read BW is indicated by "_H"
req_mem_bw_L_raw = {
op: [
data_each_level_unrolled[op][lv] / cycle_each_level[op][lv]
for lv in range(self.spatial_mapping.arch_level[op])
]
for op in self.operand_list
}
req_mem_bw_H_raw = {
op: [
req_mem_bw_L_raw[op][lv] * mem_bw_boost_factor[op][lv]
for lv in range(self.spatial_mapping.arch_level[op])
]
for op in self.operand_list
}
# Calculates the average required memory bw.
# Distinguish read and write, unify input operands and output operand
# For input operands
for operand in self.layer_node.input_operands:
for mem_level in range(self.mem_level[operand]):
# average required memory BW
rd_out_to_low_bw = req_mem_bw_H_raw[operand][mem_level]
wr_in_by_low_bw = 0
rd_out_to_high_bw = 0
wr_in_by_high_bw = req_mem_bw_L_raw[operand][mem_level + 1]
self.unit_mem_data_movement[operand][mem_level].set_req_mem_bw_aver(
rd_out_to_low_bw,
wr_in_by_low_bw,
rd_out_to_high_bw,
wr_in_by_high_bw,
)
# data transfer period
rd_out_to_low_pd = cycle_each_level[operand][mem_level]
wr_in_by_low_pd = 0
rd_out_to_high_pd = 0
wr_in_by_high_pd = cycle_each_level[operand][mem_level + 1]
self.unit_mem_data_movement[operand][mem_level].set_data_trans_period(
rd_out_to_low_pd,
wr_in_by_low_pd,
rd_out_to_high_pd,
wr_in_by_high_pd,
)
# data transfer period count
rd_out_to_low_pc = (
self.temporal_mapping.total_cycle
// cycle_each_level[operand][mem_level]
)
wr_in_by_low_pc = 0
rd_out_to_high_pc = 0
wr_in_by_high_pc = (
self.temporal_mapping.total_cycle
// cycle_each_level[operand][mem_level + 1]
)
self.unit_mem_data_movement[operand][
mem_level
].set_data_trans_period_count(
rd_out_to_low_pc,
wr_in_by_low_pc,
rd_out_to_high_pc,
wr_in_by_high_pc,
)
# per-period data transfer amount
rd_out_to_low_da = (
data_each_level_unrolled[operand][mem_level]
* mem_bw_boost_factor[operand][mem_level]
)
wr_in_by_low_da = 0
rd_out_to_high_da = 0
wr_in_by_high_da = data_each_level_unrolled[operand][mem_level + 1]
self.unit_mem_data_movement[operand][
mem_level
].set_data_trans_amount_per_period(
rd_out_to_low_da,
wr_in_by_low_da,
rd_out_to_high_da,
wr_in_by_high_da,
)
# For output operand
output_operand = self.layer_node.output_operand
for mem_level in range(self.mem_level[output_operand]):
wr_in_by_low_bw = req_mem_bw_H_raw[output_operand][mem_level]
rd_out_to_high_bw = req_mem_bw_L_raw[output_operand][mem_level + 1]
wr_in_by_low_pd = cycle_each_level[output_operand][mem_level]
rd_out_to_high_pd = cycle_each_level[output_operand][mem_level + 1]
wr_in_by_low_pc = (
self.temporal_mapping.total_cycle
// cycle_each_level[output_operand][mem_level]
)
rd_out_to_high_pc = (
self.temporal_mapping.total_cycle
// cycle_each_level[output_operand][mem_level + 1]
)
wr_in_by_low_da = (
data_each_level_unrolled[output_operand][mem_level]
* mem_bw_boost_factor[output_operand][mem_level]
)
rd_out_to_high_da = data_each_level_unrolled[output_operand][mem_level + 1]
if self.psum_flag[mem_level]:
rd_out_to_low_bw = wr_in_by_low_bw
rd_out_to_low_pd = wr_in_by_low_pd
rd_out_to_low_pc = wr_in_by_low_pc
rd_out_to_low_da = wr_in_by_low_da
else:
rd_out_to_low_bw = 0
rd_out_to_low_pd = 0
rd_out_to_low_pc = 0
rd_out_to_low_da = 0
if self.psum_flag[mem_level + 1]:
wr_in_by_high_bw = rd_out_to_high_bw
wr_in_by_high_pd = rd_out_to_high_pd
wr_in_by_high_pc = rd_out_to_high_pc
wr_in_by_high_da = rd_out_to_high_da
else:
wr_in_by_high_bw = 0
wr_in_by_high_pd = 0
wr_in_by_high_pc = 0
wr_in_by_high_da = 0
# average required memory BW
self.unit_mem_data_movement[output_operand][mem_level].set_req_mem_bw_aver(
rd_out_to_low_bw, wr_in_by_low_bw, rd_out_to_high_bw, wr_in_by_high_bw
)
# data transfer period
self.unit_mem_data_movement[output_operand][
mem_level
].set_data_trans_period(
rd_out_to_low_pd, wr_in_by_low_pd, rd_out_to_high_pd, wr_in_by_high_pd
)
# data transfer period count
self.unit_mem_data_movement[output_operand][
mem_level
].set_data_trans_period_count(
rd_out_to_low_pc, wr_in_by_low_pc, rd_out_to_high_pc, wr_in_by_high_pc
)
# per-period data transfer amount
self.unit_mem_data_movement[output_operand][
mem_level
].set_data_trans_amount_per_period(
rd_out_to_low_da, wr_in_by_low_da, rd_out_to_high_da, wr_in_by_high_da
)
# Calculate the instant memory updating behavior.
top_ir_loop_size = self.temporal_mapping.top_ir_loop_size
for operand in self.operand_list:
for level, data_movement_item in enumerate(
self.unit_mem_data_movement[operand]
):
req_mem_bw_aver = data_movement_item.req_mem_bw_aver
# calculate "instant required memory BW" based on "average required memory BW"
rd_out_to_low_bw = (
req_mem_bw_aver.rd_out_to_low * top_ir_loop_size[operand][level]
)
wr_in_by_low_bw = (
req_mem_bw_aver.wr_in_by_low * top_ir_loop_size[operand][level]
)
rd_out_to_high_bw = (
req_mem_bw_aver.rd_out_to_high
* top_ir_loop_size[operand][level + 1]
)
wr_in_by_high_bw = (
req_mem_bw_aver.wr_in_by_high * top_ir_loop_size[operand][level + 1]
)
data_movement_item.set_req_mem_bw_inst(
rd_out_to_low_bw,
wr_in_by_low_bw,
rd_out_to_high_bw,
wr_in_by_high_bw,
)
data_trans_period = data_movement_item.data_trans_period
# calculate "instant data transferring window", assuming non-double buffered memory
rd_out_to_low_wd = (
data_trans_period.rd_out_to_low // top_ir_loop_size[operand][level]
)
wr_in_by_low_wd = (
data_trans_period.wr_in_by_low // top_ir_loop_size[operand][level]
)
rd_out_to_high_wd = (
data_trans_period.rd_out_to_high
// top_ir_loop_size[operand][level + 1]
)
wr_in_by_high_wd = (
data_trans_period.wr_in_by_high
// top_ir_loop_size[operand][level + 1]
)
data_movement_item.set_inst_data_trans_window(
rd_out_to_low_wd,
wr_in_by_low_wd,
rd_out_to_high_wd,
wr_in_by_high_wd,
)
    ## This function sets all the data traffic between the top level memory and the external world to 0
    # in unit_mem_data_movement.
def disable_data_traffic_external(self):
for operand in self.operand_list:
mem_level = self.mem_level[operand] - 1
self.unit_mem_data_movement[operand][mem_level].update_single_dir_data(
"rd_out_to_high", 0
)
self.unit_mem_data_movement[operand][mem_level].update_single_dir_data(
"wr_in_by_high", 0
        )

# ==== End of file: zigzag/classes/mapping/combined_mapping.py ====
from typing import Dict
from math import prod
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from zigzag.classes.workload.layer_node import LayerNode
import zigzag.classes.mapping.mapping_assist_funcs as mapping_assist_funcs
## Class that collect all the info related to spatial mapping.
class SpatialMapping:
## The class constructor
# @param spatial_mapping_dict
# @param layer_node
def __init__(self, spatial_mapping_dict: Dict, layer_node: "LayerNode"):
self.mapping_dict_origin = spatial_mapping_dict
self.mapping_dict_reform = mapping_assist_funcs.decouple_pr_loop(
spatial_mapping_dict, layer_node
)
self.layer_node = layer_node
self.operand_list = layer_node.operand_list
# Extract architecture level count for each operand from spatial mapping definition, starting from MAC level
self.arch_level = {op: len(smap) for (op, smap) in spatial_mapping_dict.items()}
# Calculate unrolled loop size for different loop types (r/ir/total)
self.calc_unroll_size()
# Calculate total/unique/duplicate unit count
self.calc_unit_count()
        # Calculate data serve scope: each data element serves / is served by how many units at the level below
        # NOTE: data_serve_scope doesn't include the MAC level, thus is one level less than other spatial mapping attributes.
self.calc_data_serve_scope()
# Calculate memory bandwidth incremental factor between architectural levels
# mem_bw_boost_factor doesn't include MAC level, thus is one level less than other spatial mapping attributes.
self.calc_mem_bw_boost_factor()
# Added for loma: Get list of the spatially unrolled loops, without any information about arch levels
self.save_spatial_loop_dim_size()
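    # Hedged sketch of the expected spatial_mapping_dict structure (operands, levels and
    # unroll sizes below are made up for illustration). For every operand there is one list
    # of (dimension, unroll size) tuples per architectural level, index 0 being the
    # operational (MAC) array level:
    #   spatial_mapping_dict = {
    #       "O": [[("K", 32)], [], []],
    #       "W": [[("K", 32)], [], []],
    #       "I": [[("K", 32)], [], []],
    #   }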
def __str__(self):
return f"SpatialMapping({self.mapping_dict_origin})"
def __repr__(self):
return str(self)
## JSON representation of this object to save it to a file.
def __jsonrepr__(self):
return {"spatial_mapping": self.mapping_dict_origin}
## Return the unrolled loops for operand 'op' at level 'level'.
# 'level' = 0 would signify the operational level.
# @param op
# @param level
def get_unrolling(self, op: str, level: int):
return self.mapping_dict_origin[op][level]
## Calculate unrolled loop size for different loop types (r/ir/total) per operand per architecture level
def calc_unroll_size(self):
# Initialization
unroll_size_r = {op: [1] * arch_lv for (op, arch_lv) in self.arch_level.items()}
unroll_size_ir = {
op: [1] * arch_lv for (op, arch_lv) in self.arch_level.items()
}
unroll_size_total = {
op: [1] * arch_lv for (op, arch_lv) in self.arch_level.items()
}
# Go through the reformed spatial mapping and extract the unroll size
for operand in self.operand_list:
for level, current_level_loops in enumerate(
self.mapping_dict_reform[operand]
):
for loop_type, loop_dim in current_level_loops:
if (
loop_type
in self.layer_node.operand_loop_dim_reform[operand]["r"]
):
unroll_size_r[operand][level] *= loop_dim
else:
unroll_size_ir[operand][level] *= loop_dim
unroll_size_total[operand][level] *= loop_dim
self.unroll_size_r = unroll_size_r
self.unroll_size_ir = unroll_size_ir
self.unroll_size_total = unroll_size_total
## Calculate total/unique/duplicate unit count per operand per architecture level
def calc_unit_count(self):
# Number of unit at each level (for each operand)
        # A round call is added because the numbers don't remain integer due to numerical instability in self.mapping_dict_reform
unit_count = {
op: [
round(
round(prod(self.unroll_size_total[op][lv : self.arch_level[op]]), 3)
)
for lv in range(self.arch_level[op])
]
for op in self.operand_list
}
""" ASSERT: The bottom level (MAC level) unit count must be the same for all operand """
bottom_unit_count = [unit_count[op][0] for op in unit_count.keys()]
assert all(
x == bottom_unit_count[0] for x in bottom_unit_count
), f"The MAC level unit count is not the same for all operand {bottom_unit_count}, please correct the spatial mapping."
""" Number of unit at each level that hold unique data (for each operand) """
unit_unique = {
op: [
prod(self.unroll_size_r[op][lv : self.arch_level[op]])
for lv in range(self.arch_level[op])
]
for op in self.operand_list
}
""" Number of unit at each level that hold the same data (for each operand) """
unit_duplicate = {
op: [
prod(self.unroll_size_ir[op][lv : self.arch_level[op]])
for lv in range(self.arch_level[op])
]
for op in self.operand_list
}
self.unit_count = unit_count
self.unit_unique = unit_unique
self.unit_duplicate = unit_duplicate
    ## Calculate the data serve scope, i.e., for input operands, how many units at the level below
    # each data element is broadcast to; for the output operand, how many units add/collect their
    # output values into one result that is pushed to the level above.
    #
    # NOTE: data_serve_scope doesn't include the MAC level, thus is one level less than other spatial mapping attributes.
    #
    # data_serve_scope is calculated by dividing unit_duplicate at the current level by unit_duplicate at one level above.
def calc_data_serve_scope(self):
data_serve_scope = {
op: [
self.unit_duplicate[op][lv] / self.unit_duplicate[op][lv + 1]
for lv in range(self.arch_level[op] - 1)
]
for op in self.operand_list
}
self.data_serve_scope = data_serve_scope
    ## Calculate the memory bandwidth incremental factor between architectural levels.
    #
    # NOTE: mem_bw_boost doesn't include the MAC level, thus is one level less than other spatial mapping attributes.
    #
    # mem_bw_boost is calculated by dividing unit_unique at the current level by unit_unique at one level above.
def calc_mem_bw_boost_factor(self):
mem_bw_boost = {
op: [
round(self.unit_unique[op][lv] / self.unit_unique[op][lv + 1])
for lv in range(self.arch_level[op] - 1)
]
for op in self.operand_list
}
self.mem_bw_boost = mem_bw_boost
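    # Worked example (hypothetical numbers) tying the counts above together for one operand
    # with 3 architectural levels, unroll_size_r = [8, 4, 1] and unroll_size_ir = [2, 1, 1]:
    #   unit_count       = [64, 4, 1]  # product of total unrolling from each level upwards
    #   unit_unique      = [32, 4, 1]  # product of r unrolling from each level upwards
    #   unit_duplicate   = [2, 1, 1]   # product of ir unrolling from each level upwards
    #   data_serve_scope = [2, 1]      # unit_duplicate[lv] / unit_duplicate[lv + 1]
    #   mem_bw_boost     = [8, 4]      # unit_unique[lv] / unit_unique[lv + 1]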
# Save the loops that were unrolled spatially in a list without any arch level information for easy access in loma.
def save_spatial_loop_dim_size(self):
# We take one of the input operands and go through the spatial mapping dict for that operand.
# Which operand shouldn't matter as all operands store the same loops, but possibly at different arch levels.
op = self.layer_node.input_operands[0]
self.spatial_loop_dim_size = [
loop
for spatial_loops in self.mapping_dict_origin[op]
for loop in spatial_loops
        ]

# ==== End of file: zigzag/classes/mapping/spatial/spatial_mapping.py ====
from typing import Dict
from math import prod
from zigzag.classes.workload.layer_node import LayerNode
from zigzag.utils import pickle_deepcopy
## Class that collect all the info related to temporal mapping.
class TemporalMapping:
## The class constructor
# @param temporal_mapping_dict
# @param layer_node
def __init__(self, temporal_mapping_dict: Dict, layer_node: LayerNode):
self.mapping_dic_origin = temporal_mapping_dict
self.layer_node = layer_node
self.operand_list = layer_node.operand_list
# Extract memory hierarchy level count for each operand from temporal mapping definition
self.mem_level = {op: len(tmap) for (op, tmap) in temporal_mapping_dict.items()}
# For each memory level, if the innermost/bottom loop is ir loop, merge it down to the below level
self.innermost_stationary_loop_merge_down()
# Calculate the current and below level (cabl) iteration cycle for each memory level,
# i.e., each memory level refreshes once, how many cycles it covers
self.calc_cycle_cabl_level()
        # Calculate the current and below loop (cabl) iteration cycle for each loop,
        # i.e., each loop iterates once, how many cycles it covers
        # self.calc_cycle_cabl_loop()
        # Calculate the top-ir loop size at each memory level, which will be used
        # to compute the instant required memory BW in combined_mapping.py
self.calc_top_r_and_ir_loop()
def __str__(self):
return str(self.mapping_dic_stationary)
def __repr__(self):
return str(self)
## JSON representation of this object to save it to a json file.
def __jsonrepr__(self):
return {"temporal_mapping": self.mapping_dic_stationary}
    ## Iteratively merge down the ir loops located at the bottom of each memory level.
    # Also calculate the MAC-level data stationary cycle, i.e., the product of the innermost memory level's bottom ir loops.
def innermost_stationary_loop_merge_down(self):
# Initialization
mapping_current = pickle_deepcopy(self.mapping_dic_origin)
mapping_previous = pickle_deepcopy(self.mapping_dic_origin)
done = False
while not done:
mapping_st = {
op: [[] for _ in range(self.mem_level[op])] for op in self.operand_list
}
MAC_level_st = {op: 1 for op in self.operand_list}
for operand in self.mem_level.keys():
for level, current_level_loops in enumerate(mapping_previous[operand]):
if not current_level_loops:
mapping_st[operand][level] = pickle_deepcopy(
current_level_loops
)
else:
for loop_type, loop_dim in current_level_loops:
if (
loop_type
in self.layer_node.operand_loop_dim[operand]["ir"]
):
if level == 0:
MAC_level_st[operand] *= loop_dim
mapping_st[operand][level].append(
(loop_type, loop_dim)
)
mapping_current[operand][level].remove(
(loop_type, loop_dim)
)
else:
mapping_st[operand][level - 1].append(
(loop_type, loop_dim)
)
mapping_current[operand][level].remove(
(loop_type, loop_dim)
)
else:
mapping_st[operand][level].extend(
mapping_current[operand][level]
)
break
if mapping_st != mapping_previous:
mapping_previous = pickle_deepcopy(mapping_st)
mapping_current = pickle_deepcopy(mapping_st)
continue
else:
done = True
self.mapping_dic_stationary = mapping_st
self.MAC_level_data_stationary_cycle = MAC_level_st
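    # Illustrative sketch (hypothetical mapping; assumes OX and OY are irrelevant to W,
    # as in a standard convolution): {"W": [[("OX", 4), ("K", 2)], [("OY", 7)]]} becomes
    # {"W": [[("OX", 4), ("K", 2), ("OY", 7)], []]} with MAC_level_data_stationary_cycle["W"] = 4,
    # since W stays stationary at the MAC level during the 4 innermost OX iterations.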
## Calculate the iteration cycles that each memory level covers
def calc_cycle_cabl_level(self):
# iteration_each_level only counts for the current level for-loops
iteration_each_level = {
op: [
prod(
[loop_dim for (_, loop_dim) in self.mapping_dic_stationary[op][lv]]
)
for lv in range(self.mem_level[op])
]
for op in self.operand_list
}
# cycle_per_level count for current and below levels' for-loops
cycle_cabl_level = {
op: [
prod(iteration_each_level[op][0 : lv + 1])
for lv in range(self.mem_level[op])
]
for op in self.operand_list
}
# ASSERT: The total cycle count must be the same for all operand
total_cycle = [cycle_cabl_level[op][-1] for op in self.operand_list]
assert all(
x == total_cycle[0] for x in total_cycle
), f"The total cycle count is not the same for all operand {total_cycle}, please correct the temporal mapping."
self.cycle_cabl_level = cycle_cabl_level
self.total_cycle = total_cycle[0]
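    # Illustrative example (hypothetical mapping): if mapping_dic_stationary["W"] is
    # [[("K", 2), ("C", 4)], [("OY", 7)]], then iteration_each_level = [8, 7],
    # cycle_cabl_level["W"] = [8, 56] and total_cycle = 56, which must match for every operand.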
def calc_top_r_and_ir_loop(self):
# top_ir_loop_size: For each memory level, from top to bottom, the product of top few irrelevant loops.
# top_ir is used for later required instant memory bandwidth calculation.
# Initialization
# self.mem_level[op] + 1 to add the placeholder for operational array level
top_r_loop_size = {
op: [1 for _ in range(self.mem_level[op] + 1)] for op in self.operand_list
}
top_ir_loop_size = {
op: [1 for _ in range(self.mem_level[op] + 1)] for op in self.operand_list
}
# Check and extract the top ir loops
for operand in self.operand_list:
for level, current_level_loops in enumerate(
self.mapping_dic_stationary[operand]
):
if not current_level_loops:
continue
else:
for loop_type, loop_dim in reversed(current_level_loops):
if loop_type in self.layer_node.operand_loop_dim[operand]["r"]:
top_r_loop_size[operand][level + 1] *= loop_dim
else:
break
for loop_type, loop_dim in reversed(current_level_loops):
if loop_type in self.layer_node.operand_loop_dim[operand]["ir"]:
top_ir_loop_size[operand][level + 1] *= loop_dim
else:
break
self.top_r_loop_size = top_r_loop_size
        self.top_ir_loop_size = top_ir_loop_size

# ==== End of file: zigzag/classes/mapping/temporal/temporal_mapping.py ====
import networkx as nx
from zigzag.classes.workload.layer_node import LayerNode
from typing import Dict, Any
from networkx import DiGraph
## A directed graph of the DNN workload: nodes are LayerNode objects and edges are the
# data dependencies (operand sources) between layers.
class DNNWorkload(DiGraph):
    ## The class constructor
    # Collect all the algorithmic workload information here.
    # @param workload: user-defined workload dict, parsed from the workload definition file (py).
    # @param mapping: user-defined mapping dict, linking operator types to their mapping attributes.
    # @return (self): Directed graph with nodes the layers and edges the connections between layers.
def __init__(self, workload: Dict[Any, Dict], mapping: Dict[Any, Dict], **attr):
super().__init__(**attr)
layer_id_to_obj = {} # Lookup dict for id to LayerNode object translation
self.layer_node_list = []
for layer_id, layer in workload.items():
# TODO Support other type of layers, such as concatenation, max pooling, BN, etc.
# What is special about max pooling?
# elif type(layer_id) == str and layer_id[0:6] == 'concat':
# continue
if layer["operator_type"] in mapping.keys():
for attr_name, attr_va in mapping[layer["operator_type"]].items():
layer[attr_name] = attr_va
else:
for attr_name, attr_va in mapping["default"].items():
layer[attr_name] = attr_va
# For each item in the dict generate the LayerNode and add it to the dnn graph G
layer_node = LayerNode(layer_id, layer)
# Save this layer_id and LayerNode pair in the layer_id_to_obj dict
layer_id_to_obj[layer_id] = layer_node
# self.add_node(layer_id, info=layer_node)
self.add_node(layer_node)
self.layer_node_list.append(layer_node)
# Find all of its operand sources and add edges accordingly
edges = []
for (op, parent_list) in layer.get("operand_source", {}).items():
for parent_id in parent_list:
parent_layer = layer_id_to_obj[parent_id]
edges.append((parent_layer, layer_node))
layer_node.input_operand_source[op] = parent_layer
self.add_edges_from(edges)
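    # Hedged sketch of the expected inputs (keys and values below are illustrative):
    #   workload = {
    #       0: {"operator_type": "Conv", ..., "operand_source": {}},
    #       1: {"operator_type": "Conv", ..., "operand_source": {"I": [0]}},  # I comes from layer 0
    #   }
    #   mapping = {"default": {...}, "Conv": {...}}  # per-operator-type mapping attributes
    #   workload_graph = DNNWorkload(workload, mapping)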
def topological_sort(self):
return nx.topological_sort(self)
def get_node_with_id(self, id):
for node in self.nodes:
if node.id == id:
return node
raise ValueError(
"DNNWorkload instance does not have a node with the requested id"
        )

# ==== End of file: zigzag/classes/workload/dnn_workload.py ====
from math import gcd, prod
import re
from collections import defaultdict
from typing import Dict, List
from copy import deepcopy
## Represents a single layer of the workload. Parses the layer's equation, loop dimensions
# and dimension relations, and derives the operand / loop-relevance information used by
# the mapping and cost model stages.
class LayerNode:
## The class constructor
#
# To construct each layer node, algorithm equation/dimension/indirect relation are parsed.
# This parser collects information of operand, loop dimension, and loop relevance.
# Equal-to-1 loop dimensions are eliminated.
#
# @param layer_id: The identifier (key) of the layer, as defined in the workload
# @param layer_attrs: contains attributes specified below:
# @param node_name: an optional name for the Node. E.g. the node's name from the onnx model.
def __init__(self, layer_id, layer_attrs, node_name="", type=None):
        # layer_attrs contains the attributes specified below:
# *equation: core computation equation, e.g. 'O[g][b][k][oy][ox]+=W[g][k][c][fy][fx]*I[g][b][c][ix][iy]',
# 'Y[i][j] += A[i][k] * B[k][j]', 'Y[i][j] += A[i][k][l] * B[k][j] * C[l][j]', etc.
# *loop_dim_size: size of each computation loop, e.g. {'B': 1, 'K': 32, 'C': 64, 'OY': 28, 'OX': 28,
# 'FY': 1, 'FX': 1, 'G': 1}.
# *equation_relations: for the operand dimension that is not directly a loop dimension,
# a set of specific relation equations between them (operand dimension and loop dimension) is required,
# e.g. ['ix=ox+fx-1', 'iy=oy+fy-1'].
# *core_allocation: the accelerator core on which this layer is executed
# *memory_operand_links: the link between layer operands and where they are stored in the memory hierarchy.
# :return (self)
# ------- directly get from inputs -------
# - loop_dim_size: collection of loop dimension size that >1.
# - operand_precision
# - loop_dim_list, e.g. ['B', 'K', 'C', ...], collection of loop dimension whose size >1.
# - operand_list, e.g. ['W', 'I', 'O']
# ------- operand and loop dimension relation -------
# - operand_loop_dim: operand and loop dimension relationship, e.g.
# operand_loop_dim = {'O': {'r': ['B', 'K', 'OY', 'OX'], 'ir': ['C', 'FX', 'FY'], 'pr': {}},
# 'W': {'r': ['K', 'C', 'FY', 'FX'], 'ir': ['B', 'OX', 'OY'], 'pr': {}},
# 'I': {'r': ['B', 'C'], 'ir': ['K'], 'pr': {'IY': ('OY', 'FY'), 'IX': ('OX', 'FX')}}}
# ------- basic layer information extraction -------
# - total_MAC_count
# - operand_size_elem
# - operand_size_bit
# - operand_data_reuse
self.id = layer_id
self.layer_attrs = layer_attrs
self.name = node_name
self.type = type
# equation_relations has been replaced by dimension_relations.
# Check if layer has equation_relations and notify user.
if "equation_relations" in layer_attrs:
raise ValueError(
f"Please replace equation_relations by dimension_relations for layer {self}."
)
# Get required attributes from layer_attrs
equation: str = layer_attrs.get("equation")
loop_dim_size: Dict[str, int] = layer_attrs.get("loop_dim_size")
pr_loop_dim_size: Dict[str, int] = layer_attrs.get("pr_loop_dim_size", None)
operand_precision: Dict[str, int] = layer_attrs.get("operand_precision")
dimension_relations: List[str] = layer_attrs.get("dimension_relations", [])
user_spatial_mapping: Dict[str, tuple] = layer_attrs.get(
"spatial_mapping", None
)
user_temporal_ordering = layer_attrs.get("temporal_ordering", None)
core_allocation: int = layer_attrs.get("core_allocation", None)
memory_operand_links: Dict[str, str] = layer_attrs.get(
"memory_operand_links", None
)
source_storage_level: int = layer_attrs.get("source_storage_level", {})
        operand_source_dimension_mapping: Dict[str, Dict[str, str]] = layer_attrs.get(
            "operand_source_dimension_mapping", {}
        )
constant_operands: List[str] = layer_attrs.get("constant_operands", [])
input_operand_source: Dict[str, list] = layer_attrs.get(
"operand_source", dict()
)
# Save the padding for different tensor dimensions
padding: Dict[str, tuple] = layer_attrs.get(
"padding", {}
) # Empty dict signals no padding in any dimension
self.equation = equation
self.loop_dim_size = dict(
item for item in tuple(loop_dim_size.items())
) # if item[1] != 1)
self.pr_loop_dim_size = pr_loop_dim_size
self.operand_precision = operand_precision
self.dimension_relations = dimension_relations
self.loop_dim_list = list(loop_dim_size.keys())
self.user_spatial_mapping = user_spatial_mapping
self.user_temporal_ordering = user_temporal_ordering
self.core_allocation = core_allocation
self.memory_operand_links = memory_operand_links.copy()
self.source_storage_level = source_storage_level
self.operand_source_dimension_mapping = operand_source_dimension_mapping
self.constant_operands = constant_operands
self.padding = padding
# Step1: extract partially-relevant data dimension and its relation to loop dimensions.
pr_loop, pr_loop_list, pr_scaling_factors = self.build_pr_funcs()
self.pr_loop = pr_loop
self.pr_scaling_factors = pr_scaling_factors
if not self.pr_loop_dim_size:
self.pr_loop_dim_size = {
dim: self.calc_pr_dimension_size_total(dim) for dim in pr_loop
}
# Step2: extract relevant and irrelevant loop dimensions.
(
operand_loop_dim,
operand_loop_dim_reform,
operand_list,
operand_dimensionality_order,
) = self.extract_r_ir_loop_info(equation, loop_dim_size, pr_loop, pr_loop_list)
self.operand_loop_dim = operand_loop_dim
self.operand_loop_dim_reform = operand_loop_dim_reform
self.output_operand = operand_list[0]
self.input_operands = operand_list[1:]
self.operand_list = operand_list
self.input_operand_source = input_operand_source
self.operand_dimensionality_order = operand_dimensionality_order
# Save the variable (non-constant) input operands
self.variable_input_operands: list = [
op for op in self.input_operands if op not in self.constant_operands
]
# Save the way an operand's tensor should be reshaped for interaction with other nodes.
self.operand_tensor_reshape: Dict[str, list] = layer_attrs.get(
"operand_tensor_reshape", {op: [] for op in self.operand_list}
)
# Step3: extract layer info, e.g. total operand size, total operand data reuse, total MAC operation, etc.
self.extract_layer_info()
def build_pr_funcs(self):
        # Dimensions with size 1 may be absent from self.loop_dim_size but are required in
        # extract_pr_loop_info, so default any missing dimension size to 1.
loop_dim_size = defaultdict(lambda: 1)
loop_dim_size.update(self.loop_dim_size)
if self.dimension_relations:
pr_loop, pr_loop_list, pr_scaling_factors = self.extract_pr_loop_info(
self.dimension_relations
)
else:
pr_loop, pr_loop_list, pr_scaling_factors = {}, [], {}
return pr_loop, pr_loop_list, pr_scaling_factors
def get_core_allocation(self):
return self.core_allocation
def __str__(self):
return f"LayerNode_{self.id}"
def __repr__(self):
return str(self)
## JSON representation used for saving this object to a json file.
def __jsonrepr__(self):
return {
"equation": self.equation,
"equation_relations": self.dimension_relations,
"loop_dimensions": self.loop_dim_size,
"operand_precision": self.operand_precision,
"core_allocation": self.core_allocation,
"user_spatial_mapping": self.user_spatial_mapping,
"memory_operand_links": self.memory_operand_links,
"source_storage_level": self.source_storage_level,
}
## Calculates the tensor size (nb of elements) for the given operand layer_op with the given loop dimension sizes loop_sizes.
# @param layer_op: str. A String representing the layer operand for which to compute the tensor size.
# @param loop_sizes: dict. A dict with string keys representing the dimension and integer values representing the size.
def calc_tensor_size(self, layer_op, loop_sizes):
return prod(self.calc_tensor_dims(layer_op, loop_sizes).values())
    ## Compute the size of dimension 'dim' given the loop sizes in 'loop_sizes',
    # handling partially relevant (pr) dimensions via their scaling factors.
def calc_tensor_dim(self, loop_sizes, dim):
if dim in loop_sizes:
return loop_sizes[dim]
elif dim in self.pr_loop:
related_dimension_sizes = [
loop_sizes[dimension] for dimension in self.pr_loop[dim]
]
scaling_factors = list(self.pr_scaling_factors[dim].values())
assert (
len(related_dimension_sizes) == len(scaling_factors) == 2
), "Shouldn't happen if partial relevancy checks in extract_pr_loop_info() are done correctly."
args = (
val
for pair in zip(scaling_factors, related_dimension_sizes)
for val in pair
)
pr_dim_size = self.calc_pr_dimension_size(*args)
# Clip this to the largest possible size for this partially relevant dimension (computed at initialization based on padding)
pr_dim_size = min(self.pr_loop_dim_size[dim], pr_dim_size)
return pr_dim_size
elif dim in self.loop_dim_size:
assert (
self.loop_dim_size[dim] == 1
), "This line should only be reached when the dim has a size of 1 in the layer."
return 1
else:
raise ValueError(
"Something went wrong in the initialization of the layer, or in the caller function."
)
def calc_tensor_dims(self, layer_op, loop_sizes):
out = {}
op_dimensions = self.operand_loop_dim[layer_op]
for dim in op_dimensions["r"] + list(op_dimensions["pr"].keys()):
out[dim] = self.calc_tensor_dim(loop_sizes, dim)
return out
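    # Illustrative example (matching the sample layer at the bottom of this file): for operand
    # "I" with pr relations ix = ox + fx and iy = oy + fy, calling
    # calc_tensor_dims("I", self.loop_dim_size) returns {"G": 2, "B": 1, "C": 64, "IX": 30, "IY": 30},
    # where IX = IY = 30 follows from OX = OY = 28 and FX = FY = 3.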
## Compute the total pr dimension size of this node, taking padding into account.
# @param dim (str): The partially relevant dimension, e.g. 'IX'.
# @return int: The total partially relevant dimension size
def calc_pr_dimension_size_total(self, dim):
related_dimension_sizes = [
self.loop_dim_size[related_dim] for related_dim in self.pr_loop[dim]
]
scaling_factors = list(
self.pr_scaling_factors[dim].values()
) # assumes this dict is ordered
assert (
len(related_dimension_sizes) == len(scaling_factors) == 2
), "Shouldn't happen if partial relevancy checks in extract_pr_loop_info() are done correctly."
args = (
val
for pair in zip(scaling_factors, related_dimension_sizes)
for val in pair
)
total_pr_dim_size = self.calc_pr_dimension_size(*args)
# Partially relevant loop dimensions can also have padding, so get the padding for this pr dimension and subtract
padding = self.padding.get(dim, (0, 0)) # default = (0, 0)
total_pr_dim_size_without_padding = int(total_pr_dim_size - sum(padding))
return total_pr_dim_size_without_padding
@staticmethod
## Calculates the number of unique indices c generated by iterating through the indices
# a in range(0,A,1) and b in range(0,B,1) according to the equation c = sa * a + sb * b.
# sa and sb thus represent the scaling of a, resp. b.
def calc_pr_dimension_size(sa, A, sb, B):
return int(A * B - max(0, B - (sa / gcd(sa, sb))) * (A - (sb / gcd(sa, sb))))
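    # Worked example: for ix = 1*ox + 1*fx with OX = 28 and FX = 3 (a stride-1 convolution),
    # calc_pr_dimension_size(1, 28, 1, 3) = 28*3 - max(0, 3 - 1) * (28 - 1) = 84 - 54 = 30
    # unique ix values. For a stride-2 variant ix = 2*ox + 1*fx with OX = 14 and FX = 3,
    # it gives 14*3 - max(0, 3 - 2) * (14 - 1) = 42 - 13 = 29.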
@staticmethod
def return_lambda(equal_sign_right):
return eval("lambda n: " + equal_sign_right)
def extract_pr_loop_info(self, equation_relations):
pr_loop: Dict[str, list] = {}
pr_loop_list: List[str] = []
pr_scaling_factors: Dict[str, list] = {}
padding: Dict[str, int] = {}
for relation in equation_relations:
relation_disassembly = re.findall("[a-zA-Z]+", relation)
assert (
len(relation_disassembly) == 3
), f"equation_relation {relation} does not involve a linear relationship between two dimension iterators."
key = relation_disassembly[0].upper()
val = [loop_dim.upper() for loop_dim in relation_disassembly[1:]]
pr_loop[key] = val
pr_loop_list.extend([key] + val)
# To extract the scaling factors for the different loop dimension iterators, we need to make sure
# there is a scaling factor present in the equation. If it is not present, raise an exception.
scaling_factors = {}
for val_lower in relation_disassembly[1:]:
if relation[relation.index(val_lower) - 1] == "*":
if not relation[relation.index(val_lower) - 2].isdigit():
raise NotImplementedError(
f"Please use a scaling factor for every dimension iterator on the RHS of equation {relation}"
)
else:
scaling_factors[val_lower] = int(
re.findall("(\\d+)(?=\\*" + val_lower + ")", relation)[0]
)
else:
scaling_factors[val_lower] = 1
# scaling_factors = re.findall('[0-9]+', relation)
assert (
len(scaling_factors) == 2
), f"Please remove any constants in the equation relation {relation}."
pr_scaling_factors[key] = scaling_factors
return pr_loop, pr_loop_list, pr_scaling_factors
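    # Illustrative example: extract_pr_loop_info(["ix=ox+fx-1"]) yields
    #   pr_loop = {"IX": ["OX", "FX"]}
    #   pr_loop_list = ["IX", "OX", "FX"]
    #   pr_scaling_factors = {"IX": {"ox": 1, "fx": 1}}
    # while an explicitly scaled relation such as "ix=2*ox+1*fx" yields
    #   pr_scaling_factors = {"IX": {"ox": 2, "fx": 1}}.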
@staticmethod
def extract_r_ir_loop_info(equation, loop_dim_size, pr_loop, pr_loop_list):
operand_loop_dim: Dict[str, Dict] = {}
operand_list = []
equation = equation.replace("*", " * ")
equation = equation.replace("=", " = ")
equation = equation.replace("+", " + ")
equation_disassembly = re.findall("[a-zA-Z,0-9,=,*,+]+", equation)
# filter out + that directly precedes an = (+=) or another + (++) to make this work for concat and add
prev_char = None
for i, char in enumerate(equation_disassembly):
if (char == "=" or char == "+") and prev_char == "+":
equation_disassembly.pop(i - 1)
prev_char = char
split_location = [
i for (i, x) in enumerate(equation_disassembly) if x in ["=", "*", "+"]
] + [len(equation_disassembly)]
dimension_list = list(loop_dim_size.keys())
begin_idx = 0
operand_dimensionality_order = {}
for split_loc in split_location:
operand = equation_disassembly[begin_idx]
operand_list.append(operand)
operand_loop_dim[operand] = {}
r_loop_list = [
loop_dim.upper()
for loop_dim in equation_disassembly[begin_idx + 1 : split_loc]
]
ir_loop_list = list(set(dimension_list).difference(r_loop_list))
pr_loop_remove_flag = any(
loop in list(pr_loop.keys()) for loop in r_loop_list
)
if pr_loop_remove_flag:
operand_loop_dim[operand]["r"] = [
loop for loop in r_loop_list if loop not in pr_loop_list
] # and loop_dim_size[loop] != 1]
operand_loop_dim[operand]["ir"] = [
loop
for loop in ir_loop_list
if loop not in pr_loop_list and loop_dim_size[loop] != 1
]
operand_loop_dim[operand]["pr"] = pr_loop
else:
operand_loop_dim[operand]["r"] = [
loop for loop in r_loop_list if loop_dim_size[loop] != 1
]
operand_loop_dim[operand]["ir"] = [
loop for loop in ir_loop_list if loop_dim_size[loop] != 1
]
operand_loop_dim[operand]["pr"] = {}
begin_idx = split_loc + 1
# Add the dimensionality order of all relevant (including partially relevant) dimensions of this operand
operand_dimensionality_order[operand] = r_loop_list
# operand_loop_dim_reform remove the pr loop dict, and put the pr-related data dimension (e.g. IX and IY)
# to r and ir dict with "_r" and "_ir" suffix. It brings benefits to loop info extraction after pr loop decoupling step.
operand_loop_dim_reform = deepcopy(operand_loop_dim)
for operand, dic in operand_loop_dim.items():
del operand_loop_dim_reform[operand]["pr"]
if dic["pr"] != {}:
r_extend_list = [pr_data_dim + "_r" for pr_data_dim in pr_loop.keys()]
ir_extend_list = [pr_data_dim + "_ir" for pr_data_dim in pr_loop.keys()]
operand_loop_dim_reform[operand]["r"] += r_extend_list
operand_loop_dim_reform[operand]["ir"] += ir_extend_list
return (
operand_loop_dim,
operand_loop_dim_reform,
operand_list,
operand_dimensionality_order,
)
    ## This function extracts basic information for each layer node.
    # It sets the attributes: total_MAC_count, operand_size_elem, operand_size_bit, operand_data_reuse.
def extract_layer_info(self):
# total MAC operation count
total_MAC_count: int = 1
for ky in self.loop_dim_size:
total_MAC_count *= self.loop_dim_size[ky]
self.total_MAC_count = total_MAC_count
# each operand's size (Unit: # of data element)
operand_size_elem: Dict[str, int] = {}
for operand, relevancy in self.operand_loop_dim.items():
operand_size_elem[operand] = 1
for r_loop in relevancy["r"]:
operand_size_elem[operand] *= self.loop_dim_size[r_loop]
for pr_loop, pr_loop_collect in relevancy["pr"].items():
multiply_factor = self.calc_tensor_dims(operand, self.loop_dim_size)[
pr_loop
]
operand_size_elem[operand] *= multiply_factor
self.operand_size_elem = operand_size_elem
# each operand's size (Unit: bit)
operand_size_bit: Dict[str, int] = {}
for operand, size_in_elem in operand_size_elem.items():
operand_size_bit[operand] = size_in_elem * self.operand_precision[operand]
self.operand_size_bit = operand_size_bit
        # each operand's total data reuse factor, which is total MAC Op count / total operand size (in elements),
        # i.e. how many MAC operations each data element supports.
operand_data_reuse: Dict[str, float] = {}
for operand, size_in_elem in operand_size_elem.items():
operand_data_reuse[operand] = total_MAC_count / size_in_elem
self.operand_data_reuse = operand_data_reuse
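        # Illustrative check (hedged, using the example at the bottom of this file):
        # W has relevant dims G*K*C*FY*FX = 2*32*64*3*3 = 36864 elements, while
        # total_MAC_count = 28901376, so W's data reuse is 28901376 / 36864 = 784 = OY*OX.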
## Return the irrelevant dimensions of layer operand 'layer_op'.
def get_operand_irrelevant_dimensions(self, layer_op: str):
return self.operand_loop_dim[layer_op]["ir"]
## Return the layer operand associated with the given memory operand for this layer.
# If there is no such memory operand, an error is raised.
def get_layer_operand(self, mem_op: str) -> str:
for layer_operand, memory_operand in self.memory_operand_links.items():
if memory_operand == mem_op:
return layer_operand
raise ValueError(f"The memory operand {mem_op} is not present in layer {self}.")
## Return the memory level at which an input operand is stored.
# If this layer node has no information for the given operand, it returns None.
def get_operand_storage_level(self, layer_op: str):
if layer_op not in self.source_storage_level:
return None
return self.source_storage_level[layer_op]
if __name__ == "__main__":
equation = "O[g][b][k][oy][ox]+=W[g][k][c][fy][fx]*I[g][b][c][ix][iy]"
dimension_size = {
"B": 1,
"K": 32,
"C": 64,
"OY": 28,
"OX": 28,
"FY": 3,
"FX": 3,
"G": 2,
}
operand_precision = {"O": 24, "O_final": 24, "W": 8, "I": 8}
equation_relations = ["ix=ox+fx-1", "iy=oy+fy-1"]
aa = LayerNode(equation, dimension_size, operand_precision, equation_relations)
a = 1 | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/workload/layer_node.py | layer_node.py |
from typing import Generator, Any, Tuple
from zigzag.classes.stages.Stage import Stage
from zigzag.classes.cost_model.cost_model import CostModelEvaluation
import os
import pickle
import json
import numpy as np
import logging
logger = logging.getLogger(__name__)
## Class that passes through all results yielded by substages, while also saving the
# complete json representation of each result to a file as it is yielded.
class CompleteSaveStage(Stage):
## The class constructor
# @param list_of_callables: see Stage
# @param dump_filename_pattern: filename string formatting pattern, which can use named field whose values will be
# in kwargs (thus supplied by higher level runnables)
# @param kwargs: any kwargs, passed on to substages and can be used in dump_filename_pattern
def __init__(self, list_of_callables, *, dump_filename_pattern, **kwargs):
super().__init__(list_of_callables, **kwargs)
self.dump_filename_pattern = dump_filename_pattern
## Run the complete save stage by running the substage and saving the CostModelEvaluation json representation.
def run(self) -> Generator[Tuple[CostModelEvaluation, Any], None, None]:
self.kwargs["dump_filename_pattern"] = self.dump_filename_pattern
substage = self.list_of_callables[0](self.list_of_callables[1:], **self.kwargs)
for id, (cme, extra_info) in enumerate(substage.run()):
cme: CostModelEvaluation
# filename = self.dump_filename_pattern.format(datetime=datetime.now().isoformat().replace(":", "-"))
if type(cme.layer) == list:
filename = self.dump_filename_pattern.replace(
"?", "overall_complete"
)
else:
filename = self.dump_filename_pattern.replace(
"?", f"{cme.layer}_complete"
)
self.save_to_json(cme, filename=filename)
logger.info(
f"Saved {cme} with energy {cme.energy_total:.3e} and latency {cme.latency_total2:.3e} to {filename}"
)
yield cme, extra_info
def save_to_json(self, obj, filename):
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as fp:
json.dump(obj, fp, default=self.complexHandler, indent=4)
@staticmethod
def complexHandler(obj):
# print(type(obj))
if isinstance(obj, set):
return list(obj)
if isinstance(obj, np.int32):
return int(obj)
if hasattr(obj, "__jsonrepr__"):
return obj.__jsonrepr__()
else:
raise TypeError(
f"Object of type {type(obj)} is not serializable. Create a __jsonrepr__ method."
)
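    # Hedged sketch of the __jsonrepr__ hook that complexHandler expects on custom
    # objects (the class below is hypothetical, purely for illustration):
    #
    #     class MyMetric:
    #         def __init__(self, value):
    #             self.value = value
    #         def __jsonrepr__(self):
    #             return {"value": self.value}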
## Class that passes through results yielded by substages, while also saving a
# simplified json representation of each result to a file as it is yielded.
# In this simple version, only the energy total and latency total are saved.
class SimpleSaveStage(Stage):
## The class constructor
# @param list_of_callables: see Stage
# @param dump_filename_pattern: filename string formatting pattern, which can use named field whose values will be
# in kwargs (thus supplied by higher level runnables)
# @param kwargs: any kwargs, passed on to substages and can be used in dump_filename_pattern
def __init__(self, list_of_callables, *, dump_filename_pattern, **kwargs):
super().__init__(list_of_callables, **kwargs)
self.dump_filename_pattern = dump_filename_pattern
## Run the simple save stage by running the substage and saving the CostModelEvaluation simple json representation.
def run(self) -> Generator[Tuple[CostModelEvaluation, Any], None, None]:
self.kwargs["dump_filename_pattern"] = self.dump_filename_pattern
substage = self.list_of_callables[0](self.list_of_callables[1:], **self.kwargs)
for id, (cme, extra_info) in enumerate(substage.run()):
cme: CostModelEvaluation
# filename = self.dump_filename_pattern.format(datetime=datetime.now().isoformat().replace(":", "-"))
if type(cme.layer) == list:
filename = self.dump_filename_pattern.replace("?", "overall_simple")
else:
filename = self.dump_filename_pattern.replace(
"?", f"{cme.layer}_simple"
)
self.save_to_json(cme, filename=filename)
logger.info(
f"Saved {cme} with energy {cme.energy_total:.3e} and latency {cme.latency_total2:.3e} to {filename}"
)
yield cme, extra_info
def save_to_json(self, obj, filename):
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as fp:
json.dump(obj, fp, default=self.complexHandler, indent=4)
@staticmethod
def complexHandler(obj):
# print(type(obj))
if isinstance(obj, set):
return list(obj)
if isinstance(obj, np.int32):
return int(obj)
if hasattr(obj, "__simplejsonrepr__"):
return obj.__simplejsonrepr__()
else:
raise TypeError(
f"Object of type {type(obj)} is not serializable. Create a __simplejsonrepr__ method."
)
## Class that dumps all received CMEs into a list and saves that list to a pickle file.
class PickleSaveStage(Stage):
## The class constructor
# @param list_of_callables: see Stage
# @param pickle_filename: output pickle filename
# @param kwargs: any kwargs, passed on to substages and can be used in dump_filename_pattern
def __init__(self, list_of_callables, *, pickle_filename, **kwargs):
super().__init__(list_of_callables, **kwargs)
self.pickle_filename = pickle_filename
    ## Run the pickle save stage: run the substage, collect the list of CMEs passed along
    # as extra_info, and pickle that list to a file once the iteration finishes.
    # This should be placed above a ReduceStage such as the SumStage, as we assume the list of CMEs is passed as extra_info.
def run(self) -> Generator[Tuple[CostModelEvaluation, Any], None, None]:
        substage = self.list_of_callables[0](self.list_of_callables[1:], **self.kwargs)
        all_cmes = []  # initialize so the dump below also works when the substage yields nothing
        for id, (cme, extra_info) in enumerate(substage.run()):
all_cmes = [cme for (cme, extra) in extra_info]
yield cme, extra_info
# After we have received all the CMEs, save them to the specified output location.
dirname = os.path.dirname(self.pickle_filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(self.pickle_filename, "wb") as handle:
pickle.dump(all_cmes, handle, protocol=pickle.HIGHEST_PROTOCOL)
logger.info(
f"Saved pickled list of {len(all_cmes)} CMEs to {self.pickle_filename}."
) | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/stages/SaveStage.py | SaveStage.py |
from typing import Generator, Callable, List, Tuple, Any
from zigzag.classes.stages.Stage import Stage
from zigzag.classes.cost_model.cost_model import CostModelEvaluation
from zigzag.classes.hardware.architecture.accelerator import Accelerator
from zigzag.classes.mapping.spatial.spatial_mapping import SpatialMapping
from zigzag.classes.mapping.temporal.temporal_mapping import TemporalMapping
from zigzag.classes.workload.layer_node import LayerNode
import logging
logger = logging.getLogger(__name__)
## Pipeline stage that calls a cost model to evaluate a mapping on a HW config.
class CostModelStage(Stage):
## The class constructor
# Initializes the cost model stage given main inputs
# @param list_of_callables
# @param accelerator
# @param layer
# @param spatial_mapping
# @param temporal_mapping
# @param access_same_data_considered_as_no_access
# @param kwargs
def __init__(
self,
list_of_callables: List[Callable],
*,
accelerator,
layer,
spatial_mapping,
temporal_mapping,
access_same_data_considered_as_no_access=True,
**kwargs
):
super().__init__(list_of_callables, **kwargs)
        self.accelerator = accelerator
        self.layer = layer
        self.spatial_mapping = spatial_mapping
        self.temporal_mapping = temporal_mapping
        self.access_same_data_considered_as_no_access = (
            access_same_data_considered_as_no_access
        )
## Run the cost model stage by calling the internal zigzag cost model with the correct inputs.
def run(self) -> Generator[Tuple[CostModelEvaluation, Any], None, None]:
self.cme = CostModelEvaluation(
accelerator=self.accelerator,
layer=self.layer,
spatial_mapping=self.spatial_mapping,
temporal_mapping=self.temporal_mapping,
# the below parameter is optional
access_same_data_considered_as_no_access=self.access_same_data_considered_as_no_access,
)
yield (self.cme, None)
def is_leaf(self) -> bool:
return True | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/stages/CostModelStage.py | CostModelStage.py |
from typing import Generator, Callable, List
## Abstract superclass for Runnables
class Stage:
## The class constructor
# @param list_of_callables: a list of callables, that must have a signature compatible with this __init__ function
# (list_of_callables, *, required_kwarg1, required_kwarg2, kwarg_with_default=default, **kwargs)
# and return a Stage instance. This is used to flexibly build iterators upon other iterators.
# @param kwargs: any keyword arguments, irrelevant to the specific class in question but passed on down
def __init__(self, list_of_callables: List[Callable], **kwargs):
self.kwargs = kwargs
self.list_of_callables = list_of_callables
if self.is_leaf() and list_of_callables not in ([], tuple(), set(), None):
raise ValueError("Leaf runnable received a non empty list_of_callables")
if list_of_callables in ([], tuple(), set(), None) and not self.is_leaf():
raise ValueError(
"List of callables empty on a non leaf runnable, so nothing can be generated.\
Final callable in list_of_callables must return Stage instances that have is_leaf() == True"
)
    ## Runs the runnable.
    # This requires no arguments and returns a generator yielding any number of tuples,
    # each having a CostModelEvaluation as the first element and a second element that
    # can be anything, meant only for manual inspection.
def run(self) -> Generator:
        raise NotImplementedError("Run function not implemented for runnable")
def __iter__(self):
return self.run()
    ## @return: Returns true if the runnable is a leaf runnable, meaning that it does not use (or thus need) any substages
    # to be able to yield a result. The Stage built by the final callable in list_of_callables must always have
    # is_leaf() == True, since that final element is constructed with an empty list_of_callables.
def is_leaf(self) -> bool:
return False
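# Hedged sketch (hypothetical class, for illustration only) of a minimal leaf stage
# honoring the contract above: is_leaf() is True, it receives an empty
# list_of_callables, and run() yields (cme, extra_info) tuples itself:
#
#     class ConstantStage(Stage):
#         def is_leaf(self) -> bool:
#             return True
#         def run(self):
#             yield self.kwargs.get("cme"), None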
## Not actually a Stage, as running it does return (not yields!) a list of results instead of a generator
# Can be used as the main entry point
class MainStage:
def __init__(self, list_of_callables, **kwargs):
self.kwargs = kwargs
self.list_of_callables = list_of_callables
def run(self):
answers = []
for cme, extra_info in self.list_of_callables[0](
self.list_of_callables[1:], **self.kwargs
).run():
answers.append((cme, extra_info))
return answers | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/stages/Stage.py | Stage.py |
import importlib
from zigzag.classes.io.accelerator.parser import AcceleratorParser
from zigzag.classes.stages.Stage import Stage
from zigzag.classes.workload.dnn_workload import DNNWorkload
from zigzag.utils import pickle_deepcopy
import logging
logger = logging.getLogger(__name__)
## Stage that parses the accelerator input into an Accelerator object for the substages.
class AcceleratorParserStage(Stage):
def __init__(self, list_of_callables, *, accelerator, **kwargs):
super().__init__(list_of_callables, **kwargs)
self.accelerator_parser = AcceleratorParser(accelerator)
def run(self):
self.accelerator_parser.run()
accelerator = self.accelerator_parser.get_accelerator()
sub_stage = self.list_of_callables[0](self.list_of_callables[1:], accelerator=accelerator, **self.kwargs)
for cme, extra_info in sub_stage.run():
yield cme, extra_info
## @ingroup Stages
## Parse the input workload residing in workload_path.
## The "workload" dict is converted to a NetworkX graph.
## @param workload
## @param mapping
def parse_workload_from_path_or_from_module(workload, mapping):
if isinstance(workload, str): # load from path
module = importlib.import_module(workload)
workload = module.workload
if isinstance(mapping, str): # load from path
module = importlib.import_module(mapping)
mapping = module.mapping
    # make a copy here to prevent it from being changed in the following stages
workload_copy = pickle_deepcopy(workload)
workload = DNNWorkload(workload_copy, mapping)
logger.info(
f"Created workload graph with {workload.number_of_nodes()} nodes and {workload.number_of_edges()} edges.")
return workload
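# Usage sketch (the dotted paths below are placeholders, not shipped modules): both
# arguments accept either an importable module path or the already-loaded object.
#
#     workload_graph = parse_workload_from_path_or_from_module(
#         "my_inputs.workloads.resnet18",  # hypothetical module exposing `workload`
#         "my_inputs.mappings.default",    # hypothetical module exposing `mapping`
#     )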
## Stage that parses the workload and mapping inputs into a DNNWorkload graph for the substages.
class WorkloadParserStage(Stage):
def __init__(self, list_of_callables, *, workload, mapping, **kwargs):
super().__init__(list_of_callables, **kwargs)
self.workload = workload
self.mapping = mapping
def run(self):
workload = parse_workload_from_path_or_from_module(self.workload, self.mapping)
sub_stage = self.list_of_callables[0](self.list_of_callables[1:], workload=workload, **self.kwargs)
for cme, extra_info in sub_stage.run():
yield cme, extra_info | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/stages/MainInputParserStages.py | MainInputParserStages.py |
import logging
import numpy as np
from zigzag.classes.mapping.spatial.spatial_mapping import SpatialMapping
from zigzag.classes.stages.Stage import Stage
logger = logging.getLogger(__name__)
## Pipeline stage that converts the spatial mapping from a
# user-provided spatial mapping across operational array dimensions
# to the internal spatial mapping representation used in the cost model.
class SpatialMappingConversionStage(Stage):
## The class constructor
# Initialize the accelerator and layer attributes.
def __init__(self, list_of_callables, *, accelerator, layer, **kwargs):
super().__init__(list_of_callables, **kwargs)
self.check_layer(layer) # raise ValueError in case anything is wrong
self.layer = layer
self.accelerator = accelerator
@staticmethod
## Check the layer attribute of the main_inputs:
#
# check that the layer includes:
# - the core which it is allocated to
# - the user-defined spatial mapping
#
# If not, a ValueError is raised.
# @return: True
def check_layer(layer):
if not isinstance(layer.core_allocation, int):
logger.critical(f"Layer {layer} has no core allocation.")
raise ValueError(f"Missing core allocation for {layer}.")
if not layer.user_spatial_mapping:
logger.critical(f"Layer {layer} has no user-defined spatial mapping.")
            raise ValueError(
                f"Missing spatial mapping for {layer}. Please provide 'spatial_mapping' for {layer}."
            )
return True
def run(self):
user_spatial_mapping = self.layer.user_spatial_mapping
spatial_mapping = self.convert_user_spatial_mapping(user_spatial_mapping)
kwargs = self.kwargs.copy()
kwargs["spatial_mapping"] = spatial_mapping
kwargs["accelerator"] = self.accelerator
kwargs["layer"] = self.layer
sub_stage = self.list_of_callables[0](self.list_of_callables[1:], **kwargs)
for cme, extra_info in sub_stage.run():
yield cme, extra_info
## Convert the user-defined spatial mapping across operational array dimensions
# to the internal SpatialMapping representation.
# For this conversion we need to know:
# - the user defined spatial mapping
# - the core (i.e. operational array) on which the unrolling happens,
# and the memory hierarchy that is connected to that operational array.
# @param user_spatial_mapping: The user-defined spatial mapping to be converted.
# @returns: A SpatialMapping object with the converted spatial mapping.
def convert_user_spatial_mapping(self, user_spatial_mapping):
# Adjust the user defined spatial mapping size based on the operational array dimension and the layer dimension:
        # E.g. user-provided unrolling is 16 but operational array dimension size is only 12: change unrolling to 12
# E.g. user-provided unrolling is 16 but layer dimension is only 12: change unrolling to 12
# E.g. user-provided unrolling is 16 but layer dimension is not a multiple of 16: change unrolling to fractional number
# so that the temporal remainder is an integer.
core_id = self.layer.core_allocation
core = self.accelerator.get_core(core_id)
mem_hierarchy = core.memory_hierarchy
oa_dims = core.operational_array.dimensions
layer_dim_sizes = self.layer.loop_dim_size.copy()
limited_user_spatial_mapping = {} # init dict we will be filling
for oa_dim_name, spatial_loop in user_spatial_mapping.items():
(loop_dim_unrolled, loop_size_unrolled) = spatial_loop
# Check 0: Skip this spatial dimension if it doesn't exist in the layer
if loop_dim_unrolled not in layer_dim_sizes.keys():
continue
# Check 1: Limit unrolling if operational array dimension is smaller than provided unrolling
oa_dim_size = next(
(oa_dim for oa_dim in oa_dims if oa_dim.name == oa_dim_name)
).size
loop_size_unrolled = min(oa_dim_size, loop_size_unrolled)
# Check 2: Limit unrolling if layer dimension is smaller than provided unrolling or if the loop dim doesn't exist
layer_dim_size = layer_dim_sizes.get(loop_dim_unrolled, 1)
loop_size_unrolled = min(layer_dim_size, loop_size_unrolled)
# Check 3: Adjust unrolling if it is not a multiple of the layer dimension size
temporal_remainder = int(np.ceil(layer_dim_size / loop_size_unrolled))
loop_size_unrolled = layer_dim_size / temporal_remainder
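            # Worked example (hedged): a user unrolling of 16 on a layer dim of size 28
            # gives temporal_remainder = ceil(28 / 16) = 2, so the unrolling becomes
            # 28 / 2 = 14.0 and the remaining temporal loop is an integer factor.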
            # Set the adjusted unrolling size in the limited spatial mapping dict
limited_user_spatial_mapping[oa_dim_name] = (
loop_dim_unrolled,
loop_size_unrolled,
)
# Update the layer_dim_size to support multiple oa dims unrolling the same loop dim but not unrolling it more than the total layer dim
if (
temporal_remainder == 1
): # Remove it from the dict if we have unrolled the entirely layer dim onto the array dimension(s)
del layer_dim_sizes[loop_dim_unrolled]
else: # Update the dict if we have some layer dims left to potentially unroll onto the next oa dims
layer_dim_sizes[loop_dim_unrolled] = temporal_remainder
user_spatial_mapping_for_log = {
array_dim: (loop_dim, f"{loop_size:.2f}")
for (
array_dim,
(loop_dim, loop_size),
) in limited_user_spatial_mapping.items()
}
logger.debug(
f"User-provided spatial mapping converted to: {user_spatial_mapping_for_log}"
)
spatial_mapping_dict = {}
layer_to_mem_op = self.layer.memory_operand_links
mem_to_layer_op = {
mem_op: layer_op for (layer_op, mem_op) in layer_to_mem_op.items()
}
core_id = self.layer.core_allocation
mem_hierarchy = self.accelerator.get_core(core_id).memory_hierarchy
for mem_op, layer_op in mem_to_layer_op.items():
user_sm_copy = limited_user_spatial_mapping.copy()
# layer_op = mem_to_layer_op[mem_op]
spatial_mapping_dict[layer_op] = []
memory_levels = mem_hierarchy.get_memory_levels(
mem_op,
)
for memory_level in memory_levels:
spatial_mapping_lvl = []
served_dimensions = memory_level.served_dimensions
for dimension in served_dimensions:
dim_name = dimension.name
if dim_name in user_sm_copy:
# The dimension name is present in the user defined spatial mapping
# Add the spatial loop of this dimension to the spatial mapping
spatial_loop = user_sm_copy[dim_name]
spatial_mapping_lvl.append(spatial_loop)
# Then remove this dim_name and spatial loop key value pair from the dict
# as the spatial mapping representation is a level-by-level one.
del user_sm_copy[dim_name]
spatial_mapping_dict[layer_op].append(spatial_mapping_lvl)
# After we have gone through the memory levels, if there are still user-defined dimensions
# present, add them as the top level. Otherwise add an empty list to make arch levels correct:
# because first list we added was the operational array level.
top_level_spatial_mapping = [
spatial_loop for (dim_name, spatial_loop) in user_sm_copy.items()
]
spatial_mapping_dict[layer_op].append(top_level_spatial_mapping)
return SpatialMapping(
spatial_mapping_dict=spatial_mapping_dict, layer_node=self.layer
) | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/stages/SpatialMappingConversionStage.py | SpatialMappingConversionStage.py |
from math import ceil
from zigzag.classes.hardware.architecture.accelerator import Accelerator
from zigzag.classes.hardware.architecture.core import Core
from zigzag.classes.hardware.architecture.memory_hierarchy import MemoryHierarchy
from zigzag.classes.hardware.architecture.operational_array import OperationalArray
from zigzag.utils import pickle_deepcopy
from zigzag.classes.stages.Stage import Stage
import logging
logger = logging.getLogger(__name__)
## This stage scales the PE array of the given accelerator.
## Because the user-defined spatial mappings reside in the individual workload layer nodes,
## we also have to modify those so they scale accordingly.
class PEArrayScalingStage(Stage):
def __init__(
self, list_of_callables, *, workload, accelerator, pe_array_scaling, **kwargs
):
super().__init__(list_of_callables, **kwargs)
## SANITY CHECKS
# Only allow scaling factors that are a power of 2
assert pe_array_scaling in [2**i for i in range(-3, 3)]
# Make sure there's only one core so that the correct one is scaled
# If your accelerator has more cores, modify the function below
assert len(accelerator.cores) == 1
self.workload = workload
self.accelerator = accelerator
self.pe_array_scaling = pe_array_scaling
def run(self):
scaled_accelerator = self.generate_scaled_accelerator()
modified_workload = self.scale_workload_spatial_mapping()
sub_stage = self.list_of_callables[0](
self.list_of_callables[1:],
workload=modified_workload,
accelerator=scaled_accelerator,
**self.kwargs,
)
for cme, extra_info in sub_stage.run():
yield cme, extra_info
def generate_scaled_accelerator(self):
"""
Recreate the Accelerator with PE array dimension scaling in all dimensions.
The elements required for this recreation are:
- accelerator
- name
- cores
- operational array
- operational unit
- dimension sizes
- memory hierarchy
- name
- memory levels
- memory instance
- operands
- port allocation
- served dimensions
"""
# Get the relevant accelerator attributes
core = next(iter(self.accelerator.cores))
operational_array = core.operational_array
operational_unit = operational_array.unit
dimension_sizes = operational_array.dimension_sizes
memory_hierarchy = core.memory_hierarchy
# Create new operational array
new_operational_unit = pickle_deepcopy(operational_unit)
new_dimension_sizes = [
ceil(self.pe_array_scaling * dim_size) for dim_size in dimension_sizes
]
new_dimensions = {
f"D{i}": new_dim_size
for i, new_dim_size in enumerate(new_dimension_sizes, start=1)
}
new_operational_array = OperationalArray(new_operational_unit, new_dimensions)
# Initialize the new memory hierarchy
mh_name = memory_hierarchy.name
new_mh_name = mh_name + "-scaled"
new_memory_hierarchy = MemoryHierarchy(new_operational_array, new_mh_name)
# Add memories to the new memory hierarchy with the correct attributes
for memory_level in memory_hierarchy.mem_level_list:
memory_instance = memory_level.memory_instance
operands = tuple(memory_level.operands)
port_alloc = memory_level.port_alloc_raw
served_dimensions_vec = memory_level.served_dimensions_vec
assert len(served_dimensions_vec) >= 1
served_dimensions = served_dimensions_vec[0]
new_memory_instance = pickle_deepcopy(memory_instance)
new_operands = pickle_deepcopy(operands)
new_port_alloc = pickle_deepcopy(port_alloc)
new_served_dimensions = pickle_deepcopy(served_dimensions)
new_memory_hierarchy.add_memory(
memory_instance=new_memory_instance,
operands=new_operands,
port_alloc=new_port_alloc,
served_dimensions=new_served_dimensions,
)
# Create the new core
id = core.id
dataflows = core.dataflows
if dataflows is not None:
raise NotImplementedError(
"Scale your core-defined dataflows accordingly here."
)
new_id = id
new_dataflows = pickle_deepcopy(dataflows)
new_core = Core(
id=new_id,
operational_array=new_operational_array,
memory_hierarchy=new_memory_hierarchy,
dataflows=new_dataflows,
)
# Create the new accelerator
name = self.accelerator.name
new_name = name + "-scaled"
new_cores = {new_core}
new_accelerator = Accelerator(
name=new_name,
core_set=new_cores,
)
return new_accelerator
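    # Illustrative effect (hedged, following the code above): with pe_array_scaling = 2
    # and original dimension_sizes [14, 14], the scaled array is built with
    # new_dimensions = {"D1": 28, "D2": 28}.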
def scale_workload_spatial_mapping(self):
"""
Scale the user-defined mappings for each layer.
"""
modified_workload = pickle_deepcopy(self.workload)
for node in modified_workload.nodes():
if hasattr(node, "user_spatial_mapping") and node.user_spatial_mapping:
for array_dim, (layer_dim, size) in node.user_spatial_mapping.items():
if size != 1:
node.user_spatial_mapping[array_dim] = (
layer_dim,
self.pe_array_scaling * size,
)
return modified_workload | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/stages/PEArrayScalingStage.py | PEArrayScalingStage.py |
import logging
from zigzag.classes.opt.temporal.loma.memory_allocator import MemoryAllocator
from zigzag.classes.stages.Stage import Stage
logger = logging.getLogger(__name__)
## Stage that converts a user-defined temporal loop ordering into the memory-level based temporal mapping representation.
class TemporalOrderingConversionStage(Stage):
## The class constructor
# Initialize the accelerator and layer attributes.
def __init__(
self, list_of_callables, *, accelerator, layer, spatial_mapping, **kwargs
):
super().__init__(list_of_callables, **kwargs)
self.check_layer(layer)
self.layer = layer
self.spatial_mapping = spatial_mapping
self.accelerator = accelerator
@staticmethod
## Check the layer attribute of the main_inputs:
#
# check that the layer includes:
# - the core which it is allocated to
# - the user-defined spatial mapping
#
# If not, a ValueError is raised.
#
# @return: True
def check_layer(layer):
        if layer.core_allocation is None:
            logger.critical(f"Layer {layer} has no core allocation.")
            raise ValueError(f"Missing core allocation for {layer}.")
if not layer.user_temporal_ordering:
logger.critical(f"Layer {layer} has no user-defined temporal ordering.")
raise ValueError(
f"Layer {layer} has no user-defined temporal ordering. Use LomaStage to generate automatically."
)
return True
## Run this stage by converting the user-defined temporal loop ordering
# to the memory-level based temporal mapping representation.
def run(self):
temporal_mapping = self.convert_user_temporal_mapping(
self.layer.user_temporal_ordering
)
kwargs = self.kwargs.copy()
kwargs["temporal_mapping"] = temporal_mapping
kwargs["spatial_mapping"] = self.spatial_mapping
kwargs["layer"] = self.layer
kwargs["accelerator"] = self.accelerator
substage = self.list_of_callables[0](self.list_of_callables[1:], **kwargs)
for cme, extra_info in substage.run():
yield cme, extra_info
def convert_user_temporal_mapping(self, user_temporal_mapping):
spatial_mapping = self.spatial_mapping
layer = self.layer
layer_dim_sizes = layer.loop_dim_size
for i, utm in list(enumerate(user_temporal_mapping))[::-1]:
if utm[0] not in layer_dim_sizes:
logger.warning(
f"Supplied temporal ordering {utm} for layer {layer} thrown out because loop not present in the layer"
)
del user_temporal_mapping[i]
# I don't think this is actually necessary to check:
# If a dimension is fully unrolled spatially it doesn't have to be present in temporal ordering.
# for d in layer_dim_sizes:
# if d not in [utm[0] for utm in user_temporal_mapping]:
# logger.error(f"Supplied temporal ordering for layer {layer} is missing dimension {d}")
# raise ValueError(f"Supplied temporal ordering for layer {layer} is missing dimension {d}")
converted_mapping = []
for dim, size in user_temporal_mapping:
if size == "all":
size = layer_dim_sizes[dim]
size_already = 1
for dim_already, size_already_sub in (
converted_mapping + spatial_mapping.spatial_loop_dim_size
):
if dim_already == dim:
size_already *= size_already_sub
size //= size_already
converted_mapping.append((dim, size))
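        # Worked example (hedged): with layer dim K = 32, a spatial unrolling (K, 4)
        # already present, and a user entry ("K", "all"), size becomes 32, is floor-divided
        # by the already-mapped factor 4, and a temporal loop ("K", 8) is appended.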
allocator = MemoryAllocator(
self.accelerator, layer, spatial_mapping, converted_mapping
)
temporal_mapping = allocator.run() # allocate this ordering to the memories
return temporal_mapping | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/stages/TemporalOrderingConversionStage.py | TemporalOrderingConversionStage.py |
from typing import Generator, Tuple, Any
from zigzag.classes.stages.Stage import Stage
from zigzag.classes.cost_model.cost_model import CostModelEvaluation
import logging
import os
logger = logging.getLogger(__name__)
## Strips extra info for subcallables to save memory
class RemoveExtraInfoStage(Stage):
## The class constructor
# Initialize the remove extra info stage.
def __init__(self, list_of_callables, **kwargs):
super().__init__(list_of_callables, **kwargs)
## Run the remove extra info stage by running the substage and discarding the extra_info.
def run(self) -> Generator[Tuple[CostModelEvaluation, Any], None, None]:
sub_list_of_callables = self.list_of_callables[1:]
substage = self.list_of_callables[0](sub_list_of_callables, **self.kwargs)
for cme, extra_info in substage.run():
yield cme, None
## Caches results in a list and then yields them.
# This breaks the yield flow from top to bottom.
class CacheBeforeYieldStage(Stage):
## The class constructor
# Initialize the cache before yield stage.
def __init__(self, list_of_callables, **kwargs):
super().__init__(list_of_callables, **kwargs)
## Run the cache before yield stage by running the substage and caching everything it yields, then yielding everything.
def run(self) -> Generator[Tuple[CostModelEvaluation, Any], None, None]:
sub_list_of_callables = self.list_of_callables[1:]
substage = self.list_of_callables[0](sub_list_of_callables, **self.kwargs)
to_yield = []
for ty in substage.run():
to_yield.append(ty)
for ty in to_yield:
yield ty
## Check if the output file is already generated, skip the run if so.
class SkipIfDumpExistsStage(Stage):
## The class constructor
def __init__(self, list_of_callables, *, dump_filename_pattern, **kwargs):
super().__init__(list_of_callables, **kwargs)
self.dump_filename_pattern = dump_filename_pattern
def run(self):
filename = self.dump_filename_pattern.format(**self.kwargs)
if os.path.isfile(filename):
print(
f"==================================Dump {filename} already existed. Skip! =================================="
)
return
substage = self.list_of_callables[0](
self.list_of_callables[1:],
dump_filename_pattern=self.dump_filename_pattern,
**self.kwargs,
)
for cme, extra in substage.run():
yield cme, extra
import multiprocessing
threadpool = None
def get_threadpool(nb_threads_if_non_existent):
global threadpool
if threadpool is None:
threadpool = multiprocessing.Pool(nb_threads_if_non_existent)
return threadpool
def close_threadpool():
global threadpool
threadpool.close()
threadpool = None
def terminate_threadpool():
global threadpool
threadpool.terminate()
threadpool = None
def raise_exception(e):
terminate_threadpool()
raise e
## Multiprocessing support stage.
#
# Warning: does not yield (CostModelEvaluation, extra_info) pairs.
#
# Use as follows in a list_of_callables:
# - [..., ..., MultiProcessingGatherStage, some stage(s) that loop over stuff and just yield (cme, extra_info) pairs
# every iteration without postprocessing it, MultiProcessingSpawnStage, ..., ...]
#
# Note: list of callables may not contain lambda functions, as this will break the
# pickling required by multiprocessing
#
# Note: there is quite some overhead in spawning these parallel processes (Python needs
# to copy all variables through pickle), so it is best to do this at some high-level loop (early in the list of callables)
class MultiProcessingSpawnStage(Stage):
## The class constructor
    # @param list_of_callables: may not contain lambda functions, as this will break the
    # pickling required by multiprocessing.
# @param multiprocessing_callback: intended to be set by MultiProcessingGatherStage
# @param kwargs:
def __init__(
self,
list_of_callables,
*,
multiprocessing_callback,
nb_multiprocessing_threads=multiprocessing.cpu_count(),
**kwargs,
):
super().__init__(list_of_callables, **kwargs)
self.nb_multiprocessing_threads = nb_multiprocessing_threads
self.callback = multiprocessing_callback
def _to_run(self):
return list(self.sub_stage.run())
def run(self):
self.sub_stage = self.list_of_callables[0](
self.list_of_callables[1:], **self.kwargs
)
get_threadpool(self.nb_multiprocessing_threads).apply_async(
self._to_run, callback=self.callback, error_callback=raise_exception
)
yield None, None
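# Hedged composition sketch (stage order illustrative, using stages exported by this
# package): the gather stage sits above the looping stage(s) and the spawn stage sits
# right below them, e.g.
#
#     [WorkloadParserStage, MultiProcessingGatherStage, WorkloadStage,
#      MultiProcessingSpawnStage, SpatialMappingGeneratorStage, LomaStage, CostModelStage]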
## Multiprocessing support stage.
#
# Use as follows in a list_of_callables:
# - [..., ..., MultiProcessingGatherStage, some stage(s) that loop over stuff and just yield (cme, extra_info) pairs
# every iteration without postprocessing it, MultiProcessingSpawnStage, ..., ...]
#
# Note: list of callables may not contain lambda functions, as this will break the
# pickling required by multiprocessing
class MultiProcessingGatherStage(Stage):
def _callback(self, ans):
self.queue.put(ans)
def run(self):
self.queue = multiprocessing.Manager().Queue()
kwargs = self.kwargs.copy()
kwargs["multiprocessing_callback"] = self._callback
sub_stage = self.list_of_callables[0](self.list_of_callables[1:], **kwargs)
count_to_get = 0
for i in sub_stage.run():
count_to_get += 1
logger.info(f"Multiprocessing results to get: {count_to_get}")
count = 0
while count < count_to_get:
for ans in self.queue.get(block=True):
yield ans
count += 1
                if count_to_get >= 10 and count % (count_to_get // 10) == 0:
                    logger.info(
                        f"Multiprocessing results received: {count} of {count_to_get}"
                    )
close_threadpool() | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/stages/RunOptStages.py | RunOptStages.py |
from .CostModelStage import CostModelStage
from .DumpStage import DumpStage
from .PEArrayScalingStage import PEArrayScalingStage
from .PlotTemporalMappingsStage import PlotTemporalMappingsStage
from .SaveStage import CompleteSaveStage, SimpleSaveStage, PickleSaveStage
from .GeneralParameterIteratorStage import GeneralParameterIteratorStage
from .LomaStage import LomaStage
from .SalsaStage import SalsaStage
from .MainInputParserStages import AcceleratorParserStage, WorkloadParserStage
from .ONNXModelParserStage import ONNXModelParserStage
from .ReduceStages import (
MinimalEnergyStage,
MinimalLatencyStage,
MinimalEDPStage,
SumStage,
)
from .RunOptStages import (
CacheBeforeYieldStage,
RemoveExtraInfoStage,
MultiProcessingGatherStage,
MultiProcessingSpawnStage,
SkipIfDumpExistsStage,
)
from .SpatialMappingConversionStage import SpatialMappingConversionStage
from .SpatialMappingGeneratorStage import SpatialMappingGeneratorStage
from .Stage import Stage, MainStage
from .TemporalOrderingConversionStage import TemporalOrderingConversionStage
from .WorkloadStage import WorkloadStage
# Parameter providers: these parameters are provided to substages by the following classes:
# - accelerator: AcceleratorParserStage
# - workload: WorkloadParserStage
# - temporal_mapping: LomaStage, TemporalOrderingConversionStage
# - spatial_mapping: SpatialMappingGeneratorStage, SpatialMappingConversionStage
# - layer: WorkloadStage
# - multiprocessing_callback: MultiProcessingGatherStage
# - *: GeneralParameterIteratorStage: can provide anything
# Parameter consumers: these parameters are no longer provided to substages after the following classes
# - accelerator_path: AcceleratorParserStage
# - dump_filename_pattern: DumpStage
# - plot_filename_pattern: PlotTemporalMappingsStage
# - general_parameter_iterations: GeneralParameterIteratorStage
# - multiprocessing_callback: MultiProcessingSpawnStage
# - workload: WorkloadStage
# - workload_path: WorkloadParserStage
# Parameters required: these stages require the following parameters:
# - CostModelStage: accelerator, layer, spatial_mapping, temporal_mapping
# - WorkloadStage: workload
# - DumpStage: dump_filename_pattern
# - PlotTemporalMappingsStage: plot_filename_pattern
# - GeneralParameterIteratorStage: general_parameter_iterations
# - LomaStage: accelerator, layer, spatial_mapping
# - AcceleratorParserStage: accelerator_path
# - WorkloadParserStage: workload_path
# - MultiProcessingSpawnStage: multiprocessing_callback
# - SpatialMappingConversionStage: accelerator, layer
# - SpatialMappingGeneratorStage: accelerator, layer
# - TemporalOrderingConversionStage: accelerator, layer, spatial_mapping
# - SkipIfDumpExistsStage: dump_filename_pattern | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/stages/__init__.py | __init__.py
import multiprocessing_on_dill as multiprocessing
from sympy.ntheory import factorint
from copy import deepcopy
import logging
from zigzag.classes.hardware.architecture.accelerator import Accelerator
from zigzag.classes.mapping.spatial.spatial_mapping import SpatialMapping
from zigzag.classes.opt.temporal.salsa.engine import SalsaEngine
from zigzag.classes.workload.layer_node import LayerNode
from typing import Generator, Callable, List, Tuple, Any
from zigzag.classes.stages.Stage import Stage
from zigzag.classes.cost_model.cost_model import CostModelEvaluation
from zigzag.classes.stages.ReduceStages import MinimalEnergyStage
from zigzag.classes.stages.ReduceStages import MinimalLatencyStage
logger = logging.getLogger(__name__)
## Class that returns the best temporal mapping found by the Simulated Annealing
# Loop-ordering Scheduler for Accelerators (SALSA) for a single layer.
class SalsaStage(Stage):
## The class constructor
# Initialize the SalsaStage by setting the accelerator, layer, and spatial mapping.
# @param list_of_callables (List[Callable]): List of substages to call with each generated temporal mapping.
# @param accelerator (Accelerator): The accelerator object.
# @param layer (Layer): The layer object.
# @param spatial_mapping (SpatialMapping): The spatial mapping object.
def __init__(
self,
list_of_callables: List[Callable],
*,
accelerator,
layer,
spatial_mapping,
**kwargs,
):
super().__init__(list_of_callables, **kwargs)
self.accelerator, self.layer, self.spatial_mapping = (
accelerator,
layer,
spatial_mapping,
)
self.engine = None
self.best_cme = None
self.opt_criterion_name = kwargs.get("salsa_opt_criterion", "energy")
self.number_of_core_allocated = kwargs.get("salsa_number_of_core", 1)
# Multiprocessing parameters
self.worker_list = []
self.cme_queue = multiprocessing.Queue()
if self.opt_criterion_name == "energy":
self.compare_stage = self.compare_cme_energy
elif self.opt_criterion_name == "latency":
self.compare_stage = self.compare_cme_latency
else:
raise Exception(
"Invalid optimization criterion for SALSA. Must be either 'energy' or 'latency'."
)
## Set up and start salsa engine, then collect and return the best cost model evaluation
def run(self):
logger.info(
f"Running SALSA Temporal Mapping Optimizer with {self.number_of_core_allocated} core(s)."
)
self.engine = SalsaEngine(
accelerator=self.accelerator,
layer=self.layer,
spatial_mapping=self.spatial_mapping,
**self.kwargs,
)
# self.best_cme = self.engine.run(self.cme_queue)
# Get the number of core the user wants to allocate
if self.number_of_core_allocated <= multiprocessing.cpu_count():
self.number_of_core = self.number_of_core_allocated
else:
self.number_of_core = multiprocessing.cpu_count()
# Create processes
for core_id in range(0, self.number_of_core):
p = multiprocessing.Process(target=self.engine.run, args=(self.cme_queue,))
self.worker_list.append(p)
# Start the processes
for core_id in range(0, self.number_of_core):
logger.debug(f"Starting SALSA Process #{core_id}.")
self.worker_list[core_id].start()
        # For every core we gather the output
for core_id in range(0, self.number_of_core):
cme = self.cme_queue.get()
self.compare_stage(cme)
# Then join them to make sure they all end before continuing the execution
for core_id in range(0, self.number_of_core):
self.worker_list[core_id].join()
kwargs = self.kwargs.copy()
kwargs["accelerator"] = self.accelerator
kwargs["layer"] = self.layer
kwargs["spatial_mapping"] = self.spatial_mapping
kwargs["temporal_mapping"] = self.best_cme.mapping.temporal_mapping
sub_stage = self.list_of_callables[0](self.list_of_callables[1:], **kwargs)
for cme, extra_info in sub_stage.run():
yield cme, (self.best_cme.mapping.temporal_mapping, extra_info)
## Compare the latency of the current cost model evaluation with the best latency found so far.
# Then replace the current best cme if the current cme has a lower latency.
def compare_cme_latency(self, cme):
if self.best_cme is None:
self.best_cme = cme
elif (
cme.latency_total2 == self.best_cme.latency_total2
and cme.energy_total < self.best_cme.energy_total
):
self.best_cme = cme
elif cme.latency_total2 < self.best_cme.latency_total2:
self.best_cme = cme
## Compare the energy of the current cost model evaluation with the best energy found so far.
# Then replace the best cme if the current cme has a lower energy.
def compare_cme_energy(self, cme):
if self.best_cme is None:
self.best_cme = cme
elif (
cme.energy_total == self.best_cme.energy_total
and cme.latency_total2 < self.best_cme.latency_total2
):
self.best_cme = cme
elif cme.energy_total < self.best_cme.energy_total:
self.best_cme = cme | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/stages/SalsaStage.py | SalsaStage.py |
import logging
from zigzag.classes.opt.spatial.generator import UserSpatialMappingGenerator
from zigzag.classes.stages.Stage import Stage
from zigzag.classes.stages.SpatialMappingConversionStage import (
SpatialMappingConversionStage,
)
import copy
logger = logging.getLogger(__name__)
## Pipeline stage that finds spatial mappings given a:
# - accelerator
# - core allocation
# - interconnection pattern on the allocated core
# - layer
#
# The spatial mappings are found using the interconnection pattern present on the core.
#
# The inner-most memory level served dimensions is used,
# as this is how the memories connect to the operational array.
class SpatialMappingGeneratorStage(Stage):
## The class constructor
    # Note: list_of_callables does NOT need to include SpatialMappingConversionStage;
    # it is instantiated and run automatically by this stage.
def __init__(self, list_of_callables, *, accelerator, layer, **kwargs):
super().__init__(list_of_callables, **kwargs)
self.accelerator = accelerator
self.check_layer(layer)
self.layer = layer
@staticmethod
# Check that the layer includes:
# - the core which it is allocated to
#
# If not, a ValueError is raised.
#
# If the layer in main_inputs is not set, False is returned
#
# @return: True if layer is set correctly
def check_layer(layer):
if layer is None:
            raise ValueError("No layer node was provided.")
if layer.core_allocation is None:
logger.critical(f"Layer {layer} has no core allocation.")
            raise ValueError(f"Missing core allocation for {layer}.")
return True
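    # Accepted user_spatial_mapping formats (hedged example values): a single dict
    # mapping array dims to (layer dim, unrolling) pairs, e.g. {"D1": ("K", 32), "D2": ("C", 32)},
    # a list of such dicts (multiple candidates), or None to auto-generate them in run().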
## Run this stage by generating user-formatted spatial mappings which are converted
# to the memory-level based spatial mapping representation.
def run(self):
user_provided_spatial_mappings = self.layer.user_spatial_mapping
if isinstance(
user_provided_spatial_mappings, dict
): # There is a single USM provided
user_spatial_mappings = [user_provided_spatial_mappings]
elif isinstance(
user_provided_spatial_mappings, list
): # There are multiple USMs provided
user_spatial_mappings = user_provided_spatial_mappings
else: # There is no USM provided
# Initialize the UserSpatialMappingGenerator which will automatically generate SMs
user_spatial_mapping_generator = UserSpatialMappingGenerator(
self.layer, self.accelerator
)
# Get all the USMs by running the generator
user_spatial_mappings = list(
(usm for usm in user_spatial_mapping_generator.run())
)
logger.debug(f"No user-provided spatial mappings found. Auto-generating..")
nb_user_spatial_mappings = len(user_spatial_mappings)
for i, user_spatial_mapping in enumerate(user_spatial_mappings):
logger.info(
f"Launching spatial mapping {i+1}/{nb_user_spatial_mappings}: {user_spatial_mapping}."
)
# Set the user_spatial_mapping in the layer, as this is required by SpatialMappingConversionStage
self.layer.user_spatial_mapping = user_spatial_mapping
# Note: manual instantiation of spatial mapping conversion stage here. We let that class deal with
# everything else, including instantion of the actual substages
spatial_mapping_conversion_stage = SpatialMappingConversionStage(
self.list_of_callables,
accelerator=self.accelerator,
layer=copy.copy(self.layer),
**self.kwargs,
)
for cme, extra_info in spatial_mapping_conversion_stage.run():
yield cme, (user_spatial_mapping, extra_info) | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/stages/SpatialMappingGeneratorStage.py | SpatialMappingGeneratorStage.py |
import logging
from typing import Generator, Callable, List, Tuple, Any
from zigzag.classes.stages.Stage import Stage
from zigzag.classes.cost_model.cost_model import CostModelEvaluation
logger = logging.getLogger(__name__)
## General iterator over any parameter whose values can be set from a predetermined list
class GeneralParameterIteratorStage(Stage):
## The class constructor
# @param list_of_callables: see Stage
# @param general_parameter_iterations: dictionary with:
# - keys: variables to iterate over, or tuples of variables to iterate over
# - With K1 and K2 both keys, all combinations of K1 values and K2 values are tried.
# - With K1 and K2 together in a tuple as key, their values are paired and the dictionary value
# must be a list (or other iterable) with tuples containing the values for K1 and K2
# - values: a list of values (single arg key) or a list of tuples of values (multi arg keys)
# @param kwargs: see Stage
def __init__(self, list_of_callables, *, general_parameter_iterations, **kwargs):
super().__init__(list_of_callables, **kwargs)
self.param_iters = general_parameter_iterations
def recursive_run(self, reduced_param_iters, runparams):
if reduced_param_iters:
key = next(iter(reduced_param_iters))
reduced_param_iters_reduced = reduced_param_iters.copy()
runparams = runparams.copy()
del reduced_param_iters_reduced[key]
for v in reduced_param_iters[key]:
if isinstance(key, (list, tuple)):
for kk, vv in zip(key, v):
runparams[kk] = vv
iterable = True
else:
runparams[key] = v
iterable = False
for cme, extra_info in self.recursive_run(
reduced_param_iters_reduced, runparams
):
yield cme, (
(tuple((kk, vv) for kk, vv in zip(key, v)) + extra_info[0],)
+ extra_info[1:]
if iterable
else (((key, v),) + extra_info[0],) + extra_info[1:]
)
else:
# trivial case, no more extra parameters to iterate over
sub_stage = self.list_of_callables[0](
self.list_of_callables[1:], **runparams
)
for cme, extra_info in sub_stage.run():
yield cme, (tuple(), extra_info)
def run(self):
return self.recursive_run(self.param_iters, self.kwargs)
# if __name__ == "__main__":
# class Dummy(Stage):
# def is_leaf(self):
# return True
# def run(self):
# yield None, self.kwargs
# from zigzag.classes.stages.Stage import MainStage
# DUT = MainStage(
# [GeneralParameterIteratorStage, Dummy],
# general_parameter_iterations={
# ("arg1.1", "arg1.2"): ((111, 121), (112, 122), (113, 123)),
# "arg2": (21, 22, 23, 24, 25),
# "arg3": (31, 32),
# },
# )
# for l in DUT.run():
# print(l) | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/stages/GeneralParameterIteratorStage.py | GeneralParameterIteratorStage.py |
from zigzag.classes.hardware.architecture.accelerator import Accelerator
from zigzag.classes.mapping.spatial.spatial_mapping import SpatialMapping
from zigzag.classes.opt.temporal.loma.engine import LomaEngine
from zigzag.classes.workload.layer_node import LayerNode
from typing import Generator, Callable, List, Tuple, Any
from zigzag.classes.stages.Stage import Stage
from zigzag.classes.cost_model.cost_model import CostModelEvaluation
## Class that iterates through the different temporal mappings generated through
# the loop order based memory allocation (loma) engine
class LomaStage(Stage):
## The class constructor
# Initialize the LomaStage by setting the accelerator, layer, and spatial mapping.
# @param list_of_callables (List[Callable]): List of substages to call with each generated temporal mapping.
# @param accelerator (Accelerator): The accelerator object.
# @param layer (Layer): The layer object.
# @param spatial_mapping (SpatialMapping): The spatial mapping object.
def __init__(
self,
list_of_callables: List[Callable],
*,
accelerator,
layer,
spatial_mapping,
**kwargs
):
super().__init__(list_of_callables, **kwargs)
self.accelerator, self.layer, self.spatial_mapping = (
accelerator,
layer,
spatial_mapping,
)
self.engine = None
def run(self):
self.engine = LomaEngine(
accelerator=self.accelerator,
layer=self.layer,
spatial_mapping=self.spatial_mapping,
**self.kwargs
)
for tm in self.engine.run():
kwargs = self.kwargs.copy()
kwargs["accelerator"] = self.accelerator
kwargs["layer"] = self.layer
kwargs["spatial_mapping"] = self.spatial_mapping
kwargs["temporal_mapping"] = tm
sub_stage = self.list_of_callables[0](self.list_of_callables[1:], **kwargs)
for cme, extra_info in sub_stage.run():
yield cme, (tm, extra_info) | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/stages/LomaStage.py | LomaStage.py |
from collections import defaultdict
from typing import Set, Tuple, List, Union
import networkx as nx
from networkx import DiGraph
from zigzag.classes.hardware.architecture.memory_instance import MemoryInstance
from zigzag.classes.hardware.architecture.memory_level import MemoryLevel
from zigzag.classes.hardware.architecture.operational_array import OperationalArray
## Class that represents a memory hierarchy as a directed networkx graph.
# The memory hierarchy graph is directed, with the root nodes representing the lowest level
# in the memory hierarchy.
class MemoryHierarchy(DiGraph):
## The class constructor
# Initialize the memory hierarchy graph.
# The initialization sets the operational array this memory hierarchy will connect to.
# The graph nodes are the given nodes. The edges are extracted from the operands the memory levels store.
# @param nodes: a list of MemoryLevels. Entries need to be provided from lowest to highest memory level.
def __init__(
self,
operational_array: OperationalArray,
name: str = "Memory Hierarchy",
**attr,
):
super().__init__(**attr)
self.name = name
self.operational_array = operational_array
        self.operands = set()  # Initialize the set that will store all memory operands
        self.nb_levels = {}  # Initialize the dict that will store how many memory levels an operand has
self.mem_level_list = []
self.memory_level_id = 0
## JSON Representation of this object to save it to a json file.
def __jsonrepr__(self):
return {"memory_levels": [node for node in nx.topological_sort(self)]}
def __eq__(self, __o: object) -> bool:
if not isinstance(__o, MemoryHierarchy):
return False
return all(
[self_ml == __o_ml for (self_ml, __o_ml) in zip(self.nodes(), __o.nodes())]
)
## Adds a memory to the memory hierarchy graph.
#
    # NOTE: memory levels need to be added from the bottom level (e.g., Reg) to the top level (e.g., DRAM) for each operand !!!
#
# Internally a MemoryLevel object is built, which represents the memory node.
#
# Edges are added from all sink nodes in the graph to this node if the memory operands match
# @param memory_instance: The MemoryInstance containing the different memory characteristics.
# @param operands: The memory operands the memory level stores.
# @param served_dimensions: The operational array dimensions this memory level serves.
# Each vector in the set is a direction that is served.
# Use 'all' to represent all dimensions (i.e. the memory level is not unrolled).
def add_memory(
self,
memory_instance: MemoryInstance,
operands: Tuple[str, ...],
port_alloc: Tuple[dict, ...] = None,
        served_dimensions: Union[Set, str] = "all",
):
if port_alloc is None:
# Define the standard port allocation scheme (this assumes one read port and one write port)
if not (
memory_instance.r_port == 1
and memory_instance.w_port == 1
and memory_instance.rw_port == 0
):
raise ValueError(
f"No port allocation was provided for memory level of instance {memory_instance} and doesn't match with standard port allocation generation of 1 read and 1 write port."
)
port_alloc = []
for operand in operands:
if operand == "O":
port_alloc.append(
{
"fh": "w_port_1",
"tl": "r_port_1",
"fl": "w_port_1",
"th": "r_port_1",
}
)
else:
port_alloc.append(
{"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None}
)
port_alloc = tuple(port_alloc)
# Assert that if served_dimensions is a string, it is "all"
if type(served_dimensions) == str:
assert (
served_dimensions == "all"
), "Served dimensions is a string, but is not all."
# Add the memory operands to the self.operands set attribute that stores all memory operands.
for mem_op in operands:
if mem_op not in self.operands:
self.nb_levels[mem_op] = 1
self.operands.add(mem_op)
else:
self.nb_levels[mem_op] += 1
self.operands.add(mem_op)
# Parse the served_dimensions by replicating it into a tuple for each memory operand
# as the MemoryLevel constructor expects this.
served_dimensions_repl = tuple(
[served_dimensions for _ in range(len(operands))]
)
# Compute which memory level this is for all the operands
mem_level_of_operands = {}
for operand in operands:
nb_levels_so_far = len(
[node for node in self.nodes() if operand in node.operands]
)
mem_level_of_operands[operand] = nb_levels_so_far
memory_level = MemoryLevel(
memory_instance=memory_instance,
operands=operands,
mem_level_of_operands=mem_level_of_operands,
port_alloc=port_alloc,
served_dimensions=served_dimensions_repl,
operational_array=self.operational_array,
id=self.memory_level_id,
)
self.mem_level_list.append(memory_level)
self.memory_level_id += 1
# Precompute appropriate edges
to_edge_from = set()
for mem_op in operands:
# Find top level memories of the operands
for m in self.get_operator_top_level(mem_op)[0]:
to_edge_from.add(m)
# Add the node to the graph
self.add_node(memory_level)
for sink_node in to_edge_from:
# Add an edge from this sink node to the current node
self.add_edge(sink_node, memory_level)
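    # Usage sketch (instance names are placeholders; port_alloc left to the default
    # 1R/1W allocation): levels are added bottom-up per operand, e.g.
    #
    #     memory_hierarchy.add_memory(rf_instance, operands=("I1",), served_dimensions=set())
    #     memory_hierarchy.add_memory(sram_instance, operands=("I1", "I2", "O"),
    #                                 served_dimensions="all")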
## Returns a list of memories in the memory hierarchy for the memory operand.
# The first entry in the returned list is the innermost memory level.
def get_memory_levels(self, mem_op: str):
# Sort the nodes topologically and filter out all memories that don't store mem_op
memories = [
node for node in nx.topological_sort(self) if mem_op in node.operands
]
return memories
## Returns all the memory operands this memory hierarchy graph contains as a set.
def get_operands(self):
return self.operands
## Returns the inner-most memory levels for all memory operands.
def get_inner_memories(self) -> List[MemoryLevel]:
memories = [node for node, in_degree in self.in_degree() if in_degree == 0]
return memories
## Returns the outer-most memory levels for all memory operands.
def get_outer_memories(self) -> List[MemoryLevel]:
memories = [node for node, out_degree in self.out_degree() if out_degree == 0]
return memories
## Returns the 'top'-most MemoryLevels, where 'the' level of MemoryLevel is considered to be the largest
# level it has across its assigned operands
# @return (list_of_memories_on_top_level, top_level)
def get_top_memories(self) -> Tuple[List[MemoryLevel], int]:
level_to_mems = defaultdict(lambda: [])
for node in self.nodes():
level_to_mems[max(node.mem_level_of_operands.values())].append(node)
top_level = max(level_to_mems.keys())
return level_to_mems[top_level], top_level
    ## Removes the top level of this memory hierarchy.
    # The level of a MemoryLevel instance is considered to be the largest level it has across its assigned operands;
    # the instances with the highest appearing level are removed from this MemoryHierarchy instance.
    # @return (removed_MemoryLevel_instances, new_number_of_levels_in_the_hierarchy)
def remove_top_level(self) -> Tuple[List[MemoryLevel], int]:
to_remove, top_level = self.get_top_memories()
for tr in to_remove:
self.mem_level_list.remove(tr)
self.remove_node(tr)
for k in self.nb_levels:
self.nb_levels[k] = len(
set(
node.mem_level_of_operands.get(k)
for node in self.nodes()
if k in node.mem_level_of_operands
)
)
        return to_remove, max(self.nb_levels.values())
    ## Finds the highest level of memories that have the given operand assigned to them, and returns the MemoryLevel
    # instances on this level that have the operand assigned.
    # The level of a MemoryLevel is considered to be the largest
    # level it has across its assigned operands.
# @param operand
# @return level_to_mems[top_level], top_level
def get_operator_top_level(self, operand) -> Tuple[List[MemoryLevel], int]:
level_to_mems = defaultdict(lambda: [])
for node in self.nodes():
            if operand in node.operands:
level_to_mems[max(node.mem_level_of_operands.values())].append(node)
top_level = max(level_to_mems.keys()) if level_to_mems else -1
return level_to_mems[top_level], top_level
    ## Finds the highest memory level that has the given operand assigned to it, and returns that MemoryLevel.
# @param operand
def get_operand_top_level(self, operand) -> MemoryLevel:
top_lv = self.nb_levels[operand] - 1
for mem in reversed(self.mem_level_list):
if operand in mem.mem_level_of_operands.keys():
if mem.mem_level_of_operands[operand] == top_lv:
return mem
raise ValueError(f"Operand {operand} not found in any of the memory instances.")
    ## Finds the highest level of memories that have the given operand assigned to them, and returns those MemoryLevel
    # instances AFTER removing the operand from their operands.
    # The level of a MemoryLevel is considered to be the largest
    # level it has across its assigned operands.
    # If a memory level has no operands left, it is removed altogether.
    # @param operand
    # @return (list of MemoryLevel instances that had the operand removed, new top level of the operand)
def remove_operator_top_level(self, operand):
to_remove, top_level = self.get_operator_top_level(operand)
served_dimensions = []
for tr in to_remove:
del tr.mem_level_of_operands[operand]
tr.operands.remove(operand)
for p in tr.port_list:
for so in p.served_op_lv_dir[:]:
if so[0] == operand:
p.served_op_lv_dir.remove(so)
if len(tr.mem_level_of_operands) == 0:
self.mem_level_list.remove(tr)
self.remove_node(tr)
for k in self.nb_levels:
self.nb_levels[k] = len(
set(
node.mem_level_of_operands.get(k)
for node in self.nodes()
if k in node.mem_level_of_operands
)
)
        return to_remove, self.nb_levels[operand]


# ==== zigzag/classes/hardware/architecture/adder_hierarchy.py ====
from typing import Dict, List
from math import log2, ceil, prod
from zigzag.classes.hardware.architecture.operational_array import (
Multiplier,
MultiplierArray,
)
## This class represents one single adder.
class Adder:
## The class constructor
    ## @param fan_in: the number of input data elements to be added together.
    ## @param unit_cost: energy of one addition.
    ## @param unit_area: area of one adder.
    ## @param input_precision: input data precision. If it is an 'int', the same precision is applied to all input data;
    # if it is a 'list', a different precision can be defined for each input.
    ## @param output_precision: output data precision.
def __init__(
self,
fan_in: int,
unit_cost: float,
unit_area: float,
input_precision: List[int] or int,
output_precision: int,
):
self.fan_in = fan_in
self.cost = unit_cost
self.area = unit_area
self.input_precision = input_precision
self.output_precision = output_precision
## Adder Level is the basic building block for Adder Hierarchy.
# It can be an array of aggregators (AG, addition over space) or accumulators (AC, addition over time).
class AdderLevel:
## The class constructor
# @param index: Adder Level index.
# @param name: Adder Level name's default format: 'ALi' (i = 1,2,3,...).
# @param details: Adder Level's type, fan-in, and so on.
def __init__(self, index: int, name: str, details: Dict[str, str or int]):
self.id = index
self.name = name
self.type = details["type"]
self.unit = Adder(
details["fan_in"],
details["unit_cost"],
details["unit_area"],
details["input_precision"],
details["output_precision"],
)
self.one_instance_unit_count = details["one_instance_unit_count"]
self.total_unit_count = details["total_unit_count"]
def __str__(self):
return self.name
def __repr__(self):
return str(self)
## Construct the AdderHierarchy class based on a user-defined adder hierarchy. It checks whether the user's definition is valid,
# and extracts all the related info, e.g. the unit count for each adder level and the total area.
class AdderHierarchy:
## The class constructor
    # @param adder_hierarchy: user-defined adder hierarchy. For an aggregation level (AG),
    # it should contain 'type', 'fan_in', 'unit_cost', 'unit_area';
    # for an accumulation level (AC), it should contain 'type', 'output_precision', 'unit_cost', 'unit_area'.
# @param multiplier_array: MultiplierArray object, check in "architecture/operational_array.py" for more info.
def __init__(
self,
adder_hierarchy: Dict[str, Dict[str, str or int]],
multiplier_array: MultiplierArray,
):
self.calc_output_reduction_size(multiplier_array)
self.assert_valid(adder_hierarchy)
multiplier_output_precision = multiplier_array.unit.output_precision
self.construct_adder_levels(multiplier_output_precision, adder_hierarchy)
        # Total area is the sum over all adder levels of (unit area x total unit count).
        self.total_area = sum(
            [
                adder_level.unit.area * adder_level.total_unit_count
                for adder_level in self.adder_levels
            ]
        )
    ## From the dimensions and operand_spatial_sharing defined by the user, calculate the total output-reduction dimension size.
# This function updates self.output_reduction_size and self.output_non_reduction_size.
# @param multiplier_array: MultiplierArray object, check in "architecture/operational_array.py" for more info.
def calc_output_reduction_size(self, multiplier_array: MultiplierArray):
total_dimension_size = multiplier_array.total_unit_count
output_reduction_size = 1
for os in multiplier_array.operand_spatial_sharing:
if os.operand == "O":
output_reduction_size *= int(os.size)
self.output_reduction_size = output_reduction_size
self.output_non_reduction_size = total_dimension_size // output_reduction_size
    ## A valid adder hierarchy needs to match operand_spatial_sharing (especially the output reduction dimension).
# @param adder_hierarchy: user-defined adder hierarchy
def assert_valid(self, adder_hierarchy: Dict[str, Dict[str, str or int]]):
assert all(
[
adder_level["type"] in ["AG", "AC"]
for adder_level in adder_hierarchy.values()
]
), "Some adder type not recognized. Adder type can only be 'AG' or 'AC'."
total_fan_in = 1
fan_in_list = []
acc_flag = False
for adder_level in adder_hierarchy.values():
if adder_level["type"] == "AG":
total_fan_in *= adder_level["fan_in"]
fan_in_list.append(adder_level["fan_in"])
else:
acc_flag = True
num = self.output_reduction_size
output_reduction_size_factors = [n for n in range(1, num + 1) if num % n == 0]
        assert set(fan_in_list).issubset(set(output_reduction_size_factors)), (
            f"Invalid adder hierarchy: at least one element of the adder tree's fan-in list ({fan_in_list}) "
            f"is not a factor of the total output-reduction size (factors: {output_reduction_size_factors})."
        )
        assert total_fan_in in output_reduction_size_factors, (
            f"Invalid adder hierarchy: the adder tree's total fan-in ({total_fan_in}) is not a factor "
            f"of the output-reduction size ({self.output_reduction_size})."
        )
        assert self.output_reduction_size == total_fan_in or acc_flag, (
            f"Invalid adder hierarchy: the adder tree's total fan-in ({total_fan_in}) is smaller than the total output-"
            f"reduction size ({self.output_reduction_size}) and no accumulator was found."
        )
## Construct adder level from the innermost level (close to multiplier) to the outermost. Calculate adder count and precision at each adder level. This function updates self.adder_levels.
# @param multiplier_output_precision: treated as the innermost-level adder's input precision.
# @param adder_hierarchy: user-defined adder hierarchy.
def construct_adder_levels(
self,
multiplier_output_precision: int,
adder_hierarchy: Dict[str, Dict[str, str or int]],
):
precision_counter = multiplier_output_precision
unit_counter = self.output_reduction_size
for name, adder_details in adder_hierarchy.items():
if adder_details["type"] == "AG":
adder_details["input_precision"] = precision_counter
adder_details["output_precision"] = precision_counter + ceil(
log2(adder_details["fan_in"])
)
adder_details["one_instance_unit_count"] = (
unit_counter // adder_details["fan_in"]
)
adder_details["total_unit_count"] = (
adder_details["one_instance_unit_count"]
* self.output_non_reduction_size
)
""" update precision and unit count when encounter aggregation (AG) adder level """
precision_counter = adder_details["output_precision"]
unit_counter = adder_details["one_instance_unit_count"]
else:
adder_details["fan_in"] = 2
adder_details["input_precision"] = [
precision_counter,
adder_details["output_precision"],
]
adder_details["one_instance_unit_count"] = unit_counter
adder_details["total_unit_count"] = (
adder_details["one_instance_unit_count"]
* self.output_non_reduction_size
)
""" only update precision when encounter accumulation (AC) adder level """
precision_counter = adder_details["output_precision"]
adder_levels_obj = [
AdderLevel(idx, name, details)
for idx, (name, details) in enumerate(adder_hierarchy.items())
]
self.adder_levels = adder_levels_obj
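    # Illustration of the precision bookkeeping above: with a 16-bit multiplier
    # output and an AG level of fan_in=4, the adders at that level get
    # input_precision=16 and output_precision = 16 + ceil(log2(4)) = 18; a
    # subsequent AC level then accumulates that 18-bit input into its
    # user-defined output_precision with a fixed fan_in of 2 (new partial sum
    # plus the stored value).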
if __name__ == "__main__":
multiplier_input_precision = [8, 8]
multiplier_energy = 0.5
multiplier_area = 0.1
dimensions = {"D1": 8, "D2": 3, "D3": 2}
operand_spatial_sharing = {
"OS1": ((1, 0, 0), "O"),
"OS2": ((0, 1, 0), "I1"),
"OS3": ((0, 0, 1), "I1"),
"OS4": ((1, 1, 0), "I2"),
}
multiplier = Multiplier(
multiplier_input_precision, multiplier_energy, multiplier_area
)
multiplier_array = MultiplierArray(multiplier, dimensions, operand_spatial_sharing)
user_defined_adder_hierarchy = {
"AL1": {"type": "AG", "fan_in": 4, "unit_cost": 0.08, "unit_area": 0.03},
"AL2": {
"type": "AC",
"output_precision": 24,
"unit_cost": 0.1,
"unit_area": 0.05,
},
"AL3": {"type": "AG", "fan_in": 2, "unit_cost": 0.13, "unit_area": 0.07},
}
ah = AdderHierarchy(user_defined_adder_hierarchy, multiplier_array)


# ==== zigzag/classes/hardware/architecture/memory_instance.py ====
from zigzag.classes.cacti.cacti_parser import CactiParser
## A single physical memory module, characterized by its size, bandwidth, access energies, area and port configuration.
class MemoryInstance:
## The class constructor
# Collect all the basic information of a physical memory module.
# @param name: memory module name, e.g. 'SRAM_512KB_BW_16b', 'I_RF'.
# @param size: total memory capacity (unit: bit).
# @param r_bw/w_bw: memory bandwidth (or wordlength) (unit: bit/cycle).
# @param r_cost/w_cost: memory unit data access energy.
# @param area: memory area (unit can be whatever user-defined unit).
    # @param r_port: number of memory read ports.
    # @param w_port: number of memory write ports (read and write ports can work in parallel).
    # @param rw_port: number of memory ports for both read and write (read and write cannot happen in parallel on such a port).
# @param latency: memory access latency (unit: number of cycles).
    # @param min_r_granularity (int): The minimal number of bits that can be read in a clock cycle (can be less than r_bw)
    # @param min_w_granularity (int): The minimal number of bits that can be written in a clock cycle (can be less than w_bw)
# @param mem_type (str): The type of memory. Used for CACTI cost extraction.
# @param auto_cost_extraction (bool): Automatically extract the read cost, write cost and area using CACTI.
def __init__(
self,
name: str,
size: int,
r_bw: int,
w_bw: int = 0,
r_cost: float = 0,
w_cost: float = 0,
area: float = 0,
r_port: int = 1,
w_port: int = 1,
rw_port: int = 0,
latency: int = 1,
min_r_granularity=None,
min_w_granularity=None,
mem_type: str = "sram",
auto_cost_extraction: bool = False,
):
if auto_cost_extraction:
# Size must be a multiple of 8 when using CACTI
assert (
size % 8 == 0
), "Memory size must be a multiple of 8 when automatically extracting costs using CACTI."
cacti_parser = CactiParser()
(
_,
r_bw,
w_bw,
r_cost,
w_cost,
area,
bank,
r_port,
w_port,
rw_port,
) = cacti_parser.get_item(
mem_type=mem_type,
size=size,
r_bw=r_bw,
r_port=r_port,
w_port=w_port,
rw_port=rw_port,
bank=1,
)
self.name = name
self.size = size
self.r_bw = r_bw
self.w_bw = w_bw
self.r_cost = r_cost
self.w_cost = w_cost
self.area = area
self.r_port = r_port
self.w_port = w_port
self.rw_port = rw_port
self.latency = latency
if not min_r_granularity:
self.r_bw_min = r_bw
else:
self.r_bw_min = min_r_granularity
if not min_w_granularity:
self.w_bw_min = w_bw
else:
self.w_bw_min = min_w_granularity
## JSON Representation of this class to save it to a json file.
def __jsonrepr__(self):
return self.__dict__
def __eq__(self, other: object) -> bool:
return isinstance(other, MemoryInstance) and self.__dict__ == other.__dict__
def __hash__(self):
return id(self) # unique for every object within its lifetime
def __str__(self):
return f"MemoryInstance({self.name})"
def __repr__(self):
        return str(self)
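

# A minimal construction sketch (hypothetical parameter values; auto_cost_extraction
# is left False, so no CACTI run is needed):
if __name__ == "__main__":
    rf_64B = MemoryInstance(
        name="rf_64B",
        size=512,  # 64 bytes expressed in bits
        r_bw=16,
        w_bw=16,
        r_cost=0.5,
        w_cost=0.6,
        area=0.1,
        r_port=1,
        w_port=1,
        rw_port=0,
        latency=1,
    )
    # r_bw_min falls back to r_bw because no min_r_granularity was given.
    print(rf_64B, "size (bits):", rf_64B.size, "min read granularity:", rf_64B.r_bw_min)


# ==== zigzag/classes/hardware/architecture/core.py ====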
from zigzag.classes.hardware.architecture.memory_instance import MemoryInstance
from zigzag.classes.hardware.architecture.operational_array import OperationalArray
from zigzag.classes.hardware.architecture.memory_hierarchy import MemoryHierarchy
import networkx as nx
## The Core class houses the array of multipliers and the attached memory hierarchy.
## This class supports a single multiplier array and memory hierarchy; runtime flexibility should be implemented on top.
class Core:
## The class constructor
# @param id
# @param operational_array
# @param memory_hierarchy
# @param dataflows
def __init__(
self,
id: int,
operational_array: OperationalArray,
memory_hierarchy: MemoryHierarchy,
dataflows: list = None,
):
self.id = id
self.operational_array = operational_array
self.memory_hierarchy = memory_hierarchy
self.dataflows = (
dataflows # save the possible spatial dataflows inside the Core
)
self.check_valid()
self.recalculate_memory_hierarchy_information()
def __str__(self) -> str:
return f"Core({self.id})"
def __repr__(self) -> str:
return str(self)
# JSON representation used for saving this object to a json file.
def __jsonrepr__(self):
return self.__dict__
def __hash__(self) -> int:
return hash(self.id)
def __eq__(self, __o: object) -> bool:
if not isinstance(__o, Core):
return False
return (
self.id == __o.id
and self.operational_array == __o.operational_array
and self.memory_hierarchy == __o.memory_hierarchy
)
def equals(self, other: object) -> bool:
return (
isinstance(other, Core)
and self.operational_array == other.operational_array
and self.memory_hierarchy == other.memory_hierarchy
)
def check_valid(self):
# TODO
pass
def recalculate_memory_hierarchy_information(self):
self.generate_memory_hierarchy_dict()
self.generate_memory_sharing_list()
    def generate_memory_hierarchy_dict(self):
        mem_operands = self.memory_hierarchy.nb_levels.keys()
        # Sort the memory levels topologically once and reuse the ordering for all lookups below.
        topological_order = list(nx.topological_sort(self.memory_hierarchy))
        mem_hierarchy_dict = {}
        mem_size_dict = {}
        mem_r_bw_dict = {}
        mem_w_bw_dict = {}
        mem_r_bw_min_dict = {}
        mem_w_bw_min_dict = {}
        for mem_op in mem_operands:
            mem_levels = [
                node for node in topological_order if mem_op in node.operands
            ]
            mem_hierarchy_dict[mem_op] = mem_levels
            mem_size_dict[mem_op] = [node.memory_instance.size for node in mem_levels]
            mem_r_bw_dict[mem_op] = [node.memory_instance.r_bw for node in mem_levels]
            mem_w_bw_dict[mem_op] = [node.memory_instance.w_bw for node in mem_levels]
            mem_r_bw_min_dict[mem_op] = [
                node.memory_instance.r_bw_min for node in mem_levels
            ]
            mem_w_bw_min_dict[mem_op] = [
                node.memory_instance.w_bw_min for node in mem_levels
            ]
        self.mem_hierarchy_dict = mem_hierarchy_dict
        self.mem_size_dict = mem_size_dict
        self.mem_r_bw_dict = mem_r_bw_dict
        self.mem_w_bw_dict = mem_w_bw_dict
        self.mem_r_bw_min_dict = mem_r_bw_min_dict
        self.mem_w_bw_min_dict = mem_w_bw_min_dict
    ## Generates a list of dictionaries that indicate which memory levels of which operands share the same physical memory
def generate_memory_sharing_list(self):
memory_sharing_list = []
for mem_lv in self.mem_hierarchy_dict.values():
for mem in mem_lv:
operand_mem_share = mem.mem_level_of_operands
if (
len(operand_mem_share) > 1
and operand_mem_share not in memory_sharing_list
):
memory_sharing_list.append(operand_mem_share)
self.mem_sharing_list = memory_sharing_list
def get_memory_hierarchy(self):
return self.memory_hierarchy
def get_memory_hierarchy_dict(self):
return self.mem_hierarchy_dict
def get_memory_size_dict(self):
return self.mem_size_dict
def get_memory_bw_dict(self):
return self.mem_r_bw_dict, self.mem_w_bw_dict
def get_memory_bw_min_dict(self):
return self.mem_r_bw_min_dict, self.mem_w_bw_min_dict
def get_memory_sharing_list(self):
return self.mem_sharing_list
## Returns a specific memory level in the memory hierarchy for the memory operand
def get_memory_level(self, mem_op: str, mem_lv: int):
# Sort the nodes topologically and filter out all memories that don't store mem_op
memory = [
node
for node in nx.topological_sort(self.memory_hierarchy)
if mem_op in node.operands
]
return memory[mem_lv]
## Get the lowest shared memory level between mem_op1 (>= mem_lv1) and mem_op2 (>= mem_lv2).
def get_lowest_shared_mem_level_above(self, mem_op1, mem_lv1, mem_op2, mem_lv2):
for lv, mem in enumerate(self.mem_hierarchy_dict[mem_op1][mem_lv1:]):
if (
mem_op2 in mem.operands
and mem_lv2 <= mem.mem_level_of_operands[mem_op2]
):
return mem
raise Exception(
f"{mem_op1}'s level {mem_lv1} and {mem_op2}'s level {mem_lv2} don't have a shared memory above!"
)
def get_top_memory_instance(self, mem_op) -> MemoryInstance:
if mem_op not in self.memory_hierarchy.get_operands():
raise ValueError(f"Memory operand {mem_op} not in {self}.")
mem_level = self.memory_hierarchy.get_operand_top_level(mem_op)
mem_instance = mem_level.memory_instance
        return mem_instance
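
# A minimal construction sketch (illustrative only: `operational_array` and
# `memory_hierarchy` are assumed to be previously built OperationalArray and
# MemoryHierarchy objects; "I2" is a hypothetical memory operand):
#
#   core = Core(id=1, operational_array=operational_array, memory_hierarchy=memory_hierarchy)
#   top_mem = core.get_top_memory_instance("I2")
#
# Construction immediately derives the per-operand hierarchy/size/bandwidth
# dictionaries and the memory sharing list that the cost model consumes.


# ==== zigzag/classes/hardware/architecture/memory_level.py ====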
from typing import Dict, Tuple, List
from zigzag.classes.hardware.architecture.memory_instance import MemoryInstance
from zigzag.classes.hardware.architecture.operational_array import OperationalArray
from math import prod
import numpy as np
## A single physical port of a MemoryInstance, together with the data movements (functional port functions) it serves.
class MemoryPort:
port_id_counter = 0
## The class constructor
# Collect all the physical memory port related information here.
# @param port_name:
    # @param port_bw: port bandwidth (bit/cycle)
    # @param port_bw_min: minimal access granularity of the port (bit/cycle)
# @param port_attr: read_only (r), write_only (w), read_write (rw)
# @param port_id: port index per memory
def __init__(
self,
port_name: str,
port_bw: int,
port_bw_min: int,
port_attr: str,
port_id=None,
):
self.name = port_name
self.bw = port_bw
self.bw_min = port_bw_min
self.attr = port_attr
self.served_op_lv_dir = []
""" to give each port a unique id number """
if port_id is None:
self.port_id = MemoryPort.port_id_counter
MemoryPort.port_id_counter += 1
else:
self.port_id = port_id
MemoryPort.port_id_counter = port_id + 1
def add_port_function(self, operand_level_direction: Tuple[str, int, str]):
self.served_op_lv_dir.append(operand_level_direction)
def __str__(self):
return str(self.name)
def __repr__(self):
return str(self.name)
def __eq__(self, other) -> bool:
return (
isinstance(other, MemoryPort)
and self.bw == other.bw
and self.bw_min == other.bw_min
and self.attr == other.attr
)
def __hash__(self):
return self.port_id
## One level in a memory hierarchy: a MemoryInstance assigned to one or more operands, with its port allocation and the operational array dimensions it serves.
class MemoryLevel:
## The class constructor
# Initialize the memory level in the hierarchy with the physical memory instance
# @param memory_instance:
# @param operands:
# @param mem_level_of_operands:
# @param port_alloc: memory port allocation (physical memory port -> functional memory port)
# @param served_dimensions:
# @param operational_array:
# @param id: an identifier used for reference check.
def __init__(
self,
memory_instance: MemoryInstance,
operands: List[str],
mem_level_of_operands: Dict,
port_alloc: Tuple[dict, ...],
served_dimensions: set or str,
operational_array: OperationalArray,
id,
):
self.memory_instance = memory_instance
self.name = self.memory_instance.name
self.operands = list(operands)
self.mem_level_of_operands = mem_level_of_operands
self.served_dimensions_vec = served_dimensions
self.dimensions = operational_array.dimensions
self.nb_dimensions = operational_array.nb_dimensions
self.dimension_sizes = operational_array.dimension_sizes
self.id = id
self.check_served_dimensions()
self.assert_valid()
""" for each operand that current memory level holds, allocate
physical memory ports to its 4 potential data movement """
self.port_alloc_raw = port_alloc
self.port_allocation()
""" memory access bandwidth and energy extraction """
self.read_energy = memory_instance.r_cost
self.write_energy = memory_instance.w_cost
self.read_bw = memory_instance.r_bw
self.write_bw = memory_instance.w_bw
""" calculate memory unrolling count """
# Todo: for memory level using diagonal dimension, only allow it to have an unrolling count of '1'.
self.calc_unroll_count()
""" calculate in ideal case memory's total fanout and per-data fanout """
# Todo: not consider systolic array for now.
self.calc_fanout()
def __update_formatted_string(self):
self.formatted_string = f"MemoryLevel(instance={self.memory_instance.name},operands={self.operands},served_dimensions={self.served_dimensions})"
def __str__(self):
self.__update_formatted_string()
return self.formatted_string
def __repr__(self):
return str(self)
def __eq__(self, other) -> bool:
return (
isinstance(other, MemoryLevel)
and self.memory_instance == other.memory_instance
and self.operands == other.operands
and self.mem_level_of_operands == other.mem_level_of_operands
and self.port_list == other.port_list
and self.served_dimensions_vec == other.served_dimensions_vec
)
def __hash__(self) -> int:
return hash(self.id)
## Create port object
def port_allocation(self):
# Step 1: according to the port count of the memory instance, initialize the physical port object
# (so far, we don't know what the port will be used for. But we do know the port's id/bw/attribute)
port_list = []
r_port = self.memory_instance.r_port
w_port = self.memory_instance.w_port
rw_port = self.memory_instance.rw_port
for i in range(1, r_port + 1):
port_name = "r_port_" + str(i)
port_bw = self.memory_instance.r_bw
port_bw_min = self.memory_instance.r_bw_min
port_attr = "r"
new_port = MemoryPort(port_name, port_bw, port_bw_min, port_attr)
port_list.append(new_port)
for i in range(1, w_port + 1):
port_name = "w_port_" + str(i)
port_bw = self.memory_instance.w_bw
port_bw_min = self.memory_instance.w_bw_min
port_attr = "w"
new_port = MemoryPort(port_name, port_bw, port_bw_min, port_attr)
port_list.append(new_port)
for i in range(1, rw_port + 1):
port_name = "rw_port_" + str(i)
port_bw = (
self.memory_instance.r_bw
) # we assume the read-write port has the same bw for read and write
port_bw_min = self.memory_instance.r_bw_min
port_attr = "rw"
new_port = MemoryPort(port_name, port_bw, port_bw_min, port_attr)
port_list.append(new_port)
port_names = [port.name for port in port_list]
# Step 2: add operand, memory level, and served data movement direction for each port.
mov_LUT = {
"fh": "wr_in_by_high",
"fl": "wr_in_by_low",
"th": "rd_out_to_high",
"tl": "rd_out_to_low",
}
for idx, (op, lv) in enumerate(list(self.mem_level_of_operands.items())):
for mov, port in self.port_alloc_raw[idx].items():
if port is None:
continue
port_idx = port_names.index(port)
port_list[port_idx].add_port_function((op, lv, mov_LUT[mov]))
self.port_list = port_list
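        # Example of the resulting bookkeeping (hypothetical single-port memory
        # storing operand "I1" at level 0 with
        # port_alloc = {"fh": "w_port_1", "tl": "r_port_1", "fl": None, "th": None}):
        #   w_port_1.served_op_lv_dir == [("I1", 0, "wr_in_by_high")]
        #   r_port_1.served_op_lv_dir == [("I1", 0, "rd_out_to_low")]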
def get_port_list(self):
return self.port_list
## JSON Representation of this class to save it to a json file.
def __jsonrepr__(self):
return str(self)
# return {"memory_instance": self.memory_instance,
# "served_dimensions_vec": self.served_dimensions_vec,
# "served_dimensions": self.served_dimensions}
## Assert if the served_dimension of this MemoryLevel is valid.
# - in served_dimension tuple set, each dimension should only show up once, e.g. {(1,0), (1,1)} is not valid since the '1' in the first position showed up twice.
def assert_valid(self):
sum_served_dims = []
for op_served_dimensions in self.served_dimensions_vec:
sum_op_served_dimensions = [sum(x) for x in zip(*op_served_dimensions)]
assert not any(
dim > 1 for dim in sum_op_served_dimensions
), f"Invalid served dimensions for MemoryLevel of Memory {self}"
sum_served_dims.append(sum_op_served_dimensions)
self.sum_served_dims = sum_served_dims
def calc_unroll_count(self):
unroll_count = []
for sum_op_served_dimensions in self.sum_served_dims:
sum_served_dims_invert = [
not sum_dim for sum_dim in sum_op_served_dimensions
]
op_unroll_count = prod(
[
prod(x)
for x in zip(self.dimension_sizes, sum_served_dims_invert)
if prod(x) != 0
]
)
unroll_count.append(op_unroll_count)
assert all(
op_unroll_count == unroll_count[0] for op_unroll_count in unroll_count
), f"Not all memory unrolling counts {unroll_count} are equal for MemoryLevel of Memory {str(self)}"
self.unroll_count = unroll_count[0]
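        # Worked example: with dimension_sizes = [14, 3] and an operand whose
        # summed served-dimension vector is [1, 0], the inverted mask is
        # [False, True], so only D2 contributes and the memory is unrolled
        # 3 times across the operational array.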
## Calculates the total fanout of this MemoryLevel.
# This equals the total amount of multipliers all instances in this level combined serve.
# To calculate the number of lower-level instances a single instance of this level serves,
# this number should be divided by the total_fanouts of all lower levels.
def calc_fanout(self):
total_fanout = 1
for served_dimension in self.served_dimensions:
total_fanout *= served_dimension.size
self.total_fanout = total_fanout
## Function that modifies the served_dimensions for this MemoryLevel if it is an empty set or 'all'.
# Empty set signals that the Memory Level has no dimensions served to the level below, thus a fanout of 1.
# 'all' signals that the MemoryLevel's served_dimensions are all dimensions, thus there is only one instance of the MemoryNode at this level.
def check_served_dimensions(self):
served_dimensions = self.served_dimensions_vec
operands = self.operands
# Modify served_dimensions to list to be able to change it if empty set or None.
served_dimensions = list(served_dimensions)
for op_idx, (op, op_served_dimensions) in enumerate(
zip(operands, served_dimensions)
):
# If served_dimensions is an empty set, it means this memory level is fully unrolled wrt operational_array
# We then convert it to be consistent with used notation
if op_served_dimensions == set():
op_served_dimensions = {(0,) * self.nb_dimensions}
served_dimensions[op_idx] = tuple(op_served_dimensions)
# If served_dimensions is 'all', it means this memory level is not unrolled
# We then convert it to a set containing all base dimensions of the operational_array (corresponds to a flat identity matrix)
if op_served_dimensions == "all":
identity_array = np.eye(self.nb_dimensions, dtype=int)
flat_identity_tuple = tuple([tuple(row) for row in identity_array])
op_served_dimensions = set(flat_identity_tuple)
served_dimensions[op_idx] = tuple(op_served_dimensions)
served_dimensions = tuple(served_dimensions)
self.served_dimensions_vec = served_dimensions
# Based on the vector representation of the served dimensions,
# we also save all the dimension objects this memory level serves.
served_dimensions = []
for op_served_dimensions_vec in self.served_dimensions_vec:
for served_dimension_vec in op_served_dimensions_vec:
non_zero_idxs = [
idx for idx, elem in enumerate(served_dimension_vec) if elem != 0
] # vector indices that are non-zero
served_dimensions += [
self.find_dimension_with_idx(idx) for idx in non_zero_idxs
]
self.served_dimensions = set(served_dimensions)
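        # Example of the conversion: for a 2D operational array, an operand with
        # served_dimensions=set() becomes {(0, 0)} (fully unrolled, fanout 1),
        # while "all" becomes {(1, 0), (0, 1)} (one instance serving both D1 and D2).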
## Find the dimension object with idx 'idx'.
# @param idx
def find_dimension_with_idx(self, idx: int):
dimension = None
for dim in self.dimensions:
if dim.id == idx:
dimension = dim
break
if dimension is None:
raise ValueError("idx passed to function is not a valid dimension id.")
        return dimension


# ==== zigzag/classes/hardware/architecture/operational_array.py ====
from typing import Dict
import numpy as np
from zigzag.classes.hardware.architecture.dimension import Dimension
from zigzag.classes.hardware.architecture.operational_unit import (
OperationalUnit,
Multiplier,
)
## This class captures multi-dimensional operational array size.
class OperationalArray:
## The class constructor
# @param operational_unit: an OperationalUnit object including precision and single operation energy, later we
# can add idle energy also (e.g. for situations that one or two of the input operands is zero).
# @param dimensions: define the name and size of each multiplier array dimensions, e.g. {'D1': 3, 'D2': 5}.
def __init__(self, operational_unit: OperationalUnit, dimensions: Dict[str, int]):
self.unit = operational_unit
self.total_unit_count = int(np.prod(list(dimensions.values())))
self.total_area = operational_unit.area * self.total_unit_count
base_dims = [
Dimension(idx, name, size)
for idx, (name, size) in enumerate(dimensions.items())
]
self.dimensions = base_dims
self.dimension_sizes = [dim.size for dim in base_dims]
self.nb_dimensions = len(base_dims)
# JSON Representation of this class to save it to a json file.
def __jsonrepr__(self):
return {"operational_unit": self.unit, "dimensions": self.dimensions}
def __eq__(self, __o: object) -> bool:
if not isinstance(__o, OperationalArray):
return False
return self.unit == __o.unit and self.dimensions == __o.dimensions
## An OperationalArray whose operational units are multipliers.
# The optional operand_spatial_sharing argument is stored as-is; it is used by e.g. the AdderHierarchy and the example code below.
class MultiplierArray(OperationalArray):
    def __init__(self, operational_unit, dimensions, operand_spatial_sharing=None):
        super().__init__(operational_unit, dimensions)
        self.operand_spatial_sharing = operand_spatial_sharing
def multiplier_array_example1():
"""Multiplier array variables"""
multiplier_input_precision = [8, 8]
multiplier_energy = 0.5
multiplier_area = 0.1
dimensions = {"D1": 14, "D2": 3, "D3": 4}
operand_spatial_sharing = {
"I1": {(1, 0, 0)},
"O": {(0, 1, 0)},
"I2": {(0, 0, 1), (1, 1, 0)},
}
multiplier = Multiplier(
multiplier_input_precision, multiplier_energy, multiplier_area
)
multiplier_array = MultiplierArray(multiplier, dimensions, operand_spatial_sharing)
return multiplier_array
def multiplier_array_example2():
"""Multiplier array variables"""
multiplier_input_precision = [8, 8]
multiplier_energy = 0.5
multiplier_area = 0.1
dimensions = {"D1": 14, "D2": 12}
operand_spatial_sharing = {"I1": {(1, 0)}, "O": {(0, 1)}, "I2": {(1, 1)}}
multiplier = Multiplier(
multiplier_input_precision, multiplier_energy, multiplier_area
)
multiplier_array = MultiplierArray(multiplier, dimensions, operand_spatial_sharing)
return multiplier_array
if __name__ == "__main__":
multiplier_array = multiplier_array_example1()
    for operand, directions in multiplier_array.operand_spatial_sharing.items():
        print(f"operand: {operand}\tsharing directions: {directions}")


# ==== zigzag cost model (CostModelEvaluation) ====
import logging
from typing import Dict, List, Tuple
from math import ceil
import numpy as np
from zigzag.classes.mapping.combined_mapping import Mapping
from zigzag.classes.mapping.combined_mapping import FourWayDataMoving
from zigzag.utils import pickle_deepcopy
logger = logging.getLogger(__name__)
## Class that collects all the data transfer rate (periodic) information for each DTL (data transfer link).
class PortActivity:
## The class constructor
    # @param real_cycle (int) Within each period, the actual number of cycles used for transferring the amount of data, depending on the memory bw and the data amount to be transferred at that memory level.
    # @param allowed_cycle (int)
    # @param period (int) The turnaround cycle at that memory level, which equals the product of all the temporal loops of current and below memory level.
    # @param period_count (int) The total number of periods across the whole NN layer computation.
# @param layer_op (str)
# @param mem_lv (int)
# @param mov_dir (str)
def __init__(
self,
real_cycle: int,
allowed_cycle: int,
period: int,
period_count: int,
layer_op: str,
mem_lv: int,
mov_dir: str,
):
        ## Within each period, the actual number of cycles used for transferring the amount of data, depending on the memory bw and the data amount to be transferred at that memory level.
self.real_cycle = real_cycle
self.allowed_cycle = allowed_cycle
        ## The turnaround cycle at that memory level, which equals the product of all the temporal loops of current and below memory level.
self.period = period
        ## The total number of periods across the whole NN layer computation.
self.period_count = period_count
self.served_op_lv_dir = (layer_op, mem_lv, mov_dir)
""" stalling (+) or slacking (-) cycle in one period """
self.SS_per_period = real_cycle - allowed_cycle
""" stalling (+) or slacking (-) cycle in total computation """
self.SS = (real_cycle - allowed_cycle) * (period_count - 1)
""" total memory updating window allowed """
self.MUW = allowed_cycle * (period_count - 1)
def __str__(self):
return str(self.served_op_lv_dir)
def __repr__(self):
return str(self.served_op_lv_dir)
def __eq__(self, other) -> bool:
return str(self.served_op_lv_dir) == other
def __hash__(self):
        return hash(str(self.served_op_lv_dir))
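    # Numeric illustration: real_cycle=4, allowed_cycle=6, period_count=5 gives
    # SS_per_period = -2 (2 slack cycles per period), SS = -2 * 4 = -8 over the
    # whole layer, and a total memory updating window MUW = 6 * 4 = 24 cycles.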
## Class that collects all the data transfer rate information for each DTL (data transfer link).
class PortBeginOrEndActivity:
## The class constructor
    # @param real_cycle (int) the actual number of cycles used for transferring the amount of data,
    # depending on the memory bw and the data amount to be transferred at that memory level
# @param data_in_charge (int) one-period data transfer amount (bit)
# @param mem_bw (int) bit/cycle
# @param layer_op (str)
# @param mem_lv (int)
# @param mov_dir (str) data moving direction
def __init__(
self,
real_cycle: int,
data_in_charge: int,
mem_bw: int,
layer_op: str,
mem_lv: int,
mov_dir: str,
):
        ## the actual number of cycles used for transferring the amount of data,
        # depending on the memory bw and the data amount to be transferred at that memory level
self.real_cycle = real_cycle
## one-period data transfer amount (bit)
self.data_in_charge = data_in_charge
## bit/cycle
self.mem_bw = mem_bw
self.served_op_lv_dir = (layer_op, mem_lv, mov_dir)
def __str__(self):
return str(self.served_op_lv_dir)
def __repr__(self):
return str(self.served_op_lv_dir)
## Given a certain operand's storage level (for example (A,1): operand A's 1st memory level),
# return a list of the rest operand's storage levels that share physical memory with the former one (A1)
# @param mem_op
# @param mem_lv
# @param memory_sharing_list
# @return mem_share_grp
def get_shared_mem_list(mem_op, mem_lv, memory_sharing_list) -> List[Tuple]:
for mem_share_group in memory_sharing_list:
mem_share_grp = list(mem_share_group.items())
mem_target = (mem_op, mem_lv)
if mem_target in mem_share_grp:
return mem_share_grp
## Generate the integer spatial mapping from fractional spatial mapping (due to greedy mapping support).
# Later the fractional one is used for calculating energy, and the integer one is used for calculating latency
# @param spatial_mapping
# @return spatial_mapping_int
def spatial_mapping_fractional_to_int(spatial_mapping: Dict):
spatial_mapping_int = pickle_deepcopy(spatial_mapping)
for op, su_all_lv in spatial_mapping.items():
if not su_all_lv:
continue
for lv, su_one_level in enumerate(su_all_lv):
for idx, su in enumerate(su_one_level):
                if not isinstance(su[1], int):
spatial_mapping_int[op][lv][idx] = (su[0], ceil(su[1]))
return spatial_mapping_int
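# Example (hypothetical loop dimensions "K" and "C"):
# {"O": [[("K", 4), ("C", 2.5)]]} becomes {"O": [[("K", 4), ("C", 3)]]};
# the fractional factor (greedy mapping) is kept for energy, the ceiled
# integer version is used for latency.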
## This function calculates the union length of all the share-port MUW (memory updating window).
# The following encoding has to be used:
# - 'P' for single period length
# - 'A' for allowed MUW per period
# - 'PC' for period count within the whole layer computation
#
# Pre-process the port_duty_list to generate input_dict, which looks like:
# - input_dict = {'O1': {'P': 3, 'A': 1, 'PC': 8}, 'O2': {'P': 6, 'A': 2, 'PC': 4}, 'O3': {'P': 12, 'A': 4, 'PC': 2}}
#
# @param port_duty_list List of port activity objects
# @return the MUW union length in cycles
def calc_MUW_union(port_duty_list):
input_dict = {}
for port_duty in port_duty_list:
"""as long as one of the port duty can make use of the whole computation time, the MUW union is set to
the whole computation time"""
if port_duty.period == port_duty.allowed_cycle:
return port_duty.period * port_duty.period_count
key = str(port_duty.served_op_lv_dir)
input_dict[key] = {
"P": port_duty.period,
"A": port_duty.allowed_cycle,
"PC": port_duty.period_count,
}
max_period = 0
max_period_operand = None
for op in input_dict:
if input_dict[op]["P"] > max_period:
max_period = input_dict[op]["P"]
max_period_operand = op
indicators = np.zeros((len(input_dict), max_period), dtype=np.int8)
for i, op in enumerate(input_dict):
"""reshape to period of this operand"""
indicators_reshape = indicators.reshape(
(len(input_dict), -1, input_dict[op]["P"])
)
""" fill in first few time units as used """
indicators_reshape[i, :, : input_dict[op]["A"]] = 1
union = max_period - (~indicators.any(0)).sum(dtype=np.uint64)
# take sum across operands => how many operand need memory for every time unit
# Subtract 1 => number of stalls
# Clip by 0 (-1 is not -1 stall)
# Sum across time units (only remaining axis)
# stall = (indicators.sum(0, dtype=np.int8) - 1).clip(min=0).sum()
""" Multiply with number of periods of largest period (as it was normalized to largest period) """
return union * input_dict[max_period_operand]["PC"]
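# Worked example for the input_dict above: O1 occupies cycle offsets {0,3,6,9}
# within the longest period (12 cycles), O2 occupies {0,1,6,7} and O3 {0,1,2,3}.
# Their union covers 7 of the 12 cycles, and scaling by the period count of the
# longest-period operand (PC=2 for O3) gives a MUW union of 14 cycles.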
## Class that stores inputs and runs them through the zigzag cost model.
#
# Initialize the cost model evaluation with the following inputs:
# - accelerator: the accelerator that includes the core on which to run the layer
# - layer: the layer to run
# - spatial_mapping: the spatial mapping
# - temporal_mapping: the temporal mapping
#
# From these parameters, the following attributes are computed:
# * core: The core on which the layer is ran. This should be specified in the LayerNode attributes.
# * mapping: The combined spatial and temporal mapping object where access patterns are computed.
#
# The following cost model attributes are also initialized:
# - energy_breakdown: The energy breakdown for all operands
# - energy: The total energy
#
# After initialization, the cost model evaluation is run.
class CostModelEvaluation:
## The class constructor
# After initialization, the cost model evaluation is run
# @param accelerator the accelerator that includes the core on which to run the
# @param layer the layer to run
# @param spatial_mapping the spatial mapping
# @param temporal_mapping the temporal mapping
# @param access_same_data_considered_as_no_access (optional)
def __init__(
self,
*,
accelerator,
layer,
spatial_mapping,
temporal_mapping,
access_same_data_considered_as_no_access=True,
):
self.accelerator = accelerator
self.layer = layer
self.spatial_mapping = spatial_mapping
self.temporal_mapping = temporal_mapping
self.access_same_data_considered_as_no_access = (
access_same_data_considered_as_no_access
)
self.core_id = layer.core_allocation
self.mem_level_list = (
accelerator.get_core(self.core_id).get_memory_hierarchy().mem_level_list
)
self.mem_hierarchy_dict = accelerator.get_core(
self.core_id
).get_memory_hierarchy_dict()
self.mem_size_dict = accelerator.get_core(self.core_id).get_memory_size_dict()
self.mem_r_bw_dict, self.mem_w_bw_dict = accelerator.get_core(
self.core_id
).get_memory_bw_dict()
self.mem_r_bw_min_dict, self.mem_w_bw_min_dict = accelerator.get_core(
self.core_id
).get_memory_bw_min_dict()
self.mem_sharing_list = accelerator.get_core(
self.core_id
).get_memory_sharing_list()
self.layer_op_to_mem_op = layer.memory_operand_links
self.mem_op_to_layer_op = dict(
[(value, key) for key, value in self.layer_op_to_mem_op.items()]
)
""" generate the integer spatial mapping from fractional spatial mapping (due to greedy mapping support).
Later the fractional one is used for calculating energy, and the integer one is used for calculating latency"""
self.spatial_mapping_dict_int = spatial_mapping_fractional_to_int(
self.spatial_mapping.mapping_dict_origin
)
# For constructing Mapping object, the last parameter "self.access_same_data_considered_as_no_access" is optional
self.mapping = Mapping(
self.accelerator,
self.spatial_mapping,
self.temporal_mapping,
self.layer,
self.access_same_data_considered_as_no_access,
)
self.mapping_int = Mapping(
self.accelerator,
self.spatial_mapping_dict_int,
self.temporal_mapping,
self.layer,
self.access_same_data_considered_as_no_access,
)
self.active_mem_level = self.mapping.mem_level
# Run the cost model evaluation
self.run()
def __str__(self):
return f"CostModelEvaluation(layer={self.layer}, core={self.core_id})"
def __repr__(self):
return str(self)
# JSON representation used for saving this object to a json file.
def __jsonrepr__(self):
return {
"outputs": {
"memory": {
"utilization": self.mem_utili_shared
if hasattr(self, "mem_utili_shared")
else None,
"word_accesses": self.memory_word_access,
},
"energy": {
"energy_total": self.energy_total,
"operational_energy": self.MAC_energy,
"memory_energy": self.mem_energy,
"energy_breakdown_per_level": self.energy_breakdown,
"energy_breakdown_per_level_per_operand": self.energy_breakdown_further,
},
"latency": {
"data_onloading": self.latency_total1 - self.latency_total0,
"computation": self.latency_total0,
"data_offloading": self.latency_total2 - self.latency_total1,
},
"spatial": {
"mac_utilization": {
"ideal": self.MAC_spatial_utilization,
"stalls": self.MAC_utilization0,
"stalls_onloading": self.MAC_utilization1,
"stalls_onloading_offloading": self.MAC_utilization2,
}
},
},
"inputs": {
"accelerator": self.accelerator,
"layer": self.layer,
"spatial_mapping": self.spatial_mapping
if hasattr(self, "spatial_mapping")
else None,
"temporal_mapping": self.temporal_mapping
if hasattr(self, "temporal_mapping")
else None,
},
}
## Simple JSON representation used for saving this object to a simple json file.
def __simplejsonrepr__(self):
return {"energy": self.energy_total, "latency": self.latency_total2}
## Run the cost model evaluation.
def run(self):
# - TODO: Latency calculation
self.calc_memory_utilization()
self.calc_memory_word_access()
self.calc_energy()
self.calc_latency()
## Calculate occupancy for each physical memory based on the mapping.
def calc_memory_utilization(self):
# mem_utili_individual: the memory utilization of each operand individually.
# mem_utili_shared: the memory utilization taking operand memory sharing into consideration.
mem_utili_individual = {}
effective_mem_utili_individual = {}
for layer_op in self.layer.operand_list:
mem_utili_individual[layer_op] = []
effective_mem_utili_individual[layer_op] = []
for mem_lv in range(self.active_mem_level[layer_op]):
mem_utilization = (
self.mapping.data_bit_per_level_unrolled[layer_op][mem_lv + 1]
/ self.mem_size_dict[self.layer_op_to_mem_op[layer_op]][mem_lv]
)
assert mem_utilization <= 1, (
f"Operand {layer_op} memory level {mem_lv}'s individual memory utilization is "
f"{mem_utilization}, which is larger than 1 "
f"(memory level starts from 0)"
)
mem_utili_individual[layer_op].append(mem_utilization)
                # The utilization if copied data in parallel memories is not counted as effective:
effective_mem_utilization = (
self.mapping.effective_data_bit[layer_op][mem_lv + 1]
/ self.mem_size_dict[self.layer_op_to_mem_op[layer_op]][mem_lv]
)
effective_mem_utili_individual[layer_op].append(
effective_mem_utilization
)
mem_utili_shared = pickle_deepcopy(mem_utili_individual)
effective_mem_utili_shared = pickle_deepcopy(effective_mem_utili_individual)
for mem_share_dict in self.mem_sharing_list:
mem_utilization = 0
effective_mem_utilization = 0
for mem_op, mem_lv in mem_share_dict.items():
try:
layer_op = self.mem_op_to_layer_op[mem_op]
                except KeyError:  # mem_op_to_layer_op might not contain this mem op (e.g. pooling layer)
continue
mem_utilization += mem_utili_individual[layer_op][mem_lv]
effective_mem_utilization += effective_mem_utili_individual[layer_op][
mem_lv
]
assert mem_utilization <= 1, (
f"Memory shared by {mem_share_dict} (memory operand, memory level) has shared utilization of "
f"{mem_utilization}, which is > 1 "
f"(memory level starts from 0)."
)
for mem_op, mem_lv in mem_share_dict.items():
try:
layer_op = self.mem_op_to_layer_op[mem_op]
                except KeyError:  # mem_op_to_layer_op might not contain this mem op (e.g. pooling layer)
continue
mem_utili_shared[layer_op][mem_lv] = mem_utilization
effective_mem_utili_shared[layer_op][mem_lv] = effective_mem_utilization
self.mem_utili_individual = mem_utili_individual
self.mem_utili_shared = mem_utili_shared
self.effective_mem_utili_individual = effective_mem_utili_individual
self.effective_mem_utili_shared = effective_mem_utili_shared
## Calculates the memory word access based on unit memory's data element move count and the physical memory bw.
def calc_memory_word_access(self):
memory_word_access = {}
for layer_op in self.layer.operand_list:
memory_word_access[layer_op] = []
for mem_lv in range(self.mapping.mem_level[layer_op]):
"""wr_in_by_low"""
data_elem_move_per_period = self.mapping.unit_mem_data_movement[
layer_op
][mem_lv].data_trans_amount_per_period.wr_in_by_low
data_precision = self.mapping.unit_mem_data_movement[layer_op][
mem_lv
].data_precision.wr_in_by_low
if data_elem_move_per_period == 0 or data_precision == 0:
wr_in_by_low = 0
else:
total_period_count = self.mapping.unit_mem_data_movement[layer_op][
mem_lv
].data_trans_period_count.wr_in_by_low
max_bw = self.mem_w_bw_dict[self.layer_op_to_mem_op[layer_op]][
mem_lv
]
min_bw = self.mem_w_bw_min_dict[self.layer_op_to_mem_op[layer_op]][
mem_lv
]
if mem_lv > 0:
another_side_bw = self.mem_r_bw_dict[
self.layer_op_to_mem_op[layer_op]
][mem_lv - 1] * (
self.spatial_mapping.unit_unique[layer_op][mem_lv]
/ self.spatial_mapping.unit_unique[layer_op][mem_lv + 1]
)
data_elem_move_per_cycle_in_a_period = min(
(another_side_bw / data_precision),
(max_bw / data_precision),
data_elem_move_per_period,
)
cycle_in_a_period = ceil(
data_elem_move_per_period
/ data_elem_move_per_cycle_in_a_period
)
else:
data_elem_move_per_cycle_in_a_period = data_elem_move_per_period
cycle_in_a_period = 1
# wr_in_by_low = (
# ceil(
# (data_elem_move_per_cycle_in_a_period * data_precision)
# / min_bw
# )
# * (min_bw / max_bw)
# * total_period_count
# * cycle_in_a_period
# * self.mapping.spatial_mapping.unit_count[layer_op][mem_lv + 1]
# )
                # 2023/06/30, solve the memory access granularity issue - Jiacong Sun, Linyan Mei
                # Originally we used the cycle_in_a_period to compute the memory word access.
                # This neglected the finer-grained memory access possibility (the min_bw, the minimal memory access granularity, like half-word access).
                # Now the calculation is based on min_bw.
wr_in_by_low = (
ceil((data_elem_move_per_period * data_precision) / min_bw)
* (min_bw / max_bw)
* total_period_count
* self.mapping.spatial_mapping.unit_count[layer_op][mem_lv + 1]
)
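                # Numeric illustration of the formula above: moving 10 data
                # elements of 8 bit per period through a port with max_bw=64 and
                # min_bw=16 takes ceil(80/16) = 5 sub-word accesses, counted as
                # 5 * (16/64) = 1.25 full memory words per period (before
                # scaling by the period count and the unrolled memory count).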
""" rd_out_to_low """
data_elem_move_per_period = self.mapping.unit_mem_data_movement[
layer_op
][mem_lv].data_trans_amount_per_period.rd_out_to_low
data_precision = self.mapping.unit_mem_data_movement[layer_op][
mem_lv
].data_precision.rd_out_to_low
if data_elem_move_per_period == 0 or data_precision == 0:
rd_out_to_low = 0
else:
total_period_count = self.mapping.unit_mem_data_movement[layer_op][
mem_lv
].data_trans_period_count.rd_out_to_low
max_bw = self.mem_r_bw_dict[self.layer_op_to_mem_op[layer_op]][
mem_lv
]
min_bw = self.mem_r_bw_min_dict[self.layer_op_to_mem_op[layer_op]][
mem_lv
]
if mem_lv > 0:
another_side_bw = self.mem_w_bw_dict[
self.layer_op_to_mem_op[layer_op]
][mem_lv - 1] * (
self.spatial_mapping.unit_unique[layer_op][mem_lv]
/ self.spatial_mapping.unit_unique[layer_op][mem_lv + 1]
)
data_elem_move_per_cycle_in_a_period = min(
(another_side_bw / data_precision),
(max_bw / data_precision),
data_elem_move_per_period,
)
cycle_in_a_period = ceil(
data_elem_move_per_period
/ data_elem_move_per_cycle_in_a_period
)
# rd_out_to_low = (
# ceil(
# (data_elem_move_per_cycle_in_a_period * data_precision)
# / min_bw
# )
# * (min_bw / max_bw)
# * total_period_count
# * cycle_in_a_period
# * self.mapping.spatial_mapping.unit_count[layer_op][
# mem_lv + 1
# ]
# )
# else:
                # 2023/06/30, solve the memory access granularity issue - Jiacong Sun, Linyan Mei
                # Originally we used the cycle_in_a_period to compute the memory word access.
                # This neglected the finer-grained memory access possibility (the min_bw, the minimal memory access granularity, like half-word access).
                # Now the calculation is based on min_bw.
rd_out_to_low = (
ceil((data_elem_move_per_period * data_precision) / min_bw)
* (min_bw / max_bw)
* total_period_count
* self.mapping.spatial_mapping.unit_count[layer_op][mem_lv + 1]
)
""" rd_out_to_high """
data_elem_move_per_period = self.mapping.unit_mem_data_movement[
layer_op
][mem_lv].data_trans_amount_per_period.rd_out_to_high
if data_elem_move_per_period == 0:
rd_out_to_high = 0
else:
data_precision = self.mapping.unit_mem_data_movement[layer_op][
mem_lv
].data_precision.rd_out_to_high
total_period_count = self.mapping.unit_mem_data_movement[layer_op][
mem_lv
].data_trans_period_count.rd_out_to_high
max_bw = self.mem_r_bw_dict[self.layer_op_to_mem_op[layer_op]][
mem_lv
]
min_bw = self.mem_r_bw_min_dict[self.layer_op_to_mem_op[layer_op]][
mem_lv
]
rd_out_to_high = (
ceil((data_elem_move_per_period * data_precision) / min_bw)
* (min_bw / max_bw)
* total_period_count
* self.mapping.spatial_mapping.unit_count[layer_op][mem_lv + 1]
)
""" wr_in_by_high """
data_elem_move_per_period = self.mapping.unit_mem_data_movement[
layer_op
][mem_lv].data_trans_amount_per_period.wr_in_by_high
if data_elem_move_per_period == 0:
wr_in_by_high = 0
else:
data_precision = self.mapping.unit_mem_data_movement[layer_op][
mem_lv
].data_precision.wr_in_by_high
total_period_count = self.mapping.unit_mem_data_movement[layer_op][
mem_lv
].data_trans_period_count.wr_in_by_high
max_bw = self.mem_w_bw_dict[self.layer_op_to_mem_op[layer_op]][
mem_lv
]
min_bw = self.mem_w_bw_min_dict[self.layer_op_to_mem_op[layer_op]][
mem_lv
]
wr_in_by_high = (
ceil((data_elem_move_per_period * data_precision) / min_bw)
* (min_bw / max_bw)
* total_period_count
* self.mapping.spatial_mapping.unit_count[layer_op][mem_lv + 1]
)
""" All """
memory_word_access_single = FourWayDataMoving(
rd_out_to_low, wr_in_by_low, rd_out_to_high, wr_in_by_high
)
memory_word_access[layer_op].append(memory_word_access_single)
self.memory_word_access = memory_word_access
## Calculates the energy cost of this cost model evaluation by calculating the memory reading/writing energy.
def calc_energy(self):
# - TODO: Interconnection energy
self.calc_MAC_energy_cost()
self.calc_memory_energy_cost()
## Calculate the dynamic MAC energy
def calc_MAC_energy_cost(self):
core = self.accelerator.get_core(self.core_id)
single_MAC_energy = core.operational_array.unit.cost
self.MAC_energy = single_MAC_energy * self.layer.total_MAC_count
## Computes the memories reading/writing energy by converting the access patterns in self.mapping to
# energy breakdown using the memory hierarchy of the core on which the layer is mapped.
#
# The energy breakdown is saved in self.energy_breakdown.
#
# The energy total consumption is saved in self.energy_total.
def calc_memory_energy_cost(self):
core = self.accelerator.get_core(self.core_id)
mem_hierarchy = core.memory_hierarchy
energy_breakdown = {}
energy_breakdown_further = {}
energy_total = 0
for (layer_op, mem_access_list_per_op) in self.memory_word_access.items():
"""Retrieve the memory levels in the hierarchy for this memory operand"""
mem_op = self.layer_op_to_mem_op[layer_op]
memory_levels = mem_hierarchy.get_memory_levels(mem_op=mem_op)
            breakdown = []  # Stores the energy breakdown of a single layer operand (W, I, ...)
            breakdown_further = []  # Stores the breakdown further split per data movement direction
for (access_count, memory_level) in zip(
mem_access_list_per_op, memory_levels
):
energy_cost_per_read_out = memory_level.read_energy
energy_cost_per_write_in = memory_level.write_energy
read_out_energy_to_above = access_count.get_total_read_outs_to_above(
scaling=energy_cost_per_read_out
)
write_in_energy_from_above = (
access_count.get_total_write_ins_from_above(
scaling=energy_cost_per_write_in
)
)
read_out_energy_to_below = access_count.get_total_read_outs_to_below(
scaling=energy_cost_per_read_out
)
write_in_energy_from_below = (
access_count.get_total_write_ins_from_below(
scaling=energy_cost_per_write_in
)
)
total_read_out_energy = (
read_out_energy_to_above + read_out_energy_to_below
)
total_write_in_energy = (
write_in_energy_from_above + write_in_energy_from_below
)
total_energy_cost_memory = total_read_out_energy + total_write_in_energy
breakdown.append(
total_energy_cost_memory
) # Here the breakdown only saves the total energy cost per memory level
breakdown_further.append(
FourWayDataMoving(
read_out_energy_to_below,
write_in_energy_from_below,
read_out_energy_to_above,
write_in_energy_from_above,
)
) # here it contains the full split
energy_total += total_energy_cost_memory
energy_breakdown[layer_op] = breakdown
energy_breakdown_further[layer_op] = breakdown_further
self.energy_breakdown = energy_breakdown
self.energy_breakdown_further = energy_breakdown_further
self.mem_energy = energy_total
self.energy_total = self.mem_energy + self.MAC_energy
logger.debug(f"Ran {self}. Total energy = {self.energy_total}")
## Calculate latency in 4 steps
#
# 1) As we already calculated the ideal data transfer rate in combined_mapping.py (in the Mapping class),
# here we start with calculating the required (or allowed) memory updating window by comparing the effective
# data size with the physical memory size at each level. If the effective data size is smaller than 50%
# of the physical memory size, then we take the whole period as the allowed memory updating window (double buffer effect);
    # otherwise we take the period divided by the top_ir_loop as the allowed memory updating window.
#
# 2) Then, we compute the real data transfer rate given the actual memory bw per functional port pair,
# assuming we have enough memory ports.
#
# 3) In reality, there is no infinite memory port to use. So, as the second step, we combine the real
# data transfer attributes per physical memory port.
#
# 4) Finally, we combine the stall/slack of each memory port to get the final latency.
def calc_latency(self):
self.calc_double_buffer_flag()
self.calc_allowed_and_real_data_transfer_cycle_per_DTL()
self.combine_data_transfer_rate_per_physical_port()
self.calc_data_loading_offloading_latency()
self.calc_overall_latency()
## This function checks the double-buffer possibility for each operand at each memory level
# (minimal memory BW requirement case) by comparing the physical memory size with the effective
# data size, taking into account the memory sharing between operands.
def calc_double_buffer_flag(self):
double_buffer_true = {}
for layer_op in self.layer.operand_list:
mem_op = self.layer_op_to_mem_op[layer_op]
""" start with False for each operand at the lowest arch level (MAC array level) """
double_buffer_true[layer_op] = [False]
for mem_lv in range(0, self.mapping_int.mem_level[layer_op]):
if self.effective_mem_utili_shared[layer_op][mem_lv] <= 0.5:
double_buffer_true[layer_op].append(True)
elif (
self.effective_mem_utili_individual[layer_op][mem_lv]
<= 1 - self.effective_mem_utili_shared[layer_op][mem_lv]
):
double_buffer_true[layer_op].append(True)
shared_mem_list = get_shared_mem_list(
mem_op, mem_lv, self.mem_sharing_list
)
""" When one of the operand in the shared memory get the "double-buffer" chance,
all operands of that shared memory level need to update the memory utilization
for later memory free space evaluation """
for shared_mem_op, shared_mem_lv in shared_mem_list:
try:
shared_layer_op = self.mem_op_to_layer_op[shared_mem_op]
                    except KeyError:  # mem_op_to_layer_op might not have this mem op (e.g. pooling layer)
continue
self.effective_mem_utili_shared[shared_layer_op][
shared_mem_lv
] += self.effective_mem_utili_individual[layer_op][mem_lv]
else:
double_buffer_true[layer_op].append(False)
self.double_buffer_true = double_buffer_true
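    # Hedged sketch of the flag decision above (assumed utilizations): an
    # operand occupying 30% of a memory whose shared utilization is already
    # 60% still gets the double-buffer flag, because a second copy fits in
    # the remaining free space:
    #
    #   shared_util, individual_util = 0.6, 0.3
    #   double_buffer = shared_util <= 0.5 or individual_util <= 1 - shared_util
    #   # -> True (0.3 <= 0.4); the shared utilization is then bumped to 0.9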
## Construct a 4-way data transfer pattern for each unit mem, calculate
# {allowed_mem_updating_cycle, real_data_trans_cycle, DTL_SS_cycle} per period
def calc_allowed_and_real_data_transfer_cycle_per_DTL(self):
allowed_mem_updat_cycle = {}
real_data_trans_cycle = {}
""" stall (+) or slack (-) cycle within each period per virtual data transfer link (DTL) """
DTL_SS_cycle = {}
for layer_op in self.layer.operand_list:
allowed_mem_updat_cycle[layer_op] = []
real_data_trans_cycle[layer_op] = []
DTL_SS_cycle[layer_op] = []
mem_op = self.layer_op_to_mem_op[layer_op]
for mem_lv in range(self.mapping_int.mem_level[layer_op]):
"""======================================allowed_mem_updating_cycle(below)====================================="""
""" wr_in_by_low & rd_out_to_low"""
if self.double_buffer_true[layer_op][mem_lv]:
wr_in_by_low_allowed = self.mapping_int.unit_mem_data_movement[
layer_op
][mem_lv].data_trans_period.wr_in_by_low
rd_out_to_low_allowed = self.mapping_int.unit_mem_data_movement[
layer_op
][mem_lv].data_trans_period.rd_out_to_low
else:
wr_in_by_low_allowed = self.mapping_int.unit_mem_data_movement[
layer_op
][mem_lv].inst_data_trans_window.wr_in_by_low
rd_out_to_low_allowed = self.mapping_int.unit_mem_data_movement[
layer_op
][mem_lv].inst_data_trans_window.rd_out_to_low
""" wr_in_by_high & rd_out_to_high """
if self.double_buffer_true[layer_op][mem_lv + 1]:
wr_in_by_high_allowed = self.mapping_int.unit_mem_data_movement[
layer_op
][mem_lv].data_trans_period.wr_in_by_high
rd_out_to_high_allowed = self.mapping_int.unit_mem_data_movement[
layer_op
][mem_lv].data_trans_period.rd_out_to_high
else:
wr_in_by_high_allowed = self.mapping_int.unit_mem_data_movement[
layer_op
][mem_lv].inst_data_trans_window.wr_in_by_high
rd_out_to_high_allowed = self.mapping_int.unit_mem_data_movement[
layer_op
][mem_lv].inst_data_trans_window.rd_out_to_high
""" All """
updating_window = FourWayDataMoving(
rd_out_to_low_allowed,
wr_in_by_low_allowed,
rd_out_to_high_allowed,
wr_in_by_high_allowed,
)
allowed_mem_updat_cycle[layer_op].append(updating_window)
""" ======================================allowed_mem_updating_cycle(above)===================================== """
""" =========================================real_data_trans_cycle(below)======================================== """
""" wr_in_by_low """
data_precision = self.mapping_int.unit_mem_data_movement[layer_op][
mem_lv
].data_precision.wr_in_by_low
data_trans_amount = self.mapping_int.unit_mem_data_movement[layer_op][
mem_lv
].data_trans_amount_per_period.wr_in_by_low
mem_bw = self.mem_w_bw_dict[mem_op][mem_lv]
wr_in_by_low_real = ceil(data_trans_amount * data_precision / mem_bw)
""" rd_out_to_low """
data_precision = self.mapping_int.unit_mem_data_movement[layer_op][
mem_lv
].data_precision.rd_out_to_low
data_trans_amount = self.mapping_int.unit_mem_data_movement[layer_op][
mem_lv
].data_trans_amount_per_period.rd_out_to_low
mem_bw = self.mem_r_bw_dict[mem_op][mem_lv]
rd_out_to_low_real = ceil(data_trans_amount * data_precision / mem_bw)
""" rd_out_to_high """
data_precision = self.mapping_int.unit_mem_data_movement[layer_op][
mem_lv
].data_precision.rd_out_to_high
data_trans_amount = self.mapping_int.unit_mem_data_movement[layer_op][
mem_lv
].data_trans_amount_per_period.rd_out_to_high
mem_bw = self.mem_r_bw_dict[mem_op][mem_lv]
rd_out_to_high_real = ceil(data_trans_amount * data_precision / mem_bw)
""" wr_in_by_high """
data_precision = self.mapping_int.unit_mem_data_movement[layer_op][
mem_lv
].data_precision.wr_in_by_high
data_trans_amount = self.mapping_int.unit_mem_data_movement[layer_op][
mem_lv
].data_trans_amount_per_period.wr_in_by_high
mem_bw = self.mem_w_bw_dict[mem_op][mem_lv]
wr_in_by_high_real = ceil(data_trans_amount * data_precision / mem_bw)
""" All """
real_data_trans = FourWayDataMoving(
rd_out_to_low_real,
wr_in_by_low_real,
rd_out_to_high_real,
wr_in_by_high_real,
)
real_data_trans_cycle[layer_op].append(real_data_trans)
""" =========================================real_data_trans_cycle(above)======================================= """
self.allowed_mem_updat_cycle = allowed_mem_updat_cycle
self.real_data_trans_cycle = real_data_trans_cycle
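    # Hedged example of the real-cycle formula used above (assumed numbers):
    # moving 64 data elements of 8-bit precision through a 128-bit-wide
    # memory port takes
    #   ceil(64 * 8 / 128) = 4 cycles.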
## Consider memory sharing and port sharing, combine the data transfer activity
# Step 1: collect port activity per memory instance per physical memory port
# Step 2: calculate SS combine and MUW union parameters per physical memory port
def combine_data_transfer_rate_per_physical_port(self):
# Step 1: collect port activity per memory instance per physical memory port
port_activity_collect = []
for mem_instance in self.mem_level_list:
port_activity_single = {}
port_list = mem_instance.port_list
for port in port_list:
port_activity_single[str(port)] = []
for mem_op, mem_lv, mov_dir in port.served_op_lv_dir:
try:
layer_op = self.mem_op_to_layer_op[mem_op]
                    except KeyError:  # mem_op_to_layer_op might not contain this mem op (e.g. pooling layer)
continue
period_count = getattr(
self.mapping_int.unit_mem_data_movement[layer_op][
mem_lv
].data_trans_period_count,
mov_dir,
)
if period_count == 0:
# skip the inactive data movement activities because they won't impact SS
continue
period = getattr(
self.mapping_int.unit_mem_data_movement[layer_op][
mem_lv
].data_trans_period,
mov_dir,
)
real_cycle = getattr(
self.real_data_trans_cycle[layer_op][mem_lv], mov_dir
)
allowed_cycle = getattr(
self.allowed_mem_updat_cycle[layer_op][mem_lv], mov_dir
)
port_activity = PortActivity(
real_cycle,
allowed_cycle,
period,
period_count,
layer_op,
mem_lv,
mov_dir,
)
port_activity_single[str(port)].append(port_activity)
port_activity_collect.append(port_activity_single)
self.port_activity_collect = port_activity_collect
# Step 2: calculate SS combine and MUW union parameters per physical memory port
SS_comb_collect = [
{port: None for port in mem_ports} for mem_ports in port_activity_collect
]
SS_comb_list = [0]
        # intermediate parameters saved for debugging purposes
MUW_union_collect = [
{port: None for port in mem_ports} for mem_ports in port_activity_collect
]
for idx, mem_ports in enumerate(port_activity_collect):
for port_name, port_activity in mem_ports.items():
if len(port_activity) == 1:
MUW_union_collect[idx][port_name] = port_activity[0].allowed_cycle
SS_comb_collect[idx][port_name] = port_activity[0].SS
SS_comb_list.append(port_activity[0].SS)
elif len(port_activity) != 0:
MUW_union_collect[idx][port_name] = calc_MUW_union(port_activity)
SS_positive_sum = 0
SS_negative_sum = 0
MUW_sum = 0
for port_d in port_activity:
if port_d.SS > 0:
SS_positive_sum += port_d.SS
else:
SS_negative_sum += port_d.SS
MUW_sum += port_d.MUW
SS_comb = SS_positive_sum + max(
0, SS_negative_sum + MUW_sum - MUW_union_collect[idx][port_name]
)
SS_comb_collect[idx][port_name] = SS_comb
SS_comb_list.append(SS_comb)
self.MUW_union_collect = MUW_union_collect
self.SS_comb_collect = SS_comb_collect
# Assuming all the memory ports can work in parallel
self.SS_comb = max(SS_comb_list)
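        # Worked sketch of the SS combination above (hypothetical port serving
        # two activities): SS = [+10, -30], MUW = [40, 50], MUW_union = 70 gives
        #   SS_comb = 10 + max(0, (-30) + (40 + 50) - 70) = 10 + max(0, -10) = 10
        # i.e. slack can only absorb stall to the extent that the memory
        # updating windows of the activities actually overlap (the MUW-union term).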
## Calculate the initial/final data loading/off-loading cycle by separating out
# the first-time input operands' / the last-time output operand's data movement
# on corresponding ports.
def calc_data_loading_offloading_latency(self):
# Collect ports' initial data-loading and final data-offloading activities
data_loading_per_mem_inst = []
data_loading_cc_per_op = {op: {} for op in self.layer.input_operands}
data_offloading_per_mem_inst = []
data_offloading_cc_per_op = {}
for mem_inst_idx, mem_instance in enumerate(self.mem_level_list):
data_loading_single = {}
data_offloading_single = {}
port_list = mem_instance.port_list
for port in port_list:
data_loading_single[str(port)] = []
data_offloading_single[str(port)] = []
served_operands = set(
s[0] for s in port.served_op_lv_dir if s[0] in ["I1", "I2"]
)
port_is_shared_by_two_input_operands = len(served_operands) > 1
for mem_op, mem_lv, mov_dir in port.served_op_lv_dir:
try:
layer_op = self.mem_op_to_layer_op[mem_op]
                    except KeyError:  # mem_op_to_layer_op might not contain this mem op (e.g. pooling layer)
continue
period_count = getattr(
self.mapping_int.unit_mem_data_movement[layer_op][
mem_lv
].data_trans_period_count,
mov_dir,
)
if period_count == 0:
# skip for the inactive data movement
continue
if mem_op in ["I1", "I2"]:
real_cycle = getattr(
self.real_data_trans_cycle[layer_op][mem_lv], mov_dir
)
data_in_charge = getattr(
self.mapping_int.unit_mem_data_movement[layer_op][
mem_lv
].data_trans_amount_per_period,
mov_dir,
) * getattr(
self.mapping_int.unit_mem_data_movement[layer_op][
mem_lv
].data_precision,
mov_dir,
)
if mov_dir[:2] == "rd":
mem_bw = self.mem_r_bw_dict[mem_op][mem_lv]
else:
mem_bw = self.mem_w_bw_dict[mem_op][mem_lv]
port_activity = PortBeginOrEndActivity(
real_cycle,
data_in_charge,
mem_bw,
layer_op,
mem_lv,
mov_dir,
)
data_loading_single[str(port)].append(port_activity)
data_loading_cc_per_op[layer_op][
layer_op + str(mem_lv) + "_" + mov_dir
] = (real_cycle, port_is_shared_by_two_input_operands)
else:
if mov_dir in ["rd_out_to_low", "wr_in_by_high"]:
# don't consider partial sum flowing in the final data off-loading stage
continue
real_cycle = getattr(
self.real_data_trans_cycle[layer_op][mem_lv], mov_dir
)
data_in_charge = getattr(
self.mapping_int.unit_mem_data_movement[layer_op][
mem_lv
].data_trans_amount_per_period,
mov_dir,
) * getattr(
self.mapping_int.unit_mem_data_movement[layer_op][
mem_lv
].data_precision,
mov_dir,
)
if mov_dir[:2] == "rd":
mem_bw = self.mem_r_bw_dict[mem_op][mem_lv]
else:
mem_bw = self.mem_w_bw_dict[mem_op][mem_lv]
port_activity = PortBeginOrEndActivity(
real_cycle,
data_in_charge,
mem_bw,
layer_op,
mem_lv,
mov_dir,
)
data_offloading_single[str(port)].append(port_activity)
data_offloading_cc_per_op[
layer_op + str(mem_lv) + "_" + mov_dir
] = real_cycle
data_loading_per_mem_inst.append(data_loading_single)
data_offloading_per_mem_inst.append(data_offloading_single)
self.data_loading_per_mem_inst = data_loading_per_mem_inst
self.data_loading_cc_per_op = data_loading_cc_per_op
self.data_offloading_per_mem_inst = data_offloading_per_mem_inst
self.data_offloading_per_op = data_offloading_cc_per_op
# Combine ports' initial data-loading activities to get the data loading cycle amount
data_loading_cc_pair_combined_per_op = {
op: [] for op in self.layer.input_operands
}
data_loading_individual_part = {op: 0 for op in self.layer.input_operands}
data_loading_half_shared_part = {op: 0 for op in self.layer.input_operands}
data_loading_shared_part = {op: 0 for op in self.layer.input_operands}
for layer_op in self.layer.input_operands:
for mem_lv in range(self.active_mem_level[layer_op] - 1):
elem1 = data_loading_cc_per_op[layer_op][
layer_op + str(mem_lv) + "_" + "wr_in_by_high"
]
elem2 = data_loading_cc_per_op[layer_op][
layer_op + str(mem_lv + 1) + "_" + "rd_out_to_low"
]
completely_shared = elem1[1] and elem2[1]
completely_separate = not (elem1[1]) and not (elem2[1])
longest_loading_cc = max(elem1[0], elem2[0])
# for the ports that serve the same data movement purpose, take the longest data loading cycle
data_loading_cc_pair_combined = longest_loading_cc
data_loading_cc_pair_combined_per_op[layer_op].append(
data_loading_cc_pair_combined
)
if completely_separate:
data_loading_individual_part[layer_op] += longest_loading_cc
elif completely_shared:
data_loading_shared_part[layer_op] += longest_loading_cc
else:
# the data transfer link between two memory levels is half-shared,
                # i.e. on one memory side, the port is shared, while on the other memory side,
# there are different memories with separate ports
data_loading_half_shared_part[layer_op] = longest_loading_cc
if len(self.layer.input_operands) == 1:
data_loading_cycle = data_loading_individual_part[
self.layer.input_operands[0]
]
else:
op1 = self.layer.input_operands[0]
op2 = self.layer.input_operands[1]
possible1 = data_loading_shared_part[op1] + max(
data_loading_shared_part[op2]
+ data_loading_half_shared_part[op2]
+ data_loading_individual_part[op2],
data_loading_half_shared_part[op1] + data_loading_individual_part[op1],
)
possible2 = data_loading_shared_part[op2] + max(
data_loading_shared_part[op1]
+ data_loading_half_shared_part[op1]
+ data_loading_individual_part[op1],
data_loading_half_shared_part[op2] + data_loading_individual_part[op2],
)
data_loading_cycle = min(possible1, possible2)
self.data_loading_cc_pair_combined_per_op = data_loading_cc_pair_combined_per_op
self.data_loading_individual_part = data_loading_individual_part
self.data_loading_half_shared_part = data_loading_half_shared_part
self.data_loading_shared_part = data_loading_shared_part
self.data_loading_cycle = data_loading_cycle
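        # Hedged numeric sketch of the two-input-operand combination above
        # (assumed cycle counts): with (shared, half_shared, individual) parts
        # of op1 = (10, 5, 20) and op2 = (10, 0, 30):
        #   possible1 = 10 + max(10 + 0 + 30, 5 + 20) = 50
        #   possible2 = 10 + max(10 + 5 + 20, 0 + 30) = 45
        #   data_loading_cycle = min(50, 45) = 45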
# Combine ports' final data-offloading activities to get the data offloading cycle amount
# TODO Only considered the worst case for now
# (assumed that all the ports are working in series during the final data off-loading phase)
data_offloading_cc_pair_combined = []
layer_op = self.layer.output_operand
for mem_lv in range(self.active_mem_level[layer_op] - 1):
elem1 = data_offloading_cc_per_op[
layer_op + str(mem_lv) + "_" + "rd_out_to_high"
]
elem2 = data_offloading_cc_per_op[
layer_op + str(mem_lv + 1) + "_" + "wr_in_by_low"
]
longest_offloading_cc = max(elem1, elem2)
            # for the ports that serve the same data movement purpose, take the longest data off-loading cycle
data_offloading_cc_pair_combined.append(longest_offloading_cc)
data_offloading_cycle = sum(data_offloading_cc_pair_combined)
self.data_offloading_cc_pair_combined = data_offloading_cc_pair_combined
self.data_offloading_cycle = data_offloading_cycle
    ## This function integrates the previously calculated SS_comb, data loading and off-loading cycles to get the overall latency
def calc_overall_latency(self):
# the ideal cycle count assuming the MAC array is 100% utilized
ideal_cycle = ceil(
self.layer.total_MAC_count
/ self.accelerator.get_core(self.core_id).operational_array.total_unit_count
)
# the ideal temporal cycle count given the spatial mapping (the spatial mapping can be non-ideal)
ideal_temporal_cycle = self.mapping_int.temporal_mapping.total_cycle
MAC_spatial_utilization = ideal_cycle / ideal_temporal_cycle
# Total latency without the initial data loading and the final data off-loading
latency_total0 = ideal_temporal_cycle + self.SS_comb
MAC_utilization0 = ideal_cycle / latency_total0
# Total latency with the initial data loading, but without the final data off-loading
latency_total1 = ideal_temporal_cycle + self.SS_comb + self.data_loading_cycle
MAC_utilization1 = ideal_cycle / latency_total1
# Total latency with both the initial data loading and the final data off-loading
latency_total2 = (
ideal_temporal_cycle
+ self.SS_comb
+ self.data_loading_cycle
+ self.data_offloading_cycle
)
MAC_utilization2 = ideal_cycle / latency_total2
self.ideal_cycle = ideal_cycle
self.ideal_temporal_cycle = ideal_temporal_cycle
self.MAC_spatial_utilization = MAC_spatial_utilization
self.latency_total0 = latency_total0
self.latency_total1 = latency_total1
self.latency_total2 = latency_total2
self.MAC_utilization0 = MAC_utilization0
self.MAC_utilization1 = MAC_utilization1
self.MAC_utilization2 = MAC_utilization2
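        # Illustrative relation between the three totals above (assumed
        # values): ideal_temporal_cycle = 1000, SS_comb = 100,
        # data_loading_cycle = 50, data_offloading_cycle = 20 gives
        #   latency_total0 = 1100, latency_total1 = 1150, latency_total2 = 1170
        # with MAC_utilization shrinking accordingly (ideal_cycle / total).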
def __add__(self, other):
sum = pickle_deepcopy(self)
## Energy
sum.MAC_energy += other.MAC_energy
sum.mem_energy += other.mem_energy
for op in sum.energy_breakdown.keys():
if op in other.energy_breakdown.keys():
l = []
for i in range(
min(len(self.energy_breakdown[op]), len(other.energy_breakdown[op]))
):
l.append(
self.energy_breakdown[op][i] + other.energy_breakdown[op][i]
)
i = min(len(self.energy_breakdown[op]), len(other.energy_breakdown[op]))
l += self.energy_breakdown[op][i:]
l += other.energy_breakdown[op][i:]
sum.energy_breakdown[op] = l
for op in sum.energy_breakdown_further.keys():
if op in other.energy_breakdown_further.keys():
l = []
for i in range(
min(
len(self.energy_breakdown_further[op]),
len(other.energy_breakdown_further[op]),
)
):
l.append(
self.energy_breakdown_further[op][i]
+ other.energy_breakdown_further[op][i]
)
i = min(
len(self.energy_breakdown_further[op]),
len(other.energy_breakdown_further[op]),
)
l += self.energy_breakdown_further[op][i:]
l += other.energy_breakdown_further[op][i:]
sum.energy_breakdown_further[op] = l
        # Get all the operands from other that are not in self and add them to the energy breakdown as well
op_diff = set(other.energy_breakdown.keys()) - set(self.energy_breakdown.keys())
for op in op_diff:
sum.energy_breakdown[op] = other.energy_breakdown[op]
sum.energy_breakdown_further[op] = other.energy_breakdown_further[op]
sum.energy_total += other.energy_total
## Memory access
for op in sum.memory_word_access.keys():
if op in other.memory_word_access.keys():
l = []
for i in range(
min(
len(self.memory_word_access[op]),
len(other.memory_word_access[op]),
)
):
l.append(
self.memory_word_access[op][i] + other.memory_word_access[op][i]
)
i = min(
len(self.memory_word_access[op]), len(other.memory_word_access[op])
)
l += self.memory_word_access[op][i:]
l += other.memory_word_access[op][i:]
sum.memory_word_access[op] = l
for op in op_diff:
sum.memory_word_access[op] = other.memory_word_access[op]
## Latency
sum.data_loading_cycle += other.data_loading_cycle
sum.data_offloading_cycle += other.data_offloading_cycle
sum.ideal_cycle += other.ideal_cycle
sum.ideal_temporal_cycle += other.ideal_temporal_cycle
sum.latency_total0 += other.latency_total0
sum.latency_total1 += other.latency_total1
sum.latency_total2 += other.latency_total2
## MAC utilization
sum.MAC_spatial_utilization = sum.ideal_cycle / sum.ideal_temporal_cycle
sum.MAC_utilization0 = sum.ideal_cycle / sum.latency_total0
sum.MAC_utilization1 = sum.ideal_cycle / sum.latency_total1
sum.MAC_utilization2 = sum.ideal_cycle / sum.latency_total2
        ## layer
        if type(sum.layer) != list:
            sum.layer = [sum.layer.id]
        if type(other.layer) != list:
            other_layer = [other.layer.id]
        else:
            other_layer = other.layer
        sum.layer += other_layer
        ## core_id
        if type(sum.core_id) != list:
            sum.core_id = [sum.core_id]
        if type(other.core_id) != list:
            other_core_id = [other.core_id]
        else:
            other_core_id = other.core_id
        sum.core_id += other_core_id
## Not addable
func = [
"calc_allowed_and_real_data_transfer_cycle_per_DTL",
"calc_data_loading_offloading_latency",
"calc_double_buffer_flag",
"calc_overall_latency",
"calc_MAC_energy_cost",
"calc_energy",
"calc_latency",
"calc_memory_energy_cost",
"calc_memory_utilization",
"calc_memory_word_access",
"combine_data_transfer_rate_per_physical_port",
"run",
]
add_attr = [
"MAC_energy",
"mem_energy",
"energy_breakdown",
"energy_breakdown_further",
"energy_total",
"memory_word_access",
"data_loading_cycle",
"data_offloading_cycle",
"ideal_cycle",
"ideal_temporal_cycle",
"latency_total0",
"latency_total1",
"latency_total2",
"MAC_spatial_utilization",
"MAC_utilization0",
"MAC_utilization1",
"MAC_utilization2",
"layer",
"core_id",
]
if hasattr(self, "accelerator") and hasattr(other, "accelerator"):
if self.accelerator.name.startswith(other.accelerator.name):
sum.accelerator = other.accelerator
add_attr.append("accelerator")
elif other.accelerator.name.startswith(self.accelerator.name):
add_attr.append("accelerator")
else:
pass
for attr in dir(sum):
if attr not in (func + add_attr) and attr[0] != "_":
delattr(sum, attr)
return sum
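    # Hedged usage sketch (variable names assumed): evaluations of consecutive
    # layers can be accumulated and scaled through __add__ above and __mul__ below:
    #   total_cme = cme_layer0 + cme_layer1  # element-wise summed attributes
    #   batched = total_cme * 8              # workload repeated 8 times
    # Only the attributes listed in add_attr / mul_attr survive; everything
    # else is stripped by the delattr loop.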
def __mul__(self, number):
mul = pickle_deepcopy(self)
# Energy
mul.MAC_energy *= number
mul.mem_energy *= number
mul.energy_breakdown = {
op: [
mul.energy_breakdown[op][i] * number
for i in range(len(mul.energy_breakdown[op]))
]
for op in mul.energy_breakdown.keys()
}
mul.energy_breakdown_further = {
op: [
mul.energy_breakdown_further[op][i] * number
for i in range(len(mul.energy_breakdown_further[op]))
]
for op in mul.energy_breakdown_further.keys()
}
mul.energy_total *= number
# Memory access
mul.memory_word_access = {
op: [
mul.memory_word_access[op][i] * number
for i in range(len(mul.memory_word_access[op]))
]
for op in mul.memory_word_access.keys()
}
# Latency
mul.data_loading_cycle *= number
mul.data_offloading_cycle *= number
mul.ideal_cycle *= number
mul.ideal_temporal_cycle *= number
mul.latency_total0 *= number
mul.latency_total1 *= number
mul.latency_total2 *= number
# MAC utilization
mul.MAC_spatial_utilization = mul.ideal_cycle / mul.ideal_temporal_cycle
mul.MAC_utilization0 = mul.ideal_cycle / mul.latency_total0
mul.MAC_utilization1 = mul.ideal_cycle / mul.latency_total1
mul.MAC_utilization2 = mul.ideal_cycle / mul.latency_total2
# Not addable
func = [
"calc_allowed_and_real_data_transfer_cycle_per_DTL",
"calc_data_loading_offloading_latency",
"calc_double_buffer_flag",
"calc_overall_latency",
"calc_MAC_energy_cost",
"calc_energy",
"calc_latency",
"calc_memory_energy_cost",
"calc_memory_utilization",
"calc_memory_word_access",
"combine_data_transfer_rate_per_physical_port",
"run",
]
mul_attr = [
"MAC_energy",
"mem_energy",
"energy_breakdown",
"energy_breakdown_further",
"energy_total",
"memory_word_access",
"data_loading_cycle",
"data_offloading_cycle",
"ideal_cycle",
"ideal_temporal_cycle",
"latency_total0",
"latency_total1",
"latency_total2",
"MAC_spatial_utilization",
"MAC_utilization0",
"MAC_utilization1",
"MAC_utilization2",
"layer",
"accelerator",
]
for attr in dir(mul):
if attr not in (func + mul_attr) and attr[0] != "_":
delattr(mul, attr)
return mul | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/cost_model/cost_model.py | cost_model.py |
from math import ceil
from zigzag.classes.io.onnx.parser import Parser
from zigzag.classes.io.onnx.utils import (
get_attribute_ints_with_name,
    get_node_input_output_dimension_shapes,
    get_onnx_tensor_type,
)
from zigzag.classes.workload.layer_node import LayerNode
from zigzag.utils import pickle_deepcopy
import logging
logger = logging.getLogger(__name__)
## Parser for ONNX Conv and QLinearConv nodes into LayerNode.
class ConvParser(Parser):
## The class constructor
# @param node_id
# @param node
# @param nodes_outputs
# @param mapping
    # @param onnx_model
def __init__(self, node_id, node, nodes_outputs, mapping, onnx_model) -> None:
super().__init__(node_id, node, nodes_outputs, mapping, onnx_model)
## Run the parser and return the created LayerNode object
def run(self):
layer_node = self.generate_layer_node_for_conv()
return layer_node
def generate_layer_node_for_conv(self):
## Return the name of the weight input of this node depending on its operator type
# @param node (NodeProto): The node
def get_weight_name(node):
op_type = node.op_type # 'Conv', 'QLinearConv', ...
if op_type == "Conv":
return node.input[1]
elif op_type == "QLinearConv":
return node.input[3]
else:
raise NotImplementedError(
f"Retrieving weight name for onnx node of type {op_type} is not supported."
)
## Return the data type of the input, output and weight tensors of this node.
# @param node
# @param model
def get_input_output_weight_data_type(node, model):
input_name = node.input[0]
output_name = node.output[0]
weight_name = get_weight_name(node)
input_elem_type = get_onnx_tensor_type(input_name, model).elem_type
output_elem_type = get_onnx_tensor_type(output_name, model).elem_type
weight_elem_type = get_onnx_tensor_type(weight_name, model).elem_type
return input_elem_type, output_elem_type, weight_elem_type
## Generate the necessary dictionary items required for the LayerNode creation.
# @param kernel_shape
# @param strides
# @param dilations
# @param groups
    # @param padding
# @param ia_shape
# @param oa_shape
# @param node_mapping
def get_layer_node_input_format(
kernel_shape,
strides,
dilations,
groups,
padding,
ia_shape,
oa_shape,
node_mapping,
):
# convert the data types to precisions based on the onnx definition
# Equation
d = {}
# IMPORTANT: If any of the input loops require padding, they should be defined as the rightmost dimensions in the equation
# This is because we construct the dimensionality order and then add the padding to those last dimensions in the order
d["equation"] = "O[b][g][k][oy][ox]+=W[g][k][c][fy][fx]*I[b][g][c][iy][ix]"
# Get dimension sizes from input parameters
assert (
ia_shape[0] == oa_shape[0]
), "Batch size is different for input and output activations."
B = oa_shape[0]
if B == 0:
B = 1
G = groups
K = ceil(oa_shape[1] / G)
OX = oa_shape[2]
OY = oa_shape[3]
C = ceil(ia_shape[1] / G)
IX = ia_shape[2]
IY = ia_shape[3]
FX = kernel_shape[0]
FY = kernel_shape[1]
d["loop_dim_size"] = {
"B": B,
"K": K,
"G": G,
"OX": OX,
"OY": OY,
"C": C,
"FX": FX,
"FY": FY,
}
d["pr_loop_dim_size"] = {"IX": IX, "IY": IY}
d["dimension_relations"] = [
f"ix={strides[0]}*ox+{dilations[0]}*fx",
f"iy={strides[1]}*oy+{dilations[1]}*fy",
]
d["operand_precision"] = {"O": 16, "O_final": 8, "W": 8, "I": 8}
# d["operand_source"] = {'W': [], 'I': []}
d["constant_operands"] = ["W"]
d["core_allocation"] = node_mapping["core_allocation"]
d["spatial_mapping"] = node_mapping["spatial_mapping"]
d["temporal_ordering"] = node_mapping.get("temporal_ordering", None)
d["memory_operand_links"] = node_mapping["memory_operand_links"]
# Find the previous layer(s) that should be this node's parent(s)
node_inputs = self.node.input
preds = []
for node_input in node_inputs:
for n in self.nodes_outputs:
if node_input in self.nodes_outputs[n]:
preds.append(n)
d["operand_source"] = {"I": preds}
# Add padding information
d["padding"] = {
"IY": (padding[0], padding[2]),
"IX": (padding[1], padding[3]),
}
return d
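        # Worked example of the dimension relations above (assumed attribute
        # values): with strides = [2, 2] and dilations = [1, 1], the relation
        #   ix = 2 * ox + 1 * fx
        # means output pixel ox = 5 combined with filter tap fx = 2 reads
        # input column ix = 12.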
attrs = self.node.attribute
# Find kernel shape in attrs
kernel_shape = get_attribute_ints_with_name("kernel_shape", attrs, default=None)
# Find strides in attrs
strides = get_attribute_ints_with_name("strides", attrs, default=[1, 1])
# Find dilation rate in attrs
dilations = get_attribute_ints_with_name("dilations", attrs, default=[1, 1])
# Find number of groups in attrs
groups = get_attribute_ints_with_name("group", attrs, default=1)
# Find padding in attrs
padding = get_attribute_ints_with_name("pads", attrs, default=[0, 0, 0, 0])
# Get the input and output activation shapes
ia_dimension_shape, oa_dimension_shape = get_node_input_output_dimension_shapes(
self.node, self.onnx_model
)
# Get the input and output activation and weight data type (precision)
ia_data_type, oa_data_type, w_data_type = get_input_output_weight_data_type(
self.node, self.onnx_model
)
# Get the hw mapping of this node.
if self.node.name in self.mapping:
node_mapping = self.mapping[self.node.name]
else:
try:
node_mapping = self.mapping["default"]
            except KeyError:
raise ValueError(
f"There is no mapping provided for node {self.node.name}, nor a default one."
)
# Take a deepcopy of the mapping, otherwise it will be changed for other layers if using default
node_mapping = pickle_deepcopy(node_mapping)
node_attrs = get_layer_node_input_format(
kernel_shape,
strides,
dilations,
groups,
padding,
ia_dimension_shape,
oa_dimension_shape,
node_mapping,
)
node_obj = LayerNode(
self.node_id,
node_attrs,
node_name=self.node.name,
type=self.node.op_type.lower(),
)
logger.info(f"Parsed Conv node {self.node.name}")
return node_obj | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/io/onnx/conv.py | conv.py |
import enum
import importlib
import logging
from dataclasses import dataclass
from enum import auto
from os import path
from typing import List
import onnx
from onnx import AttributeProto
logger = logging.getLogger(__name__)
## Parse the mapping dictionary from the given module path.
# @param mapping_path
def parse_mapping_from_path(mapping_path):
# Sanity check on mapping_path
if mapping_path is None:
# Update the mapping_path to the default mapping file
if path.exists("inputs/examples/mapping/default.py"):
mapping_path = "zigzag.inputs.examples.mapping.default"
else:
raise ValueError(
"No mapping path/dict provided, and default was not found."
)
global module
module = importlib.import_module(mapping_path)
mapping = module.mapping
if "default" in mapping:
default_present = "\u2705"
else:
default_present = "\u274C"
logger.debug(
f"Parsed mapping with {len(mapping)} different entries. Default: {default_present}."
)
return mapping
def parse_onnx_model_from_path(onnx_model_path):
return onnx.load(onnx_model_path, load_external_data=False)
## Retrieves the attrs[name_idx].ints from attrs.
# If attrs[name_idx] is of type INTS, attrs[name_idx].ints is returned.
# If attrs[name_idx] is of type INT, attrs[name_idx].i is returned.
# If name does not exist in attrs, the default provided by the caller is used.
# If the caller doesn't supply a default, an error is thrown.
# @param name
# @param attrs
# @param default
def get_attribute_ints_with_name(name, attrs, default=None):
attrs_names = [attr.name for attr in attrs]
try:
name_idx = attrs_names.index(name)
attr_type = attrs[name_idx].type
if attr_type == AttributeProto.AttributeType.INT:
return attrs[name_idx].i
elif attr_type == AttributeProto.AttributeType.INTS:
return attrs[name_idx].ints
else:
raise NotImplementedError(
f"Attribute extraction of type {attr_type} not supported."
)
except ValueError:
if default is not None:
return default
else:
raise ValueError(
f"attrs has no attribute called {name} and no default was given. Names = {attrs_names}."
)
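# Hedged usage sketch (hypothetical node): for a Conv node,
#   get_attribute_ints_with_name("strides", node.attribute, default=[1, 1])
# returns the .ints list for an INTS attribute, the scalar .i for an INT
# attribute, and the caller-supplied default when the name is absent.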
## Category of an ONNX tensor: graph input, graph output, hidden (intermediate value) or constant (initializer).
class OnnxTensorCategory(enum.Enum):
Input = auto()
Output = auto()
Hidden = auto()
Constant = auto()
@property
def is_output(self):
return self == OnnxTensorCategory.Output
@property
def is_input(self):
return self == OnnxTensorCategory.Input
@property
def is_hidden(self):
return self == OnnxTensorCategory.Hidden
@property
def is_constant(self):
return self == OnnxTensorCategory.Constant
@dataclass
## Shape, element type and category of an ONNX tensor.
class OnnxTensorType:
shape: List[int]
elem_type: int
category: OnnxTensorCategory
@staticmethod
def from_tensor_type(tensor_type, category: OnnxTensorCategory):
shape = [d.dim_value for d in tensor_type.shape.dim]
elem_type = tensor_type.elem_type
return OnnxTensorType(shape, elem_type, category)
def get_onnx_tensor_type(name, model):
for input in model.graph.input:
if input.name == name:
return OnnxTensorType.from_tensor_type(input.type.tensor_type, OnnxTensorCategory.Input)
for output in model.graph.output:
if output.name == name:
return OnnxTensorType.from_tensor_type(output.type.tensor_type, OnnxTensorCategory.Output)
for value_info in model.graph.value_info:
if value_info.name == name:
return OnnxTensorType.from_tensor_type(value_info.type.tensor_type, OnnxTensorCategory.Hidden)
for init in model.graph.initializer:
if init.name == name:
# initializers are represented a bit differently from other tensors
return OnnxTensorType(list(init.dims), init.data_type, OnnxTensorCategory.Constant)
    raise KeyError(
        f"Could not find type for value {name} in model. "
        f"Make sure you are loading in an inferred model, "
        f"see https://github.com/onnx/onnx/blob/main/docs/PythonAPIOverview.md#running-shape-inference-on-an-onnx-model"
    )
def get_node_input_output_dimension_shapes(node, model):
# assumed it is the first input, don't see a way to otherwise know
input_name = node.input[0]
input_shape = get_onnx_tensor_type(input_name, model).shape
output_name = node.output[0]
output_shape = get_onnx_tensor_type(output_name, model).shape
return input_shape, output_shape | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/io/onnx/utils.py | utils.py |
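# Hedged usage sketch (tensor name and shape assumed): after ONNX shape
# inference, a hidden activation is resolved through graph.value_info:
#   t = get_onnx_tensor_type("conv1_output", model)
#   t.shape, t.category.is_hidden  # e.g. -> ([1, 64, 56, 56], True)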
from zigzag.classes.io.onnx.parser import Parser
from zigzag.classes.io.onnx.utils import (
get_node_input_output_dimension_shapes,
get_attribute_ints_with_name,
)
from zigzag.classes.workload.layer_node import LayerNode
import logging
logger = logging.getLogger(__name__)
## Parses an ONNX Gemm operator into a LayerNode
class GemmParser(Parser):
    ## The class constructor
# @param node_id
# @param node
# @param nodes_outputs
# @param mapping
    # @param onnx_model
def __init__(self, node_id, node, nodes_outputs, mapping, onnx_model) -> None:
super().__init__(node_id, node, nodes_outputs, mapping, onnx_model)
## Run the parser
def run(self):
layer_node = self.generate_layer_node_for_gemm()
return layer_node
def generate_layer_node_for_gemm(self):
## Generate the necessary dictionary items required for the Node creation.
def get_layer_node_input_format(B, C, K, node_mapping, nodes_outputs):
# convert the data types to precisions based on the onnx definition
# Equation
d = {}
d["equation"] = "O[b][k]+=W[k][c]*I[b][c]"
            # Get dimension sizes from input parameters
            d["loop_dim_size"] = {"K": K, "C": C, "B": B}
d["dimension_relations"] = []
d["operand_precision"] = {"O": 16, "O_final": 8, "W": 8, "I": 8}
d["operand_source"] = {"W": [], "I": []}
d["constant_operands"] = ["W"]
d["core_allocation"] = node_mapping["core_allocation"]
d["spatial_mapping"] = node_mapping["spatial_mapping"]
d["temporal_ordering"] = node_mapping.get("temporal_ordering", None)
d["memory_operand_links"] = {"O": "O", "W": "I2", "I": "I1"}
# Find the previous layer(s) that should be this node's parent(s)
node_inputs = self.node.input
preds = []
for node_input in node_inputs:
for n in nodes_outputs:
if node_input in nodes_outputs[n]:
preds.append(n)
d["operand_source"] = {"I": preds}
return d
ia_dimension_shape, oa_dimension_shape = get_node_input_output_dimension_shapes(
self.node, self.onnx_model
)
# The Gemm node includes flags for transpose of both of its inputs.
# If the first input is transposed, we need to transpose its shape here.
transA = get_attribute_ints_with_name("transA", self.node.attribute, default=0)
if transA:
assert len(ia_dimension_shape) == 2
ia_dimension_shape = (ia_dimension_shape[1], ia_dimension_shape[0])
# If the input activations are empty (which can happen if there is a shape operator in the path)
# we try to extract the weights from the model graph initializer to get the correct input activation size
if not ia_dimension_shape:
weight_name = self.node.input[1]
initializer_names = [i.name for i in self.onnx_model.graph.initializer]
weight_name_index = initializer_names.index(weight_name)
# Get the weight dimensions
weights = self.onnx_model.graph.initializer[weight_name_index]
weight_dims = list(weights.dims)
assert (
len(weight_dims) == 2
), f"There are {len(weight_dims)} weight dimensions for Gemm node {self.node.name}"
# Check if the weights are transposed
transB = get_attribute_ints_with_name(
"transB", self.node.attribute, default=0
)
if transB:
weight_dims = [weight_dims[1], weight_dims[0]]
assert (
len(oa_dimension_shape) == 2
), "Can't infer ia_dimension_shape if oa_dimension_shape is also not known."
B = oa_dimension_shape[0]
C = weight_dims[0]
ia_dimension_shape = [B, C]
assert (
len(ia_dimension_shape) == len(oa_dimension_shape) == 2
) # First element is batch size, second is input/output channel
assert (
ia_dimension_shape[0] == oa_dimension_shape[0]
) # Batch size should be the same for input and output
# If the batch size is 0, we discard it by setting it to 1 internally inside ZigZag
batch_size = ia_dimension_shape[0]
if batch_size == 0:
B = 1
else:
B = batch_size
C = ia_dimension_shape[1]
K = oa_dimension_shape[1]
# Get the hw mapping of this node.
if self.node.name in self.mapping:
node_mapping = self.mapping[self.node.name]
else:
try:
node_mapping = self.mapping["default"]
            except KeyError:
raise ValueError(
f"There is no mapping provided for node {self.node.name}, nor a default one."
)
node_attrs = get_layer_node_input_format(
B, C, K, node_mapping, self.nodes_outputs
)
node_obj = LayerNode(
self.node_id,
node_attrs,
node_name=self.node.name,
type=self.node.op_type.lower(),
)
logger.info(f"Parsed Gemm node {self.node.name}")
return node_obj | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/io/onnx/gemm.py | gemm.py |
from onnx import ModelProto
from zigzag.classes.io.onnx.default import DefaultNodeParser
from zigzag.classes.io.onnx.gemm import GemmParser
from zigzag.classes.io.onnx.matmul import MatMulParser
from zigzag.classes.io.onnx.conv import ConvParser
from zigzag.classes.io.onnx.utils import (
parse_mapping_from_path,
parse_onnx_model_from_path,
)
from zigzag.classes.workload.onnx_workload import ONNXWorkload
import logging
logger = logging.getLogger(__name__)
## Parse the ONNX model into a workload.
class ONNXModelParser:
## The class constructor
    # @param onnx_model
# @param mapping_path
def __init__(self, onnx_model, mapping_path) -> None:
# Sanity checks on given onnx_model
if isinstance(onnx_model, str):
self.onnx_model_path = onnx_model
self.onnx_model = None
elif isinstance(onnx_model, ModelProto):
self.onnx_model_path = None
self.onnx_model = onnx_model
else:
raise TypeError(f"Given onnx_model is of type {type(onnx_model)}.")
# Sanity checks on given mapping
if isinstance(mapping_path, str):
self.mapping_path = mapping_path
self.mapping = None
elif isinstance(mapping_path, dict):
self.mapping_path = None
self.mapping = mapping_path
elif mapping_path is None:
self.mapping_path = None
self.mapping = None
else:
raise TypeError(f"Given mapping is of type {type(mapping_path)}.")
self.workload = None
## Run the parser
# - parse the onnx_model_path into an onnx model
# - parse the mapping_path into a mapping dict
# - iterate through the onnx model and generate the workload consisting of LayerNodes and DummyNodes
def run(self):
if not self.onnx_model:
onnx_model = parse_onnx_model_from_path(self.onnx_model_path)
self.onnx_model = onnx_model
if not self.mapping:
mapping = parse_mapping_from_path(self.mapping_path)
self.mapping = mapping
workload = self.parse_workload_from_onnx_model_and_mapping()
self.workload = workload
## Converts an onnx model into a workload object.
    # We scan the model for all supported layers (Conv, QLinearConv, MatMul, Gemm) and set up a LayerNode object for each of those using the mapping.
# Then we combine the layers into a workload graph.
#
    # If the model isn't in the format with external data, it will be slow to manipulate, so it is better to work with raw models with external data.
# The line below accomplishes this.
# onnx.save_model(model, 'model_external.onnx', save_as_external_data=True, all_tensors_to_one_file=True, location='model_external_raw_data', size_threshold=1024, convert_attribute=False)
#
    # In the future, assuming we have a model saved with external data, the code below has to be executed
    # if the model isn't inferred yet
#
# This approach is faster for large models because the raw model is used (w/o the external data)
# if model is not inferred:
# onnx.shape_inference.infer_shapes_path('path/to/the/model.onnx') # This will save the inferred model to the same file
# model = onnx.load('path/to/the/model.onnx') # reload the inferred model
#
    # Saves for each node_id the input and output tensor names
def parse_workload_from_onnx_model_and_mapping(self):
nodes_inputs = {}
nodes_outputs = {}
# Workload Graph
workload = ONNXWorkload()
for node_id, node in enumerate(self.onnx_model.graph.node):
nodes_inputs[node_id] = node.input
nodes_outputs[node_id] = node.output
if node.op_type in ["QLinearConv", "Conv"]:
parser = ConvParser(
node_id, node, nodes_outputs, self.mapping, self.onnx_model
)
elif node.op_type in ["MatMul"]:
parser = MatMulParser(
node_id, node, nodes_outputs, self.mapping, self.onnx_model
)
elif node.op_type in ["Gemm"]:
parser = GemmParser(
node_id, node, nodes_outputs, self.mapping, self.onnx_model
)
else: # it is not a convolutional node, so create a DummyNode
parser = DefaultNodeParser(node_id, node, nodes_outputs)
node_obj = parser.run()
# Add the node_obj to the ONNXWorkload
workload.add(node_id, node_obj)
logger.info(
f"Created ONNXWorkload graph with {workload.number_of_nodes()} nodes and {workload.number_of_edges()} edges."
)
return workload
def get_onnx_model(self):
return self.onnx_model
def get_mapping(self):
return self.mapping
def get_workload(self):
return self.workload | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/io/onnx/model.py | model.py |
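# Hedged usage sketch (paths assumed): parsing an inferred ONNX model together
# with a mapping module path into a workload graph:
#   parser = ONNXModelParser("my_model_inferred.onnx",
#                            "zigzag.inputs.examples.mapping.default")
#   parser.run()
#   workload = parser.get_workload()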
import onnx
from zigzag.classes.io.onnx.parser import Parser
from zigzag.classes.io.onnx.utils import get_node_input_output_dimension_shapes
from zigzag.classes.workload.layer_node import LayerNode
import logging
logger = logging.getLogger(__name__)
## Parses an ONNX MatMul operator into a LayerNode
class MatMulParser(Parser):
## The class constructor
# @param node_id
# @param node
# @param nodes_outputs
# @param mapping
# @param onnx_model
def __init__(self, node_id, node, nodes_outputs, mapping, onnx_model) -> None:
super().__init__(node_id, node, nodes_outputs, mapping, onnx_model)
## Run the parser
def run(self):
layer_node = self.generate_layer_node_for_matmul()
return layer_node
def generate_layer_node_for_matmul(self):
## Generate the necessary dictionary items required for the Node creation.
def get_layer_node_input_format(B, C, K, node_mapping, nodes_outputs):
# convert the data types to precisions based on the onnx definition
# Equation
d = {}
d["equation"] = "O[b][k]+=B[k][c]*A[b][c]"
            # Get dimension sizes from input parameters
            # (batch size B is not to be confused with operand 'B', which holds the weights)
            d["loop_dim_size"] = {"K": K, "C": C, "B": B}
d["dimension_relations"] = []
d["operand_precision"] = {"O": 16, "O_final": 8, "B": 8, "A": 8}
d["operand_source"] = {"B": [], "A": []}
d["constant_operands"] = ["B"]
d["core_allocation"] = node_mapping["core_allocation"]
d["spatial_mapping"] = node_mapping["spatial_mapping"]
d["temporal_ordering"] = node_mapping.get("temporal_ordering", None)
d["memory_operand_links"] = {"O": "O", "B": "I2", "A": "I1"}
# Find the previous layer(s) that should be this node's parent(s)
node_inputs = self.node.input
preds = []
for node_input in node_inputs:
for n in nodes_outputs:
if node_input in nodes_outputs[n]:
preds.append(n)
d["operand_source"] = {"A": preds}
return d
ia_dimension_shape, oa_dimension_shape = get_node_input_output_dimension_shapes(
self.node, self.onnx_model
)
assert (
len(ia_dimension_shape) == len(oa_dimension_shape) == 2
) # First element is batch size, second is input/output channel
assert (
ia_dimension_shape[0] == oa_dimension_shape[0]
) # Batch size should be the same for input and output
# If the batch size is 0, we discard it by setting it to 1 internally inside ZigZag
batch_size = ia_dimension_shape[0]
if batch_size == 0:
B = 1
else:
B = batch_size
C = ia_dimension_shape[1]
K = oa_dimension_shape[1]
# Get the hw mapping of this node.
if self.node.name in self.mapping:
node_mapping = self.mapping[self.node.name]
else:
try:
node_mapping = self.mapping["default"]
            except KeyError:
raise ValueError(
f"There is no mapping provided for node {self.node.name}, nor a default one."
)
node_attrs = get_layer_node_input_format(
B, C, K, node_mapping, self.nodes_outputs
)
node_obj = LayerNode(
self.node_id,
node_attrs,
node_name=self.node.name,
type=self.node.op_type.lower(),
)
logger.info(f"Parsed MatMul node {self.node.name}")
return node_obj | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/io/onnx/matmul.py | matmul.py |
import importlib
from zigzag.classes.hardware.architecture.accelerator import Accelerator
import logging
logger = logging.getLogger(__name__)
## Parse an accelerator module path into an accelerator object
class AcceleratorParser:
## The class constructor
# Initialize the parser by checking if the provided argument is a module path or accelerator object
    # @param accelerator (str or Accelerator): The accelerator module path or Accelerator object
def __init__(self, accelerator) -> None:
if isinstance(accelerator, str):
self.accelerator_path = accelerator
self.accelerator = None
elif isinstance(accelerator, Accelerator):
self.accelerator_path = None
self.accelerator = accelerator
else:
raise TypeError("Given accelerator is nor a module path string or an Accelerator object.")
self.supported_accelerators = {
"ascend": "zigzag.inputs.examples.hardware.Ascend_like",
"edge-tpu": "zigzag.inputs.examples.hardware.Edge_TPU_like",
"eyeriss": "zigzag.inputs.examples.hardware.Eyeriss_like",
"meta-prototype": "zigzag.inputs.examples.hardware.Meta_prototype",
"tesla-npu": "zigzag.inputs.examples.hardware.Tesla_NPU_like",
"tpu": "zigzag.inputs.examples.hardware.TPU_like"
}
def run(self):
if not self.accelerator:
try:
accelerator = self.parse_accelerator_from_path(self.accelerator_path)
except ModuleNotFoundError:
try:
accelerator = self.parse_supported_accelerator(self.accelerator_path)
except KeyError:
raise ValueError(f"Provided accelerator path ({self.accelerator_path}) is not a valid module path, nor a supported standard accelerator. \
Supported standard accelerators = {self.get_supported_accelerators()}")
self.accelerator = accelerator
@staticmethod
## Parse the input accelerator residing in accelerator_path
# @param accelerator_path
def parse_accelerator_from_path(accelerator_path):
global module
module = importlib.import_module(accelerator_path)
accelerator = module.accelerator
logger.info(f"Parsed accelerator with cores {[core.id for core in accelerator.cores]}.")
return accelerator
def parse_supported_accelerator(self, standard_accelerator):
accelerator_path = self.supported_accelerators[standard_accelerator]
return self.parse_accelerator_from_path(accelerator_path)
def get_accelerator(self):
return self.accelerator
def get_supported_accelerators(self):
return list(self.supported_accelerators.keys()) | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/io/accelerator/parser.py | parser.py |
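# Hedged usage sketch: both a full module path and one of the built-in aliases
# above resolve to an Accelerator object:
#   parser = AcceleratorParser("tpu")  # or "zigzag.inputs.examples.hardware.TPU_like"
#   parser.run()
#   accelerator = parser.get_accelerator()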
import yaml
import os
import subprocess
import logging
logger = logging.getLogger(__name__)
## Class that provides the interface between ZigZag and CACTI.
class CactiParser:
## Path of current directory
cacti_path = os.path.dirname(os.path.realpath(__file__))
## Path to cached cacti simulated memories
MEM_POOL_PATH = f"{cacti_path}/cacti_master/example_mem_pool.yaml"
## Path to cacti python script to extract costs
CACTI_TOP_PATH = f"{cacti_path}/cacti_master/cacti_top.py"
## The class constructor
def __init__(self):
pass
## This function checks if the provided memory configuration was already used in the past.
# @param mem_type
# @param size
# @param r_bw
# @param r_port
# @param w_port
# @param rw_port
# @param bank
# @param mem_pool_path Path to cached cacti simulated memories
# @return True The requested memory item has been simulated once.
    # @return False The requested memory item has not been simulated so far.
def item_exists(
self,
mem_type,
size,
r_bw,
r_port,
w_port,
rw_port,
bank,
mem_pool_path=MEM_POOL_PATH,
):
with open(mem_pool_path, "r") as fp:
memory_pool = yaml.full_load(fp)
            if memory_pool is not None:
for instance in memory_pool:
IO_bus_width = int(memory_pool[instance]["IO_bus_width"])
area = memory_pool[instance]["area"]
bank_count = int(memory_pool[instance]["bank_count"])
read_cost = memory_pool[instance]["cost"]["read_word"] * 1000
write_cost = memory_pool[instance]["cost"]["write_word"] * 1000
ex_rd_port = int(memory_pool[instance]["ex_rd_port"])
ex_wr_port = int(memory_pool[instance]["ex_wr_port"])
rd_wr_port = int(memory_pool[instance]["rd_wr_port"])
cache_size = int(memory_pool[instance]["size_bit"])
if (
(size == cache_size)
and (IO_bus_width == r_bw)
and (r_port == ex_rd_port)
and (w_port == ex_wr_port)
and (rw_port == rd_wr_port)
):
return True
return False
## This function simulates a new item by calling CACTI7 based on the provided parameters
# @param mem_type
# @param size
# @param r_bw
# @param r_port
# @param w_port
# @param rw_port
# @param bank
# @param mem_pool_path Path to cached cacti simulated memories
# @param cacti_top_path Path to cacti python script to extract costs
def create_item(
self,
mem_type,
size,
r_bw,
r_port,
w_port,
rw_port,
bank,
mem_pool_path=MEM_POOL_PATH,
cacti_top_path=CACTI_TOP_PATH,
):
# print("No match in Cacti memory pool found!", size, r_bw, r_port, w_port, rw_port, bank)
# os.chdir(f'{CACTI_PATH}/cacti-master/')
p = subprocess.call(
[
"python",
cacti_top_path,
"--mem_type",
str(mem_type),
"--cache_size",
str(int(size / 8)),
"--IO_bus_width",
str(r_bw),
"--ex_rd_port",
str(r_port),
"--ex_wr_port",
str(w_port),
"--rd_wr_port",
str(rw_port),
"--bank_count",
str(bank),
"--mem_pool_path",
str(mem_pool_path),
]
)
if p != 0:
raise ChildProcessError(
f"Cacti subprocess call failed with return value {p}."
)
    ## This function first checks whether the memory with the provided parameters has already been simulated.
    # If it hasn't been simulated yet, it creates a new memory item based on the provided parameters.
# @param mem_type
# @param size
# @param r_bw
# @param r_port
# @param w_port
# @param rw_port
# @param bank
# @param mem_pool_path Path to cached cacti simulated memories
# @param cacti_top_path Path to cacti python script to extract costs
def get_item(
self,
mem_type,
size,
r_bw,
r_port,
w_port,
rw_port,
bank,
mem_pool_path=MEM_POOL_PATH,
cacti_top_path=CACTI_TOP_PATH,
):
if not os.path.exists(cacti_top_path):
raise FileNotFoundError(f"Cacti top file doesn't exist: {cacti_top_path}.")
logger.info(
f"Extracting memory costs with CACTI for size = {size} and r_bw = {r_bw}."
)
if mem_type == "rf":
new_mem_type = "sram"
new_size = int(size * 128)
new_r_bw = int(r_bw)
logger.warning(
f"Type {mem_type} -> {new_mem_type}. Size {size} -> {new_size}. BW {r_bw} -> {new_r_bw}."
)
mem_type = new_mem_type
size = new_size
r_bw = new_r_bw
if not self.item_exists(
mem_type, size, r_bw, r_port, w_port, rw_port, bank, mem_pool_path
):
self.create_item(
mem_type,
size,
r_bw,
r_port,
w_port,
rw_port,
bank,
mem_pool_path,
cacti_top_path,
)
with open(mem_pool_path, "r") as fp:
memory_pool = yaml.full_load(fp)
            if memory_pool is not None:
for instance in memory_pool:
IO_bus_width = int(memory_pool[instance]["IO_bus_width"])
area = memory_pool[instance]["area"]
bank_count = int(memory_pool[instance]["bank_count"])
read_cost = memory_pool[instance]["cost"]["read_word"] * 1000
write_cost = memory_pool[instance]["cost"]["write_word"] * 1000
ex_rd_port = int(memory_pool[instance]["ex_rd_port"])
ex_wr_port = int(memory_pool[instance]["ex_wr_port"])
rd_wr_port = int(memory_pool[instance]["rd_wr_port"])
cache_size = int(memory_pool[instance]["size_bit"])
memory_type = memory_pool[instance]["memory_type"]
if (
(mem_type == memory_type)
and (size == cache_size)
and (IO_bus_width == r_bw)
and (r_port == ex_rd_port)
and (w_port == ex_wr_port)
and (rw_port == rd_wr_port)
):
# print("Memory instance found in Cacti memory pool!", cache_size, IO_bus_width, ex_rd_port, ex_wr_port, rd_wr_port, bank_count, read_cost, write_cost)
return (
cache_size,
IO_bus_width,
IO_bus_width,
read_cost,
write_cost,
area,
bank_count,
ex_rd_port,
ex_wr_port,
rd_wr_port,
)
        # should never be reached
raise ModuleNotFoundError(
f"No match in Cacti memory pool found {size=}, {r_bw=}, {r_port=}, {w_port=}, {rw_port=}, {bank=}"
) | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/cacti/cacti_parser.py | cacti_parser.py |
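# Hedged usage sketch (parameter values assumed): querying the cost of a 1 MiB
# SRAM (size is given in bits) with a 128-bit bus, one read and one write port.
# A miss in the YAML pool triggers a CACTI simulation; a hit is read back directly:
#   parser = CactiParser()
#   (size_bit, r_bw, w_bw, r_cost, w_cost,
#    area, bank, r_port, w_port, rw_port) = parser.get_item(
#       mem_type="sram", size=8 * 1024 * 1024, r_bw=128,
#       r_port=1, w_port=1, rw_port=0, bank=1,
#   )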
import yaml
import os
import argparse
from zigzag.classes.cacti.cacti_master.cacti_config_creator import CactiConfig
parser = argparse.ArgumentParser()
parser.add_argument('--mem_type')
parser.add_argument('--cache_size')
parser.add_argument('--IO_bus_width')
parser.add_argument('--ex_rd_port')
parser.add_argument('--ex_wr_port')
parser.add_argument('--rd_wr_port')
parser.add_argument('--bank_count')
parser.add_argument('--mem_pool_path')
args = parser.parse_args()
mem_pool_path = args.mem_pool_path
cacti_master_path = os.path.dirname(mem_pool_path)
print(f"{cacti_master_path=}")
self_gen_folder_name = 'self_gen'
self_gen_path = os.path.join(cacti_master_path, self_gen_folder_name)
if not os.path.isdir(self_gen_path):
os.mkdir(self_gen_path)
os.system(f'rm -rf {self_gen_path}/*')
C = CactiConfig()
'''Function 1: set default value'''
# C.change_default_value(['technology'], [0.090])
'''Function 2: use default values to run CACTI'''
# C.cacti_auto(['default'], file_path + '/cache.cfg')
'''Function 3: use user-defined + default values to run CACTI'''
# C.cacti_auto(['single', [['technology', 'cache_size'],[0.022, 524288]]], file_path+'/cache.cfg')
'''Function 4: sweep any one variable using the default list & other default value'''
# C.cacti_auto(['sweep', ['IO_bus_width']], file_path+'/cache.cfg')
''' Combining Function 1 & 4 to do multi-variable sweep '''
mem_type = args.mem_type
if mem_type == 'sram':
mem_type = '"ram"'
else:
mem_type == '"main memory"'
cache_size = args.cache_size
IO_bus_width = args.IO_bus_width
ex_rd_port = args.ex_rd_port
ex_wr_port = args.ex_wr_port
rd_wr_port = args.rd_wr_port
bank_count = args.bank_count
technology = 0.090
C.cacti_auto(['single', [['mem_type', 'cache_size', 'IO_bus_width', 'ex_rd_port', 'ex_wr_port', 'rd_wr_port', 'technology'],[mem_type, cache_size, IO_bus_width, ex_rd_port, ex_wr_port, rd_wr_port, technology]]], cacti_master_path, f'{self_gen_path}/cache.cfg')
result = {}
with open('%s/cache.cfg.out' % self_gen_path, 'r') as fp:
raw_result = fp.readlines()
for ii, each_line in enumerate(raw_result):
if ii == 0:
attribute_list = each_line.split(',')
for each_attribute in attribute_list:
result[each_attribute] = []
else:
for jj, each_value in enumerate(each_line.split(',')):
try:
result[attribute_list[jj]].append(float(each_value))
            except (ValueError, IndexError):  # skip fields that cannot be parsed as floats
pass
for i in range(len(result[' Capacity (bytes)'])):
size_byte = result[' Capacity (bytes)'][i]
area = result[' Area (mm2)'][i]
read_word = result[' Dynamic read energy (nJ)'][i]
write_word = result[' Dynamic write energy (nJ)'][i]
mem_bw = result[' Output width (bits)'][i]
utilization_rate = 0.7
if mem_type == '"ram"':
mem_type = 'sram'
else:
mem_type = 'dram'
mem_name = str(int(size_byte)) + '_Byte_' + str(int(mem_bw)) + '_BW_' + str(ex_rd_port) + '_' + str(ex_wr_port) + '_' + str(rd_wr_port)
new_result = {'%s' % mem_name: {
'size_byte': int(size_byte),
'size_bit': int(size_byte * 8),
'area': area*2,
'cost': {'read_word': read_word, 'write_word': write_word},
'IO_bus_width': int(mem_bw),
'ex_rd_port': ex_rd_port,
'ex_wr_port': ex_wr_port,
'rd_wr_port': rd_wr_port,
'bank_count': 1,
'memory_type': mem_type
}}
with open(mem_pool_path, 'a+') as fp:
yaml.dump(new_result, fp)
fp.write('\n') | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/cacti/cacti_master/cacti_top.py | cacti_top.py |
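# Resulting pool entry sketch (numbers assumed): a 1 MiB SRAM simulated with a
# 128-bit bus and ports (1, 1, 0) is cached in the YAML pool under the key
#   "1048576_Byte_128_BW_1_1_0"
# so later runs with identical parameters can skip the CACTI subprocess call.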
import os
class CactiConfig:
def __init__(self):
# content = f.readlines()
self.baseline_config = ['# power gating\n',
'-Array Power Gating - "false"\n',
'-WL Power Gating - "false"\n',
'-CL Power Gating - "false"\n',
'-Bitline floating - "false"\n',
'-Interconnect Power Gating - "false"\n',
'-Power Gating Performance Loss 0.01\n',
'\n',
'# following three parameters are meaningful only for main memories\n',
'-page size (bits) 8192 \n',
'-burst length 8\n',
'-internal prefetch width 8\n',
'\n',
'# following parameter can have one of five values -- (itrs-hp, itrs-lstp, itrs-lop, lp-dram, comm-dram)\n',
'-Data array cell type - "itrs-hp"\n',
'//-Data array cell type - "itrs-lstp"\n',
'//-Data array cell type - "itrs-lop"\n',
'\n',
'# following parameter can have one of three values -- (itrs-hp, itrs-lstp, itrs-lop)\n',
'-Data array peripheral type - "itrs-hp"\n',
'//-Data array peripheral type - "itrs-lstp"\n',
'//-Data array peripheral type - "itrs-lop"\n',
'\n',
'# following parameter can have one of five values -- (itrs-hp, itrs-lstp, itrs-lop, lp-dram, comm-dram)\n',
'-Tag array cell type - "itrs-hp"\n',
'//-Tag array cell type - "itrs-lstp"\n',
'//-Tag array cell type - "itrs-lop"\n',
'\n',
'# following parameter can have one of three values -- (itrs-hp, itrs-lstp, itrs-lop)\n',
'-Tag array peripheral type - "itrs-hp"\n',
'//-Tag array peripheral type - "itrs-lstp"\n',
'//-Tag array peripheral type - "itrs-lop\n',
'\n',
'\n',
'// 300-400 in steps of 10\n',
'-operating temperature (K) 360\n',
'\n',
'# to model special structure like branch target buffers, directory, etc. \n',
'# change the tag size parameter\n',
'# if you want cacti to calculate the tagbits, set the tag size to "default"\n',
'-tag size (b) "default"\n',
'//-tag size (b) 22\n',
'\n',
'# fast - data and tag access happen in parallel\n',
'# sequential - data array is accessed after accessing the tag array\n',
'# normal - data array lookup and tag access happen in parallel\n',
'# final data block is broadcasted in data array h-tree \n',
'# after getting the signal from the tag array\n',
'//-access mode (normal, sequential, fast) - "fast"\n',
'-access mode (normal, sequential, fast) - "normal"\n',
'//-access mode (normal, sequential, fast) - "sequential"\n',
'\n',
'\n',
'# DESIGN OBJECTIVE for UCA (or banks in NUCA)\n',
'-design objective (weight delay, dynamic power, leakage power, cycle time, area) 0:0:0:100:0\n',
'\n',
'# Percentage deviation from the minimum value \n',
'# Ex: A deviation value of 10:1000:1000:1000:1000 will try to find an organization\n',
'# that compromises at most 10% delay. \n',
'# NOTE: Try reasonable values for % deviation. Inconsistent deviation\n',
'# percentage values will not produce any valid organizations. For example,\n',
'# 0:0:100:100:100 will try to identify an organization that has both\n',
'# least delay and dynamic power. Since such an organization is not possible, CACTI will\n',
'# throw an error. Refer CACTI-6 Technical report for more details\n',
'-deviate (delay, dynamic power, leakage power, cycle time, area) 20:100000:100000:100000:100000\n',
'\n',
'# Objective for NUCA\n',
'-NUCAdesign objective (weight delay, dynamic power, leakage power, cycle time, area) 100:100:0:0:100\n',
'-NUCAdeviate (delay, dynamic power, leakage power, cycle time, area) 10:10000:10000:10000:10000\n',
'\n',
'# Set optimize tag to ED or ED^2 to obtain a cache configuration optimized for\n',
'# energy-delay or energy-delay sq. product\n',
'# Note: Optimize tag will disable weight or deviate values mentioned above\n',
'# Set it to NONE to let weight and deviate values determine the \n',
'# appropriate cache configuration\n',
'//-Optimize ED or ED^2 (ED, ED^2, NONE): "ED"\n',
'-Optimize ED or ED^2 (ED, ED^2, NONE): "ED^2"\n',
'//-Optimize ED or ED^2 (ED, ED^2, NONE): "NONE"\n',
'\n',
'-Cache model (NUCA, UCA) - "UCA"\n',
'//-Cache model (NUCA, UCA) - "NUCA"\n',
'\n',
'# In order for CACTI to find the optimal NUCA bank value the following\n',
'# variable should be assigned 0.\n',
'-NUCA bank count 0\n',
'\n',
'# NOTE: for nuca network frequency is set to a default value of \n',
'# 5GHz in time.c. CACTI automatically\n',
'# calculates the maximum possible frequency and downgrades this value if necessary\n',
'\n',
'# By default CACTI considers both full-swing and low-swing \n',
'# wires to find an optimal configuration. However, it is possible to \n',
'# restrict the search space by changing the signaling from "default" to \n',
'# "fullswing" or "lowswing" type.\n',
'-Wire signaling (fullswing, lowswing, default) - "Global_30"\n',
'//-Wire signaling (fullswing, lowswing, default) - "default"\n',
'//-Wire signaling (fullswing, lowswing, default) - "lowswing"\n',
'\n',
'//-Wire inside mat - "global"\n',
'-Wire inside mat - "semi-global"\n',
'//-Wire outside mat - "global"\n',
'-Wire outside mat - "semi-global"\n',
'\n',
'-Interconnect projection - "conservative"\n',
'//-Interconnect projection - "aggressive"\n',
'\n',
'# Contention in network (which is a function of core count and cache level) is one of\n',
'# the critical factor used for deciding the optimal bank count value\n',
'# core count can be 4, 8, or 16\n',
'//-Core count 4\n',
'-Core count 8\n',
'//-Core count 16\n',
'-Cache level (L2/L3) - "L3"\n',
'\n',
'-Add ECC - "true"\n',
'\n',
'//-Print level (DETAILED, CONCISE) - "CONCISE"\n',
'-Print level (DETAILED, CONCISE) - "DETAILED"\n',
'\n',
'# for debugging\n',
'-Print input parameters - "true"\n',
'//-Print input parameters - "false"\n',
'# force CACTI to model the cache with the \n',
'# following Ndbl, Ndwl, Nspd, Ndsam,\n',
'# and Ndcm values\n',
'//-Force cache config - "true"\n',
'-Force cache config - "false"\n',
'-Ndwl 1\n',
'-Ndbl 1\n',
'-Nspd 0\n',
'-Ndcm 1\n',
'-Ndsam1 0\n',
'-Ndsam2 0\n',
'\n',
'\n',
'\n',
'#### Default CONFIGURATION values for baseline external IO parameters to DRAM. More details can be found in the CACTI-IO technical report (), especially Chapters 2 and 3.\n',
'\n',
'# Memory Type (D3=DDR3, D4=DDR4, L=LPDDR2, W=WideIO, S=Serial). Additional memory types can be defined by the user in extio_technology.cc, along with their technology and configuration parameters.\n',
'\n',
'-dram_type "DDR3"\n',
'//-dram_type "DDR4"\n',
'//-dram_type "LPDDR2"\n',
'//-dram_type "WideIO"\n',
'//-dram_type "Serial"\n',
'\n',
'# Memory State (R=Read, W=Write, I=Idle or S=Sleep) \n',
'\n',
'//-io state "READ"\n',
'-io state "WRITE"\n',
'//-io state "IDLE"\n',
'//-io state "SLEEP"\n',
'\n',
'#Address bus timing. To alleviate the timing on the command and address bus due to high loading (shared across all memories on the channel), the interface allows for multi-cycle timing options. \n',
'\n',
'//-addr_timing 0.5 //DDR\n',
'-addr_timing 1.0 //SDR (half of DQ rate)\n',
'//-addr_timing 2.0 //2T timing (One fourth of DQ rate)\n',
'//-addr_timing 3.0 // 3T timing (One sixth of DQ rate)\n',
'\n',
'# Memory Density (Gbit per memory/DRAM die)\n',
'\n',
'-mem_density 4 Gb //Valid values 2^n Gb\n',
'\n',
'# IO frequency (MHz) (frequency of the external memory interface).\n',
'\n',
'-bus_freq 800 MHz //As of current memory standards (2013), valid range 0 to 1.5 GHz for DDR3, 0 to 533 MHz for LPDDR2, 0 - 800 MHz for WideIO and 0 - 3 GHz for Low-swing differential. However this can change, and the user is free to define valid ranges based on new memory types or extending beyond existing standards for existing dram types.\n',
'\n',
'# Duty Cycle (fraction of time in the Memory State defined above)\n',
'\n',
'-duty_cycle 1.0 //Valid range 0 to 1.0\n',
'\n',
'# Activity factor for Data (0->1 transitions) per cycle (for DDR, need to account for the higher activity in this parameter. E.g. max. activity factor for DDR is 1.0, for SDR is 0.5)\n',
' \n',
'-activity_dq 1.0 //Valid range 0 to 1.0 for DDR, 0 to 0.5 for SDR\n',
'\n',
'# Activity factor for Control/Address (0->1 transitions) per cycle (for DDR, need to account for the higher activity in this parameter. E.g. max. activity factor for DDR is 1.0, for SDR is 0.5)\n',
'\n',
'-activity_ca 0.5 //Valid range 0 to 1.0 for DDR, 0 to 0.5 for SDR, 0 to 0.25 for 2T, and 0 to 0.17 for 3T\n',
'\n',
'# Number of DQ pins \n',
'\n',
'-num_dq 72 //Number of DQ pins. Includes ECC pins.\n',
'\n',
'# Number of DQS pins. DQS is a data strobe that is sent along with a small number of data-lanes so the source synchronous timing is local to these DQ bits. Typically, 1 DQS per byte (8 DQ bits) is used. The DQS is also typucally differential, just like the CLK pin. \n',
'\n',
'-num_dqs 18 //2 x differential pairs. Include ECC pins as well. Valid range 0 to 18. For x4 memories, could have 36 DQS pins.\n',
'\n',
'# Number of CA pins \n',
'\n',
'-num_ca 25 //Valid range 0 to 35 pins.\n',
'\n',
'# Number of CLK pins. CLK is typically a differential pair. In some cases additional CLK pairs may be used to limit the loading on the CLK pin. \n',
'\n',
'-num_clk 2 //2 x differential pair. Valid values: 0/2/4.\n',
'\n',
'# Number of Physical Ranks\n',
'\n',
'-num_mem_dq 2 //Number of ranks (loads on DQ and DQS) per buffer/register. If multiple LRDIMMs or buffer chips exist, the analysis for capacity and power is reported per buffer/register. \n',
'\n',
'# Width of the Memory Data Bus\n',
'\n',
'-mem_data_width 8 //x4 or x8 or x16 or x32 memories. For WideIO upto x128.\n',
'\n',
'# RTT Termination Resistance\n',
'\n',
'-rtt_value 10000\n',
'\n',
'# RON Termination Resistance\n',
'\n',
'-ron_value 34\n',
'\n',
'# Time of flight for DQ\n',
'\n',
'-tflight_value\n',
'\n',
'# Parameter related to MemCAD\n',
'\n',
'# Number of BoBs: 1,2,3,4,5,6,\n',
'-num_bobs 1\n',
'\t\n',
'# Memory System Capacity in GB\n',
'-capacity 80\t\n',
'\t\n',
'# Number of Channel per BoB: 1,2. \n',
'-num_channels_per_bob 1\t\n',
'\n',
'# First Metric for ordering different design points\t\n',
'-first metric "Cost"\n',
'#-first metric "Bandwidth"\n',
'#-first metric "Energy"\n',
'\t\n',
'# Second Metric for ordering different design points\t\n',
'#-second metric "Cost"\n',
'-second metric "Bandwidth"\n',
'#-second metric "Energy"\n',
'\n',
'# Third Metric for ordering different design points\t\n',
'#-third metric "Cost"\n',
'#-third metric "Bandwidth"\n',
'-third metric "Energy"\t\n',
'\t\n',
'\t\n',
'# Possible DIMM option to consider\n',
'#-DIMM model "JUST_UDIMM"\n',
'#-DIMM model "JUST_RDIMM"\n',
'#-DIMM model "JUST_LRDIMM"\n',
'-DIMM model "ALL"\n',
'\n',
'#if channels of each bob have the same configurations\n',
'#-mirror_in_bob "T"\n',
'-mirror_in_bob "F"\n',
'\n',
'#if we want to see all channels/bobs/memory configurations explored\t\n',
'#-verbose "T"\n',
'#-verbose "F"\n',
'\n',
'=======USER DEFINE======= \n']
self.config_options = {}
self.config_options['cache_size'] = {'string': '-size (bytes) ',
'option': [64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768,
65536, 131072, 262144, 524288, 1048576, 2097152, 4194304,
8388608, 16777216, 33554432, 134217728, 67108864,
1073741824],
'default': 64}
self.config_options['line_size'] = {'string': '-block size (bytes) ',
'option': [8, 16, 24],
'default': 64}
# Unit for IO_bus_width is bit.
self.config_options['IO_bus_width'] = {'string': '-output/input bus width ',
'option': [4, 8, 16, 24, 32, 64, 128],
'default': 128}
self.config_options['associativity'] = {'string': '-associativity ',
'option': [0, 1, 2, 4],
'default': 1}
self.config_options['rd_wr_port'] = {'string': '-read-write port ',
'option': [0, 1, 2, 3, 4],
'default': 0}
self.config_options['ex_rd_port'] = {'string': '-exclusive read port ',
'option': [0, 1, 2, 3, 4],
'default': 2}
self.config_options['ex_wr_port'] = {'string': '-exclusive write port ',
'option': [0, 1, 2, 3, 4],
'default': 2}
self.config_options['single_rd_port'] = {'string': '-single ended read ports ',
'option': [0, 1, 2, 3, 4],
'default': 0}
self.config_options['bank_count'] = {'string': '-UCA bank count ',
'option': [1, 2, 4, 8, 16],
'default': 1}
self.config_options['technology'] = {'string': '-technology (u) ',
'option': [0.022, 0.028, 0.040, 0.032, 0.065, 0.090],
'default': 0.090}
self.config_options['mem_type'] = {'string': '-cache type ',
'option': ['"cache"', '"ram"', '"main memory"'],
'default': '"ram"'}
return
def change_default_value(self, name_list, new_value_list):
for idx, name in enumerate(name_list):
self.config_options[name]['default'] = new_value_list[idx]
    def write_config(self, user_config, path):
        with open(path, "w+") as f:
            f.write(''.join(self.baseline_config))
            f.write(''.join(user_config))
def call_cacti(self, cacti_master_path, self_gen_cfg_path):
# os.system('./cacti -infile ./self_gen/cache.cfg')
print('##########################################################################################')
original_cwd = os.getcwd()
# Change the directory to the cacti master directory as using absolute paths yields a "Segmentation fault"
os.chdir(cacti_master_path)
common_path = os.path.commonpath([cacti_master_path, self_gen_cfg_path])
if common_path != cacti_master_path:
raise NotImplementedError("Config path for cacti should be inside cacti_master folder.")
self_gen_cfg_path_relative = f"./{os.path.relpath(self_gen_cfg_path, start=cacti_master_path)}"
cacti_cmd = f'./cacti -infile {self_gen_cfg_path_relative}'
stream = os.popen(cacti_cmd)
output = stream.readlines()
        for line in output:
            print(line, end='')
# Change back to the original working directory
os.chdir(original_cwd)
return output
def cacti_auto(self, user_input, cacti_master_path, self_gen_cfg_path):
        '''
        user_input format can be 1 out of these 3:
        user_input = ['default']
        user_input = ['single', [['mem_type', 'technology', ...], ['"ram"', 0.028, ...]]]
        user_input = ['sweep', ['IO_bus_width', ...]]
        '''
print(f"{self_gen_cfg_path=}")
user_config = []
if user_input[0] == 'default':
for itm in self.config_options.keys():
user_config.append(self.config_options[itm]['string'] + str(self.config_options[itm]['default']) + '\n')
self.write_config(user_config, self_gen_cfg_path)
self.call_cacti(cacti_master_path, self_gen_cfg_path)
if user_input[0] == 'single':
for itm in self.config_options.keys():
if itm in user_input[1][0]:
ii = user_input[1][0].index(itm)
user_config.append(self.config_options[itm]['string'] + str(user_input[1][1][ii]) + '\n')
else:
user_config.append(self.config_options[itm]['string'] + str(self.config_options[itm]['default']) + '\n')
self.write_config(user_config, self_gen_cfg_path)
self.call_cacti(cacti_master_path, self_gen_cfg_path)
if user_input[0] == 'sweep':
# produce non-sweeping term
common_part = []
for itm in self.config_options.keys():
if itm not in user_input[1]:
common_part.append(self.config_options[itm]['string'] + str(self.config_options[itm]['default']) + '\n')
for itm in user_input[1]:
for va in self.config_options[itm]['option']:
user_config.append([self.config_options[itm]['string'] + str(va) + '\n'])
for ii in range(len(user_config)):
user_config[ii] += common_part
for ii in range(len(user_config)):
self.write_config(user_config[ii], self_gen_cfg_path)
self.call_cacti(cacti_master_path, self_gen_cfg_path) | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/cacti/cacti_master/cacti_config_creator.py | cacti_config_creator.py |
-----------------------------------------------------------
____ __ ________ __
/\ _`\ /\ \__ __ /\_____ \ /'__`\
\ \ \/\_\ __ ___\ \ ,_\/\_\ \/___//'/'/\ \/\ \
\ \ \/_/_ /'__`\ /'___\ \ \/\/\ \ /' /' \ \ \ \ \
\ \ \L\ \/\ \L\.\_/\ \__/\ \ \_\ \ \ /' /'__ \ \ \_\ \
\ \____/\ \__/.\_\ \____\\ \__\\ \_\ /\_/ /\_\ \ \____/
\/___/ \/__/\/_/\/____/ \/__/ \/_/ \// \/_/ \/___/
A Tool to Model Caches/Memories, 3D stacking, and off-chip IO
-----------------------------------------------------------
CACTI is an analytical tool that takes a set of cache/memory
parameters as input and calculates its access time, power, cycle
time, and area.
CACTI was originally developed by Dr. Jouppi and Dr. Wilton
in 1993 and since then it has undergone six major
revisions.
List of features (version 1-7):
===============================
The following is the list of features supported by the tool.
* Power, delay, area, and cycle time model for
direct mapped caches
set-associative caches
fully associative caches
Embedded DRAM memories
Commodity DRAM memories
* Support for modeling multi-ported uniform cache access (UCA)
and multi-banked, multi-ported non-uniform cache access (NUCA).
* Leakage power calculation that also considers the operating
temperature of the cache.
* Router power model.
* Interconnect model with different delay, power, and area
properties including low-swing wire model.
* An interface to perform trade-off analysis involving power, delay,
area, and bandwidth.
* All process specific values used by the tool are obtained
from ITRS and currently, the tool supports 90nm, 65nm, 45nm,
and 32nm technology nodes.
* Chip IO model to calculate latency and energy for DDR bus. Users can model
different loads (fan-outs) and evaluate the impact on frequency and energy.
This model can be used to study LR-DIMMs, R-DIMMs, etc.
Version 7.0 is derived from 6.5 and merged with CACTI 3D.
It has many new additions apart from code refinements and
bug fixes: new IO model, 3D memory model, and power gating models.
Ref: CACTI-IO: CACTI With OFF-chip Power-Area-Timing Models
MemCAD: An Interconnect Exploratory Tool for Innovative Memories Beyond DDR4
CACTI-3DD: Architecture-level modeling for 3D die-stacked DRAM main memory
--------------------------------------------------------------------------
Version 6.5 has a new c++ code base and includes numerous bug fixes.
CACTI 5.3 and 6.0 activate an entire row of mats to read/write a single
block of data. This technique improves reliability at the cost of
power. CACTI 6.5 activates the minimum number of mats needed to retrieve
a block, to minimize power.
How to use the tool?
====================
Prior versions of CACTI take input parameters such as cache
size and technology node as a set of command line arguments.
To avoid a long list of command line arguments,
CACTI 6.5 and later versions let users specify their cache model in a more
detailed manner by using a config file (cache.cfg).
-> define the cache model using cache.cfg
-> run the "cacti" binary <./cacti -infile cache.cfg>
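
For example, a minimal cache.cfg could contain entries such as the
following (parameter strings as written by the bundled config writer;
the values here are illustrative only):

  -size (bytes) 65536
  -block size (bytes) 64
  -associativity 4
  -read-write port 1
  -technology (u) 0.032
  -cache type "cache"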
CACTI also provides a command line interface similar to earlier versions. The command line interface can be used as
./cacti cache_size line_size associativity rw_ports excl_read_ports excl_write_ports
single_ended_read_ports search_ports banks tech_node output_width specific_tag tag_width
access_mode cache main_mem obj_func_delay obj_func_dynamic_power obj_func_leakage_power
obj_func_cycle_time obj_func_area dev_func_delay dev_func_dynamic_power dev_func_leakage_power
dev_func_area dev_func_cycle_time ed_ed2_none temp wt data_arr_ram_cell_tech_flavor_in
data_arr_peri_global_tech_flavor_in tag_arr_ram_cell_tech_flavor_in tag_arr_peri_global_tech_flavor_in
interconnect_projection_type_in wire_inside_mat_type_in wire_outside_mat_type_in
REPEATERS_IN_HTREE_SEGMENTS_in VERTICAL_HTREE_WIRES_OVER_THE_ARRAY_in
BROADCAST_ADDR_DATAIN_OVER_VERTICAL_HTREES_in PAGE_SIZE_BITS_in BURST_LENGTH_in
INTERNAL_PREFETCH_WIDTH_in force_wiretype wiretype force_config ndwl ndbl nspd ndcm
ndsam1 ndsam2 ecc
For complete documentation of the tool, please refer
to the following publications and reports.
CACTI-5.3 & 6 reports - Details on Memory/cache organizations and tradeoffs.
Latency/Energy tradeoffs for large caches and NUCA design:
"Optimizing NUCA Organizations and Wiring Alternatives for Large Caches With CACTI 6.0", that appears in MICRO 2007.
Memory IO design: CACTI-IO: CACTI With OFF-chip Power-Area-Timing Models,
MemCAD: An Interconnect Exploratory Tool for Innovative Memories Beyond DDR4
CACTI-IO Technical Report - http://www.hpl.hp.com/techreports/2013/HPL-2013-79.pdf
3D model:
CACTI-3DD: Architecture-level modeling for 3D die-stacked DRAM main memory
We are still improving the tool and refining the code. If you
have any comments, questions, or suggestions please write to
us.
Naveen Muralimanohar
[email protected]
Ali Shafiee
[email protected]
Vaishnav Srinivas
[email protected] | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/classes/cacti/cacti_master/README | README |
from typing import List
from collections import defaultdict
import matplotlib.pyplot as plt
from matplotlib.colors import hsv_to_rgb
import numpy as np
from zigzag.classes.mapping.combined_mapping import FourWayDataMoving
from zigzag.classes.cost_model.cost_model import CostModelEvaluation
# MPL FONT SIZES
SMALLEST_SIZE = 10
SMALLER_SIZE = 12
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIG_SIZE = 18
BIGGER_SIZE = 20
plt.rc("font", size=SMALLEST_SIZE) # controls default text sizes
plt.rc("axes", titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc("axes", labelsize=BIGGER_SIZE) # fontsize of the x and y labels
plt.rc("xtick", labelsize=SMALLER_SIZE) # fontsize of the tick labels
plt.rc("ytick", labelsize=SMALLER_SIZE) # fontsize of the tick labels
plt.rc("legend", fontsize=SMALL_SIZE) # legend fontsize
plt.rc("figure", titlesize=MEDIUM_SIZE) # fontsize of the figure title
def bar_plot_cost_model_evaluations_total(
cmes: List[CostModelEvaluation],
labels,
save_path: str = "plot.png",
):
"""Plot total energy and latency of each cost model evaluation in a bar chart.
Args:
cmes (List[CostModelEvaluation]): List of CostModelEvaluations to compare.
save_path (str): Path to save the plot to.
"""
assert len(cmes) == len(
labels
), "Please match a label for each cost model evaluation."
energies = [cme.energy_total for cme in cmes]
latencies = [cme.latency_total2 for cme in cmes]
x = np.arange(len(labels)) # the label locations
width = 0.35 # the width of the bars
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
colormap = plt.get_cmap("Set1")
color_energy = colormap.colors[0]
color_latency = colormap.colors[1]
    rects1 = ax1.bar(
        x - width / 2, energies, width, label="Energy", color=color_energy
    )
    rects2 = ax2.bar(
        x + width / 2, latencies, width, label="Latency", color=color_latency
    )
# Add some text for labels, title and custom x-axis tick labels, etc.
ax1.set_ylabel("Energy [pJ]", fontsize=15)
ax2.set_ylabel("Latency [cycle]", fontsize=15)
ax1.set_xticks(x, labels)
handles1, labels1 = ax1.get_legend_handles_labels()
handles2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(
handles1 + handles2,
labels1 + labels2,
bbox_to_anchor=(0.5, 1.035),
loc="lower center",
borderaxespad=0,
ncol=2,
)
ax1.bar_label(rects1, padding=3, fmt="%.1e")
ax2.bar_label(rects2, padding=3, fmt="%.1e")
# ax1.figure.texts.append(ax1.texts.pop())
# ax2.figure.texts.append(ax2.texts.pop())
# ax1.set_title(fig_title)
fig.tight_layout()
plt.savefig(save_path)
def bar_plot_cost_model_evaluations_breakdown(
cmes: List[CostModelEvaluation], save_path: str, xtick_rotation=90
):
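    """Plot, for each CME, an energy breakdown per memory/operand/access direction and a latency breakdown."""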
memory_word_access_summed = {
d: defaultdict(lambda: defaultdict(lambda: FourWayDataMoving(0, 0, 0, 0)))
for d in range(len(cmes))
}
mac_costs = defaultdict(lambda: 0)
memory_instances = {}
la_break_down = {
d: {
"Ideal computation": 0,
"Spatial stall": 0,
"Temporal stall": 0,
"Data loading": 0,
"Data off-loading": 0,
}
for d in range(len(cmes))
}
la_tot = {d: 0 for d in range(len(cmes))}
for d, cme in enumerate(cmes):
mh = cme.accelerator.get_core(cme.layer.core_allocation).memory_hierarchy
mac_costs[d] = cme.MAC_energy
la_break_down[d]["Ideal computation"] = cme.ideal_cycle
la_break_down[d]["Spatial stall"] = cme.ideal_temporal_cycle - cme.ideal_cycle
la_break_down[d]["Temporal stall"] = (
cme.latency_total0 - cme.ideal_temporal_cycle
)
la_break_down[d]["Data loading"] = cme.latency_total1 - cme.latency_total0
la_break_down[d]["Data off-loading"] = cme.latency_total2 - cme.latency_total1
la_tot[d] = cme.latency_total2
for operand in cme.energy_breakdown_further:
mem_op = cme.layer.memory_operand_links[operand]
operand_memory_levels = mh.get_memory_levels(mem_op)
for j in range(len(cme.energy_breakdown_further[operand])):
mem = operand_memory_levels[j].name
memory_instances[mem] = operand_memory_levels[j]
memory_word_access_summed[d][operand][
mem
] += cme.energy_breakdown_further[operand][j]
all_mems = set()
for v in memory_word_access_summed.values():
for vv in v.values():
for vvv in vv.keys():
all_mems.add(vvv)
all_mems = sorted(
list(all_mems), key=lambda m: memory_instances[m].memory_instance.size
)
all_ops = set()
for v in memory_word_access_summed.values():
for vv in v.keys():
all_ops.add(vv)
all_ops = sorted(list(all_ops))
""" plotting start """
""" Energy part """
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(10, 8))
hues = np.linspace(0, 1, len(all_ops) + 1)[:-1]
hatches = ["////", "\\\\\\\\", "xxxx", "++++"]
x = 0
xticks = {}
for d, cme in enumerate(cmes):
total_energy = 0
startx_of_layer = x
# mac
ax1.bar([x], [mac_costs[d]], width=1, bottom=0, facecolor="k")
total_energy += mac_costs[d]
highest_bar = mac_costs[d]
xticks[x] = "MAC"
x += 1
# mems
for mem in all_mems:
bottom = 0
for op_i, operand in enumerate(all_ops):
for dir_i, dir in enumerate(memory_word_access_summed[d][operand][mem]):
height = memory_word_access_summed[d][operand][mem][dir]
ax1.bar(
[x],
[height],
width=1,
bottom=[bottom],
facecolor=hsv_to_rgb((hues[op_i], 1, 1)),
hatch=hatches[dir_i],
)
bottom += height
xticks[x] = mem
total_energy += bottom
x += 1
highest_bar = max(bottom, highest_bar)
ax1.text(
x * 0.5 + startx_of_layer * 0.5,
1.05 * highest_bar,
"tot:{:,d}".format(int(total_energy)),
horizontalalignment="center",
verticalalignment="bottom",
weight="bold",
)
x += len(all_mems) / 4
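    # The dummy zero-size bars below only create legend entries; note they reuse
    # the last (d, operand, mem) values left over from the plotting loops above.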
for op, h in zip(all_ops, hues):
ax1.bar(0, 0, width=1, facecolor=hsv_to_rgb((h, 1, 1)), label=op)
for dir_i, dir in enumerate(memory_word_access_summed[d][operand][mem]):
ax1.bar(
[0],
[0],
width=1,
bottom=0,
facecolor=(1, 1, 1),
hatch=hatches[dir_i],
label=dir,
)
ax1.legend(loc="upper left")
ax1.set_xticks(list(xticks.keys()), list(xticks.values()), rotation=xtick_rotation)
ax1.set_ylim(0, 1.1 * ax1.get_ylim()[1])
ax1.set_ylabel("Energy (pJ)", fontsize=15)
""" Latency part """
x2 = list(range(len(la_break_down)))
y2 = {ky: [] for ky in la_break_down[0].keys()}
for _, design_point in la_break_down.items():
for ky, val in design_point.items():
y2[ky].append(val)
hues = np.linspace(0, 1, len(y2) + 1)[:-1]
for idx, (ky, va) in enumerate(y2.items()):
if idx == 0:
ax2.bar(
np.array(x2),
va,
width=0.4,
color=hsv_to_rgb((hues[idx], 1, 1)),
label=ky,
)
li_pre = va
else:
ax2.bar(
np.array(x2),
va,
width=0.4,
color=hsv_to_rgb((hues[idx], 1, 1)),
label=ky,
bottom=li_pre,
)
li_pre = [x + y for x, y in zip(li_pre, va)]
for x in x2:
ax2.text(
x,
la_tot[x] * 1.05,
"tot:{:,d}".format(int(la_tot[x])),
horizontalalignment="center",
verticalalignment="bottom",
weight="bold",
)
ax2.legend()
ax2.set_xticks(x2, x2, rotation=xtick_rotation)
ax2.set_ylim(0, 1.1 * ax2.get_ylim()[1])
ax2.set_xlabel("Layers", fontsize=15)
ax2.set_ylabel("Latency (cycle)", fontsize=15)
fig.tight_layout()
plt.savefig(save_path)
if __name__ == "__main__":
import pickle
with open("../list_of_cmes.pickle", "rb") as handle:
list_of_cme = pickle.load(handle)
bar_plot_cost_model_evaluations_breakdown(list_of_cme, "plot.png") | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/visualization/results/plot_cme.py | plot_cme.py |
from copy import deepcopy
def create_printing_block(row, col):
return [[" "] * col for _ in range(row)]
def modify_printing_block(old_block, start_row, start_col, new_str):
new_block = deepcopy(old_block)
new_block[start_row][start_col : start_col + len(new_str)] = new_str
return new_block
def print_printing_block(printing_block):
print()
for i in range(len(printing_block)):
print("".join(printing_block[i]))
def print_good_tm_format(tm, mem_name, cme_name, layer_op_to_mem_op):
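    """Pretty-print the temporal mapping as an ASCII block, with one column of memory names per operand."""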
op_list = list(tm.keys())
tm_list = [tp for li in tm[op_list[0]] for tp in li]
# get required interval between operands (e.g., 'W', 'I', 'O'), based on actual mem name length
max_mem_name_len = 0
for operand in op_list:
for lv in range(len(mem_name[operand])):
if len(mem_name[operand][lv]) > max_mem_name_len:
max_mem_name_len = len(mem_name[operand][lv])
interval = max_mem_name_len + 10
tot_row = 2 * (len(tm_list) + 1) + 8
tot_col = int(2 * (len(tm_list) + 1) + 3.75 * interval)
tot_col_cut = 2 * (len(tm_list) + 1) + interval
tm_block = create_printing_block(tot_row, tot_col)
title = f" Temporal Mapping - {cme_name} "
dash = "*" * int((tot_col - len(title)) / 2)
tm_block = modify_printing_block(tm_block, 1, 0, dash + title + dash)
i = 2
for op in mem_op_to_layer_op.keys():
tm_block = modify_printing_block(
tm_block, i, 1, f"{op} ({mem_op_to_layer_op[op]}): " + str(tm[op])
)
i += 1
tm_block = modify_printing_block(tm_block, 6, 0, "-" * tot_col)
tm_block = modify_printing_block(tm_block, 7, 1, "Temporal Loops")
tm_block = modify_printing_block(tm_block, 8, 0, "-" * tot_col)
finish_row = 2 * len(tm_list) + 7
for i, li in enumerate(tm_list):
tm_block = modify_printing_block(
tm_block,
finish_row - 2 * i,
len(tm_list) - i,
"for " + str(li[0]) + " in " + "[0:" + str(li[1]) + ")",
)
tm_block = modify_printing_block(
tm_block, 2 * (i + 1) + 1 + 7, 0, "-" * tot_col
)
# print mem name to each level
for idx, operand in enumerate(op_list):
column_position = tot_col_cut + idx * interval
tm_block = modify_printing_block(tm_block, 7, column_position, operand)
i = 0
for level, lv_li in enumerate(tm[operand]):
for _ in lv_li:
tm_block = modify_printing_block(
tm_block,
finish_row - 2 * i,
column_position,
str(mem_name[operand][level]),
)
i += 1
# tm_block = modify_printing_block(tm_block, finish_row + 2, 1,
# "(Notes: Temporal Mapping starts from the innermost memory level. MAC level is out of Temporal Mapping's scope.)")
print_printing_block(tm_block)
def print_mapping(cme):
tm = cme.temporal_mapping.mapping_dic_stationary
mem_op_to_layer_op = cme.mem_op_to_layer_op
layer_op_to_mem_op = cme.layer_op_to_mem_op
mem_name = {}
for (mem_op, mems_all_levels) in cme.accelerator.cores[
0
].mem_hierarchy_dict.items():
layer_op = mem_op_to_layer_op[mem_op]
mem_name[layer_op] = []
for mem_a_level in mems_all_levels:
mem_name[layer_op].append(mem_a_level.name)
assert len(tm[layer_op]) == len(
mem_name[layer_op]
), f"Temporal mapping level {len(tm[layer_op])} and memory hierarchy level {len(mem_name[layer_op])} of operand {layer_op} do not match."
cme_name = str(cme)
print_good_tm_format(tm, mem_name, cme_name, layer_op_to_mem_op)
if __name__ == "__main__":
import pickle
with open("../list_of_cmes.pickle", "rb") as handle:
list_of_cme = pickle.load(handle)
for cme in list_of_cme:
print_mapping(cme) | zigzag-dse | /zigzag_dse-2.4.2-py3-none-any.whl/zigzag/visualization/results/print_mapping.py | print_mapping.py |

[](https://badge.fury.io/py/ZigZag)
[](https://github.com/jbn/ZigZag/stargazers)
[](https://github.com/jbn/ZigZag/blob/main/LICENSE.txt)

# ZigZag
ZigZag provides functions for identifying the peaks and valleys of a time
series. Additionally, it provides a function for computing the maximum drawdown.
For the fastest introduction, view the [IPython notebook demo tutorial](https://github.com/jbn/ZigZag/blob/master/zigzag_demo.ipynb).
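
A minimal sketch of typical usage (assuming the `peak_valley_pivots(X, up_thresh, down_thresh)` and `max_drawdown(X)` signatures shown in the demo notebook; the thresholds and data here are illustrative):

```python
import numpy as np
from zigzag import peak_valley_pivots, max_drawdown

X = np.cumprod(1.0 + np.random.randn(200) * 0.01)  # synthetic price series
pivots = peak_valley_pivots(X, 0.03, -0.03)  # +1 marks peaks, -1 marks valleys, 0 otherwise
print("pivot indices:", pivots.nonzero()[0])
print("max drawdown:", max_drawdown(X))  # maximum peak-to-trough decline, as a fraction
```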
## Contributing

This is an admittedly small project. Still, if you have any contributions,
please [fork this project on GitHub](https://github.com/jbn/ZigZag) and
send me a pull request.
| zigzag | /zigzag-0.3.0.tar.gz/zigzag-0.3.0/README.md | README.md |
# Installation
## Pre-Requisites
- Debian 11
- python3.10
- pip3.10
- cargo-update
## Install python3.10
1. Ensure that your system is up to date:
`sudo apt update && sudo apt upgrade -y`
1. Install the required dependencies:
`sudo apt install build-essential zlib1g-dev libncurses5-dev libgdbm-dev libnss3-dev libssl-dev libreadline-dev libffi-dev libsqlite3-dev wget libbz2-dev`
1. Get the Python 3.10 source tarball, then build and install it:
`wget https://www.python.org/ftp/python/3.10.0/Python-3.10.0.tgz`
`tar -xf Python-3.10.*.tgz`
`cd Python-3.10.*/`
`./configure --enable-optimizations`
`make -j4`
`sudo make altinstall`
## Install Cargo and Solc prerequisites
1. `sudo apt install lsb-release wget software-properties-common gnupg pkg-config libssl-dev build-essential cmake git libboost-all-dev libjsoncpp-dev jq`
## Install rust
1. `wget -O rustup.sh https://sh.rustup.rs`
2. `bash rustup.sh -y`
3. `source "$HOME/.cargo/env"`
## Install cargo-update
`cargo install cargo-update`
## Install ziion cli
`sudo pip3.10 install ziion`
## Upgrade ziion cli
`sudo pip3.10 install ziion -U`
# How to use it
## ARM/AMD
`ziion --help`: Show help message and exit.
`ziion --version`: Show version message and exit.
`ziion list-metapackages`: List metapackages that can be updated with the cli and exit.
`ziion self-update`: Update ziion cli to latest version and exit.
## ARM
`ziion update [cargo|solc] [--dryrun]`:
- If cargo|solc packages are installed: Update cargo|solc packages to the latest version if needed.
- If cargo|solc packages are not installed: Install latest version of cargo|solc packages.
`ziion solc-select [version] [--install]`:
- Changes solc's current version to be used.
- if --install is provided the installation of the specified version is forced.
`ziion solc-select versions`: Shows the installed versions and the one currently in use.
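
For example (the version number below is illustrative; use any version offered by `ziion update solc --dryrun`):

`ziion solc-select 0.8.19 --install`

`ziion solc-select versions`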
## AMD
`ziion update [cargo] [--dryrun]`:
- If cargo packages are installed: Update cargo packages to the latest version if needed.
- If cargo packages are not installed: Install latest version of cargo packages.
| ziion | /ziion-1.0.8.tar.gz/ziion-1.0.8/README.md | README.md |
import os
import sys
import shutil
import gzip
import urllib.request
import subprocess
import platform
from packaging import version
import ziion_cli.utils
from ziion_cli.constants import (
CARGO_ARTIFACTS_DIR,
CARGO_DIR,
S3_BUCKET_URL,
CARGO_AMD_FOLDER_S3,
CARGO_ARM_FOLDER_S3
)
def installed_versions():
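    """Return a {package: version} dict of locally installed cargo binaries, parsed from `cargo install --list`."""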
output = subprocess.check_output(["cargo", "install", "--list"])
return ziion_cli.utils.parse_str_to_dict(output.decode("utf-8"))
def list_packages_to_be_updated(s3_packages_list, local_packages_list):
packages_to_update = []
print("\n{:<30} {:<15} {:<15} {:<18}".format(
'Package', 'Installed', 'Latest', 'Need update'))
print("-"*75)
    for package in s3_packages_list:
        if package not in local_packages_list:
            # Not installed yet, so it will be fetched.
            print("{:<30} {:<15} {:<15} {:<18}".format(
                package, " No ", s3_packages_list[package], " Yes "))
            packages_to_update.append(package)
        elif version.parse(s3_packages_list[package]) == version.parse(local_packages_list[package]):
            print("{:<30} {:<15} {:<15} {:<18}".format(
                package, local_packages_list[package], s3_packages_list[package], " No "))
        elif version.parse(s3_packages_list[package]) > version.parse(local_packages_list[package]):
            print("{:<30} {:<15} {:<15} {:<18}".format(
                package, local_packages_list[package], s3_packages_list[package], " Yes "))
            packages_to_update.append(package)
    print("\n")
    return packages_to_update
def update_necessary_packages(s3_packages_list, local_packages_list):
packages = list_packages_to_be_updated(
s3_packages_list, local_packages_list)
if platform.machine() == 'x86_64':
s3_folder = CARGO_AMD_FOLDER_S3
elif platform.machine() == 'aarch64':
s3_folder = CARGO_ARM_FOLDER_S3
for package in packages:
url = S3_BUCKET_URL + s3_folder + package + ".gz"
try:
urllib.request.urlretrieve(url, "/tmp/" + package + ".gz")
except urllib.error.HTTPError as e:
print("ERROR:" + package +
" could not be updated correctly. " + e.reason)
sys.exit()
with gzip.open("/tmp/" + package + ".gz", "rb") as f_in, open(CARGO_ARTIFACTS_DIR.joinpath(package), "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove("/tmp/" + package + ".gz")
os.chmod(CARGO_ARTIFACTS_DIR.joinpath(package), 0o755)
print(package + " updated successfully.")
if packages:
print("Download .crates.toml")
url = S3_BUCKET_URL + s3_folder + ".crates.toml"
try:
urllib.request.urlretrieve(url, str(CARGO_DIR) + "/.crates.toml")
except urllib.error.HTTPError as e:
print("ERROR: .crates.toml could not be updated correctly. " + e.reason)
sys.exit()
print("\n") | ziion | /ziion-1.0.8.tar.gz/ziion-1.0.8/ziion_cli/cargo.py | cargo.py |
import os
from pathlib import Path
import urllib.request
import argparse
import gzip
import shutil
import ziion_cli.utils
from ziion_cli.constants import (
SOLC_SELECT_DIR,
SOLC_ARTIFACTS_DIR,
S3_BUCKET_URL,
SOLC_ARM_FOLDER_S3
)
def switch_global_version(version: str, always_install: bool) -> None:
if version in installed_versions():
with open(f"{SOLC_SELECT_DIR}/global-version", "w", encoding="utf-8") as f:
f.write(version)
print("Switched global version to", version)
elif version in ziion_cli.utils.get_metadata_versions("solc"):
if always_install:
install_artifacts([version])
switch_global_version(version, always_install)
else:
print(
f"ziion-cli solc-select error: '{version}' must be installed prior to use.")
else:
print(f"ziion-cli solc-select error: Unknown version '{version}'")
def installed_versions() -> list[str]:
try:
return [
f.replace("solc-", "") for f in sorted(os.listdir(SOLC_ARTIFACTS_DIR)) if f.startswith("solc-")
]
except OSError as e:
print(f"Unable to open file: {e}")
return []
def install_artifacts(versions: list[str]) -> bool:
    for version in versions:
url = S3_BUCKET_URL + SOLC_ARM_FOLDER_S3 + "solc-v" + version + ".gz"
print(f"Installing '{version}'...")
try:
urllib.request.urlretrieve(url, f"/tmp/solc-{version}.gz")
except urllib.error.HTTPError as e:
print(e.reason)
with gzip.open(f"/tmp/solc-{version}.gz", "rb") as f_in, open(SOLC_ARTIFACTS_DIR.joinpath(f"solc-{version}"), "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(f"/tmp/solc-{version}.gz")
with open(f"{SOLC_SELECT_DIR}/global-version", "w+", encoding="utf-8") as f:
f.write(version)
# verify_checksum(version)
Path.chmod(SOLC_ARTIFACTS_DIR.joinpath(f"solc-{version}"), 0o775)
print(f"Version '{version}' installed and configured as default.\n")
return True
def current_version():
version = os.environ.get("SOLC_VERSION")
source = "SOLC_VERSION"
if version:
if version not in installed_versions():
raise argparse.ArgumentTypeError(
f"Version '{version}' not installed (set by {source}). Run `solc-select install {version}`."
)
else:
source = SOLC_SELECT_DIR.joinpath("global-version")
if Path.is_file(source):
with open(source, encoding="utf-8") as f:
version = f.read()
else:
raise argparse.ArgumentTypeError(
"No solc version set. Run `solc-select use VERSION` or set SOLC_VERSION environment variable."
)
return version, source | ziion | /ziion-1.0.8.tar.gz/ziion-1.0.8/ziion_cli/solc_select.py | solc_select.py |
import subprocess
import sys
import ziion_cli.cargo
import ziion_cli.solc_select
import ziion_cli.utils
import ziion_cli.ziion
from ziion_cli.constants import (
SOLC_SELECT_DIR,
SOLC_ARTIFACTS_DIR
)
def update_packages(metapackage, dryrun):
match metapackage:
case "cargo":
s3_packages_list = ziion_cli.utils.get_metadata_versions(
metapackage)
local_packages_list = ziion_cli.cargo.installed_versions()
if dryrun:
ziion_cli.cargo.list_packages_to_be_updated(
s3_packages_list, local_packages_list)
else:
ziion_cli.cargo.update_necessary_packages(
s3_packages_list, local_packages_list)
case "solc":
s3_packages_list = ziion_cli.utils.get_metadata_versions(
metapackage)
local_packages_list = ziion_cli.solc_select.installed_versions()
missing_artifacts = []
for i in s3_packages_list:
if i not in local_packages_list:
missing_artifacts.append(i)
if dryrun:
print("These versions can be installed: ")
for version in missing_artifacts:
print("- " + version)
elif not dryrun and missing_artifacts != []:
ziion_cli.solc_select.install_artifacts(missing_artifacts)
else:
print("Solc artifacts are up to date!")
def solc_select_imp(version, install=False):
ziion_cli.solc_select.switch_global_version(version, install)
def solc_select_get_versions():
try:
with open(f"{SOLC_SELECT_DIR}/global-version", "r", encoding="utf-8") as f:
current_version = f.read()
for i in ziion_cli.solc_select.installed_versions():
if current_version == i:
print(i + " (current, set by " +
str(SOLC_SELECT_DIR) + "/global-version)")
else:
print(i)
except FileNotFoundError:
print(
"No solc version selected for current usage. Use ziion solc-select [Version] first.")
def solc_imp():
res = ziion_cli.solc_select.current_version()
if res:
(version, _) = res
path = SOLC_ARTIFACTS_DIR.joinpath(f"solc-{version}")
try:
subprocess.run(
[str(path)] + sys.argv[1:],
check=True,
)
except subprocess.CalledProcessError as e:
sys.exit(e.returncode)
else:
sys.exit(1)
def update_cli():
ziion_cli.ziion.self_update() | ziion | /ziion-1.0.8.tar.gz/ziion-1.0.8/ziion_cli/dispatcher.py | dispatcher.py |
===============================================
python-zijinlib
===============================================
Zijinlib is a general-purpose Python package written by me. Every project in this repo imports modules from this package.
Description
================
Brief introductions follow.
### zj.file

Some methods for processing files.

#### zj.file.sort_suffix(path, suffix)

Selects the files with the given suffix under the given path and returns a list of their absolute paths.

#### zj.file.detect_damaged_pictures(filelist)

Detects whether the given list of picture paths contains damaged pictures.

#### zj.file.delete_from_list(dellist)

Deletes the files in the given list of file paths.

#### zj.file.search_two_suffixes(sourcelist, targetlist)

Given two lists of file paths with different suffixes, selects the files that lack a matching counterpart with the other suffix and returns a list of their absolute paths.

### zj.mail

Some methods for sending emails via Python.

#### zj.mail.send_email(emailaddr, content, subject)

Sends an email to the given emailaddr and returns the sending result.
Installation
=======
Before using it, run one of the commands below in the root directory.
`pip install git+https://github.com/yuanyuanzijin/python-zijinlib`
or
`python setup.py install`
Since this project is under rapid development, please confirm the current version of your package before use. We recommend re-running the instructions above to update the package whenever you update this repo.
Usage
=============
Documentation will be written soon; until then, please refer to the examples in the projects folder.
The projects folder contains several examples of using this Python package. For specific instructions, see the various folders. Each folder is a script project.
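
A minimal sketch of the file utilities described above (the import name `zijinlib as zj` is an assumption based on the `zj.` prefixes used in this README, and the suffix format is a guess)::

    import zijinlib as zj

    # Collect all files with a given suffix under a directory,
    # then remove the damaged pictures among them.
    pics = zj.file.sort_suffix("/path/to/photos", "jpg")
    damaged = zj.file.detect_damaged_pictures(pics)
    zj.file.delete_from_list(damaged)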
import time
from zik.client.external_task_client import ExternalTaskClient, ENGINE_LOCAL_BASE_URL
from zik.external_task.external_task import ExternalTask
from zik.external_task.external_task_executor import ExternalTaskExecutor
from zik.utils.log_utils import log_with_context
from zik.utils.auth_basic import obfuscate_password
from zik.utils.utils import get_exception_detail
class ExternalTaskWorker:
DEFAULT_SLEEP_SECONDS = 300
def __init__(self, worker_id, base_url=ENGINE_LOCAL_BASE_URL, config=None):
        config = config if config is not None else {}  # avoid a mutable default argument
self.worker_id = worker_id
self.client = ExternalTaskClient(self.worker_id, base_url, config)
self.executor = ExternalTaskExecutor(self.worker_id, self.client)
self.config = config
self._log_with_context(f"Created new External Task Worker with config: {obfuscate_password(self.config)}")
def subscribe(self, topic_names, action, process_variables=None):
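        """Poll the given topics forever, executing `action` on every fetched external task."""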
while True:
self._fetch_and_execute_safe(topic_names, action, process_variables)
self._log_with_context("Stopping worker") # Fixme: This code seems to be unreachable?
def _fetch_and_execute_safe(self, topic_names, action, process_variables=None):
try:
self.fetch_and_execute(topic_names, action, process_variables)
except NoExternalTaskFound:
self._log_with_context(f"no External Task found for Topics: {topic_names}, "
f"Process variables: {process_variables}", topic=topic_names)
except BaseException as e:
sleep_seconds = self._get_sleep_seconds()
self._log_with_context(f'error fetching and executing tasks: {get_exception_detail(e)} '
f'for topic(s)={topic_names} with Process variables: {process_variables}. '
f'retrying after {sleep_seconds} seconds', exc_info=True)
time.sleep(sleep_seconds)
def fetch_and_execute(self, topic_names, action, process_variables=None):
self._log_with_context(f"Fetching and Executing external tasks for Topics: {topic_names} "
f"with Process variables: {process_variables}")
resp_json = self._fetch_and_lock(topic_names, process_variables)
tasks = self._parse_response(resp_json, topic_names, process_variables)
if len(tasks) == 0:
raise NoExternalTaskFound(f"no External Task found for Topics: {topic_names}, "
f"Process variables: {process_variables}")
self._execute_tasks(tasks, action)
def _fetch_and_lock(self, topic_names, process_variables=None):
self._log_with_context(f"Fetching and Locking external tasks for Topics: {topic_names} "
f"with Process variables: {process_variables}")
return self.client.fetch_and_lock(topic_names, process_variables)
def _parse_response(self, resp_json, topic_names, process_variables):
tasks = []
if resp_json:
for context in resp_json:
task = ExternalTask(context)
tasks.append(task)
tasks_count = len(tasks)
self._log_with_context(f"{tasks_count} External task(s) found for "
f"Topics: {topic_names}, Process variables: {process_variables}")
return tasks
def _execute_tasks(self, tasks, action):
for task in tasks:
self._execute_task(task, action)
def _execute_task(self, task, action):
try:
self.executor.execute_task(task, action)
except Exception as e:
self._log_with_context(f'error when executing task: {get_exception_detail(e)}',
topic=task.get_topic_name(), task_id=task.get_task_id(),
log_level='error', exc_info=True)
raise e
def _log_with_context(self, msg, topic=None, task_id=None, log_level='info', **kwargs):
context = {"WORKER_ID": str(self.worker_id), "TOPIC": topic, "TASK_ID": task_id}
log_with_context(msg, context=context, log_level=log_level, **kwargs)
def _get_sleep_seconds(self):
return self.config.get("sleepSeconds", self.DEFAULT_SLEEP_SECONDS)
class NoExternalTaskFound(Exception):
pass | zik-client | /zik-client-0.0.3.tar.gz/zik-client-0.0.3/zik/external_task/external_task_worker.py | external_task_worker.py |
import logging
from zik.utils.log_utils import log_with_context
logger = logging.getLogger(__name__)
class ExternalTaskExecutor:
def __init__(self, worker_id, external_task_client):
self.worker_id = worker_id
self.external_task_client = external_task_client
def execute_task(self, task, action):
topic = task.get_topic_name()
task_id = task.get_task_id()
self._log_with_context(f"Executing external task for Topic: {topic}", task_id=task_id)
task_result = action(task)
# in case task result is not set inside action function, set it in task here
task.set_task_result(task_result)
self._handle_task_result(task_result)
return task_result
def _handle_task_result(self, task_result):
task = task_result.get_task()
topic = task.get_topic_name()
task_id = task.get_task_id()
if task_result.is_success():
self._handle_task_success(task_id, task_result, topic)
elif task_result.is_bpmn_error():
self._handle_task_bpmn_error(task_id, task_result, topic)
elif task_result.is_failure():
self._handle_task_failure(task_id, task_result, topic)
else:
err_msg = f"task result for task_id={task_id} must be either complete/failure/BPMNError"
self._log_with_context(err_msg, task_id=task_id, log_level='warning')
raise Exception(err_msg)
def _strip_long_variables(self, variables):
"""remove value of complex variables for the dict"""
if not variables:
return variables
cleaned = {}
for k, v in variables.items():
if isinstance(v, dict) and v.get("type", "") in ("File", "Bytes"):
cleaned[k] = {**v, "value": "..."}
else:
cleaned[k] = v
return cleaned
def _handle_task_success(self, task_id, task_result, topic):
self._log_with_context(f"Marking task complete for Topic: {topic}", task_id)
if self.external_task_client.complete(task_id, task_result.global_variables, task_result.local_variables):
self._log_with_context(f"Marked task completed - Topic: {topic} "
f"global_variables: {self._strip_long_variables(task_result.global_variables)} "
f"local_variables: {self._strip_long_variables(task_result.local_variables)}", task_id)
else:
self._log_with_context(f"Not able to mark task completed - Topic: {topic} "
f"global_variables: {self._strip_long_variables(task_result.global_variables)} "
f"local_variables: {self._strip_long_variables(task_result.local_variables)}", task_id)
raise Exception(f"Not able to mark complete for task_id={task_id} "
f"for topic={topic}, worker_id={self.worker_id}")
def _handle_task_failure(self, task_id, task_result, topic):
self._log_with_context(f"Marking task failed - Topic: {topic} task_result: {task_result}", task_id)
if self.external_task_client.failure(task_id, task_result.error_message, task_result.error_details,
task_result.retries, task_result.retry_timeout):
self._log_with_context(f"Marked task failed - Topic: {topic} task_result: {task_result}", task_id)
else:
self._log_with_context(f"Not able to mark task failure - Topic: {topic}", task_id=task_id)
raise Exception(f"Not able to mark failure for task_id={task_id} "
f"for topic={topic}, worker_id={self.worker_id}")
def _handle_task_bpmn_error(self, task_id, task_result, topic):
bpmn_error_handled = self.external_task_client.bpmn_failure(task_id, task_result.bpmn_error_code,
task_result.error_message,
task_result.global_variables)
if bpmn_error_handled:
self._log_with_context(f"BPMN Error Handled: {bpmn_error_handled} "
f"Topic: {topic} task_result: {task_result}")
else:
self._log_with_context(f"Not able to mark BPMN error - Topic: {topic}", task_id=task_id)
raise Exception(f"Not able to mark BPMN Error for task_id={task_id} "
f"for topic={topic}, worker_id={self.worker_id}")
def _log_with_context(self, msg, task_id=None, log_level='info', **kwargs):
context = {"WORKER_ID": self.worker_id, "TASK_ID": task_id}
log_with_context(msg, context=context, log_level=log_level, **kwargs) | zik-client | /zik-client-0.0.3.tar.gz/zik-client-0.0.3/zik/external_task/external_task_executor.py | external_task_executor.py |
from zik.variables.properties import Properties
from zik.variables.variables import Variables
class ExternalTask:
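    """Wraps the task context fetched from the engine and keeps track of the eventual TaskResult."""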
def __init__(self, context):
self._context = context
self._variables = Variables(context.get("variables", {}))
self._task_result = TaskResult.empty_task_result(task=self)
self._extProperties = Properties(context.get("extensionProperties", {}))
def get_worker_id(self):
return self._context["workerId"]
def get_process_instance_id(self):
return self._context["processInstanceId"]
def get_variables(self):
return self._variables.to_dict()
def get_extension_properties(self) -> dict:
return self._extProperties.to_dict()
def get_task_id(self):
return self._context["id"]
def get_activity_id(self):
return self._context["activityId"]
def get_topic_name(self):
return self._context["topicName"]
def get_variable(self, variable_name, with_meta=False):
return self._variables.get_variable(variable_name, with_meta=with_meta)
def get_extension_property(self, property_name) -> str:
return self._extProperties.get_property(property_name)
def get_tenant_id(self):
return self._context.get("tenantId", None)
def get_business_key(self):
return self._context.get("businessKey", None)
def get_task_result(self):
return self._task_result
def set_task_result(self, task_result):
self._task_result = task_result
def complete(self, global_variables={}, local_variables={}):
self._task_result = TaskResult.success(self, global_variables, local_variables)
return self._task_result
def failure(self, error_message, error_details, max_retries, retry_timeout):
retries = self._calculate_retries(max_retries)
self._task_result = TaskResult.failure(
self,
error_message=error_message,
error_details=error_details,
retries=retries,
retry_timeout=retry_timeout,
)
return self._task_result
def _calculate_retries(self, max_retries):
retries = self._context.get("retries", None)
retries = int(retries - 1) if retries and retries >= 1 else max_retries
return retries
def bpmn_error(self, error_code, error_message, variables={}):
self._task_result = TaskResult.bpmn_error(
self,
error_code=error_code,
error_message=error_message,
variables=variables,
)
return self._task_result
def __str__(self):
return f"{self._context}"
class TaskResult:
def __init__(
self,
task,
success=False,
global_variables={},
local_variables={},
bpmn_error_code=None,
error_message=None,
error_details={},
retries=0,
retry_timeout=300000,
):
self.task = task
self.success_state = success
self.global_variables = global_variables
self.local_variables = local_variables
self.bpmn_error_code = bpmn_error_code
self.error_message = error_message
self.error_details = error_details
self.retries = retries
self.retry_timeout = retry_timeout
@classmethod
def success(cls, task, global_variables, local_variables={}):
return TaskResult(
task,
success=True,
global_variables=global_variables,
local_variables=local_variables,
)
@classmethod
def failure(cls, task, error_message, error_details, retries, retry_timeout):
return TaskResult(
task,
success=False,
error_message=error_message,
error_details=error_details,
retries=retries,
retry_timeout=retry_timeout,
)
@classmethod
def bpmn_error(cls, task, error_code, error_message, variables={}):
return TaskResult(
task,
success=False,
bpmn_error_code=error_code,
error_message=error_message,
global_variables=variables,
)
@classmethod
def empty_task_result(cls, task):
return TaskResult(task, success=False)
def is_success(self):
return (
self.success_state
and self.bpmn_error_code is None
and self.error_message is None
)
def is_failure(self):
return (
not self.success_state
and self.error_message is not None
and not self.is_bpmn_error()
)
def is_bpmn_error(self):
return not self.success_state and self.bpmn_error_code
def get_task(self):
return self.task
def __str__(self):
if self.is_success():
return f"success: task_id={self.task.get_task_id()}, global_variables={self.global_variables}, local_variables={self.local_variables}"
elif self.is_failure():
return (
f"failure: task_id={self.task.get_task_id()}, "
f"error_message={self.error_message}, error_details={self.error_details}, "
f"retries={self.retries}, retry_timeout={self.retry_timeout}"
)
elif self.is_bpmn_error():
return f"bpmn_error: task_id={self.task.get_task_id()}, error_code={self.bpmn_error_code}"
return "empty_task_result" | zik-client | /zik-client-0.0.3.tar.gz/zik-client-0.0.3/zik/external_task/external_task.py | external_task.py |
import logging
import requests
from zik.client.engine_client import EngineClient, ENGINE_LOCAL_BASE_URL
from zik.utils.response_utils import raise_exception_if_not_ok
from zik.utils.utils import join
from zik.variables.variables import Variables
logger = logging.getLogger(__name__)
class ProcessDefinitionClient(EngineClient):
def __init__(self, engine_base_url=ENGINE_LOCAL_BASE_URL, config=None):
super().__init__(engine_base_url, config=config)
def get_process_definitions(
self,
process_key,
version_tag,
tenant_ids,
sort_by="version",
sort_order="desc",
offset=0,
limit=1,
):
url = self.get_process_definitions_url()
url_params = self.get_process_definitions_url_params(
process_key, version_tag, tenant_ids, sort_by, sort_order, offset, limit
)
response = requests.get(url, headers=self._get_headers(), params=url_params)
raise_exception_if_not_ok(response)
return response.json()
def get_process_definitions_url(self):
return f"{self.engine_base_url}/process-definition"
def get_process_definitions_url_params(
self,
process_key,
version_tag=None,
tenant_ids=None,
sort_by="version",
sort_order="desc",
offset=0,
limit=1,
):
"""
offset starts with zero
sort_order can be "asc" or "desc
"""
url_params = {
"key": process_key,
"versionTagLike": f"{version_tag}%" if version_tag else None,
"tenantIdIn": join(tenant_ids, ","),
"sortBy": sort_by,
"sortOrder": sort_order,
"firstResult": offset,
"maxResults": limit,
}
url_params = {k: v for k, v in url_params.items() if v is not None and v != ""}
return url_params
def start_process_by_version(
self, process_key, version_tag, variables, tenant_id=None, business_key=None
):
"""
Start a process instance with the process_key and specified version tag and variables passed.
If multiple versions with same version tag found, it triggers the latest one
:param process_key: Mandatory
:param version_tag:
:param variables: Mandatory - can be empty dict
:param tenant_id: Optional
:param business_key: Optional
:return: response json
"""
tenant_ids = [tenant_id] if tenant_id else []
process_definitions = self.get_process_definitions(
process_key,
version_tag,
tenant_ids,
sort_by="version",
sort_order="desc",
offset=0,
limit=1,
)
if len(process_definitions) == 0:
raise Exception(
f"cannot start process because no process definitions found "
f"for process_key: {process_key}, version_tag: {version_tag} and tenant_id: {tenant_id}"
)
process_definition_id = process_definitions[0]["id"]
version = process_definitions[0]["version"]
if len(process_definitions) > 1:
logger.info(
f"multiple process definitions found for process_key: {process_key}, "
f"version_tag: {version_tag} and tenant_id: {tenant_id}, "
f"using latest process_definition_id: {process_definition_id} with version: {version}"
)
else:
logger.info(
f"exactly one process definition found for process_key: {process_key}, "
f"version_tag: {version_tag} and tenant_id: {tenant_id}, "
f"using process_definition_id: {process_definition_id} with version: {version}"
)
url = self.get_start_process_url(process_definition_id)
body = {"variables": Variables.format(variables)}
if business_key:
body["businessKey"] = business_key
response = requests.post(url, headers=self._get_headers(), json=body)
raise_exception_if_not_ok(response)
return response.json()
def get_start_process_url(self, process_definition_id):
return (
f"{self.engine_base_url}/process-definition/{process_definition_id}/start"
) | zik-client | /zik-client-0.0.3.tar.gz/zik-client-0.0.3/zik/process_definition/process_definition_client.py | process_definition_client.py |
import base64
import logging
from http import HTTPStatus
import requests
from zik.utils.auth_basic import AuthBasic
from zik.utils.response_utils import raise_exception_if_not_ok
from zik.utils.utils import join
from zik.variables.variables import Variables
logger = logging.getLogger(__name__)
ENGINE_LOCAL_BASE_URL = "http://localhost:8080/engine-rest"
class EngineClient:
def __init__(self, engine_base_url=ENGINE_LOCAL_BASE_URL, config=None):
config = config if config is not None else {}
self.config = config.copy()
self.engine_base_url = engine_base_url
def get_start_process_instance_url(self, process_key, tenant_id=None):
if tenant_id:
return f"{self.engine_base_url}/process-definition/key/{process_key}/tenant-id/{tenant_id}/start"
return f"{self.engine_base_url}/process-definition/key/{process_key}/start"
def start_process(self, process_key, variables, tenant_id=None, business_key=None):
"""
Start a process instance with the process_key and variables passed.
:param process_key: Mandatory
:param variables: Mandatory - can be empty dict
:param tenant_id: Optional
:param business_key: Optional
:return: response json
"""
url = self.get_start_process_instance_url(process_key, tenant_id)
body = {
"variables": Variables.format(variables)
}
if business_key:
body["businessKey"] = business_key
response = requests.post(url, headers=self._get_headers(), json=body)
raise_exception_if_not_ok(response)
return response.json()
def get_process_instance(self, instance_id=None, variables=frozenset([]), tenant_ids=frozenset([]), definition_id=None, definition_key=None):
url = f"{self.engine_base_url}/process-instance"
url_params = self.__get_process_instance_url_params(instance_id, tenant_ids, variables, definition_id, definition_key)
response = requests.get(url, headers=self._get_headers(), params=url_params)
raise_exception_if_not_ok(response)
return response.json()
@staticmethod
def __get_process_instance_url_params(instance_id, tenant_ids, variables, definition_id, definition_key):
url_params = {}
if instance_id:
url_params["processInstanceIds"] = instance_id
if definition_id:
url_params["processDefinitionId"] = definition_id
if definition_key:
url_params["processDefinitionKey"] = definition_key
var_filter = join(variables, ',')
if var_filter:
url_params["variables"] = var_filter
tenant_ids_filter = join(tenant_ids, ',')
if tenant_ids_filter:
url_params["tenantIdIn"] = tenant_ids_filter
return url_params
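    # Example filter query (all values hypothetical): fetch instances of one
    # definition that carry a given variable, scoped to a tenant. The variable
    # filter uses Camunda's "name_operator_value" expression syntax:
    #
    #   instances = client.get_process_instance(
    #       definition_key="invoice",
    #       variables=["amount_eq_42"],
    #       tenant_ids=["tenant-a"],
    #   )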
@property
def auth_basic(self) -> dict:
if not self.config.get("auth_basic") or not isinstance(self.config.get("auth_basic"), dict):
return {}
token = AuthBasic(**self.config.get("auth_basic").copy()).token
return {"Authorization": token}
def _get_headers(self):
headers = {
"Content-Type": "application/json"
}
if self.auth_basic:
headers.update(self.auth_basic)
return headers
def correlate_message(self, message_name, process_instance_id=None, tenant_id=None, business_key=None,
process_variables=None):
"""
Correlates a message to the process engine to either trigger a message start event or
an intermediate message catching event.
:param message_name:
:param process_instance_id:
:param tenant_id:
:param business_key:
:param process_variables:
:return: response json
"""
url = f"{self.engine_base_url}/message"
body = {
"messageName": message_name,
"resultEnabled": True,
"processVariables": Variables.format(process_variables) if process_variables else None,
"processInstanceId": process_instance_id,
"tenantId": tenant_id,
"withoutTenantId": not tenant_id,
"businessKey": business_key,
}
if process_instance_id:
body.pop("tenantId")
body.pop("withoutTenantId")
body = {k: v for k, v in body.items() if v is not None}
response = requests.post(url, headers=self._get_headers(), json=body)
raise_exception_if_not_ok(response)
return response.json()
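    # Example correlation (message name, instance id and variable are all
    # hypothetical): trigger an intermediate message catch event on a known
    # instance and pass a variable along with the correlation:
    #
    #   result = client.correlate_message(
    #       "PaymentReceived",
    #       process_instance_id="5c3f...",  # elided id, for illustration only
    #       process_variables={"paid": True},
    #   )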
def get_jobs(self,
offset: int,
limit: int,
tenant_ids=None,
with_failure=None,
process_instance_id=None,
task_name=None,
sort_by="jobDueDate",
sort_order="desc"):
        # offset starts at zero
        # sort_order can be "asc" or "desc"
url = f"{self.engine_base_url}/job"
params = {
"firstResult": offset,
"maxResults": limit,
"sortBy": sort_by,
"sortOrder": sort_order,
}
if process_instance_id:
params["processInstanceId"] = process_instance_id
if task_name:
params["failedActivityId"] = task_name
if with_failure:
params["withException"] = "true"
if tenant_ids:
params["tenantIdIn"] = ','.join(tenant_ids)
response = requests.get(url, params=params, headers=self._get_headers())
raise_exception_if_not_ok(response)
return response.json()
def set_job_retry(self, job_id, retries=1):
url = f"{self.engine_base_url}/job/{job_id}/retries"
body = {"retries": retries}
response = requests.put(url, headers=self._get_headers(), json=body)
raise_exception_if_not_ok(response)
return response.status_code == HTTPStatus.NO_CONTENT
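    # Example (tenant id hypothetical): fetch the first page of failed jobs and
    # reset their retries so the engine picks them up again:
    #
    #   failed_jobs = client.get_jobs(offset=0, limit=10, with_failure=True,
    #                                 tenant_ids=["tenant-a"])
    #   for job in failed_jobs:
    #       client.set_job_retry(job["id"], retries=1)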
def get_process_instance_variable(self, process_instance_id, variable_name, with_meta=False):
url = f"{self.engine_base_url}/process-instance/{process_instance_id}/variables/{variable_name}"
response = requests.get(url, headers=self._get_headers())
raise_exception_if_not_ok(response)
        resp_json = response.json()
        url_with_data = f"{url}/data"
        response = requests.get(url_with_data, headers=self._get_headers())
        raise_exception_if_not_ok(response)
        # the /data endpoint returns the raw binary content; base64-encode it so
        # the value can be returned as a plain string
        encoded_value = base64.encodebytes(response.content).decode("utf-8")
        if with_meta:
            return dict(resp_json, value=encoded_value)
        return encoded_value
| zik-client | /zik-client-0.0.3.tar.gz/zik-client-0.0.3/zik/client/engine_client.py | engine_client.py
import logging
from http import HTTPStatus
import requests
from zik.client.engine_client import ENGINE_LOCAL_BASE_URL
from zik.utils.log_utils import log_with_context
from zik.utils.response_utils import raise_exception_if_not_ok
from zik.utils.utils import str_to_list
from zik.utils.auth_basic import AuthBasic, obfuscate_password
from zik.variables.variables import Variables
logger = logging.getLogger(__name__)
class ExternalTaskClient:
default_config = {
"maxTasks": 1,
"lockDuration": 300000, # in milliseconds
"asyncResponseTimeout": 30000,
"retries": 3,
"retryTimeout": 300000,
"httpTimeoutMillis": 30000,
"timeoutDeltaMillis": 5000,
"includeExtensionProperties": True # enables Camunda Extension Properties
}
def __init__(self, worker_id, engine_base_url=ENGINE_LOCAL_BASE_URL, config=None):
config = config if config is not None else {}
self.worker_id = worker_id
self.external_task_base_url = engine_base_url + "/external-task"
self.config = type(self).default_config.copy()
self.config.update(config)
self.is_debug = config.get('isDebug', False)
self.http_timeout_seconds = self.config.get('httpTimeoutMillis') / 1000
self._log_with_context(f"Created External Task client with config: {obfuscate_password(self.config)}")
def get_fetch_and_lock_url(self):
return f"{self.external_task_base_url}/fetchAndLock"
def fetch_and_lock(self, topic_names, process_variables=None):
url = self.get_fetch_and_lock_url()
body = {
"workerId": str(self.worker_id), # convert to string to make it JSON serializable
"maxTasks": self.config["maxTasks"],
"topics": self._get_topics(topic_names, process_variables),
"asyncResponseTimeout": self.config["asyncResponseTimeout"]
}
if self.is_debug:
self._log_with_context(f"trying to fetch and lock with request payload: {body}")
http_timeout_seconds = self.__get_fetch_and_lock_http_timeout_seconds()
response = requests.post(url, headers=self._get_headers(), json=body, timeout=http_timeout_seconds)
raise_exception_if_not_ok(response)
resp_json = response.json()
if self.is_debug:
self._log_with_context(f"fetch and lock response json: {resp_json} for request: {body}")
        return resp_json
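    # A minimal polling-worker sketch (topic name and variable are hypothetical;
    # a real worker would add logging and finer-grained error handling):
    #
    #   client = ExternalTaskClient(worker_id="worker-1")
    #   while True:
    #       for task in client.fetch_and_lock(["my-topic"]):
    #           try:
    #               client.complete(task["id"], {"done": True})
    #           except Exception as e:
    #               client.failure(task["id"], str(e), None,
    #                              retries=task.get("retries") or 3,
    #                              retry_timeout=300000)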
def __get_fetch_and_lock_http_timeout_seconds(self):
# use HTTP timeout slightly more than async Response / long polling timeout
return (self.config["timeoutDeltaMillis"] + self.config["asyncResponseTimeout"]) / 1000
def _get_topics(self, topic_names, process_variables):
topics = []
for topic in str_to_list(topic_names):
topics.append({
"topicName": topic,
"lockDuration": self.config["lockDuration"],
"processVariables": process_variables if process_variables else {},
# enables Camunda Extension Properties
"includeExtensionProperties": self.config.get("includeExtensionProperties") or False
})
return topics
def complete(self, task_id, global_variables, local_variables=None):
url = self.get_task_complete_url(task_id)
body = {
"workerId": self.worker_id,
"variables": Variables.format(global_variables),
"localVariables": Variables.format(local_variables)
}
response = requests.post(url, headers=self._get_headers(), json=body, timeout=self.http_timeout_seconds)
raise_exception_if_not_ok(response)
return response.status_code == HTTPStatus.NO_CONTENT
def get_task_complete_url(self, task_id):
return f"{self.external_task_base_url}/{task_id}/complete"
def failure(self, task_id, error_message, error_details, retries, retry_timeout):
url = self.get_task_failure_url(task_id)
logger.info(f"setting retries to: {retries} for task: {task_id}")
body = {
"workerId": self.worker_id,
"errorMessage": error_message,
"retries": retries,
"retryTimeout": retry_timeout,
}
if error_details:
body["errorDetails"] = error_details
response = requests.post(url, headers=self._get_headers(), json=body, timeout=self.http_timeout_seconds)
raise_exception_if_not_ok(response)
return response.status_code == HTTPStatus.NO_CONTENT
def get_task_failure_url(self, task_id):
return f"{self.external_task_base_url}/{task_id}/failure"
def bpmn_failure(self, task_id, error_code, error_message, variables=None):
url = self.get_task_bpmn_error_url(task_id)
body = {
"workerId": self.worker_id,
"errorCode": error_code,
"errorMessage": error_message,
"variables": Variables.format(variables),
}
if self.is_debug:
self._log_with_context(f"trying to report bpmn error with request payload: {body}")
        response = requests.post(url, headers=self._get_headers(), json=body, timeout=self.http_timeout_seconds)
        raise_exception_if_not_ok(response)
        return response.status_code == HTTPStatus.NO_CONTENT
def get_task_bpmn_error_url(self, task_id):
return f"{self.external_task_base_url}/{task_id}/bpmnError"
@property
def auth_basic(self) -> dict:
if not self.config.get("auth_basic") or not isinstance(self.config.get("auth_basic"), dict):
return {}
token = AuthBasic(**self.config.get("auth_basic").copy()).token
return {"Authorization": token}
def _get_headers(self):
headers = {
"Content-Type": "application/json"
}
if self.auth_basic:
headers.update(self.auth_basic)
return headers
def _log_with_context(self, msg, log_level='info', **kwargs):
context = {"WORKER_ID": self.worker_id}
        log_with_context(msg, context=context, log_level=log_level, **kwargs)
| zik-client | /zik-client-0.0.3.tar.gz/zik-client-0.0.3/zik/client/external_task_client.py | external_task_client.py
```
usage: zik-dl [-h] [--artists ARTISTS] [--album ALBUM] [--split] [--cover COVER] url
Linux command-line program to download music from YouTube and other websites.
positional arguments:
url URL of the song/album to download
optional arguments:
-h, --help show this help message and exit
  --artists ARTISTS  comma-separated artist names like "Louis Armstrong,Ella Fitzgerald"
  --album ALBUM      album name
  --split            split the song into multiple songs based on the timestamps in the YouTube video description
  --cover COVER      cover image path like "~/Images/cover1.jpg"
```
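For example, downloading an album upload and splitting it into individual tracks (the URL and metadata below are placeholders):

```
zik-dl --artists "Louis Armstrong" --album "What a Wonderful World" --split "https://www.youtube.com/watch?v=XXXXXXXXXXX"
```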
| zik-dl | /zik-dl-0.6.0.tar.gz/zik-dl-0.6.0/README.md | README.md |
import os
import argparse
class File():
""" Class handles a work with given file"""
def __init__(self, file, folder, id_file, file_format):
self.folder = folder
self.file = file
self.id = id_file
self.format = file_format
    def remove_numbers(self):
        # strip digits from the name only, so extensions like ".mp3" keep their digits
        name, extension = self.file.rsplit(".", 1)
        return ''.join([i for i in name if not i.isdigit()]) + "." + extension
def insert_number(self, file_name):
        # split on the last dot only, so names with several dots keep their extension
        name, extension = file_name.rsplit(".", 1)
        return name + self.format.format(self.id) + "." + extension
    def add_a(self):
        # temporarily prefix with "a" so a rename cannot collide with an existing file
        return "a" + self.file
    def remove_a(self):
        # strip the temporary "a" prefix again
        return self.file[1:]
    def rename_as(self, old_name, new_name):
        os.rename(os.path.join(self.folder, old_name), os.path.join(self.folder, new_name))
        self.file = new_name
class Sorter():
""" Class handles a work with given folder """
def __init__(self, args):
self.folder = args.folder
self.file_id = args.file_id if args.file_id else 0
self.reversed = args.reversed if args.reversed is not None else False
self.custom_name = args.name if args.name else ""
if not os.path.isdir(self.folder):
print(f"Given folder {self.folder} does not exist.")
return
self.format_n = "{:0"+str(args.digits_number if args.digits_number else len(str(len(os.listdir(self.folder)))))+"d}"
# Sets the default files
self.files = []
file_i = self.file_id
for file in self.get_folder_files():
# Get the file
f = File(file, self.folder, file_i, self.format_n)
self.files.append(f)
file_i += 1
try:
if self.custom_name:
self.rename_files_with_name(self.custom_name)
if not self.reversed:
self.order_files()
else:
self.reverse_order_files()
self.remove_a_filename()
except PermissionError as e:
# Handles PermissionErrors
print(e)
print(f"You need to start cmd as an administrator to edit folder {self.folder}")
    def get_folder_files(self):
        # sort the listing so the renaming order is deterministic across platforms
        return sorted(os.listdir(self.folder))
def add_a_filename(self):
# New file can't overwrite the one in a folder
for file in self.files:
file.rename_as(file.file, file.add_a())
def remove_a_filename(self):
# New file can't overwrite the one in a folder
for file in self.files:
file.rename_as(file.file, file.remove_a())
    def rename_files_with_name(self, name):
        for file in self.files:
            extension = file.file.rsplit(".", 1)[1]
            file.rename_as(file.file, file.insert_number(name + "." + extension))
def order_files(self):
for file in self.files:
file.rename_as(file.file, file.add_a())
file.rename_as(file.file, file.insert_number(file.remove_numbers()))
    def reverse_order_files(self):
        # walk the files from the end and reassign ids so the numbering is reversed
        counter = self.file_id
        for file in reversed(self.files):
            file.id = counter
            file.rename_as(file.file, file.add_a())
            file.rename_as(file.file, file.insert_number(file.remove_numbers()))
            counter += 1
def set_argparse():
parser = argparse.ArgumentParser(description='Reorders/reverses/renames files in a folder.')
parser.add_argument('folder', metavar='folder', type=str, help='Path to the folder you want reordered.')
    parser.add_argument('--digits_number', '-d', nargs='?', type=int, help='number of digits the file id will have (2 - 01, 3 - 001, 4 - 0001, ...)')
parser.add_argument('--file_id', '-fid', nargs='?', type=int, help='Defines from which number the sorter will sort the files')
parser.add_argument('--name', '-n', nargs='?', help='Reorder with a custom name.')
parser.add_argument('--reversed', '-r', action='store_true', default=False, help='Sorter will reorder the files from the end.')
return parser.parse_args()
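# Example invocations (folder and file names hypothetical):
#   python zikasort.py ~/Pictures/vacation
#       -> files renumbered in place, e.g. img01.jpg, img02.jpg, ...
#   python zikasort.py ~/Pictures/vacation --name holiday --digits_number 3
#       -> files renamed to holiday001.jpg, holiday002.jpg, ...
#   python zikasort.py ~/Pictures/vacation --reversed
#       -> numbers are assigned starting from the last file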
def main():
    # Sets up argparse and runs the Sorter
    Sorter(set_argparse())
if __name__ == "__main__":
    main()
| zikasort | /zikasort-0.1.tar.gz/zikasort-0.1/zikasort.py | zikasort.py
!function(a,b){"object"==typeof exports&&"undefined"!=typeof module?b(exports):"function"==typeof define&&define.amd?define(["exports"],b):b(a.RSVP=a.RSVP||{})}(this,function(a){"use strict";function b(a,b){for(var c=0,d=a.length;c<d;c++)if(a[c]===b)return c;return-1}function c(a){var b=a._promiseCallbacks;return b||(b=a._promiseCallbacks={}),b}function d(a,b){if(2!==arguments.length)return wa[a];wa[a]=b}function e(a){var b=typeof a;return null!==a&&("object"===b||"function"===b)}function f(a){return"function"==typeof a}function g(a){return null!==a&&"object"==typeof a}function h(a){return null!==a&&"object"==typeof a}function i(){setTimeout(function(){for(var a=0;a<Aa.length;a++){var b=Aa[a],c=b.payload;c.guid=c.key+c.id,c.childGuid=c.key+c.childId,c.error&&(c.stack=c.error.stack),wa.trigger(b.name,b.payload)}Aa.length=0},50)}function j(a,b,c){1===Aa.push({name:a,payload:{key:b._guidKey,id:b._id,eventName:a,detail:b._result,childId:c&&c._id,label:b._label,timeStamp:za(),error:wa["instrument-with-stack"]?new Error(b._label):null}})&&i()}function k(a,b){var c=this;if(a&&"object"==typeof a&&a.constructor===c)return a;var d=new c(m,b);return s(d,a),d}function l(){return new TypeError("A promises callback cannot return that same promise.")}function m(){}function n(a){try{return a.then}catch(a){return Ea.error=a,Ea}}function o(a,b,c,d){try{a.call(b,c,d)}catch(a){return a}}function p(a,b,c){wa.async(function(a){var d=!1,e=o(c,b,function(c){d||(d=!0,b!==c?s(a,c,void 0):u(a,c))},function(b){d||(d=!0,v(a,b))},"Settle: "+(a._label||" unknown promise"));!d&&e&&(d=!0,v(a,e))},a)}function q(a,b){b._state===Ca?u(a,b._result):b._state===Da?(b._onError=null,v(a,b._result)):w(b,void 0,function(c){b!==c?s(a,c,void 0):u(a,c)},function(b){return v(a,b)})}function r(a,b,c){b.constructor===a.constructor&&c===C&&a.constructor.resolve===k?q(a,b):c===Ea?(v(a,Ea.error),Ea.error=null):f(c)?p(a,b,c):u(a,b)}function s(a,b){a===b?u(a,b):e(b)?r(a,b,n(b)):u(a,b)}function t(a){a._onError&&a._onError(a._result),x(a)}function u(a,b){a._state===Ba&&(a._result=b,a._state=Ca,0===a._subscribers.length?wa.instrument&&j("fulfilled",a):wa.async(x,a))}function v(a,b){a._state===Ba&&(a._state=Da,a._result=b,wa.async(t,a))}function w(a,b,c,d){var e=a._subscribers,f=e.length;a._onError=null,e[f]=b,e[f+Ca]=c,e[f+Da]=d,0===f&&a._state&&wa.async(x,a)}function x(a){var b=a._subscribers,c=a._state;if(wa.instrument&&j(c===Ca?"fulfilled":"rejected",a),0!==b.length){for(var d=void 0,e=void 0,f=a._result,g=0;g<b.length;g+=3)d=b[g],e=b[g+c],d?A(c,d,e,f):e(f);a._subscribers.length=0}}function y(){this.error=null}function z(a,b){try{return a(b)}catch(a){return Fa.error=a,Fa}}function A(a,b,c,d){var e=f(c),g=void 0,h=void 0;if(e){if((g=z(c,d))===Fa)h=g.error,g.error=null;else if(g===b)return void v(b,l())}else g=d;b._state!==Ba||(e&&void 0===h?s(b,g):void 0!==h?v(b,h):a===Ca?u(b,g):a===Da&&v(b,g))}function B(a,b){var c=!1;try{b(function(b){c||(c=!0,s(a,b))},function(b){c||(c=!0,v(a,b))})}catch(b){v(a,b)}}function C(a,b,c){var d=this,e=d._state;if(e===Ca&&!a||e===Da&&!b)return wa.instrument&&j("chained",d,d),d;d._onError=null;var f=new d.constructor(m,c),g=d._result;if(wa.instrument&&j("chained",d,f),e===Ba)w(d,f,a,b);else{var h=e===Ca?a:b;wa.async(function(){return A(e,f,h,g)})}return f}function D(a,b,c){return a===Ca?{state:"fulfilled",value:c}:{state:"rejected",reason:c}}function E(a,b){return ya(a)?new Ga(this,a,!0,b).promise:this.reject(new TypeError("Promise.all must be called with an array"),b)}function F(a,b){var c=this,d=new 
c(m,b);if(!ya(a))return v(d,new TypeError("Promise.race must be called with an array")),d;for(var e=0;d._state===Ba&&e<a.length;e++)w(c.resolve(a[e]),void 0,function(a){return s(d,a)},function(a){return v(d,a)});return d}function G(a,b){var c=this,d=new c(m,b);return v(d,a),d}function H(){throw new TypeError("You must pass a resolver function as the first argument to the promise constructor")}function I(){throw new TypeError("Failed to construct 'Promise': Please use the 'new' operator, this object constructor cannot be called as a function.")}function J(){this.value=void 0}function K(a){try{return a.then}catch(a){return Ka.value=a,Ka}}function L(a,b,c){try{a.apply(b,c)}catch(a){return Ka.value=a,Ka}}function M(a,b){for(var c={},d=a.length,e=new Array(d),f=0;f<d;f++)e[f]=a[f];for(var g=0;g<b.length;g++){c[b[g]]=e[g+1]}return c}function N(a){for(var b=a.length,c=new Array(b-1),d=1;d<b;d++)c[d-1]=a[d];return c}function O(a,b){return{then:function(c,d){return a.call(b,c,d)}}}function P(a,b){var c=function(){for(var c=this,d=arguments.length,e=new Array(d+1),f=!1,g=0;g<d;++g){var h=arguments[g];if(!f){if((f=S(h))===La){var i=new Ja(m);return v(i,La.value),i}f&&!0!==f&&(h=O(f,h))}e[g]=h}var j=new Ja(m);return e[d]=function(a,c){a?v(j,a):void 0===b?s(j,c):!0===b?s(j,N(arguments)):ya(b)?s(j,M(arguments,b)):s(j,c)},f?R(j,e,a,c):Q(j,e,a,c)};return c.__proto__=a,c}function Q(a,b,c,d){var e=L(c,d,b);return e===Ka&&v(a,e.value),a}function R(a,b,c,d){return Ja.all(b).then(function(b){var e=L(c,d,b);return e===Ka&&v(a,e.value),a})}function S(a){return!(!a||"object"!=typeof a)&&(a.constructor===Ja||K(a))}function T(a,b){return Ja.all(a,b)}function U(a,b){if(!a)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!b||"object"!=typeof b&&"function"!=typeof b?a:b}function V(a,b){if("function"!=typeof b&&null!==b)throw new TypeError("Super expression must either be null or a function, not "+typeof b);a.prototype=Object.create(b&&b.prototype,{constructor:{value:a,enumerable:!1,writable:!0,configurable:!0}}),b&&(Object.setPrototypeOf?Object.setPrototypeOf(a,b):a.__proto__=b)}function W(a,b){return ya(a)?new Ma(Ja,a,b).promise:Ja.reject(new TypeError("Promise.allSettled must be called with an array"),b)}function X(a,b){return Ja.race(a,b)}function Y(a,b){if(!a)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!b||"object"!=typeof b&&"function"!=typeof b?a:b}function Z(a,b){if("function"!=typeof b&&null!==b)throw new TypeError("Super expression must either be null or a function, not "+typeof b);a.prototype=Object.create(b&&b.prototype,{constructor:{value:a,enumerable:!1,writable:!0,configurable:!0}}),b&&(Object.setPrototypeOf?Object.setPrototypeOf(a,b):a.__proto__=b)}function $(a,b){return g(a)?new Oa(Ja,a,b).promise:Ja.reject(new TypeError("Promise.hash must be called with an object"),b)}function _(a,b){if(!a)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!b||"object"!=typeof b&&"function"!=typeof b?a:b}function aa(a,b){if("function"!=typeof b&&null!==b)throw new TypeError("Super expression must either be null or a function, not "+typeof b);a.prototype=Object.create(b&&b.prototype,{constructor:{value:a,enumerable:!1,writable:!0,configurable:!0}}),b&&(Object.setPrototypeOf?Object.setPrototypeOf(a,b):a.__proto__=b)}function ba(a,b){return g(a)?new Pa(Ja,a,!1,b).promise:Ja.reject(new TypeError("RSVP.hashSettled must be called with an object"),b)}function ca(a){throw 
setTimeout(function(){throw a}),a}function da(a){var b={resolve:void 0,reject:void 0};return b.promise=new Ja(function(a,c){b.resolve=a,b.reject=c},a),b}function ea(a,b,c){return ya(a)?f(b)?Ja.all(a,c).then(function(a){for(var d=a.length,e=new Array(d),f=0;f<d;f++)e[f]=b(a[f]);return Ja.all(e,c)}):Ja.reject(new TypeError("RSVP.map expects a function as a second argument"),c):Ja.reject(new TypeError("RSVP.map must be called with an array"),c)}function fa(a,b){return Ja.resolve(a,b)}function ga(a,b){return Ja.reject(a,b)}function ha(a,b){return Ja.all(a,b)}function ia(a,b){return Ja.resolve(a,b).then(function(a){return ha(a,b)})}function ja(a,b,c){return ya(a)||g(a)&&void 0!==a.then?f(b)?(ya(a)?ha(a,c):ia(a,c)).then(function(a){for(var d=a.length,e=new Array(d),f=0;f<d;f++)e[f]=b(a[f]);return ha(e,c).then(function(b){for(var c=new Array(d),e=0,f=0;f<d;f++)b[f]&&(c[e]=a[f],e++);return c.length=e,c})}):Ja.reject(new TypeError("RSVP.filter expects function as a second argument"),c):Ja.reject(new TypeError("RSVP.filter must be called with an array or promise"),c)}function ka(a,b){Xa[Qa]=a,Xa[Qa+1]=b,2===(Qa+=2)&&Ya()}function la(){var a=process.nextTick,b=process.versions.node.match(/^(?:(\d+)\.)?(?:(\d+)\.)?(\*|\d+)$/);return Array.isArray(b)&&"0"===b[1]&&"10"===b[2]&&(a=setImmediate),function(){return a(qa)}}function ma(){return void 0!==Ra?function(){Ra(qa)}:pa()}function na(){var a=0,b=new Ua(qa),c=document.createTextNode("");return b.observe(c,{characterData:!0}),function(){return c.data=a=++a%2}}function oa(){var a=new MessageChannel;return a.port1.onmessage=qa,function(){return a.port2.postMessage(0)}}function pa(){return function(){return setTimeout(qa,1)}}function qa(){for(var a=0;a<Qa;a+=2){(0,Xa[a])(Xa[a+1]),Xa[a]=void 0,Xa[a+1]=void 0}Qa=0}function ra(){try{var a=require,b=a("vertx");return Ra=b.runOnLoop||b.runOnContext,ma()}catch(a){return pa()}}function sa(a,b,c){return b in a?Object.defineProperty(a,b,{value:c,enumerable:!0,configurable:!0,writable:!0}):a[b]=c,a}function ta(){wa.on.apply(wa,arguments)}function ua(){wa.off.apply(wa,arguments)}var va={mixin:function(a){return a.on=this.on,a.off=this.off,a.trigger=this.trigger,a._promiseCallbacks=void 0,a},on:function(a,d){if("function"!=typeof d)throw new TypeError("Callback must be a function");var e=c(this),f=void 0;f=e[a],f||(f=e[a]=[]),-1===b(f,d)&&f.push(d)},off:function(a,d){var e=c(this),f=void 0,g=void 0;if(!d)return void(e[a]=[]);f=e[a],-1!==(g=b(f,d))&&f.splice(g,1)},trigger:function(a,b,d){var e=c(this),f=void 0;if(f=e[a])for(var g=0;g<f.length;g++)(0,f[g])(b,d)}},wa={instrument:!1};va.mixin(wa);var xa=void 0;xa=Array.isArray?Array.isArray:function(a){return"[object Array]"===Object.prototype.toString.call(a)};var ya=xa,za=Date.now||function(){return(new Date).getTime()},Aa=[],Ba=void 0,Ca=1,Da=2,Ea=new y,Fa=new y,Ga=function(){function a(a,b,c,d){this._instanceConstructor=a,this.promise=new a(m,d),this._abortOnReject=c,this._init.apply(this,arguments)}return a.prototype._init=function(a,b){var c=b.length||0;this.length=c,this._remaining=c,this._result=new Array(c),this._enumerate(b),0===this._remaining&&u(this.promise,this._result)},a.prototype._enumerate=function(a){for(var b=this.length,c=this.promise,d=0;c._state===Ba&&d<b;d++)this._eachEntry(a[d],d)},a.prototype._settleMaybeThenable=function(a,b){var c=this._instanceConstructor,d=c.resolve;if(d===k){var e=n(a);if(e===C&&a._state!==Ba)a._onError=null,this._settledAt(a._state,b,a._result);else if("function"!=typeof 
e)this._remaining--,this._result[b]=this._makeResult(Ca,b,a);else if(c===Ja){var f=new c(m);r(f,a,e),this._willSettleAt(f,b)}else this._willSettleAt(new c(function(b){return b(a)}),b)}else this._willSettleAt(d(a),b)},a.prototype._eachEntry=function(a,b){h(a)?this._settleMaybeThenable(a,b):(this._remaining--,this._result[b]=this._makeResult(Ca,b,a))},a.prototype._settledAt=function(a,b,c){var d=this.promise;d._state===Ba&&(this._abortOnReject&&a===Da?v(d,c):(this._remaining--,this._result[b]=this._makeResult(a,b,c),0===this._remaining&&u(d,this._result)))},a.prototype._makeResult=function(a,b,c){return c},a.prototype._willSettleAt=function(a,b){var c=this;w(a,void 0,function(a){return c._settledAt(Ca,b,a)},function(a){return c._settledAt(Da,b,a)})},a}(),Ha="rsvp_"+za()+"-",Ia=0,Ja=function(){function a(b,c){this._id=Ia++,this._label=c,this._state=void 0,this._result=void 0,this._subscribers=[],wa.instrument&&j("created",this),m!==b&&("function"!=typeof b&&H(),this instanceof a?B(this,b):I())}return a.prototype._onError=function(a){var b=this;wa.after(function(){b._onError&&wa.trigger("error",a,b._label)})},a.prototype.catch=function(a,b){return this.then(void 0,a,b)},a.prototype.finally=function(a,b){var c=this,d=c.constructor;return c.then(function(b){return d.resolve(a()).then(function(){return b})},function(b){return d.resolve(a()).then(function(){throw b})},b)},a}();Ja.cast=k,Ja.all=E,Ja.race=F,Ja.resolve=k,Ja.reject=G,Ja.prototype._guidKey=Ha,Ja.prototype.then=C;var Ka=new J,La=new J,Ma=function(a){function b(b,c,d){return U(this,a.call(this,b,c,!1,d))}return V(b,a),b}(Ga);Ma.prototype._makeResult=D;var Na=Object.prototype.hasOwnProperty,Oa=function(a){function b(b,c){var d=!(arguments.length>2&&void 0!==arguments[2])||arguments[2],e=arguments[3];return Y(this,a.call(this,b,c,d,e))}return Z(b,a),b.prototype._init=function(a,b){this._result={},this._enumerate(b),0===this._remaining&&u(this.promise,this._result)},b.prototype._enumerate=function(a){var b=this.promise,c=[];for(var d in a)Na.call(a,d)&&c.push({position:d,entry:a[d]});var e=c.length;this._remaining=e;for(var f=void 0,g=0;b._state===Ba&&g<e;g++)f=c[g],this._eachEntry(f.entry,f.position)},b}(Ga),Pa=function(a){function b(b,c,d){return _(this,a.call(this,b,c,!1,d))}return aa(b,a),b}(Oa);Pa.prototype._makeResult=D;var Qa=0,Ra=void 0,Sa="undefined"!=typeof window?window:void 0,Ta=Sa||{},Ua=Ta.MutationObserver||Ta.WebKitMutationObserver,Va="undefined"==typeof self&&"undefined"!=typeof process&&"[object process]"==={}.toString.call(process),Wa="undefined"!=typeof Uint8ClampedArray&&"undefined"!=typeof importScripts&&"undefined"!=typeof MessageChannel,Xa=new Array(1e3),Ya=void 0;Ya=Va?la():Ua?na():Wa?oa():void 0===Sa&&"function"==typeof require?ra():pa();if("object"==typeof self)self;else{if("object"!=typeof global)throw new Error("no global: `self` or `global` found");global}var Za;wa.async=ka,wa.after=function(a){return setTimeout(a,0)};var $a=fa,_a=function(a,b){return wa.async(a,b)};if("undefined"!=typeof window&&"object"==typeof window.__PROMISE_INSTRUMENTATION__){var ab=window.__PROMISE_INSTRUMENTATION__;d("instrument",!0);for(var bb in ab)ab.hasOwnProperty(bb)&&ta(bb,ab[bb])}var 
cb=(Za={asap:ka,cast:$a,Promise:Ja,EventTarget:va,all:T,allSettled:W,race:X,hash:$,hashSettled:ba,rethrow:ca,defer:da,denodeify:P,configure:d,on:ta,off:ua,resolve:fa,reject:ga,map:ea},sa(Za,"async",_a),sa(Za,"filter",ja),Za);a.default=cb,a.asap=ka,a.cast=$a,a.Promise=Ja,a.EventTarget=va,a.all=T,a.allSettled=W,a.race=X,a.hash=$,a.hashSettled=ba,a.rethrow=ca,a.defer=da,a.denodeify=P,a.configure=d,a.on=ta,a.off=ua,a.resolve=fa,a.reject=ga,a.map=ea,a.async=_a,a.filter=ja,Object.defineProperty(a,"__esModule",{value:!0})});var EPUBJS=EPUBJS||{};EPUBJS.core={};var ELEMENT_NODE=1,TEXT_NODE=3,COMMENT_NODE=8,DOCUMENT_NODE=9;EPUBJS.core.getEl=function(a){return document.getElementById(a)},EPUBJS.core.getEls=function(a){return document.getElementsByClassName(a)},EPUBJS.core.request=function(a,b,c){var d,e=window.URL,f=e?"blob":"arraybuffer",g=new RSVP.defer,h=new XMLHttpRequest,i=XMLHttpRequest.prototype,j=function(){var a;this.readyState==this.DONE&&(200!==this.status&&0!==this.status||!this.response?g.reject({message:this.response,stack:(new Error).stack}):(a="xml"==b?this.responseXML?this.responseXML:(new DOMParser).parseFromString(this.response,"application/xml"):"xhtml"==b?this.responseXML?this.responseXML:(new DOMParser).parseFromString(this.response,"application/xhtml+xml"):"html"==b?this.responseXML?this.responseXML:(new DOMParser).parseFromString(this.response,"text/html"):"json"==b?JSON.parse(this.response):"blob"==b?e?this.response:new Blob([this.response]):this.response,g.resolve(a)))};return"overrideMimeType"in i||Object.defineProperty(i,"overrideMimeType",{value:function(a){}}),h.onreadystatechange=j,h.open("GET",a,!0),c&&(h.withCredentials=!0),b||(d=EPUBJS.core.uri(a),b=d.extension,b={htm:"html"}[b]||b),"blob"==b&&(h.responseType=f),"json"==b&&h.setRequestHeader("Accept","application/json"),"xml"==b&&(h.responseType="document",h.overrideMimeType("text/xml")),"xhtml"==b&&(h.responseType="document"),"html"==b&&(h.responseType="document"),"binary"==b&&(h.responseType="arraybuffer"),h.send(),g.promise},EPUBJS.core.toArray=function(a){var b=[];for(var c in a){var d;a.hasOwnProperty(c)&&(d=a[c],d.ident=c,b.push(d))}return b},EPUBJS.core.uri=function(a){var b,c,d,e={protocol:"",host:"",path:"",origin:"",directory:"",base:"",filename:"",extension:"",fragment:"",href:a},f=a.indexOf("blob:"),g=a.indexOf("://"),h=a.indexOf("?"),i=a.indexOf("#");return 0===f?(e.protocol="blob",e.base=a.indexOf(0,i),e):(-1!=i&&(e.fragment=a.slice(i+1),a=a.slice(0,i)),-1!=h&&(e.search=a.slice(h+1),a=a.slice(0,h),href=e.href),-1!=g?(e.protocol=a.slice(0,g),b=a.slice(g+3),d=b.indexOf("/"),-1===d?(e.host=e.path,e.path=""):(e.host=b.slice(0,d),e.path=b.slice(d)),e.origin=e.protocol+"://"+e.host,e.directory=EPUBJS.core.folder(e.path),e.base=e.origin+e.directory):(e.path=a,e.directory=EPUBJS.core.folder(a),e.base=e.directory),e.filename=a.replace(e.base,""),c=e.filename.lastIndexOf("."),-1!=c&&(e.extension=e.filename.slice(c+1)),e)},EPUBJS.core.folder=function(a){var b=a.lastIndexOf("/");if(-1==b);return a.slice(0,b+1)},EPUBJS.core.dataURLToBlob=function(a){var b,c,d,e,f,g=";base64,";if(-1==a.indexOf(g))return b=a.split(","),c=b[0].split(":")[1],d=b[1],new Blob([d],{type:c});b=a.split(g),c=b[0].split(":")[1],d=window.atob(b[1]),e=d.length,f=new Uint8Array(e);for(var h=0;h<e;++h)f[h]=d.charCodeAt(h);return new Blob([f],{type:c})},EPUBJS.core.addScript=function(a,b,c){var 
d,e;e=!1,d=document.createElement("script"),d.type="text/javascript",d.async=!1,d.src=a,d.onload=d.onreadystatechange=function(){e||this.readyState&&"complete"!=this.readyState||(e=!0,b&&b())},c=c||document.body,c.appendChild(d)},EPUBJS.core.addScripts=function(a,b,c){var d=a.length,e=0,f=function(){e++,d==e?b&&b():EPUBJS.core.addScript(a[e],f,c)};EPUBJS.core.addScript(a[e],f,c)},EPUBJS.core.addCss=function(a,b,c){var d,e;e=!1,d=document.createElement("link"),d.type="text/css",d.rel="stylesheet",d.href=a,d.onload=d.onreadystatechange=function(){e||this.readyState&&"complete"!=this.readyState||(e=!0,b&&b())},c=c||document.body,c.appendChild(d)},EPUBJS.core.prefixed=function(a){var b=["Webkit","Moz","O","ms"],c=a[0].toUpperCase()+a.slice(1),d=b.length;if(void 0!==document.documentElement.style[a])return a;for(var e=0;e<d;e++)if(void 0!==document.documentElement.style[b[e]+c])return b[e]+c;return a},EPUBJS.core.resolveUrl=function(a,b){var c,d,e=[],f=EPUBJS.core.uri(b),g=a.split("/");return f.host?b:(g.pop(),d=b.split("/"),d.forEach(function(a){".."===a?g.pop():e.push(a)}),c=g.concat(e),c.join("/"))},EPUBJS.core.uuid=function(){var a=(new Date).getTime();return"xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g,function(b){var c=(a+16*Math.random())%16|0;return a=Math.floor(a/16),("x"==b?c:7&c|8).toString(16)})},EPUBJS.core.insert=function(a,b,c){var d=EPUBJS.core.locationOf(a,b,c);return b.splice(d,0,a),d},EPUBJS.core.locationOf=function(a,b,c,d,e){var f,g=d||0,h=e||b.length,i=parseInt(g+(h-g)/2);return c||(c=function(a,b){return a>b?1:a<b?-1:(a=b)?0:void 0}),h-g<=0?i:(f=c(b[i],a),h-g==1?f>0?i:i+1:0===f?i:-1===f?EPUBJS.core.locationOf(a,b,c,i,h):EPUBJS.core.locationOf(a,b,c,g,i))},EPUBJS.core.indexOfSorted=function(a,b,c,d,e){var f,g=d||0,h=e||b.length,i=parseInt(g+(h-g)/2);return c||(c=function(a,b){return a>b?1:a<b?-1:(a=b)?0:void 0}),h-g<=0?-1:(f=c(b[i],a),h-g==1?0===f?i:-1:0===f?i:-1===f?EPUBJS.core.indexOfSorted(a,b,c,i,h):EPUBJS.core.indexOfSorted(a,b,c,g,i))},EPUBJS.core.queue=function(a){var b=[],c=a,d=function(a,c,d){return b.push({funcName:a,args:c,context:d}),b},e=function(){var a;b.length&&(a=b.shift(),c[a.funcName].apply(a.context||c,a.args))};return{enqueue:d,dequeue:e,flush:function(){for(;b.length;)e()},clear:function(){b=[]},length:function(){return b.length}}},EPUBJS.core.getElementXPath=function(a){return a&&a.id?'//*[@id="'+a.id+'"]':EPUBJS.core.getElementTreeXPath(a)},EPUBJS.core.getElementTreeXPath=function(a){var b,c,d,e,f=[],g="http://www.w3.org/1999/xhtml"===a.ownerDocument.documentElement.getAttribute("xmlns");for(a.nodeType===Node.TEXT_NODE&&(b=EPUBJS.core.indexOfTextNode(a)+1,f.push("text()["+b+"]"),a=a.parentNode);a&&1==a.nodeType;a=a.parentNode){b=0;for(var h=a.previousSibling;h;h=h.previousSibling)h.nodeType!=Node.DOCUMENT_TYPE_NODE&&h.nodeName==a.nodeName&&++b;c=a.nodeName.toLowerCase(),d=g?"xhtml:"+c:c,e=b?"["+(b+1)+"]":"",f.splice(0,0,d+e)}return f.length?"./"+f.join("/"):null},EPUBJS.core.nsResolver=function(a){return{xhtml:"http://www.w3.org/1999/xhtml",epub:"http://www.idpf.org/2007/ops"}[a]||null},EPUBJS.core.cleanStringForXpath=function(a){var b=a.match(/[^'"]+|['"]/g);return b=b.map(function(a){return"'"===a?'"\'"':'"'===a?"'\"'":"'"+a+"'"}),"concat('',"+b.join(",")+")"},EPUBJS.core.indexOfTextNode=function(a){for(var b,c=a.parentNode,d=c.childNodes,e=-1,f=0;f<d.length&&(b=d[f],b.nodeType===Node.TEXT_NODE&&e++,b!=a);f++);return e},EPUBJS.core.defaults=function(a){for(var b=1,c=arguments.length;b<c;b++){var d=arguments[b];for(var e in d)void 
0===a[e]&&(a[e]=d[e])}return a},EPUBJS.core.extend=function(a){return[].slice.call(arguments,1).forEach(function(b){b&&Object.getOwnPropertyNames(b).forEach(function(c){Object.defineProperty(a,c,Object.getOwnPropertyDescriptor(b,c))})}),a},EPUBJS.core.clone=function(a){return EPUBJS.core.isArray(a)?a.slice():EPUBJS.core.extend({},a)},EPUBJS.core.isElement=function(a){return!(!a||1!=a.nodeType)},EPUBJS.core.isNumber=function(a){return!isNaN(parseFloat(a))&&isFinite(a)},EPUBJS.core.isString=function(a){return"string"==typeof a||a instanceof String},EPUBJS.core.isArray=Array.isArray||function(a){return"[object Array]"===Object.prototype.toString.call(a)},EPUBJS.core.values=function(a){var b,c,d,e=-1;if(!a)return[];for(b=Object.keys(a),c=b.length,d=Array(c);++e<c;)d[e]=a[b[e]];return d},EPUBJS.core.indexOfNode=function(a,b){for(var c,d=a.parentNode,e=d.childNodes,f=-1,g=0;g<e.length&&(c=e[g],c.nodeType===b&&f++,c!=a);g++);return f},EPUBJS.core.indexOfTextNode=function(a){return EPUBJS.core.indexOfNode(a,TEXT_NODE)},EPUBJS.core.indexOfElementNode=function(a){return EPUBJS.core.indexOfNode(a,ELEMENT_NODE)};var EPUBJS=EPUBJS||{};EPUBJS.reader={},EPUBJS.reader.plugins={},function(a,b){var c=(a.ePubReader,a.ePubReader=function(a,b){return new EPUBJS.Reader(a,b)});"function"==typeof define&&define.amd?define(function(){return Reader}):"undefined"!=typeof module&&module.exports&&(module.exports=c)}(window,jQuery),EPUBJS.Reader=function(a,b){var c,d,e,f=this,g=$("#viewer"),h=window.location.search;this.settings=EPUBJS.core.defaults(b||{},{bookPath:a,restore:!1,reload:!1,bookmarks:void 0,annotations:void 0,contained:void 0,bookKey:void 0,styles:void 0,sidebarReflow:!1,generatePagination:!1,history:!0}),h&&(e=h.slice(1).split("&"),e.forEach(function(a){var b=a.split("="),c=b[0],d=b[1]||"";f.settings[c]=decodeURIComponent(d)})),this.setBookKey(this.settings.bookPath),this.settings.restore&&this.isSaved()&&this.applySavedSettings(),this.settings.styles=this.settings.styles||{fontSize:"100%"},this.book=c=new ePub(this.settings.bookPath,this.settings),this.offline=!1,this.sidebarOpen=!1,this.settings.bookmarks||(this.settings.bookmarks=[]),this.settings.annotations||(this.settings.annotations=[]),this.settings.generatePagination&&c.generatePagination(g.width(),g.height()),this.rendition=c.renderTo("viewer",{ignoreClass:"annotator-hl",width:"100%",height:"100%"}),this.settings.previousLocationCfi?this.displayed=this.rendition.display(this.settings.previousLocationCfi):this.displayed=this.rendition.display(),c.ready.then(function(){f.ReaderController=EPUBJS.reader.ReaderController.call(f,c),f.SettingsController=EPUBJS.reader.SettingsController.call(f,c),f.ControlsController=EPUBJS.reader.ControlsController.call(f,c),f.SidebarController=EPUBJS.reader.SidebarController.call(f,c),f.BookmarksController=EPUBJS.reader.BookmarksController.call(f,c),f.NotesController=EPUBJS.reader.NotesController.call(f,c),window.addEventListener("hashchange",this.hashChanged.bind(this),!1),document.addEventListener("keydown",this.adjustFontSize.bind(this),!1),this.rendition.on("keydown",this.adjustFontSize.bind(this)),this.rendition.on("keydown",f.ReaderController.arrowKeys.bind(this)),this.rendition.on("selected",this.selectedRange.bind(this))}.bind(this)).then(function(){f.ReaderController.hideLoader()}.bind(this));for(d in EPUBJS.reader.plugins)EPUBJS.reader.plugins.hasOwnProperty(d)&&(f[d]=EPUBJS.reader.plugins[d].call(f,c));return 
c.loaded.metadata.then(function(a){f.MetaController=EPUBJS.reader.MetaController.call(f,a)}),c.loaded.navigation.then(function(a){f.TocController=EPUBJS.reader.TocController.call(f,a)}),window.addEventListener("beforeunload",this.unload.bind(this),!1),this},EPUBJS.Reader.prototype.adjustFontSize=function(a){var b,c=2,d=a.ctrlKey||a.metaKey;this.settings.styles&&(this.settings.styles.fontSize||(this.settings.styles.fontSize="100%"),b=parseInt(this.settings.styles.fontSize.slice(0,-1)),d&&187==a.keyCode&&(a.preventDefault(),this.book.setStyle("fontSize",b+c+"%")),d&&189==a.keyCode&&(a.preventDefault(),this.book.setStyle("fontSize",b-c+"%")),d&&48==a.keyCode&&(a.preventDefault(),this.book.setStyle("fontSize","100%")))},EPUBJS.Reader.prototype.addBookmark=function(a){this.isBookmarked(a)>-1||(this.settings.bookmarks.push(a),this.trigger("reader:bookmarked",a))},EPUBJS.Reader.prototype.removeBookmark=function(a){var b=this.isBookmarked(a);-1!==b&&(this.settings.bookmarks.splice(b,1),this.trigger("reader:unbookmarked",b))},EPUBJS.Reader.prototype.isBookmarked=function(a){return this.settings.bookmarks.indexOf(a)},EPUBJS.Reader.prototype.clearBookmarks=function(){this.settings.bookmarks=[]},EPUBJS.Reader.prototype.addNote=function(a){this.settings.annotations.push(a)},EPUBJS.Reader.prototype.removeNote=function(a){var b=this.settings.annotations.indexOf(a);-1!==b&&delete this.settings.annotations[b]},EPUBJS.Reader.prototype.clearNotes=function(){this.settings.annotations=[]},EPUBJS.Reader.prototype.setBookKey=function(a){return this.settings.bookKey||(this.settings.bookKey="epubjsreader:"+EPUBJS.VERSION+":"+window.location.host+":"+a),this.settings.bookKey},EPUBJS.Reader.prototype.isSaved=function(a){return!!localStorage&&null!==localStorage.getItem(this.settings.bookKey)},EPUBJS.Reader.prototype.removeSavedSettings=function(){if(!localStorage)return!1;localStorage.removeItem(this.settings.bookKey)},EPUBJS.Reader.prototype.applySavedSettings=function(){var a;if(!localStorage)return!1;try{a=JSON.parse(localStorage.getItem(this.settings.bookKey))}catch(a){return!1}return!!a&&(a.styles&&(this.settings.styles=EPUBJS.core.defaults(this.settings.styles||{},a.styles)),this.settings=EPUBJS.core.defaults(this.settings,a),!0)},EPUBJS.Reader.prototype.saveSettings=function(){if(this.book&&(this.settings.previousLocationCfi=this.rendition.currentLocation().start.cfi),!localStorage)return!1;localStorage.setItem(this.settings.bookKey,JSON.stringify(this.settings))},EPUBJS.Reader.prototype.unload=function(){this.settings.restore&&localStorage&&this.saveSettings()},EPUBJS.Reader.prototype.hashChanged=function(){var a=window.location.hash.slice(1);this.rendition.display(a)},EPUBJS.Reader.prototype.selectedRange=function(a){var b="#"+a;this.settings.history&&window.location.hash!=b&&(history.pushState({},"",b),this.currentLocationCfi=a)},RSVP.EventTarget.mixin(EPUBJS.Reader.prototype),EPUBJS.reader.BookmarksController=function(){var a=this.book,b=this.rendition,c=$("#bookmarksView"),d=c.find("#bookmarks"),e=document.createDocumentFragment(),f=function(){c.show()},g=function(){c.hide()},h=0,i=function(c){var d=document.createElement("li"),e=document.createElement("a");d.id="bookmark-"+h,d.classList.add("list_item");var f,g=a.spine.get(c);return g.index in a.navigation.toc?(f=a.navigation.toc[g.index],e.textContent=f.label):e.textContent=c,e.href=c,e.classList.add("bookmark_link"),e.addEventListener("click",function(a){var 
c=this.getAttribute("href");b.display(c),a.preventDefault()},!1),d.appendChild(e),h++,d};return this.settings.bookmarks.forEach(function(a){var b=i(a);e.appendChild(b)}),d.append(e),this.on("reader:bookmarked",function(a){var b=i(a);d.append(b)}),this.on("reader:unbookmarked",function(a){$("#bookmark-"+a).remove()}),{show:f,hide:g}},EPUBJS.reader.ControlsController=function(a){var b=this,c=this.rendition,d=($("#store"),$("#fullscreen")),e=($("#fullscreenicon"),$("#cancelfullscreenicon"),$("#slider")),f=($("#main"),$("#sidebar"),$("#setting")),g=$("#bookmark");return e.on("click",function(){b.sidebarOpen?(b.SidebarController.hide(),e.addClass("icon-menu"),e.removeClass("icon-right")):(b.SidebarController.show(),e.addClass("icon-right"),e.removeClass("icon-menu"))}),"undefined"!=typeof screenfull&&(d.on("click",function(){screenfull.toggle($("#container")[0])}),screenfull.raw&&document.addEventListener(screenfull.raw.fullscreenchange,function(){fullscreen=screenfull.isFullscreen,fullscreen?d.addClass("icon-resize-small").removeClass("icon-resize-full"):d.addClass("icon-resize-full").removeClass("icon-resize-small")})),f.on("click",function(){b.SettingsController.show()}),g.on("click",function(){var a=b.rendition.currentLocation().start.cfi;-1===b.isBookmarked(a)?(b.addBookmark(a),g.addClass("icon-bookmark").removeClass("icon-bookmark-empty")):(b.removeBookmark(a),g.removeClass("icon-bookmark").addClass("icon-bookmark-empty"))}),c.on("relocated",function(a){var c=a.start.cfi,d="#"+c;-1===b.isBookmarked(c)?g.removeClass("icon-bookmark").addClass("icon-bookmark-empty"):g.addClass("icon-bookmark").removeClass("icon-bookmark-empty"),b.currentLocationCfi=c,b.settings.history&&window.location.hash!=d&&history.pushState({},"",d)}),{}},EPUBJS.reader.MetaController=function(a){var b=a.title,c=a.creator,d=$("#book-title"),e=$("#chapter-title"),f=$("#title-seperator");document.title=b+" – "+c,d.html(b),e.html(c),f.show()},EPUBJS.reader.NotesController=function(){var a=this.book,b=this.rendition,c=this,d=$("#notesView"),e=$("#notes"),f=$("#note-text"),g=$("#note-anchor"),h=c.settings.annotations,i=a.renderer,j=[],k=new ePub.CFI,l=function(){d.show()},m=function(){d.hide()},n=function(d){var e,h,i,j,l,m=a.renderer.doc;if(m.caretPositionFromPoint?(e=m.caretPositionFromPoint(d.clientX,d.clientY),h=e.offsetNode,i=e.offset):m.caretRangeFromPoint&&(e=m.caretRangeFromPoint(d.clientX,d.clientY),h=e.startContainer,i=e.startOffset),3!==h.nodeType)for(var q=0;q<h.childNodes.length;q++)if(3==h.childNodes[q].nodeType){h=h.childNodes[q];break}i=h.textContent.indexOf(".",i),-1===i?i=h.length:i+=1,j=k.generateCfiFromTextNode(h,i,a.renderer.currentChapter.cfiBase),l={annotatedAt:new Date,anchor:j,body:f.val()},c.addNote(l),o(l),p(l),f.val(""),g.text("Attach"),f.prop("disabled",!1),b.off("click",n)},o=function(a){var c=document.createElement("li"),d=document.createElement("a");c.innerHTML=a.body,d.innerHTML=" context »",d.href="#"+a.anchor,d.onclick=function(){return b.display(a.anchor),!1},c.appendChild(d),e.append(c)},p=function(b){var c=a.renderer.doc,d=document.createElement("span"),e=document.createElement("a");d.classList.add("footnotesuperscript","reader_generated"),d.style.verticalAlign="super",d.style.fontSize=".75em",d.style.lineHeight="1em",e.style.padding="2px",e.style.backgroundColor="#fffa96",e.style.borderRadius="5px",e.style.cursor="pointer",d.id="note-"+EPUBJS.core.uuid(),e.innerHTML=h.indexOf(b)+1+"[Reader]",d.appendChild(e),k.addMarker(b.anchor,c,d),q(d,b.body)},q=function(a,d){var 
e=a.id,f=function(){var c,f,l,m,n=i.height,o=i.width,p=225;j[e]||(j[e]=document.createElement("div"),j[e].setAttribute("class","popup"),pop_content=document.createElement("div"),j[e].appendChild(pop_content),pop_content.innerHTML=d,pop_content.setAttribute("class","pop_content"),i.render.document.body.appendChild(j[e]),j[e].addEventListener("mouseover",g,!1),j[e].addEventListener("mouseout",h,!1),b.on("locationChanged",k,this),b.on("locationChanged",h,this)),c=j[e],f=a.getBoundingClientRect(),l=f.left,m=f.top,c.classList.add("show"),popRect=c.getBoundingClientRect(),c.style.left=l-popRect.width/2+"px",c.style.top=m+"px",p>n/2.5&&(p=n/2.5,pop_content.style.maxHeight=p+"px"),popRect.height+m>=n-25?(c.style.top=m-popRect.height+"px",c.classList.add("above")):c.classList.remove("above"),l-popRect.width<=0?(c.style.left=l+"px",c.classList.add("left")):c.classList.remove("left"),l+popRect.width/2>=o?(c.style.left=l-300+"px",popRect=c.getBoundingClientRect(),c.style.left=l-popRect.width+"px",popRect.height+m>=n-25?(c.style.top=m-popRect.height+"px",c.classList.add("above")):c.classList.remove("above"),c.classList.add("right")):c.classList.remove("right")},g=function(){j[e].classList.add("on")},h=function(){j[e].classList.remove("on")},k=function(){setTimeout(function(){j[e].classList.remove("show")},100)},m=function(){c.ReaderController.slideOut(),l()};a.addEventListener("mouseover",f,!1),a.addEventListener("mouseout",k,!1),a.addEventListener("click",m,!1)};return g.on("click",function(a){g.text("Cancel"),f.prop("disabled","true"),b.on("click",n)}),h.forEach(function(a){o(a)}),{show:l,hide:m}},EPUBJS.reader.ReaderController=function(a){var b=$("#main"),c=$("#divider"),d=$("#loader"),e=$("#next"),f=$("#prev"),g=this,a=this.book,h=this.rendition,i=function(){h.currentLocation().start.cfi;g.settings.sidebarReflow?(b.removeClass("single"),b.one("transitionend",function(){h.resize()})):b.removeClass("closed")},j=function(){var a=h.currentLocation();if(a){a.start.cfi;g.settings.sidebarReflow?(b.addClass("single"),b.one("transitionend",function(){h.resize()})):b.addClass("closed")}},k=function(){d.show(),n()},l=function(){d.hide()},m=function(){c.addClass("show")},n=function(){c.removeClass("show")},o=!1,p=function(b){37==b.keyCode&&("rtl"===a.package.metadata.direction?h.next():h.prev(),f.addClass("active"),o=!0,setTimeout(function(){o=!1,f.removeClass("active")},100),b.preventDefault()),39==b.keyCode&&("rtl"===a.package.metadata.direction?h.prev():h.next(),e.addClass("active"),o=!0,setTimeout(function(){o=!1,e.removeClass("active")},100),b.preventDefault())};return document.addEventListener("keydown",p,!1),e.on("click",function(b){"rtl"===a.package.metadata.direction?h.prev():h.next(),b.preventDefault()}),f.on("click",function(b){"rtl"===a.package.metadata.direction?h.next():h.prev(),b.preventDefault()}),h.on("layout",function(a){!0===a.spread?m():n()}),h.on("relocated",function(a){a.atStart&&f.addClass("disabled"),a.atEnd&&e.addClass("disabled")}),{slideOut:j,slideIn:i,showLoader:k,hideLoader:l,showDivider:m,hideDivider:n,arrowKeys:p}},EPUBJS.reader.SettingsController=function(){var a=(this.book,this),b=$("#settings-modal"),c=$(".overlay"),d=function(){b.addClass("md-show")},e=function(){b.removeClass("md-show")};return $("#sidebarReflow").on("click",function(){a.settings.sidebarReflow=!a.settings.sidebarReflow}),b.find(".closer").on("click",function(){e()}),c.on("click",function(){e()}),{show:d,hide:e}},EPUBJS.reader.SidebarController=function(a){var 
b=this,c=$("#sidebar"),d=$("#panels"),e="Toc",f=function(a){var c=a+"Controller";e!=a&&void 0!==b[c]&&(b[e+"Controller"].hide(),b[c].show(),e=a,d.find(".active").removeClass("active"),d.find("#show-"+a).addClass("active"))},g=function(){return e},h=function(){b.sidebarOpen=!0,b.ReaderController.slideOut(),c.addClass("open")},i=function(){b.sidebarOpen=!1,b.ReaderController.slideIn(),c.removeClass("open")};return d.find(".show_view").on("click",function(a){var b=$(this).data("view");f(b),a.preventDefault()}),{show:h,hide:i,getActivePanel:g,changePanelTo:f}},EPUBJS.reader.TocController=function(a){var b=(this.book,this.rendition),c=$("#tocView"),d=document.createDocumentFragment(),e=!1,f=function(a,b){var c=document.createElement("ul");return b||(b=1),a.forEach(function(a){var d=document.createElement("li"),e=document.createElement("a");toggle=document.createElement("a");var g;d.id="toc-"+a.id,d.classList.add("list_item"),e.textContent=a.label,e.href=a.href,e.classList.add("toc_link"),d.appendChild(e),a.subitems&&a.subitems.length>0&&(b++,g=f(a.subitems,b),toggle.classList.add("toc_toggle"),d.insertBefore(toggle,e),d.appendChild(g)),c.appendChild(d)}),c},g=function(){c.show()},h=function(){c.hide()},i=function(a){var b=a.id,d=c.find("#toc-"+b),f=c.find(".currentChapter");c.find(".openChapter");d.length&&(d!=f&&d.has(e).length>0&&f.removeClass("currentChapter"),d.addClass("currentChapter"),d.parents("li").addClass("openChapter"))};b.on("renderered",i);var j=f(a);return d.appendChild(j),c.append(d),c.find(".toc_link").on("click",function(a){var d=this.getAttribute("href");a.preventDefault(),b.display(d),c.find(".currentChapter").addClass("openChapter").removeClass("currentChapter"),$(this).parent("li").addClass("currentChapter")}),c.find(".toc_toggle").on("click",function(a){var b=$(this).parent("li"),c=b.hasClass("openChapter");a.preventDefault(),c?b.removeClass("openChapter"):b.addClass("openChapter")}),{show:g,hide:h}}; | zikongli-jingdian-taozhuang-x3 | /zikongli-jingdian-taozhuang-x3-2022.10.15.0.tar.gz/zikongli-jingdian-taozhuang-x3-2022.10.15.0/ZikongliJingdianTaozhuangX3/js/reader.min.js | reader.min.js |
EPUBJS.Hooks.register("beforeChapterDisplay").endnotes=function(a,b){var c=b.contents.querySelectorAll("a[href]"),d=Array.prototype.slice.call(c),e=EPUBJS.core.folder(location.pathname),f=(EPUBJS.cssPath,{});EPUBJS.core.addCss(EPUBJS.cssPath+"popup.css",!1,b.render.document.head),d.forEach(function(a){function c(){var c,h,n=b.height,o=b.width,p=225;m||(c=j.cloneNode(!0),m=c.querySelector("p")),f[i]||(f[i]=document.createElement("div"),f[i].setAttribute("class","popup"),pop_content=document.createElement("div"),f[i].appendChild(pop_content),pop_content.appendChild(m),pop_content.setAttribute("class","pop_content"),b.render.document.body.appendChild(f[i]),f[i].addEventListener("mouseover",d,!1),f[i].addEventListener("mouseout",e,!1),b.on("renderer:pageChanged",g,this),b.on("renderer:pageChanged",e,this)),c=f[i],h=a.getBoundingClientRect(),k=h.left,l=h.top,c.classList.add("show"),popRect=c.getBoundingClientRect(),c.style.left=k-popRect.width/2+"px",c.style.top=l+"px",p>n/2.5&&(p=n/2.5,pop_content.style.maxHeight=p+"px"),popRect.height+l>=n-25?(c.style.top=l-popRect.height+"px",c.classList.add("above")):c.classList.remove("above"),k-popRect.width<=0?(c.style.left=k+"px",c.classList.add("left")):c.classList.remove("left"),k+popRect.width/2>=o?(c.style.left=k-300+"px",popRect=c.getBoundingClientRect(),c.style.left=k-popRect.width+"px",popRect.height+l>=n-25?(c.style.top=l-popRect.height+"px",c.classList.add("above")):c.classList.remove("above"),c.classList.add("right")):c.classList.remove("right")}function d(){f[i].classList.add("on")}function e(){f[i].classList.remove("on")}function g(){setTimeout(function(){f[i].classList.remove("show")},100)}var h,i,j,k,l,m;"noteref"==a.getAttribute("epub:type")&&(h=a.getAttribute("href"),i=h.replace("#",""),j=b.render.document.getElementById(i),a.addEventListener("mouseover",c,!1),a.addEventListener("mouseout",g,!1))}),a&&a()},EPUBJS.Hooks.register("beforeChapterDisplay").mathml=function(a,b){if(b.currentChapter.manifestProperties.indexOf("mathml")!==-1){b.render.iframe.contentWindow.mathmlCallback=a;var c=document.createElement("script");c.type="text/x-mathjax-config",c.innerHTML=' MathJax.Hub.Register.StartupHook("End",function () { window.mathmlCallback(); }); MathJax.Hub.Config({jax: ["input/TeX","input/MathML","output/SVG"],extensions: ["tex2jax.js","mml2jax.js","MathEvents.js"],TeX: {extensions: ["noErrors.js","noUndefined.js","autoload-all.js"]},MathMenu: {showRenderer: false},menuSettings: {zoom: "Click"},messageStyle: "none"}); ',b.doc.body.appendChild(c),EPUBJS.core.addScript("http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML",null,b.doc.head)}else a&&a()},EPUBJS.Hooks.register("beforeChapterDisplay").smartimages=function(a,b){var c=b.contents.querySelectorAll("img"),d=Array.prototype.slice.call(c),e=b.height;if("reflowable"!=b.layoutSettings.layout)return void a();d.forEach(function(a){var c=function(){var 
c,d=a.getBoundingClientRect(),f=d.height,g=d.top,h=a.getAttribute("data-height"),i=h||f,j=Number(getComputedStyle(a,"").fontSize.match(/(\d*(\.\d*)?)px/)[1]),k=j?j/2:0;e=b.contents.clientHeight,g<0&&(g=0),a.style.maxWidth="100%",i+g>=e?(g<e/2?(c=e-g-k,a.style.maxHeight=c+"px",a.style.width="auto"):(i>e&&(a.style.maxHeight=e+"px",a.style.width="auto",d=a.getBoundingClientRect(),i=d.height),a.style.display="block",a.style.WebkitColumnBreakBefore="always",a.style.breakBefore="column"),a.setAttribute("data-height",c)):(a.style.removeProperty("max-height"),a.style.removeProperty("margin-top"))},d=function(){b.off("renderer:resized",c),b.off("renderer:chapterUnload",this)};a.addEventListener("load",c,!1),b.on("renderer:resized",c),b.on("renderer:chapterUnload",d),c()}),a&&a()},EPUBJS.Hooks.register("beforeChapterDisplay").transculsions=function(a,b){var c=b.contents.querySelectorAll("[transclusion]");Array.prototype.slice.call(c).forEach(function(a){function c(){j=g,k=h,j>chapter.colWidth&&(d=chapter.colWidth/j,j=chapter.colWidth,k*=d),f.width=j,f.height=k}var d,e=a.getAttribute("ref"),f=document.createElement("iframe"),g=a.getAttribute("width"),h=a.getAttribute("height"),i=a.parentNode,j=g,k=h;c(),b.listenUntil("renderer:resized","renderer:chapterUnloaded",c),f.src=e,i.replaceChild(f,a)}),a&&a()}; | zikongli-jingdian-taozhuang-x3 | /zikongli-jingdian-taozhuang-x3-2022.10.15.0.tar.gz/zikongli-jingdian-taozhuang-x3-2022.10.15.0/ZikongliJingdianTaozhuangX3/js/hooks.min.js | hooks.min.js |
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
typeof define === 'function' && define.amd ? define(['exports'], factory) :
(factory((global.RSVP = global.RSVP || {})));
}(this, (function (exports) { 'use strict';
function indexOf(callbacks, callback) {
for (var i = 0, l = callbacks.length; i < l; i++) {
if (callbacks[i] === callback) {
return i;
}
}
return -1;
}
function callbacksFor(object) {
var callbacks = object._promiseCallbacks;
if (!callbacks) {
callbacks = object._promiseCallbacks = {};
}
return callbacks;
}
/**
@class RSVP.EventTarget
*/
var EventTarget = {
/**
`RSVP.EventTarget.mixin` extends an object with EventTarget methods. For
  example:
```javascript
let object = {};
RSVP.EventTarget.mixin(object);
object.on('finished', function(event) {
// handle event
});
object.trigger('finished', { detail: value });
```
`EventTarget.mixin` also works with prototypes:
```javascript
let Person = function() {};
RSVP.EventTarget.mixin(Person.prototype);
let yehuda = new Person();
let tom = new Person();
yehuda.on('poke', function(event) {
console.log('Yehuda says OW');
});
tom.on('poke', function(event) {
console.log('Tom says OW');
});
yehuda.trigger('poke');
tom.trigger('poke');
```
@method mixin
@for RSVP.EventTarget
@private
@param {Object} object object to extend with EventTarget methods
*/
mixin: function (object) {
object['on'] = this['on'];
object['off'] = this['off'];
object['trigger'] = this['trigger'];
object._promiseCallbacks = undefined;
return object;
},
/**
Registers a callback to be executed when `eventName` is triggered
```javascript
object.on('event', function(eventInfo){
// handle the event
});
object.trigger('event');
```
@method on
@for RSVP.EventTarget
@private
@param {String} eventName name of the event to listen for
@param {Function} callback function to be called when the event is triggered.
*/
on: function (eventName, callback) {
if (typeof callback !== 'function') {
throw new TypeError('Callback must be a function');
}
var allCallbacks = callbacksFor(this),
callbacks = void 0;
callbacks = allCallbacks[eventName];
if (!callbacks) {
callbacks = allCallbacks[eventName] = [];
}
if (indexOf(callbacks, callback) === -1) {
callbacks.push(callback);
}
},
/**
You can use `off` to stop firing a particular callback for an event:
```javascript
function doStuff() { // do stuff! }
object.on('stuff', doStuff);
object.trigger('stuff'); // doStuff will be called
// Unregister ONLY the doStuff callback
object.off('stuff', doStuff);
object.trigger('stuff'); // doStuff will NOT be called
```
If you don't pass a `callback` argument to `off`, none of the callbacks for
the event will be executed when the event fires. For example:
```javascript
let callback1 = function(){};
let callback2 = function(){};
object.on('stuff', callback1);
object.on('stuff', callback2);
object.trigger('stuff'); // callback1 and callback2 will be executed.
object.off('stuff');
object.trigger('stuff'); // callback1 and callback2 will not be executed!
```
@method off
@for RSVP.EventTarget
@private
@param {String} eventName event to stop listening to
@param {Function} callback optional argument. If given, only the function
given will be removed from the event's callback queue. If no `callback`
argument is given, all callbacks will be removed from the event's callback
queue.
*/
off: function (eventName, callback) {
var allCallbacks = callbacksFor(this),
callbacks = void 0,
index = void 0;
if (!callback) {
allCallbacks[eventName] = [];
return;
}
callbacks = allCallbacks[eventName];
index = indexOf(callbacks, callback);
if (index !== -1) {
callbacks.splice(index, 1);
}
},
/**
Use `trigger` to fire custom events. For example:
```javascript
object.on('foo', function(){
console.log('foo event happened!');
});
object.trigger('foo');
// 'foo event happened!' logged to the console
```
You can also pass a value as a second argument to `trigger` that will be
passed as an argument to all event listeners for the event:
```javascript
object.on('foo', function(value){
console.log(value.name);
});
object.trigger('foo', { name: 'bar' });
// 'bar' logged to the console
```
@method trigger
@for RSVP.EventTarget
@private
@param {String} eventName name of the event to be triggered
@param {*} options optional value to be passed to any event handlers for
the given `eventName`
*/
trigger: function (eventName, options, label) {
var allCallbacks = callbacksFor(this),
callbacks = void 0,
callback = void 0;
if (callbacks = allCallbacks[eventName]) {
// Don't cache the callbacks.length since it may grow
for (var i = 0; i < callbacks.length; i++) {
callback = callbacks[i];
callback(options, label);
}
}
}
};
var config = {
instrument: false
};
EventTarget['mixin'](config);
function configure(name, value) {
if (arguments.length === 2) {
config[name] = value;
} else {
return config[name];
}
}
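  /*
    A minimal usage sketch for `configure` (assuming this build is exposed as
    the global `RSVP`, as the UMD wrapper above arranges): with two arguments
    it sets a config flag, with one it reads the current value back.
    ```javascript
    RSVP.configure('instrument', true);          // setter: two arguments
    var enabled = RSVP.configure('instrument');  // getter: returns true
    ```
  */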
function objectOrFunction(x) {
var type = typeof x;
return x !== null && (type === 'object' || type === 'function');
}
function isFunction(x) {
return typeof x === 'function';
}
function isObject(x) {
return x !== null && typeof x === 'object';
}
function isMaybeThenable(x) {
return x !== null && typeof x === 'object';
}
var _isArray = void 0;
if (Array.isArray) {
_isArray = Array.isArray;
} else {
_isArray = function (x) {
return Object.prototype.toString.call(x) === '[object Array]';
};
}
var isArray = _isArray;
// Date.now is not available in browsers < IE9
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/now#Compatibility
var now = Date.now || function () {
return new Date().getTime();
};
var queue = [];
function scheduleFlush() {
setTimeout(function () {
for (var i = 0; i < queue.length; i++) {
var entry = queue[i];
var payload = entry.payload;
payload.guid = payload.key + payload.id;
payload.childGuid = payload.key + payload.childId;
if (payload.error) {
payload.stack = payload.error.stack;
}
config['trigger'](entry.name, entry.payload);
}
queue.length = 0;
}, 50);
}
function instrument(eventName, promise, child) {
if (1 === queue.push({
name: eventName,
payload: {
key: promise._guidKey,
id: promise._id,
eventName: eventName,
detail: promise._result,
childId: child && child._id,
label: promise._label,
timeStamp: now(),
error: config["instrument-with-stack"] ? new Error(promise._label) : null
} })) {
scheduleFlush();
}
}
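  /*
    A sketch of consuming the instrumentation events queued above, assuming
    the global `RSVP` export. With `instrument` enabled, 'created', 'chained',
    'fulfilled' and 'rejected' events are batched and fired on the config
    object, which `RSVP.on` subscribes to:
    ```javascript
    RSVP.configure('instrument', true);
    RSVP.on('created', function (event) {
      // event.guid, event.label and event.timeStamp identify the promise
      console.log('promise created:', event.guid, event.label);
    });
    new RSVP.Promise(function (resolve) { resolve(1); }, 'my-label');
    ```
  */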
/**
`RSVP.Promise.resolve` returns a promise that will become resolved with the
passed `value`. It is shorthand for the following:
```javascript
let promise = new RSVP.Promise(function(resolve, reject){
resolve(1);
});
promise.then(function(value){
// value === 1
});
```
Instead of writing the above, your code now simply becomes the following:
```javascript
let promise = RSVP.Promise.resolve(1);
promise.then(function(value){
// value === 1
});
```
@method resolve
@static
@param {*} object value that the returned promise will be resolved with
@param {String} label optional string for identifying the returned promise.
Useful for tooling.
@return {Promise} a promise that will become fulfilled with the given
`value`
*/
function resolve$1(object, label) {
/*jshint validthis:true */
var Constructor = this;
if (object && typeof object === 'object' && object.constructor === Constructor) {
return object;
}
var promise = new Constructor(noop, label);
resolve(promise, object);
return promise;
}
function withOwnPromise() {
return new TypeError('A promises callback cannot return that same promise.');
}
function noop() {}
var PENDING = void 0;
var FULFILLED = 1;
var REJECTED = 2;
var GET_THEN_ERROR = new ErrorObject();
function getThen(promise) {
try {
return promise.then;
} catch (error) {
GET_THEN_ERROR.error = error;
return GET_THEN_ERROR;
}
}
function tryThen(then$$1, value, fulfillmentHandler, rejectionHandler) {
try {
then$$1.call(value, fulfillmentHandler, rejectionHandler);
} catch (e) {
return e;
}
}
function handleForeignThenable(promise, thenable, then$$1) {
config.async(function (promise) {
var sealed = false;
var error = tryThen(then$$1, thenable, function (value) {
if (sealed) {
return;
}
sealed = true;
if (thenable !== value) {
resolve(promise, value, undefined);
} else {
fulfill(promise, value);
}
}, function (reason) {
if (sealed) {
return;
}
sealed = true;
reject(promise, reason);
}, 'Settle: ' + (promise._label || ' unknown promise'));
if (!sealed && error) {
sealed = true;
reject(promise, error);
}
}, promise);
}
function handleOwnThenable(promise, thenable) {
if (thenable._state === FULFILLED) {
fulfill(promise, thenable._result);
} else if (thenable._state === REJECTED) {
thenable._onError = null;
reject(promise, thenable._result);
} else {
subscribe(thenable, undefined, function (value) {
if (thenable !== value) {
resolve(promise, value, undefined);
} else {
fulfill(promise, value);
}
}, function (reason) {
return reject(promise, reason);
});
}
}
function handleMaybeThenable(promise, maybeThenable, then$$1) {
var isOwnThenable = maybeThenable.constructor === promise.constructor && then$$1 === then && promise.constructor.resolve === resolve$1;
if (isOwnThenable) {
handleOwnThenable(promise, maybeThenable);
} else if (then$$1 === GET_THEN_ERROR) {
reject(promise, GET_THEN_ERROR.error);
GET_THEN_ERROR.error = null;
} else if (isFunction(then$$1)) {
handleForeignThenable(promise, maybeThenable, then$$1);
} else {
fulfill(promise, maybeThenable);
}
}
function resolve(promise, value) {
if (promise === value) {
fulfill(promise, value);
} else if (objectOrFunction(value)) {
handleMaybeThenable(promise, value, getThen(value));
} else {
fulfill(promise, value);
}
}
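  /*
    A sketch of the assimilation path above: `resolve` treats any object with
    a callable `then` as a thenable, so a hand-rolled thenable (the
    `fakeThenable` below is hypothetical) is unwrapped through
    `handleForeignThenable` rather than fulfilled as a plain value.
    ```javascript
    var fakeThenable = {
      then: function (onFulfillment, onRejection) {
        onFulfillment(42); // settle immediately with 42
      }
    };
    RSVP.Promise.resolve(fakeThenable).then(function (value) {
      console.log(value); // 42, not the fakeThenable object
    });
    ```
  */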
function publishRejection(promise) {
if (promise._onError) {
promise._onError(promise._result);
}
publish(promise);
}
function fulfill(promise, value) {
if (promise._state !== PENDING) {
return;
}
promise._result = value;
promise._state = FULFILLED;
if (promise._subscribers.length === 0) {
if (config.instrument) {
instrument('fulfilled', promise);
}
} else {
config.async(publish, promise);
}
}
function reject(promise, reason) {
if (promise._state !== PENDING) {
return;
}
promise._state = REJECTED;
promise._result = reason;
config.async(publishRejection, promise);
}
function subscribe(parent, child, onFulfillment, onRejection) {
var subscribers = parent._subscribers;
var length = subscribers.length;
parent._onError = null;
subscribers[length] = child;
subscribers[length + FULFILLED] = onFulfillment;
subscribers[length + REJECTED] = onRejection;
if (length === 0 && parent._state) {
config.async(publish, parent);
}
}
function publish(promise) {
var subscribers = promise._subscribers;
var settled = promise._state;
if (config.instrument) {
instrument(settled === FULFILLED ? 'fulfilled' : 'rejected', promise);
}
if (subscribers.length === 0) {
return;
}
var child = void 0,
callback = void 0,
result = promise._result;
for (var i = 0; i < subscribers.length; i += 3) {
child = subscribers[i];
callback = subscribers[i + settled];
if (child) {
invokeCallback(settled, child, callback, result);
} else {
callback(result);
}
}
promise._subscribers.length = 0;
}
function ErrorObject() {
this.error = null;
}
var TRY_CATCH_ERROR = new ErrorObject();
function tryCatch(callback, result) {
try {
return callback(result);
} catch (e) {
TRY_CATCH_ERROR.error = e;
return TRY_CATCH_ERROR;
}
}
function invokeCallback(state, promise, callback, result) {
var hasCallback = isFunction(callback);
var value = void 0,
error = void 0;
if (hasCallback) {
value = tryCatch(callback, result);
if (value === TRY_CATCH_ERROR) {
error = value.error;
value.error = null; // release
} else if (value === promise) {
reject(promise, withOwnPromise());
return;
}
} else {
value = result;
}
if (promise._state !== PENDING) {
// noop
} else if (hasCallback && error === undefined) {
resolve(promise, value);
} else if (error !== undefined) {
reject(promise, error);
} else if (state === FULFILLED) {
fulfill(promise, value);
} else if (state === REJECTED) {
reject(promise, value);
}
}
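  /*
    The no-callback branch above (`value = result`) is what makes handler-less
    links in a chain transparent: values and reasons pass through unchanged.
    A minimal sketch:
    ```javascript
    var p = new RSVP.Promise(function (resolve) {
      setTimeout(function () { resolve(7); }, 0);
    });
    p.then(null, function (reason) { return reason; }) // no fulfillment handler
     .then(function (value) { console.log(value); });  // logs 7
    ```
  */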
function initializePromise(promise, resolver) {
var resolved = false;
try {
resolver(function (value) {
if (resolved) {
return;
}
resolved = true;
resolve(promise, value);
}, function (reason) {
if (resolved) {
return;
}
resolved = true;
reject(promise, reason);
});
} catch (e) {
reject(promise, e);
}
}
function then(onFulfillment, onRejection, label) {
var parent = this;
var state = parent._state;
if (state === FULFILLED && !onFulfillment || state === REJECTED && !onRejection) {
config.instrument && instrument('chained', parent, parent);
return parent;
}
parent._onError = null;
var child = new parent.constructor(noop, label);
var result = parent._result;
config.instrument && instrument('chained', parent, child);
if (state === PENDING) {
subscribe(parent, child, onFulfillment, onRejection);
} else {
var callback = state === FULFILLED ? onFulfillment : onRejection;
config.async(function () {
return invokeCallback(state, child, callback, result);
});
}
return child;
}
var Enumerator = function () {
function Enumerator(Constructor, input, abortOnReject, label) {
this._instanceConstructor = Constructor;
this.promise = new Constructor(noop, label);
this._abortOnReject = abortOnReject;
this._init.apply(this, arguments);
}
Enumerator.prototype._init = function _init(Constructor, input) {
var len = input.length || 0;
this.length = len;
this._remaining = len;
this._result = new Array(len);
this._enumerate(input);
if (this._remaining === 0) {
fulfill(this.promise, this._result);
}
};
Enumerator.prototype._enumerate = function _enumerate(input) {
var length = this.length;
var promise = this.promise;
for (var i = 0; promise._state === PENDING && i < length; i++) {
this._eachEntry(input[i], i);
}
};
Enumerator.prototype._settleMaybeThenable = function _settleMaybeThenable(entry, i) {
var c = this._instanceConstructor;
var resolve$$1 = c.resolve;
if (resolve$$1 === resolve$1) {
var then$$1 = getThen(entry);
if (then$$1 === then && entry._state !== PENDING) {
entry._onError = null;
this._settledAt(entry._state, i, entry._result);
} else if (typeof then$$1 !== 'function') {
this._remaining--;
this._result[i] = this._makeResult(FULFILLED, i, entry);
} else if (c === Promise) {
var promise = new c(noop);
handleMaybeThenable(promise, entry, then$$1);
this._willSettleAt(promise, i);
} else {
this._willSettleAt(new c(function (resolve$$1) {
return resolve$$1(entry);
}), i);
}
} else {
this._willSettleAt(resolve$$1(entry), i);
}
};
Enumerator.prototype._eachEntry = function _eachEntry(entry, i) {
if (isMaybeThenable(entry)) {
this._settleMaybeThenable(entry, i);
} else {
this._remaining--;
this._result[i] = this._makeResult(FULFILLED, i, entry);
}
};
Enumerator.prototype._settledAt = function _settledAt(state, i, value) {
var promise = this.promise;
if (promise._state === PENDING) {
if (this._abortOnReject && state === REJECTED) {
reject(promise, value);
} else {
this._remaining--;
this._result[i] = this._makeResult(state, i, value);
if (this._remaining === 0) {
fulfill(promise, this._result);
}
}
}
};
Enumerator.prototype._makeResult = function _makeResult(state, i, value) {
return value;
};
Enumerator.prototype._willSettleAt = function _willSettleAt(promise, i) {
var enumerator = this;
subscribe(promise, undefined, function (value) {
return enumerator._settledAt(FULFILLED, i, value);
}, function (reason) {
return enumerator._settledAt(REJECTED, i, reason);
});
};
return Enumerator;
}();
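  /*
    Because `_eachEntry` above copies non-thenable entries straight into the
    result, the enumeration helpers accept mixed arrays of promises and plain
    values. A minimal sketch using `Promise.all`, which is built on this class:
    ```javascript
    RSVP.Promise.all([RSVP.Promise.resolve(1), 2, 'three']).then(function (r) {
      console.log(r); // [1, 2, 'three']
    });
    ```
  */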
function makeSettledResult(state, position, value) {
if (state === FULFILLED) {
return {
state: 'fulfilled',
value: value
};
} else {
return {
state: 'rejected',
reason: value
};
}
}
/**
`RSVP.Promise.all` accepts an array of promises, and returns a new promise which
is fulfilled with an array of fulfillment values for the passed promises, or
rejected with the reason of the first passed promise to be rejected. It casts all
elements of the passed iterable to promises as it runs this algorithm.
Example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.resolve(2);
let promise3 = RSVP.resolve(3);
let promises = [ promise1, promise2, promise3 ];
RSVP.Promise.all(promises).then(function(array){
// The array here would be [ 1, 2, 3 ];
});
```
If any of the `promises` given to `RSVP.all` are rejected, the first promise
that is rejected will be given as an argument to the returned promise's
rejection handler. For example:
Example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.reject(new Error("2"));
let promise3 = RSVP.reject(new Error("3"));
let promises = [ promise1, promise2, promise3 ];
RSVP.Promise.all(promises).then(function(array){
// Code here never runs because there are rejected promises!
}, function(error) {
// error.message === "2"
});
```
@method all
@static
@param {Array} entries array of promises
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Promise} promise that is fulfilled when all `promises` have been
fulfilled, or rejected if any of them become rejected.
@static
*/
function all(entries, label) {
if (!isArray(entries)) {
return this.reject(new TypeError("Promise.all must be called with an array"), label);
}
return new Enumerator(this, entries, true /* abort on reject */, label).promise;
}
/**
`RSVP.Promise.race` returns a new promise which is settled in the same way as the
first passed promise to settle.
Example:
```javascript
let promise1 = new RSVP.Promise(function(resolve, reject){
setTimeout(function(){
resolve('promise 1');
}, 200);
});
let promise2 = new RSVP.Promise(function(resolve, reject){
setTimeout(function(){
resolve('promise 2');
}, 100);
});
RSVP.Promise.race([promise1, promise2]).then(function(result){
// result === 'promise 2' because it was resolved before promise1
// was resolved.
});
```
`RSVP.Promise.race` is deterministic in that only the state of the first
settled promise matters. For example, even if other promises given to the
`promises` array argument are resolved, but the first settled promise has
become rejected before the other promises became fulfilled, the returned
promise will become rejected:
```javascript
let promise1 = new RSVP.Promise(function(resolve, reject){
setTimeout(function(){
resolve('promise 1');
}, 200);
});
let promise2 = new RSVP.Promise(function(resolve, reject){
setTimeout(function(){
reject(new Error('promise 2'));
}, 100);
});
RSVP.Promise.race([promise1, promise2]).then(function(result){
// Code here never runs
}, function(reason){
// reason.message === 'promise 2' because promise 2 became rejected before
// promise 1 became fulfilled
});
```
An example real-world use case is implementing timeouts:
```javascript
RSVP.Promise.race([ajax('foo.json'), timeout(5000)])
```
@method race
@static
@param {Array} entries array of promises to observe
@param {String} label optional string for describing the promise returned.
Useful for tooling.
@return {Promise} a promise which settles in the same way as the first passed
promise to settle.
*/
function race(entries, label) {
/*jshint validthis:true */
var Constructor = this;
var promise = new Constructor(noop, label);
if (!isArray(entries)) {
reject(promise, new TypeError('Promise.race must be called with an array'));
return promise;
}
for (var i = 0; promise._state === PENDING && i < entries.length; i++) {
subscribe(Constructor.resolve(entries[i]), undefined, function (value) {
return resolve(promise, value);
}, function (reason) {
return reject(promise, reason);
});
}
return promise;
}
/**
`RSVP.Promise.reject` returns a promise rejected with the passed `reason`.
It is shorthand for the following:
```javascript
let promise = new RSVP.Promise(function(resolve, reject){
reject(new Error('WHOOPS'));
});
promise.then(function(value){
// Code here doesn't run because the promise is rejected!
}, function(reason){
// reason.message === 'WHOOPS'
});
```
Instead of writing the above, your code now simply becomes the following:
```javascript
let promise = RSVP.Promise.reject(new Error('WHOOPS'));
promise.then(function(value){
// Code here doesn't run because the promise is rejected!
}, function(reason){
// reason.message === 'WHOOPS'
});
```
@method reject
@static
@param {*} reason value that the returned promise will be rejected with.
@param {String} label optional string for identifying the returned promise.
Useful for tooling.
@return {Promise} a promise rejected with the given `reason`.
*/
function reject$1(reason, label) {
/*jshint validthis:true */
var Constructor = this;
var promise = new Constructor(noop, label);
reject(promise, reason);
return promise;
}
var guidKey = 'rsvp_' + now() + '-';
var counter = 0;
function needsResolver() {
throw new TypeError('You must pass a resolver function as the first argument to the promise constructor');
}
function needsNew() {
throw new TypeError("Failed to construct 'Promise': Please use the 'new' operator, this object constructor cannot be called as a function.");
}
/**
Promise objects represent the eventual result of an asynchronous operation. The
primary way of interacting with a promise is through its `then` method, which
registers callbacks to receive either a promise’s eventual value or the reason
why the promise cannot be fulfilled.
Terminology
-----------
- `promise` is an object or function with a `then` method whose behavior conforms to this specification.
- `thenable` is an object or function that defines a `then` method.
- `value` is any legal JavaScript value (including undefined, a thenable, or a promise).
- `exception` is a value that is thrown using the throw statement.
- `reason` is a value that indicates why a promise was rejected.
- `settled` is the final resting state of a promise, fulfilled or rejected.
A promise can be in one of three states: pending, fulfilled, or rejected.
Promises that are fulfilled have a fulfillment value and are in the fulfilled
state. Promises that are rejected have a rejection reason and are in the
rejected state. A fulfillment value is never a thenable.
Promises can also be said to *resolve* a value. If this value is also a
promise, then the original promise's settled state will match the value's
settled state. So a promise that *resolves* a promise that rejects will
itself reject, and a promise that *resolves* a promise that fulfills will
itself fulfill.
Basic Usage:
------------
```js
let promise = new Promise(function(resolve, reject) {
// on success
resolve(value);
// on failure
reject(reason);
});
promise.then(function(value) {
// on fulfillment
}, function(reason) {
// on rejection
});
```
Advanced Usage:
---------------
Promises shine when abstracting away asynchronous interactions such as
`XMLHttpRequest`s.
```js
function getJSON(url) {
return new Promise(function(resolve, reject){
let xhr = new XMLHttpRequest();
xhr.open('GET', url);
xhr.onreadystatechange = handler;
xhr.responseType = 'json';
xhr.setRequestHeader('Accept', 'application/json');
xhr.send();
function handler() {
if (this.readyState === this.DONE) {
if (this.status === 200) {
resolve(this.response);
} else {
reject(new Error('getJSON: `' + url + '` failed with status: [' + this.status + ']'));
}
}
};
});
}
getJSON('/posts.json').then(function(json) {
// on fulfillment
}, function(reason) {
// on rejection
});
```
Unlike callbacks, promises are great composable primitives.
```js
Promise.all([
getJSON('/posts'),
getJSON('/comments')
]).then(function(values){
values[0] // => postsJSON
values[1] // => commentsJSON
return values;
});
```
@class RSVP.Promise
@param {function} resolver
@param {String} label optional string for labeling the promise.
Useful for tooling.
@constructor
*/
var Promise = function () {
function Promise(resolver, label) {
this._id = counter++;
this._label = label;
this._state = undefined;
this._result = undefined;
this._subscribers = [];
config.instrument && instrument('created', this);
if (noop !== resolver) {
typeof resolver !== 'function' && needsResolver();
this instanceof Promise ? initializePromise(this, resolver) : needsNew();
}
}
Promise.prototype._onError = function _onError(reason) {
var _this = this;
config.after(function () {
if (_this._onError) {
config.trigger('error', reason, _this._label);
}
});
};
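  /*
    A sketch of observing unhandled rejections through the 'error' event fired
    by `_onError` above, assuming the global `RSVP` export. The event only
    fires if no rejection handler was attached before `config.after` runs:
    ```javascript
    RSVP.on('error', function (reason, label) {
      console.error('unhandled rejection:', reason, label);
    });
    RSVP.Promise.reject(new Error('boom'), 'my-label'); // no .catch attached
    ```
  */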
/**
`catch` is simply sugar for `then(undefined, onRejection)` which makes it the same
as the catch block of a try/catch statement.
```js
function findAuthor(){
throw new Error('couldn\'t find that author');
}
// synchronous
try {
findAuthor();
} catch(reason) {
// something went wrong
}
// async with promises
findAuthor().catch(function(reason){
// something went wrong
});
```
@method catch
@param {Function} onRejection
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Promise}
*/
Promise.prototype.catch = function _catch(onRejection, label) {
return this.then(undefined, onRejection, label);
};
/**
`finally` will be invoked regardless of the promise's fate, just as a native
try/catch/finally block behaves.
Synchronous example:
```js
function findAuthor() {
if (Math.random() > 0.5) {
throw new Error();
}
return new Author();
}
try {
return findAuthor(); // succeed or fail
} catch(error) {
return findOtherAuthor();
} finally {
// always runs
// doesn't affect the return value
}
```
Asynchronous example:
```js
findAuthor().catch(function(reason){
return findOtherAuthor();
}).finally(function(){
// author was either found, or not
});
```
@method finally
@param {Function} callback
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Promise}
*/
Promise.prototype.finally = function _finally(callback, label) {
var promise = this;
var constructor = promise.constructor;
return promise.then(function (value) {
return constructor.resolve(callback()).then(function () {
return value;
});
}, function (reason) {
return constructor.resolve(callback()).then(function () {
throw reason;
});
}, label);
};
return Promise;
}();
Promise.cast = resolve$1; // deprecated
Promise.all = all;
Promise.race = race;
Promise.resolve = resolve$1;
Promise.reject = reject$1;
Promise.prototype._guidKey = guidKey;
/**
The primary way of interacting with a promise is through its `then` method,
which registers callbacks to receive either a promise's eventual value or the
reason why the promise cannot be fulfilled.
```js
findUser().then(function(user){
// user is available
}, function(reason){
// user is unavailable, and you are given the reason why
});
```
Chaining
--------
The return value of `then` is itself a promise. This second, 'downstream'
promise is resolved with the return value of the first promise's fulfillment
or rejection handler, or rejected if the handler throws an exception.
```js
findUser().then(function (user) {
return user.name;
}, function (reason) {
return 'default name';
}).then(function (userName) {
// If `findUser` fulfilled, `userName` will be the user's name, otherwise it
// will be `'default name'`
});
findUser().then(function (user) {
throw new Error('Found user, but still unhappy');
}, function (reason) {
throw new Error('`findUser` rejected and we\'re unhappy');
}).then(function (value) {
// never reached
}, function (reason) {
// if `findUser` fulfilled, `reason` will be 'Found user, but still unhappy'.
// If `findUser` rejected, `reason` will be '`findUser` rejected and we\'re unhappy'.
});
```
If the downstream promise does not specify a rejection handler, rejection reasons will be propagated further downstream.
```js
findUser().then(function (user) {
throw new PedagogicalException('Upstream error');
}).then(function (value) {
// never reached
}).then(function (value) {
// never reached
}, function (reason) {
// The `PedagogicalException` is propagated all the way down to here
});
```
Assimilation
------------
Sometimes the value you want to propagate to a downstream promise can only be
retrieved asynchronously. This can be achieved by returning a promise in the
fulfillment or rejection handler. The downstream promise will then be pending
until the returned promise is settled. This is called *assimilation*.
```js
findUser().then(function (user) {
return findCommentsByAuthor(user);
}).then(function (comments) {
// The user's comments are now available
});
```
If the assimilated promise rejects, then the downstream promise will also reject.
```js
findUser().then(function (user) {
return findCommentsByAuthor(user);
}).then(function (comments) {
// If `findCommentsByAuthor` fulfills, we'll have the value here
}, function (reason) {
// If `findCommentsByAuthor` rejects, we'll have the reason here
});
```
Simple Example
--------------
Synchronous Example
```javascript
let result;
try {
result = findResult();
// success
} catch(reason) {
// failure
}
```
Errback Example
```js
findResult(function(result, err){
if (err) {
// failure
} else {
// success
}
});
```
Promise Example:
```javascript
findResult().then(function(result){
// success
}, function(reason){
// failure
});
```
Advanced Example
--------------
Synchronous Example
```javascript
let author, books;
try {
author = findAuthor();
books = findBooksByAuthor(author);
// success
} catch(reason) {
// failure
}
```
Errback Example
```js
function foundBooks(books) {
}
function failure(reason) {
}
findAuthor(function(author, err){
if (err) {
failure(err);
// failure
} else {
try {
findBooksByAuthor(author, function(books, err) {
if (err) {
failure(err);
} else {
try {
foundBooks(books);
} catch(reason) {
failure(reason);
}
}
});
} catch(error) {
failure(error);
}
// success
}
});
```
Promise Example:
```javascript
findAuthor().
then(findBooksByAuthor).
then(function(books){
// found books
}).catch(function(reason){
// something went wrong
});
```
@method then
@param {Function} onFulfillment
@param {Function} onRejection
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Promise}
*/
Promise.prototype.then = then;
function Result() {
this.value = undefined;
}
var ERROR = new Result();
var GET_THEN_ERROR$1 = new Result();
  function getThen$1(obj) {
    try {
      return obj.then;
    } catch (error) {
      // Store the error on the dedicated sentinel so callers (see `denodeify`,
      // which compares against GET_THEN_ERROR$1) can detect a throwing getter.
      GET_THEN_ERROR$1.value = error;
      return GET_THEN_ERROR$1;
    }
  }
function tryApply(f, s, a) {
try {
f.apply(s, a);
} catch (error) {
ERROR.value = error;
return ERROR;
}
}
function makeObject(_, argumentNames) {
var obj = {};
var length = _.length;
var args = new Array(length);
for (var x = 0; x < length; x++) {
args[x] = _[x];
}
for (var i = 0; i < argumentNames.length; i++) {
var name = argumentNames[i];
obj[name] = args[i + 1];
}
return obj;
}
function arrayResult(_) {
var length = _.length;
var args = new Array(length - 1);
for (var i = 1; i < length; i++) {
args[i - 1] = _[i];
}
return args;
}
function wrapThenable(then, promise) {
return {
then: function (onFulFillment, onRejection) {
return then.call(promise, onFulFillment, onRejection);
}
};
}
/**
`RSVP.denodeify` takes a 'node-style' function and returns a function that
will return an `RSVP.Promise`. You can use `denodeify` in Node.js or the
browser when you'd prefer to use promises over using callbacks. For example,
`denodeify` transforms the following:
```javascript
let fs = require('fs');
fs.readFile('myfile.txt', function(err, data){
if (err) return handleError(err);
handleData(data);
});
```
into:
```javascript
let fs = require('fs');
let readFile = RSVP.denodeify(fs.readFile);
readFile('myfile.txt').then(handleData, handleError);
```
If the node function has multiple success parameters, then `denodeify`
just returns the first one:
```javascript
let request = RSVP.denodeify(require('request'));
request('http://example.com').then(function(res) {
// ...
});
```
However, if you need all success parameters, setting `denodeify`'s
second parameter to `true` causes it to return all success parameters
as an array:
```javascript
let request = RSVP.denodeify(require('request'), true);
request('http://example.com').then(function(result) {
// result[0] -> res
// result[1] -> body
});
```
Or if you pass it an array with names it returns the parameters as a hash:
```javascript
let request = RSVP.denodeify(require('request'), ['res', 'body']);
request('http://example.com').then(function(result) {
// result.res
// result.body
});
```
Sometimes you need to retain the `this`:
```javascript
let app = require('express')();
let render = RSVP.denodeify(app.render.bind(app));
```
The denodeified function inherits from the original function. It works in all
environments, except IE 10 and below. Consequently all properties of the original
function are available to you. However, any properties you change on the
denodeified function won't be changed on the original function. Example:
```javascript
let request = RSVP.denodeify(require('request')),
cookieJar = request.jar(); // <- Inheritance is used here
request('http://example.com', {jar: cookieJar}).then(function(res) {
// cookieJar.cookies holds now the cookies returned by example.com
});
```
Using `denodeify` makes it easier to compose asynchronous operations instead
of using callbacks. For example, instead of:
```javascript
let fs = require('fs');
fs.readFile('myfile.txt', function(err, data){
if (err) { ... } // Handle error
fs.writeFile('myfile2.txt', data, function(err){
if (err) { ... } // Handle error
console.log('done')
});
});
```
you can chain the operations together using `then` from the returned promise:
```javascript
let fs = require('fs');
let readFile = RSVP.denodeify(fs.readFile);
let writeFile = RSVP.denodeify(fs.writeFile);
readFile('myfile.txt').then(function(data){
return writeFile('myfile2.txt', data);
}).then(function(){
console.log('done')
}).catch(function(error){
// Handle error
});
```
@method denodeify
@static
@for RSVP
@param {Function} nodeFunc a 'node-style' function that takes a callback as
its last argument. The callback expects an error to be passed as its first
argument (if an error occurred, otherwise null), and the value from the
operation as its second argument ('function(err, value){ }').
@param {Boolean|Array} [options] An optional parameter that if set
to `true` causes the promise to fulfill with the callback's success arguments
as an array. This is useful if the node function has multiple success
parameters. If you set this parameter to an array with names, the promise will
fulfill with a hash with these names as keys and the success parameters as
values.
@return {Function} a function that wraps `nodeFunc` to return an
`RSVP.Promise`
@static
*/
function denodeify(nodeFunc, options) {
var fn = function () {
var self = this;
var l = arguments.length;
var args = new Array(l + 1);
var promiseInput = false;
for (var i = 0; i < l; ++i) {
var arg = arguments[i];
if (!promiseInput) {
// TODO: clean this up
promiseInput = needsPromiseInput(arg);
if (promiseInput === GET_THEN_ERROR$1) {
var p = new Promise(noop);
reject(p, GET_THEN_ERROR$1.value);
return p;
} else if (promiseInput && promiseInput !== true) {
arg = wrapThenable(promiseInput, arg);
}
}
args[i] = arg;
}
var promise = new Promise(noop);
args[l] = function (err, val) {
if (err) reject(promise, err);else if (options === undefined) resolve(promise, val);else if (options === true) resolve(promise, arrayResult(arguments));else if (isArray(options)) resolve(promise, makeObject(arguments, options));else resolve(promise, val);
};
if (promiseInput) {
return handlePromiseInput(promise, args, nodeFunc, self);
} else {
return handleValueInput(promise, args, nodeFunc, self);
}
};
fn.__proto__ = nodeFunc;
return fn;
}
function handleValueInput(promise, args, nodeFunc, self) {
var result = tryApply(nodeFunc, self, args);
if (result === ERROR) {
reject(promise, result.value);
}
return promise;
}
function handlePromiseInput(promise, args, nodeFunc, self) {
return Promise.all(args).then(function (args) {
var result = tryApply(nodeFunc, self, args);
if (result === ERROR) {
reject(promise, result.value);
}
return promise;
});
}
function needsPromiseInput(arg) {
if (arg && typeof arg === 'object') {
if (arg.constructor === Promise) {
return true;
} else {
return getThen$1(arg);
}
} else {
return false;
}
}
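  /*
    A sketch of the promise-input path above: when any argument to a
    denodeified function is a promise or thenable, `handlePromiseInput` waits
    for all arguments to resolve before invoking the node-style function.
    `fs.readFile` is the usual Node API; the file name is hypothetical.
    ```javascript
    var fs = require('fs');
    var readFile = RSVP.denodeify(fs.readFile);
    var path = RSVP.Promise.resolve('myfile.txt'); // a promise, not a string
    readFile(path, 'utf8').then(function (data) {
      console.log(data); // file contents; `path` was resolved first
    });
    ```
  */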
/**
This is a convenient alias for `RSVP.Promise.all`.
@method all
@static
@for RSVP
@param {Array} array Array of promises.
@param {String} label An optional label. This is useful
for tooling.
*/
function all$1(array, label) {
return Promise.all(array, label);
}
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var AllSettled = function (_Enumerator) {
_inherits(AllSettled, _Enumerator);
function AllSettled(Constructor, entries, label) {
return _possibleConstructorReturn(this, _Enumerator.call(this, Constructor, entries, false /* don't abort on reject */, label));
}
return AllSettled;
}(Enumerator);
AllSettled.prototype._makeResult = makeSettledResult;
/**
`RSVP.allSettled` is similar to `RSVP.all`, but instead of implementing
a fail-fast method, it waits until all the promises have settled and
shows you all the results. This is useful if you want to handle multiple
promises' failure states together as a set.
Returns a promise that is fulfilled when all the given promises have been
settled. The return promise is fulfilled with an array of the states of
the promises passed into the `promises` array argument.
Each state object will either indicate fulfillment or rejection, and
provide the corresponding value or reason. The states will take one of
the following formats:
```javascript
{ state: 'fulfilled', value: value }
or
{ state: 'rejected', reason: reason }
```
Example:
```javascript
let promise1 = RSVP.Promise.resolve(1);
let promise2 = RSVP.Promise.reject(new Error('2'));
let promise3 = RSVP.Promise.reject(new Error('3'));
let promises = [ promise1, promise2, promise3 ];
RSVP.allSettled(promises).then(function(array){
// array == [
// { state: 'fulfilled', value: 1 },
// { state: 'rejected', reason: Error },
// { state: 'rejected', reason: Error }
// ]
// Note that for the second item, reason.message will be '2', and for the
// third item, reason.message will be '3'.
}, function(error) {
// Not run. (This block would only be called if allSettled had failed,
// for instance if passed an incorrect argument type.)
});
```
@method allSettled
@static
@for RSVP
@param {Array} entries
@param {String} label - optional string that describes the promise.
Useful for tooling.
@return {Promise} promise that is fulfilled with an array of the settled
states of the constituent promises.
*/
function allSettled(entries, label) {
if (!isArray(entries)) {
return Promise.reject(new TypeError("Promise.allSettled must be called with an array"), label);
}
return new AllSettled(Promise, entries, label).promise;
}
/**
This is a convenient alias for `RSVP.Promise.race`.
@method race
@static
@for RSVP
@param {Array} array Array of promises.
@param {String} label An optional label. This is useful
for tooling.
*/
function race$1(array, label) {
return Promise.race(array, label);
}
function _possibleConstructorReturn$1(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits$1(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var hasOwnProperty = Object.prototype.hasOwnProperty;
var PromiseHash = function (_Enumerator) {
_inherits$1(PromiseHash, _Enumerator);
function PromiseHash(Constructor, object) {
var abortOnReject = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : true;
var label = arguments[3];
return _possibleConstructorReturn$1(this, _Enumerator.call(this, Constructor, object, abortOnReject, label));
}
PromiseHash.prototype._init = function _init(Constructor, object) {
this._result = {};
this._enumerate(object);
if (this._remaining === 0) {
fulfill(this.promise, this._result);
}
};
PromiseHash.prototype._enumerate = function _enumerate(input) {
var promise = this.promise;
var results = [];
for (var key in input) {
if (hasOwnProperty.call(input, key)) {
results.push({
position: key,
entry: input[key]
});
}
}
var length = results.length;
this._remaining = length;
var result = void 0;
for (var i = 0; promise._state === PENDING && i < length; i++) {
result = results[i];
this._eachEntry(result.entry, result.position);
}
};
return PromiseHash;
}(Enumerator);
/**
`RSVP.hash` is similar to `RSVP.all`, but takes an object instead of an array
for its `promises` argument.
Returns a promise that is fulfilled when all the given promises have been
fulfilled, or rejected if any of them become rejected. The returned promise
is fulfilled with a hash that has the same key names as the `promises` object
argument. If any of the values in the object are not promises, they will
simply be copied over to the fulfilled object.
Example:
```javascript
let promises = {
myPromise: RSVP.resolve(1),
yourPromise: RSVP.resolve(2),
theirPromise: RSVP.resolve(3),
notAPromise: 4
};
RSVP.hash(promises).then(function(hash){
// hash here is an object that looks like:
// {
// myPromise: 1,
// yourPromise: 2,
// theirPromise: 3,
// notAPromise: 4
// }
});
```
If any of the `promises` given to `RSVP.hash` are rejected, the first promise
that is rejected will be given as the reason to the rejection handler.
Example:
```javascript
let promises = {
myPromise: RSVP.resolve(1),
rejectedPromise: RSVP.reject(new Error('rejectedPromise')),
anotherRejectedPromise: RSVP.reject(new Error('anotherRejectedPromise')),
};
RSVP.hash(promises).then(function(hash){
// Code here never runs because there are rejected promises!
}, function(reason) {
// reason.message === 'rejectedPromise'
});
```
An important note: `RSVP.hash` is intended for plain JavaScript objects that
are just a set of keys and values. `RSVP.hash` will NOT preserve prototype
chains.
Example:
```javascript
function MyConstructor(){
this.example = RSVP.resolve('Example');
}
MyConstructor.prototype = {
protoProperty: RSVP.resolve('Proto Property')
};
let myObject = new MyConstructor();
RSVP.hash(myObject).then(function(hash){
// protoProperty will not be present, instead you will just have an
// object that looks like:
// {
// example: 'Example'
// }
//
// hash.hasOwnProperty('protoProperty'); // false
// 'undefined' === typeof hash.protoProperty
});
```
@method hash
@static
@for RSVP
@param {Object} object
@param {String} label optional string that describes the promise.
Useful for tooling.
@return {Promise} promise that is fulfilled when all properties of `promises`
have been fulfilled, or rejected if any of them become rejected.
*/
function hash(object, label) {
if (!isObject(object)) {
return Promise.reject(new TypeError("Promise.hash must be called with an object"), label);
}
return new PromiseHash(Promise, object, label).promise;
}
function _possibleConstructorReturn$2(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits$2(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var HashSettled = function (_PromiseHash) {
_inherits$2(HashSettled, _PromiseHash);
function HashSettled(Constructor, object, label) {
return _possibleConstructorReturn$2(this, _PromiseHash.call(this, Constructor, object, false, label));
}
return HashSettled;
}(PromiseHash);
HashSettled.prototype._makeResult = makeSettledResult;
/**
`RSVP.hashSettled` is similar to `RSVP.allSettled`, but takes an object
instead of an array for its `promises` argument.
Unlike `RSVP.all` or `RSVP.hash`, which implement a fail-fast method,
but like `RSVP.allSettled`, `hashSettled` waits until all the
constituent promises have settled and then shows you all the results
with their states and values/reasons. This is useful if you want to
handle multiple promises' failure states together as a set.
Returns a promise that is fulfilled when all the given promises have been
settled, or rejected if the passed parameters are invalid.
The returned promise is fulfilled with a hash that has the same key names as
the `promises` object argument. If any of the values in the object are not
promises, they will be copied over to the fulfilled object and marked with state
'fulfilled'.
Example:
```javascript
let promises = {
myPromise: RSVP.Promise.resolve(1),
yourPromise: RSVP.Promise.resolve(2),
theirPromise: RSVP.Promise.resolve(3),
notAPromise: 4
};
RSVP.hashSettled(promises).then(function(hash){
// hash here is an object that looks like:
// {
// myPromise: { state: 'fulfilled', value: 1 },
// yourPromise: { state: 'fulfilled', value: 2 },
// theirPromise: { state: 'fulfilled', value: 3 },
// notAPromise: { state: 'fulfilled', value: 4 }
// }
});
```
If any of the `promises` given to `RSVP.hash` are rejected, the state will
be set to 'rejected' and the reason for rejection provided.
Example:
```javascript
let promises = {
myPromise: RSVP.Promise.resolve(1),
rejectedPromise: RSVP.Promise.reject(new Error('rejection')),
anotherRejectedPromise: RSVP.Promise.reject(new Error('more rejection')),
};
RSVP.hashSettled(promises).then(function(hash){
// hash here is an object that looks like:
// {
// myPromise: { state: 'fulfilled', value: 1 },
// rejectedPromise: { state: 'rejected', reason: Error },
// anotherRejectedPromise: { state: 'rejected', reason: Error },
// }
// Note that for rejectedPromise, reason.message == 'rejection',
// and for anotherRejectedPromise, reason.message == 'more rejection'.
});
```
An important note: `RSVP.hashSettled` is intended for plain JavaScript objects that
are just a set of keys and values. `RSVP.hashSettled` will NOT preserve prototype
chains.
Example:
```javascript
function MyConstructor(){
this.example = RSVP.Promise.resolve('Example');
}
MyConstructor.prototype = {
protoProperty: RSVP.Promise.resolve('Proto Property')
};
let myObject = new MyConstructor();
RSVP.hashSettled(myObject).then(function(hash){
// protoProperty will not be present, instead you will just have an
// object that looks like:
// {
// example: { state: 'fulfilled', value: 'Example' }
// }
//
// hash.hasOwnProperty('protoProperty'); // false
// 'undefined' === typeof hash.protoProperty
});
```
@method hashSettled
@for RSVP
@param {Object} object
@param {String} label optional string that describes the promise.
Useful for tooling.
@return {Promise} promise that is fulfilled when all properties of `promises`
have been settled.
@static
*/
function hashSettled(object, label) {
if (!isObject(object)) {
return Promise.reject(new TypeError("RSVP.hashSettled must be called with an object"), label);
}
return new HashSettled(Promise, object, false, label).promise;
}
/**
`RSVP.rethrow` will rethrow an error on the next turn of the JavaScript event
loop in order to aid debugging.
Promises A+ specifies that any exceptions that occur with a promise must be
caught by the promises implementation and bubbled to the last handler. For
this reason, it is recommended that you always specify a second rejection
handler function to `then`. However, `RSVP.rethrow` will throw the exception
outside of the promise, so it bubbles up to your console if in the browser,
or causes an uncaught exception in Node. `rethrow` will also throw the
error again so the error can be handled by the promise per the spec.
```javascript
function throws(){
throw new Error('Whoops!');
}
let promise = new RSVP.Promise(function(resolve, reject){
throws();
});
promise.catch(RSVP.rethrow).then(function(){
// Code here doesn't run because the promise became rejected due to an
// error!
}, function (err){
// handle the error here
});
```
The 'Whoops' error will be thrown on the next turn of the event loop
and you can watch for it in your console. You can also handle it using a
rejection handler given to `.then` or `.catch` on the returned promise.
@method rethrow
@static
@for RSVP
@param {Error} reason reason the promise became rejected.
@throws Error
@static
*/
function rethrow(reason) {
setTimeout(function () {
throw reason;
});
throw reason;
}
/**
`RSVP.defer` returns an object similar to jQuery's `$.Deferred`.
`RSVP.defer` should be used when porting over code reliant on `$.Deferred`'s
interface. New code should use the `RSVP.Promise` constructor instead.
The object returned from `RSVP.defer` is a plain object with three properties:
* promise - an `RSVP.Promise`.
* reject - a function that causes the `promise` property on this object to
become rejected
* resolve - a function that causes the `promise` property on this object to
become fulfilled.
Example:
```javascript
let deferred = RSVP.defer();
deferred.resolve("Success!");
deferred.promise.then(function(value){
// value here is "Success!"
});
```
@method defer
@static
@for RSVP
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Object}
*/
function defer(label) {
var deferred = { resolve: undefined, reject: undefined };
deferred.promise = new Promise(function (resolve, reject) {
deferred.resolve = resolve;
deferred.reject = reject;
}, label);
return deferred;
}
/**
`RSVP.map` is similar to JavaScript's native `map` method, except that it
waits for all promises to become fulfilled before running the `mapFn` on
each item given to `promises`. `RSVP.map` returns a promise that will
become fulfilled with the result of running `mapFn` on the values the promises
become fulfilled with.
For example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.resolve(2);
let promise3 = RSVP.resolve(3);
let promises = [ promise1, promise2, promise3 ];
let mapFn = function(item){
return item + 1;
};
RSVP.map(promises, mapFn).then(function(result){
// result is [ 2, 3, 4 ]
});
```
If any of the `promises` given to `RSVP.map` are rejected, the first promise
that is rejected will be given as an argument to the returned promise's
rejection handler. For example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.reject(new Error('2'));
let promise3 = RSVP.reject(new Error('3'));
let promises = [ promise1, promise2, promise3 ];
let mapFn = function(item){
return item + 1;
};
RSVP.map(promises, mapFn).then(function(array){
// Code here never runs because there are rejected promises!
}, function(reason) {
// reason.message === '2'
});
```
`RSVP.map` will also wait if a promise is returned from `mapFn`. For example,
say you want to get all comments from a set of blog posts, but you need
the blog posts first because they contain a url to those comments.
```javascript
let mapFn = function(blogPost){
// getComments does some ajax and returns an RSVP.Promise that is fulfilled
// with some comments data
return getComments(blogPost.comments_url);
};
// getBlogPosts does some ajax and returns an RSVP.Promise that is fulfilled
// with some blog post data
RSVP.map(getBlogPosts(), mapFn).then(function(comments){
// comments is the result of asking the server for the comments
// of all blog posts returned from getBlogPosts()
});
```
@method map
@static
@for RSVP
@param {Array} promises
@param {Function} mapFn function to be called on each fulfilled promise.
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Promise} promise that is fulfilled with the result of calling
`mapFn` on each fulfilled promise or value when they become fulfilled.
The promise will be rejected if any of the given `promises` become rejected.
@static
*/
function map(promises, mapFn, label) {
if (!isArray(promises)) {
return Promise.reject(new TypeError("RSVP.map must be called with an array"), label);
}
if (!isFunction(mapFn)) {
return Promise.reject(new TypeError("RSVP.map expects a function as a second argument"), label);
}
return Promise.all(promises, label).then(function (values) {
var length = values.length;
var results = new Array(length);
for (var i = 0; i < length; i++) {
results[i] = mapFn(values[i]);
}
return Promise.all(results, label);
});
}
/**
This is a convenient alias for `RSVP.Promise.resolve`.
@method resolve
@static
@for RSVP
@param {*} value value that the returned promise will be resolved with
@param {String} label optional string for identifying the returned promise.
Useful for tooling.
@return {Promise} a promise that will become fulfilled with the given
`value`
*/
function resolve$2(value, label) {
return Promise.resolve(value, label);
}
/**
This is a convenient alias for `RSVP.Promise.reject`.
@method reject
@static
@for RSVP
@param {*} reason value that the returned promise will be rejected with.
@param {String} label optional string for identifying the returned promise.
Useful for tooling.
@return {Promise} a promise rejected with the given `reason`.
*/
function reject$2(reason, label) {
return Promise.reject(reason, label);
}
/**
`RSVP.filter` is similar to JavaScript's native `filter` method, except that it
waits for all promises to become fulfilled before running the `filterFn` on
each item given to `promises`. `RSVP.filter` returns a promise that will
become fulfilled with the result of running `filterFn` on the values the
promises become fulfilled with.
For example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.resolve(2);
let promise3 = RSVP.resolve(3);
let promises = [promise1, promise2, promise3];
let filterFn = function(item){
return item > 1;
};
RSVP.filter(promises, filterFn).then(function(result){
// result is [ 2, 3 ]
});
```
If any of the `promises` given to `RSVP.filter` are rejected, the first promise
that is rejected will be given as an argument to the returned promise's
rejection handler. For example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.reject(new Error('2'));
let promise3 = RSVP.reject(new Error('3'));
let promises = [ promise1, promise2, promise3 ];
let filterFn = function(item){
return item > 1;
};
RSVP.filter(promises, filterFn).then(function(array){
// Code here never runs because there are rejected promises!
}, function(reason) {
// reason.message === '2'
});
```
`RSVP.filter` will also wait for any promises returned from `filterFn`.
For instance, you may want to fetch a list of users then return a subset
of those users based on some asynchronous operation:
```javascript
let alice = { name: 'alice' };
let bob = { name: 'bob' };
let users = [ alice, bob ];
let promises = users.map(function(user){
return RSVP.resolve(user);
});
let filterFn = function(user){
// Here, Alice has permissions to create a blog post, but Bob does not.
return getPrivilegesForUser(user).then(function(privs){
return privs.can_create_blog_post === true;
});
};
RSVP.filter(promises, filterFn).then(function(users){
// true, because the server told us only Alice can create a blog post.
users.length === 1;
// false, because Alice is the only user present in `users`
users[0] === bob;
});
```
@method filter
@static
@for RSVP
@param {Array} promises
@param {Function} filterFn - function to be called on each resolved value to
filter the final results.
@param {String} label optional string describing the promise. Useful for
tooling.
@return {Promise}
*/
function resolveAll(promises, label) {
return Promise.all(promises, label);
}
function resolveSingle(promise, label) {
return Promise.resolve(promise, label).then(function (promises) {
return resolveAll(promises, label);
});
}
function filter(promises, filterFn, label) {
if (!isArray(promises) && !(isObject(promises) && promises.then !== undefined)) {
return Promise.reject(new TypeError("RSVP.filter must be called with an array or promise"), label);
}
if (!isFunction(filterFn)) {
return Promise.reject(new TypeError("RSVP.filter expects function as a second argument"), label);
}
var promise = isArray(promises) ? resolveAll(promises, label) : resolveSingle(promises, label);
return promise.then(function (values) {
var length = values.length;
var filtered = new Array(length);
for (var i = 0; i < length; i++) {
filtered[i] = filterFn(values[i]);
}
return resolveAll(filtered, label).then(function (filtered) {
var results = new Array(length);
var newLength = 0;
for (var _i = 0; _i < length; _i++) {
if (filtered[_i]) {
results[newLength] = values[_i];
newLength++;
}
}
results.length = newLength;
return results;
});
});
}
var len = 0;
var vertxNext = void 0;
function asap(callback, arg) {
queue$1[len] = callback;
queue$1[len + 1] = arg;
len += 2;
if (len === 2) {
// If len is 2, that means that we need to schedule an async flush.
// If additional callbacks are queued before the queue is flushed, they
// will be processed by this flush that we are scheduling.
scheduleFlush$1();
}
}
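  /*
    A minimal sketch of `asap`, which is exported below: callbacks are queued
    with one argument each and run together on the next scheduled flush, after
    the current synchronous code completes.
    ```javascript
    RSVP.asap(function (value) {
      console.log('flushed with', value); // runs asynchronously
    }, 42);
    console.log('queued'); // logs first
    ```
  */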
var browserWindow = typeof window !== 'undefined' ? window : undefined;
var browserGlobal = browserWindow || {};
var BrowserMutationObserver = browserGlobal.MutationObserver || browserGlobal.WebKitMutationObserver;
var isNode = typeof self === 'undefined' && typeof process !== 'undefined' && {}.toString.call(process) === '[object process]';
// test for web worker but not in IE10
var isWorker = typeof Uint8ClampedArray !== 'undefined' && typeof importScripts !== 'undefined' && typeof MessageChannel !== 'undefined';
// node
function useNextTick() {
var nextTick = process.nextTick;
// node version 0.10.x displays a deprecation warning when nextTick is used recursively
// setImmediate should be used instead
var version = process.versions.node.match(/^(?:(\d+)\.)?(?:(\d+)\.)?(\*|\d+)$/);
if (Array.isArray(version) && version[1] === '0' && version[2] === '10') {
nextTick = setImmediate;
}
return function () {
return nextTick(flush);
};
}
// vertx
function useVertxTimer() {
if (typeof vertxNext !== 'undefined') {
return function () {
vertxNext(flush);
};
}
return useSetTimeout();
}
function useMutationObserver() {
var iterations = 0;
var observer = new BrowserMutationObserver(flush);
var node = document.createTextNode('');
observer.observe(node, { characterData: true });
return function () {
return node.data = iterations = ++iterations % 2;
};
}
// web worker
function useMessageChannel() {
var channel = new MessageChannel();
channel.port1.onmessage = flush;
return function () {
return channel.port2.postMessage(0);
};
}
function useSetTimeout() {
return function () {
return setTimeout(flush, 1);
};
}
var queue$1 = new Array(1000);
function flush() {
for (var i = 0; i < len; i += 2) {
var callback = queue$1[i];
var arg = queue$1[i + 1];
callback(arg);
queue$1[i] = undefined;
queue$1[i + 1] = undefined;
}
len = 0;
}
function attemptVertx() {
try {
var r = require;
var vertx = r('vertx');
vertxNext = vertx.runOnLoop || vertx.runOnContext;
return useVertxTimer();
} catch (e) {
return useSetTimeout();
}
}
var scheduleFlush$1 = void 0;
// Decide which async method to use for triggering processing of queued callbacks:
if (isNode) {
scheduleFlush$1 = useNextTick();
} else if (BrowserMutationObserver) {
scheduleFlush$1 = useMutationObserver();
} else if (isWorker) {
scheduleFlush$1 = useMessageChannel();
} else if (browserWindow === undefined && typeof require === 'function') {
scheduleFlush$1 = attemptVertx();
} else {
scheduleFlush$1 = useSetTimeout();
}
var platform = void 0;
/* global self */
if (typeof self === 'object') {
platform = self;
/* global global */
} else if (typeof global === 'object') {
platform = global;
} else {
throw new Error('no global: `self` or `global` found');
}
var _asap$cast$Promise$Ev;
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
// defaults
config.async = asap;
config.after = function (cb) {
return setTimeout(cb, 0);
};
var cast = resolve$2;
var async = function (callback, arg) {
return config.async(callback, arg);
};
function on() {
config['on'].apply(config, arguments);
}
function off() {
config['off'].apply(config, arguments);
}
// Set up instrumentation through `window.__PROMISE_INSTRUMENTATION__`
if (typeof window !== 'undefined' && typeof window['__PROMISE_INSTRUMENTATION__'] === 'object') {
var callbacks = window['__PROMISE_INSTRUMENTATION__'];
configure('instrument', true);
for (var eventName in callbacks) {
if (callbacks.hasOwnProperty(eventName)) {
on(eventName, callbacks[eventName]);
}
}
}
// the default export here is for backwards compat:
// https://github.com/tildeio/rsvp.js/issues/434
var rsvp = (_asap$cast$Promise$Ev = {
asap: asap,
cast: cast,
Promise: Promise,
EventTarget: EventTarget,
all: all$1,
allSettled: allSettled,
race: race$1,
hash: hash,
hashSettled: hashSettled,
rethrow: rethrow,
defer: defer,
denodeify: denodeify,
configure: configure,
on: on,
off: off,
resolve: resolve$2,
reject: reject$2,
map: map
}, _defineProperty(_asap$cast$Promise$Ev, 'async', async), _defineProperty(_asap$cast$Promise$Ev, 'filter', filter), _asap$cast$Promise$Ev);
exports['default'] = rsvp;
exports.asap = asap;
exports.cast = cast;
exports.Promise = Promise;
exports.EventTarget = EventTarget;
exports.all = all$1;
exports.allSettled = allSettled;
exports.race = race$1;
exports.hash = hash;
exports.hashSettled = hashSettled;
exports.rethrow = rethrow;
exports.defer = defer;
exports.denodeify = denodeify;
exports.configure = configure;
exports.on = on;
exports.off = off;
exports.resolve = resolve$2;
exports.reject = reject$2;
exports.map = map;
exports.async = async;
exports.filter = filter;
Object.defineProperty(exports, '__esModule', { value: true });
})));
var EPUBJS = EPUBJS || {};
EPUBJS.core = {};
var ELEMENT_NODE = 1;
var TEXT_NODE = 3;
var COMMENT_NODE = 8;
var DOCUMENT_NODE = 9;
//-- Get an element by id
EPUBJS.core.getEl = function(elem) {
return document.getElementById(elem);
};
//-- Get all elements with a class name
EPUBJS.core.getEls = function(classes) {
return document.getElementsByClassName(classes);
};
EPUBJS.core.request = function(url, type, withCredentials) {
var supportsURL = window.URL;
var BLOB_RESPONSE = supportsURL ? "blob" : "arraybuffer";
var deferred = new RSVP.defer();
var xhr = new XMLHttpRequest();
var uri;
//-- Check from PDF.js:
// https://github.com/mozilla/pdf.js/blob/master/web/compatibility.js
var xhrPrototype = XMLHttpRequest.prototype;
var handler = function() {
var r;
if (this.readyState != this.DONE) return;
if ((this.status === 200 || this.status === 0) && this.response) { // Android & Firefox reporting 0 for local & blob urls
if (type == 'xml'){
// If this.responseXML wasn't set, try to parse using a DOMParser from text
if(!this.responseXML) {
r = new DOMParser().parseFromString(this.response, "application/xml");
} else {
r = this.responseXML;
}
} else if (type == 'xhtml') {
if (!this.responseXML){
r = new DOMParser().parseFromString(this.response, "application/xhtml+xml");
} else {
r = this.responseXML;
}
} else if (type == 'html') {
if (!this.responseXML){
r = new DOMParser().parseFromString(this.response, "text/html");
} else {
r = this.responseXML;
}
} else if (type == 'json') {
r = JSON.parse(this.response);
} else if (type == 'blob') {
if (supportsURL) {
r = this.response;
} else {
//-- Safari doesn't support responseType blob, so create a blob from arraybuffer
r = new Blob([this.response]);
}
} else {
r = this.response;
}
deferred.resolve(r);
} else {
deferred.reject({
message : this.response,
stack : new Error().stack
});
}
};
if (!('overrideMimeType' in xhrPrototype)) {
// IE10 might have response, but not overrideMimeType
Object.defineProperty(xhrPrototype, 'overrideMimeType', {
value: function xmlHttpRequestOverrideMimeType(mimeType) {}
});
}
xhr.onreadystatechange = handler;
xhr.open("GET", url, true);
if(withCredentials) {
xhr.withCredentials = true;
}
// If type isn't set, determine it from the file extension
if(!type) {
uri = EPUBJS.core.uri(url);
type = uri.extension;
type = {
'htm': 'html'
}[type] || type;
}
if(type == 'blob'){
xhr.responseType = BLOB_RESPONSE;
}
if(type == "json") {
xhr.setRequestHeader("Accept", "application/json");
}
if(type == 'xml') {
xhr.responseType = "document";
xhr.overrideMimeType('text/xml'); // for OPF parsing
}
if(type == 'xhtml') {
xhr.responseType = "document";
}
if(type == 'html') {
xhr.responseType = "document";
}
if(type == "binary") {
xhr.responseType = "arraybuffer";
}
xhr.send();
return deferred.promise;
};
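// Usage sketch for EPUBJS.core.request (the URL below is hypothetical):
// the returned RSVP promise resolves with a parsed document, JSON object,
// Blob, or raw response depending on `type`, which is inferred from the
// file extension when omitted.
function exampleRequest() {
  return EPUBJS.core.request("book/package.opf", "xml").then(function(doc) {
    // doc is an XML Document parsed from the OPF file
    return doc;
  }, function(err) {
    console.error("request failed", err.message);
  });
}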
EPUBJS.core.toArray = function(obj) {
var arr = [];
for (var member in obj) {
var newitm;
if ( obj.hasOwnProperty(member) ) {
newitm = obj[member];
newitm.ident = member;
arr.push(newitm);
}
}
return arr;
};
//-- Parse the different parts of a url, returning an object
EPUBJS.core.uri = function(url){
var uri = {
protocol : '',
host : '',
path : '',
origin : '',
directory : '',
base : '',
filename : '',
extension : '',
fragment : '',
href : url
},
blob = url.indexOf('blob:'),
doubleSlash = url.indexOf('://'),
search = url.indexOf('?'),
fragment = url.indexOf("#"),
withoutProtocol,
dot,
firstSlash;
if(blob === 0) {
uri.protocol = "blob";
// url.indexOf(0, fragment) returned a number here; base should be the url up to the fragment
uri.base = fragment != -1 ? url.slice(0, fragment) : url;
return uri;
}
if(fragment != -1) {
uri.fragment = url.slice(fragment + 1);
url = url.slice(0, fragment);
}
if(search != -1) {
uri.search = url.slice(search + 1);
url = url.slice(0, search);
}
if(doubleSlash != -1) {
uri.protocol = url.slice(0, doubleSlash);
withoutProtocol = url.slice(doubleSlash+3);
firstSlash = withoutProtocol.indexOf('/');
if(firstSlash === -1) {
uri.host = withoutProtocol; // was uri.path, which is still empty at this point
uri.path = "";
} else {
uri.host = withoutProtocol.slice(0, firstSlash);
uri.path = withoutProtocol.slice(firstSlash);
}
uri.origin = uri.protocol + "://" + uri.host;
uri.directory = EPUBJS.core.folder(uri.path);
uri.base = uri.origin + uri.directory;
// return origin;
} else {
uri.path = url;
uri.directory = EPUBJS.core.folder(url);
uri.base = uri.directory;
}
//-- Filename
uri.filename = url.replace(uri.base, '');
dot = uri.filename.lastIndexOf('.');
if(dot != -1) {
uri.extension = uri.filename.slice(dot+1);
}
return uri;
};
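// Worked example for EPUBJS.core.uri (the URL is hypothetical):
function exampleUriParsing() {
  var uri = EPUBJS.core.uri("http://example.com/books/moby/chapter1.xhtml#pgepubid00003");
  // uri.protocol  -> "http"
  // uri.host      -> "example.com"
  // uri.path      -> "/books/moby/chapter1.xhtml"
  // uri.origin    -> "http://example.com"
  // uri.directory -> "/books/moby/"
  // uri.base      -> "http://example.com/books/moby/"
  // uri.filename  -> "chapter1.xhtml"
  // uri.extension -> "xhtml"
  // uri.fragment  -> "pgepubid00003"
  return uri;
}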
//-- Parse out the folder, will return everything before the last slash
EPUBJS.core.folder = function(url){
var lastSlash = url.lastIndexOf('/');
if(lastSlash == -1) return '';
return url.slice(0, lastSlash + 1);
};
//-- https://github.com/ebidel/filer.js/blob/master/src/filer.js#L128
EPUBJS.core.dataURLToBlob = function(dataURL) {
var BASE64_MARKER = ';base64,',
parts, contentType, raw, rawLength, uInt8Array;
if (dataURL.indexOf(BASE64_MARKER) == -1) {
parts = dataURL.split(',');
contentType = parts[0].split(':')[1];
raw = parts[1];
return new Blob([raw], {type: contentType});
}
parts = dataURL.split(BASE64_MARKER);
contentType = parts[0].split(':')[1];
raw = window.atob(parts[1]);
rawLength = raw.length;
uInt8Array = new Uint8Array(rawLength);
for (var i = 0; i < rawLength; ++i) {
uInt8Array[i] = raw.charCodeAt(i);
}
return new Blob([uInt8Array], {type: contentType});
};
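// Usage sketch for EPUBJS.core.dataURLToBlob (tiny hypothetical data URL):
function exampleDataURLToBlob() {
  var dataURL = "data:text/plain;base64,aGVsbG8="; // "hello"
  var blob = EPUBJS.core.dataURLToBlob(dataURL);
  // blob.type -> "text/plain", blob.size -> 5
  return blob;
}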
//-- Load scripts async: http://stackoverflow.com/questions/7718935/load-scripts-asynchronously
EPUBJS.core.addScript = function(src, callback, target) {
var s, r;
r = false;
s = document.createElement('script');
s.type = 'text/javascript';
s.async = false;
s.src = src;
s.onload = s.onreadystatechange = function() {
if ( !r && (!this.readyState || this.readyState == 'complete') ) {
r = true;
if(callback) callback();
}
};
target = target || document.body;
target.appendChild(s);
};
EPUBJS.core.addScripts = function(srcArr, callback, target) {
var total = srcArr.length,
curr = 0,
cb = function(){
curr++;
if(total == curr){
if(callback) callback();
}else{
EPUBJS.core.addScript(srcArr[curr], cb, target);
}
};
EPUBJS.core.addScript(srcArr[curr], cb, target);
};
EPUBJS.core.addCss = function(src, callback, target) {
var s, r;
r = false;
s = document.createElement('link');
s.type = 'text/css';
s.rel = "stylesheet";
s.href = src;
s.onload = s.onreadystatechange = function() {
if ( !r && (!this.readyState || this.readyState == 'complete') ) {
r = true;
if(callback) callback();
}
};
target = target || document.body;
target.appendChild(s);
};
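// Usage sketch for the async loader helpers (the paths are hypothetical):
// addScripts loads scripts sequentially and fires the callback after the
// last one; addCss appends a stylesheet link the same way.
function exampleLoadAssets(done) {
  EPUBJS.core.addCss("css/popups.css", null, document.head);
  EPUBJS.core.addScripts(["js/first.js", "js/second.js"], done, document.head);
}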
EPUBJS.core.prefixed = function(unprefixed) {
var vendors = ["Webkit", "Moz", "O", "ms" ],
prefixes = ['-Webkit-', '-moz-', '-o-', '-ms-'],
upper = unprefixed[0].toUpperCase() + unprefixed.slice(1),
length = vendors.length;
if (typeof(document.documentElement.style[unprefixed]) != 'undefined') {
return unprefixed;
}
for ( var i=0; i < length; i++ ) {
if (typeof(document.documentElement.style[vendors[i] + upper]) != 'undefined') {
return vendors[i] + upper;
}
}
return unprefixed;
};
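// Usage sketch for EPUBJS.core.prefixed: returns the style property name
// supported by the current browser.
function examplePrefixed() {
  var transformProp = EPUBJS.core.prefixed("transform");
  // -> "transform" on modern browsers, "WebkitTransform" on older WebKit
  document.documentElement.style[transformProp] = "translateX(0)";
  return transformProp;
}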
EPUBJS.core.resolveUrl = function(base, path) {
var url,
segments = [],
uri = EPUBJS.core.uri(path),
folders = base.split("/"),
paths;
if(uri.host) {
return path;
}
folders.pop();
paths = path.split("/");
paths.forEach(function(p){
if(p === ".."){
folders.pop();
}else{
segments.push(p);
}
});
url = folders.concat(segments);
return url.join("/");
};
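// Worked example for EPUBJS.core.resolveUrl (hypothetical paths): relative
// paths are resolved against the base's folder, honoring "..":
// resolveUrl("OPS/package.opf", "../images/cover.png")
//   folders -> ["OPS"], ".." pops it, result -> "images/cover.png"
function exampleResolveUrl() {
  return EPUBJS.core.resolveUrl("OPS/package.opf", "images/cover.png");
  // -> "OPS/images/cover.png"
}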
// http://stackoverflow.com/questions/105034/how-to-create-a-guid-uuid-in-javascript
EPUBJS.core.uuid = function() {
var d = new Date().getTime();
var uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
var r = (d + Math.random()*16)%16 | 0;
d = Math.floor(d/16);
return (c=='x' ? r : (r&0x7|0x8)).toString(16);
});
return uuid;
};
// Fast binary-search insert for a sorted array -- based on:
// http://stackoverflow.com/questions/1344500/efficient-way-to-insert-a-number-into-a-sorted-array-of-numbers
EPUBJS.core.insert = function(item, array, compareFunction) {
var location = EPUBJS.core.locationOf(item, array, compareFunction);
array.splice(location, 0, item);
return location;
};
EPUBJS.core.locationOf = function(item, array, compareFunction, _start, _end) {
var start = _start || 0;
var end = _end || array.length;
var pivot = parseInt(start + (end - start) / 2);
var compared;
if(!compareFunction){
compareFunction = function(a, b) {
if(a > b) return 1;
if(a < b) return -1;
if(a == b) return 0; // was an assignment: a = b
};
}
if(end-start <= 0) {
return pivot;
}
compared = compareFunction(array[pivot], item);
if(end-start === 1) {
return compared > 0 ? pivot : pivot + 1;
}
if(compared === 0) {
return pivot;
}
if(compared === -1) {
return EPUBJS.core.locationOf(item, array, compareFunction, pivot, end);
} else{
return EPUBJS.core.locationOf(item, array, compareFunction, start, pivot);
}
};
EPUBJS.core.indexOfSorted = function(item, array, compareFunction, _start, _end) {
var start = _start || 0;
var end = _end || array.length;
var pivot = parseInt(start + (end - start) / 2);
var compared;
if(!compareFunction){
compareFunction = function(a, b) {
if(a > b) return 1;
if(a < b) return -1;
if(a == b) return 0; // was an assignment: a = b
};
}
if(end-start <= 0) {
return -1; // Not found
}
compared = compareFunction(array[pivot], item);
if(end-start === 1) {
return compared === 0 ? pivot : -1;
}
if(compared === 0) {
return pivot; // Found
}
if(compared === -1) {
return EPUBJS.core.indexOfSorted(item, array, compareFunction, pivot, end);
} else{
return EPUBJS.core.indexOfSorted(item, array, compareFunction, start, pivot);
}
};
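// Usage sketch for the sorted-array helpers: locationOf finds the
// insertion point, insert splices at that point, indexOfSorted does a
// binary search (relying on the corrected default compareFunction above).
function exampleSortedArrayHelpers() {
  var sorted = [1, 3, 5, 9];
  EPUBJS.core.insert(7, sorted);                      // sorted -> [1, 3, 5, 7, 9]
  var idx = EPUBJS.core.indexOfSorted(5, sorted);     // -> 2
  var missing = EPUBJS.core.indexOfSorted(4, sorted); // -> -1
  return { sorted: sorted, idx: idx, missing: missing };
}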
EPUBJS.core.queue = function(_scope){
var _q = [];
var scope = _scope;
// Add an item to the queue
var enqueue = function(funcName, args, context) {
_q.push({
"funcName" : funcName,
"args" : args,
"context" : context
});
return _q;
};
// Run one item
var dequeue = function(){
var inwait;
if(_q.length) {
inwait = _q.shift();
// Defer to any current tasks
// setTimeout(function(){
scope[inwait.funcName].apply(inwait.context || scope, inwait.args);
// }, 0);
}
};
// Run All
var flush = function(){
while(_q.length) {
dequeue();
}
};
// Clear all items in wait
var clear = function(){
_q = [];
};
var length = function(){
return _q.length;
};
return {
"enqueue" : enqueue,
"dequeue" : dequeue,
"flush" : flush,
"clear" : clear,
"length" : length
};
};
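// Usage sketch for EPUBJS.core.queue: calls are queued by method name and
// replayed against the scope object. The `logger` scope is hypothetical.
function exampleQueue() {
  var logger = {
    say: function(msg) { console.log(msg); }
  };
  var q = EPUBJS.core.queue(logger);
  q.enqueue("say", ["first"]);
  q.enqueue("say", ["second"]);
  q.length(); // -> 2
  q.flush();  // runs logger.say("first") then logger.say("second")
}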
// From: https://code.google.com/p/fbug/source/browse/branches/firebug1.10/content/firebug/lib/xpath.js
/**
* Gets an XPath for an element which describes its hierarchical location.
*/
EPUBJS.core.getElementXPath = function(element) {
if (element && element.id) {
return '//*[@id="' + element.id + '"]';
} else {
return EPUBJS.core.getElementTreeXPath(element);
}
};
EPUBJS.core.getElementTreeXPath = function(element) {
var paths = [];
var isXhtml = (element.ownerDocument.documentElement.getAttribute('xmlns') === "http://www.w3.org/1999/xhtml");
var index, nodeName, tagName, pathIndex;
if(element.nodeType === Node.TEXT_NODE){
// index = Array.prototype.indexOf.call(element.parentNode.childNodes, element) + 1;
index = EPUBJS.core.indexOfTextNode(element) + 1;
paths.push("text()["+index+"]");
element = element.parentNode;
}
// Use nodeName (instead of localName) so namespace prefix is included (if any).
for (; element && element.nodeType == 1; element = element.parentNode)
{
index = 0;
for (var sibling = element.previousSibling; sibling; sibling = sibling.previousSibling)
{
// Ignore document type declaration.
if (sibling.nodeType == Node.DOCUMENT_TYPE_NODE) {
continue;
}
if (sibling.nodeName == element.nodeName) {
++index;
}
}
nodeName = element.nodeName.toLowerCase();
tagName = (isXhtml ? "xhtml:" + nodeName : nodeName);
pathIndex = (index ? "[" + (index+1) + "]" : "");
paths.splice(0, 0, tagName + pathIndex);
}
return paths.length ? "./" + paths.join("/") : null;
};
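// Usage sketch for the XPath helpers: elements with an id get a short
// id-based expression, everything else gets a positional tree path.
function exampleElementXPath(el) {
  // <p id="intro">       -> '//*[@id="intro"]'
  // second <p> in <body> -> './html/body/p[2]' (with an 'xhtml:' prefix
  //                         on each step inside XHTML documents)
  return EPUBJS.core.getElementXPath(el);
}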
EPUBJS.core.nsResolver = function(prefix) {
var ns = {
'xhtml' : 'http://www.w3.org/1999/xhtml',
'epub': 'http://www.idpf.org/2007/ops'
};
return ns[prefix] || null;
};
//https://stackoverflow.com/questions/13482352/xquery-looking-for-text-with-single-quote/13483496#13483496
EPUBJS.core.cleanStringForXpath = function(str) {
var parts = str.match(/[^'"]+|['"]/g);
parts = parts.map(function(part){
if (part === "'") {
return '\"\'\"'; // output "'"
}
if (part === '"') {
return "\'\"\'"; // output '"'
}
return "\'" + part + "\'";
});
return "concat(\'\'," + parts.join(",") + ")";
};
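// Worked example for cleanStringForXpath: quote characters are emitted as
// alternately-quoted literals so the result can be embedded in an XPath
// expression via concat().
function exampleCleanStringForXpath() {
  return EPUBJS.core.cleanStringForXpath('it\'s "fine"');
  // -> concat('','it',"'",'s ','"','fine','"')
}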
// Underscore
EPUBJS.core.defaults = function(obj) {
for (var i = 1, length = arguments.length; i < length; i++) {
var source = arguments[i];
for (var prop in source) {
if (obj[prop] === void 0) obj[prop] = source[prop];
}
}
return obj;
};
EPUBJS.core.extend = function(target) {
var sources = [].slice.call(arguments, 1);
sources.forEach(function (source) {
if(!source) return;
Object.getOwnPropertyNames(source).forEach(function(propName) {
Object.defineProperty(target, propName, Object.getOwnPropertyDescriptor(source, propName));
});
});
return target;
};
EPUBJS.core.clone = function(obj) {
return EPUBJS.core.isArray(obj) ? obj.slice() : EPUBJS.core.extend({}, obj);
};
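// Usage sketch: defaults fills in missing keys only; extend copies own
// properties (including getters/setters) left to right; clone makes a
// shallow copy.
function exampleObjectHelpers() {
  var settings = EPUBJS.core.defaults({ width: 900 }, { width: 600, height: 800 });
  // settings -> { width: 900, height: 800 }
  var merged = EPUBJS.core.extend({}, { a: 1 }, { b: 2 });
  // merged -> { a: 1, b: 2 }
  var copy = EPUBJS.core.clone(settings); // new object, same keys
  return { settings: settings, merged: merged, copy: copy };
}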
EPUBJS.core.isElement = function(obj) {
return !!(obj && obj.nodeType == 1);
};
EPUBJS.core.isNumber = function(n) {
return !isNaN(parseFloat(n)) && isFinite(n);
};
EPUBJS.core.isString = function(str) {
return (typeof str === 'string' || str instanceof String);
};
EPUBJS.core.isArray = Array.isArray || function(obj) {
return Object.prototype.toString.call(obj) === '[object Array]';
};
// Lodash
EPUBJS.core.values = function(object) {
var index = -1;
var props, length, result;
if(!object) return [];
props = Object.keys(object);
length = props.length;
result = Array(length);
while (++index < length) {
result[index] = object[props[index]];
}
return result;
};
EPUBJS.core.indexOfNode = function(node, typeId) {
var parent = node.parentNode;
var children = parent.childNodes;
var sib;
var index = -1;
for (var i = 0; i < children.length; i++) {
sib = children[i];
if (sib.nodeType === typeId) {
index++;
}
if (sib == node) break;
}
return index;
}
EPUBJS.core.indexOfTextNode = function(textNode) {
return EPUBJS.core.indexOfNode(textNode, TEXT_NODE);
}
EPUBJS.core.indexOfElementNode = function(elementNode) {
return EPUBJS.core.indexOfNode(elementNode, ELEMENT_NODE);
}
var EPUBJS = EPUBJS || {};
EPUBJS.reader = {};
EPUBJS.reader.plugins = {}; //-- Attach extra Controllers as plugins (like search?)
(function(root, $) {
var previousReader = root.ePubReader || {};
var ePubReader = root.ePubReader = function(path, options) {
return new EPUBJS.Reader(path, options);
};
//exports to multiple environments
if (typeof define === 'function' && define.amd) {
//AMD
define(function(){ return ePubReader; }); // was Reader, which is undefined here
} else if (typeof module != "undefined" && module.exports) {
//Node
module.exports = ePubReader;
}
})(window, jQuery);
EPUBJS.Reader = function(bookPath, _options) {
var reader = this;
var book;
var plugin;
var $viewer = $("#viewer");
var search = window.location.search;
var parameters;
this.settings = EPUBJS.core.defaults(_options || {}, {
bookPath : bookPath,
restore : false,
reload : false,
bookmarks : undefined,
annotations : undefined,
contained : undefined,
bookKey : undefined,
styles : undefined,
sidebarReflow: false,
generatePagination: false,
history: true
});
// Override options with search parameters
if(search) {
parameters = search.slice(1).split("&");
parameters.forEach(function(p){
var split = p.split("=");
var name = split[0];
var value = split[1] || '';
reader.settings[name] = decodeURIComponent(value);
});
}
this.setBookKey(this.settings.bookPath); //-- This could be username + path or any unique string
if(this.settings.restore && this.isSaved()) {
this.applySavedSettings();
}
this.settings.styles = this.settings.styles || {
fontSize : "100%"
};
this.book = book = new ePub(this.settings.bookPath, this.settings);
this.offline = false;
this.sidebarOpen = false;
if(!this.settings.bookmarks) {
this.settings.bookmarks = [];
}
if(!this.settings.annotations) {
this.settings.annotations = [];
}
if(this.settings.generatePagination) {
book.generatePagination($viewer.width(), $viewer.height());
}
this.rendition = book.renderTo("viewer", {
ignoreClass: "annotator-hl",
width: "100%",
height: "100%"
});
if(this.settings.previousLocationCfi) {
this.displayed = this.rendition.display(this.settings.previousLocationCfi);
} else {
this.displayed = this.rendition.display();
}
book.ready.then(function () {
reader.ReaderController = EPUBJS.reader.ReaderController.call(reader, book);
reader.SettingsController = EPUBJS.reader.SettingsController.call(reader, book);
reader.ControlsController = EPUBJS.reader.ControlsController.call(reader, book);
reader.SidebarController = EPUBJS.reader.SidebarController.call(reader, book);
reader.BookmarksController = EPUBJS.reader.BookmarksController.call(reader, book);
reader.NotesController = EPUBJS.reader.NotesController.call(reader, book);
window.addEventListener("hashchange", this.hashChanged.bind(this), false);
document.addEventListener('keydown', this.adjustFontSize.bind(this), false);
this.rendition.on("keydown", this.adjustFontSize.bind(this));
this.rendition.on("keydown", reader.ReaderController.arrowKeys.bind(this));
this.rendition.on("selected", this.selectedRange.bind(this));
}.bind(this)).then(function() {
reader.ReaderController.hideLoader();
}.bind(this));
// Call Plugins
for(plugin in EPUBJS.reader.plugins) {
if(EPUBJS.reader.plugins.hasOwnProperty(plugin)) {
reader[plugin] = EPUBJS.reader.plugins[plugin].call(reader, book);
}
}
book.loaded.metadata.then(function(meta) {
reader.MetaController = EPUBJS.reader.MetaController.call(reader, meta);
});
book.loaded.navigation.then(function(navigation) {
reader.TocController = EPUBJS.reader.TocController.call(reader, navigation);
});
window.addEventListener("beforeunload", this.unload.bind(this), false);
return this;
};
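// Usage sketch for the reader (the book path is hypothetical; the page
// must provide the #viewer element and the sidebar markup):
function exampleCreateReader() {
  return ePubReader("books/moby-dick/", {
    restore: true,        // reload position/settings from localStorage
    history: true,        // push CFI fragments onto the URL hash
    sidebarReflow: false  // resize the view instead of overlaying the sidebar
  });
}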
EPUBJS.Reader.prototype.adjustFontSize = function(e) {
var fontSize;
var interval = 2;
var PLUS = 187;
var MINUS = 189;
var ZERO = 48;
var MOD = (e.ctrlKey || e.metaKey );
if(!this.settings.styles) return;
if(!this.settings.styles.fontSize) {
this.settings.styles.fontSize = "100%";
}
fontSize = parseInt(this.settings.styles.fontSize.slice(0, -1));
if(MOD && e.keyCode == PLUS) {
e.preventDefault();
this.book.setStyle("fontSize", (fontSize + interval) + "%");
}
if(MOD && e.keyCode == MINUS){
e.preventDefault();
this.book.setStyle("fontSize", (fontSize - interval) + "%");
}
if(MOD && e.keyCode == ZERO){
e.preventDefault();
this.book.setStyle("fontSize", "100%");
}
};
EPUBJS.Reader.prototype.addBookmark = function(cfi) {
var present = this.isBookmarked(cfi);
if(present > -1 ) return;
this.settings.bookmarks.push(cfi);
this.trigger("reader:bookmarked", cfi);
};
EPUBJS.Reader.prototype.removeBookmark = function(cfi) {
var bookmark = this.isBookmarked(cfi);
if( bookmark === -1 ) return;
this.settings.bookmarks.splice(bookmark, 1);
this.trigger("reader:unbookmarked", bookmark);
};
EPUBJS.Reader.prototype.isBookmarked = function(cfi) {
var bookmarks = this.settings.bookmarks;
return bookmarks.indexOf(cfi);
};
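// Usage sketch for the bookmark API: isBookmarked returns the index in
// settings.bookmarks (-1 when absent), so it doubles as a toggle test.
function exampleToggleBookmark(reader, cfi) {
  if (reader.isBookmarked(cfi) === -1) {
    reader.addBookmark(cfi);    // fires "reader:bookmarked"
  } else {
    reader.removeBookmark(cfi); // fires "reader:unbookmarked"
  }
}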
/*
EPUBJS.Reader.prototype.searchBookmarked = function(cfi) {
var bookmarks = this.settings.bookmarks,
len = bookmarks.length,
i;
for(i = 0; i < len; i++) {
if (bookmarks[i]['cfi'] === cfi) return i;
}
return -1;
};
*/
EPUBJS.Reader.prototype.clearBookmarks = function() {
this.settings.bookmarks = [];
};
//-- Notes
EPUBJS.Reader.prototype.addNote = function(note) {
this.settings.annotations.push(note);
};
EPUBJS.Reader.prototype.removeNote = function(note) {
var index = this.settings.annotations.indexOf(note);
if( index === -1 ) return;
delete this.settings.annotations[index];
};
EPUBJS.Reader.prototype.clearNotes = function() {
this.settings.annotations = [];
};
//-- Settings
EPUBJS.Reader.prototype.setBookKey = function(identifier){
if(!this.settings.bookKey) {
this.settings.bookKey = "epubjsreader:" + EPUBJS.VERSION + ":" + window.location.host + ":" + identifier;
}
return this.settings.bookKey;
};
//-- Checks if the book setting can be retrieved from localStorage
EPUBJS.Reader.prototype.isSaved = function(bookPath) {
var storedSettings;
if(!localStorage) {
return false;
}
storedSettings = localStorage.getItem(this.settings.bookKey);
if(storedSettings === null) {
return false;
} else {
return true;
}
};
EPUBJS.Reader.prototype.removeSavedSettings = function() {
if(!localStorage) {
return false;
}
localStorage.removeItem(this.settings.bookKey);
};
EPUBJS.Reader.prototype.applySavedSettings = function() {
var stored;
if(!localStorage) {
return false;
}
try {
stored = JSON.parse(localStorage.getItem(this.settings.bookKey));
} catch (e) { // parsing error of localStorage
return false;
}
if(stored) {
// Merge styles
if(stored.styles) {
this.settings.styles = EPUBJS.core.defaults(this.settings.styles || {}, stored.styles);
}
// Merge the rest
this.settings = EPUBJS.core.defaults(this.settings, stored);
return true;
} else {
return false;
}
};
EPUBJS.Reader.prototype.saveSettings = function(){
if(this.book) {
this.settings.previousLocationCfi = this.rendition.currentLocation().start.cfi;
}
if(!localStorage) {
return false;
}
localStorage.setItem(this.settings.bookKey, JSON.stringify(this.settings));
};
EPUBJS.Reader.prototype.unload = function(){
if(this.settings.restore && localStorage) {
this.saveSettings();
}
};
EPUBJS.Reader.prototype.hashChanged = function(){
var hash = window.location.hash.slice(1);
this.rendition.display(hash);
};
EPUBJS.Reader.prototype.selectedRange = function(cfiRange){
var cfiFragment = "#"+cfiRange;
// Update the History Location
if(this.settings.history &&
window.location.hash != cfiFragment) {
// Add CFI fragment to the history
history.pushState({}, '', cfiFragment);
this.currentLocationCfi = cfiRange;
}
};
//-- Enable binding events to reader
RSVP.EventTarget.mixin(EPUBJS.Reader.prototype);
EPUBJS.reader.BookmarksController = function() {
var reader = this;
var book = this.book;
var rendition = this.rendition;
var $bookmarks = $("#bookmarksView"),
$list = $bookmarks.find("#bookmarks");
var docfrag = document.createDocumentFragment();
var show = function() {
$bookmarks.show();
};
var hide = function() {
$bookmarks.hide();
};
var counter = 0;
var createBookmarkItem = function(cfi) {
var listitem = document.createElement("li"),
link = document.createElement("a");
listitem.id = "bookmark-"+counter;
listitem.classList.add('list_item');
var spineItem = book.spine.get(cfi);
var tocItem;
if (spineItem.index in book.navigation.toc) {
tocItem = book.navigation.toc[spineItem.index];
link.textContent = tocItem.label;
} else {
link.textContent = cfi;
}
link.href = cfi;
link.classList.add('bookmark_link');
link.addEventListener("click", function(event){
var cfi = this.getAttribute('href');
rendition.display(cfi);
event.preventDefault();
}, false);
listitem.appendChild(link);
counter++;
return listitem;
};
this.settings.bookmarks.forEach(function(cfi) {
var bookmark = createBookmarkItem(cfi);
docfrag.appendChild(bookmark);
});
$list.append(docfrag);
this.on("reader:bookmarked", function(cfi) {
var item = createBookmarkItem(cfi);
$list.append(item);
});
this.on("reader:unbookmarked", function(index) {
var $item = $("#bookmark-"+index);
$item.remove();
});
return {
"show" : show,
"hide" : hide
};
};
EPUBJS.reader.ControlsController = function(book) {
var reader = this;
var rendition = this.rendition;
var $store = $("#store"),
$fullscreen = $("#fullscreen"),
$fullscreenicon = $("#fullscreenicon"),
$cancelfullscreenicon = $("#cancelfullscreenicon"),
$slider = $("#slider"),
$main = $("#main"),
$sidebar = $("#sidebar"),
$settings = $("#setting"),
$bookmark = $("#bookmark");
/*
var goOnline = function() {
reader.offline = false;
// $store.attr("src", $icon.data("save"));
};
var goOffline = function() {
reader.offline = true;
// $store.attr("src", $icon.data("saved"));
};
var fullscreen = false;
book.on("book:online", goOnline);
book.on("book:offline", goOffline);
*/
$slider.on("click", function () {
if(reader.sidebarOpen) {
reader.SidebarController.hide();
$slider.addClass("icon-menu");
$slider.removeClass("icon-right");
} else {
reader.SidebarController.show();
$slider.addClass("icon-right");
$slider.removeClass("icon-menu");
}
});
var fullscreen = false; // declared here; previously only inside the commented-out block above
if(typeof screenfull !== 'undefined') {
$fullscreen.on("click", function() {
screenfull.toggle($('#container')[0]);
});
if(screenfull.raw) {
document.addEventListener(screenfull.raw.fullscreenchange, function() {
fullscreen = screenfull.isFullscreen;
if(fullscreen) {
$fullscreen
.addClass("icon-resize-small")
.removeClass("icon-resize-full");
} else {
$fullscreen
.addClass("icon-resize-full")
.removeClass("icon-resize-small");
}
});
}
}
$settings.on("click", function() {
reader.SettingsController.show();
});
$bookmark.on("click", function() {
var cfi = reader.rendition.currentLocation().start.cfi;
var bookmarked = reader.isBookmarked(cfi);
if(bookmarked === -1) { //-- Add bookmark
reader.addBookmark(cfi);
$bookmark
.addClass("icon-bookmark")
.removeClass("icon-bookmark-empty");
} else { //-- Remove Bookmark
reader.removeBookmark(cfi);
$bookmark
.removeClass("icon-bookmark")
.addClass("icon-bookmark-empty");
}
});
rendition.on('relocated', function(location){
var cfi = location.start.cfi;
var cfiFragment = "#" + cfi;
//-- Check if bookmarked
var bookmarked = reader.isBookmarked(cfi);
if(bookmarked === -1) { //-- Not bookmarked
$bookmark
.removeClass("icon-bookmark")
.addClass("icon-bookmark-empty");
} else { //-- Bookmarked
$bookmark
.addClass("icon-bookmark")
.removeClass("icon-bookmark-empty");
}
reader.currentLocationCfi = cfi;
// Update the History Location
if(reader.settings.history &&
window.location.hash != cfiFragment) {
// Add CFI fragment to the history
history.pushState({}, '', cfiFragment);
}
});
return {
};
};
EPUBJS.reader.MetaController = function(meta) {
var title = meta.title,
author = meta.creator;
var $title = $("#book-title"),
$author = $("#chapter-title"),
$dash = $("#title-seperator");
document.title = title+" – "+author;
$title.html(title);
$author.html(author);
$dash.show();
};
EPUBJS.reader.NotesController = function() {
var book = this.book;
var rendition = this.rendition;
var reader = this;
var $notesView = $("#notesView");
var $notes = $("#notes");
var $text = $("#note-text");
var $anchor = $("#note-anchor");
var annotations = reader.settings.annotations;
var renderer = book.renderer;
var popups = [];
var epubcfi = new ePub.CFI();
var show = function() {
$notesView.show();
};
var hide = function() {
$notesView.hide();
}
var insertAtPoint = function(e) {
var range;
var textNode;
var offset;
var doc = book.renderer.doc;
var cfi;
var annotation;
// standard
if (doc.caretPositionFromPoint) {
range = doc.caretPositionFromPoint(e.clientX, e.clientY);
textNode = range.offsetNode;
offset = range.offset;
// WebKit
} else if (doc.caretRangeFromPoint) {
range = doc.caretRangeFromPoint(e.clientX, e.clientY);
textNode = range.startContainer;
offset = range.startOffset;
}
if (textNode.nodeType !== 3) {
for (var i=0; i < textNode.childNodes.length; i++) {
if (textNode.childNodes[i].nodeType == 3) {
textNode = textNode.childNodes[i];
break;
}
}
}
// Find the end of the sentence
offset = textNode.textContent.indexOf(".", offset);
if(offset === -1){
offset = textNode.length; // Last item
} else {
offset += 1; // After the period
}
cfi = epubcfi.generateCfiFromTextNode(textNode, offset, book.renderer.currentChapter.cfiBase);
annotation = {
annotatedAt: new Date(),
anchor: cfi,
body: $text.val()
}
// add to list
reader.addNote(annotation);
// attach
addAnnotation(annotation);
placeMarker(annotation);
// clear
$text.val('');
$anchor.text("Attach");
$text.prop("disabled", false);
rendition.off("click", insertAtPoint);
};
var addAnnotation = function(annotation){
var note = document.createElement("li");
var link = document.createElement("a");
note.innerHTML = annotation.body;
// note.setAttribute("ref", annotation.anchor);
link.innerHTML = " context »";
link.href = "#"+annotation.anchor;
link.onclick = function(){
rendition.display(annotation.anchor);
return false;
};
note.appendChild(link);
$notes.append(note);
};
var placeMarker = function(annotation){
var doc = book.renderer.doc;
var marker = document.createElement("span");
var mark = document.createElement("a");
marker.classList.add("footnotesuperscript", "reader_generated");
marker.style.verticalAlign = "super";
marker.style.fontSize = ".75em";
// marker.style.position = "relative";
marker.style.lineHeight = "1em";
// mark.style.display = "inline-block";
mark.style.padding = "2px";
mark.style.backgroundColor = "#fffa96";
mark.style.borderRadius = "5px";
mark.style.cursor = "pointer";
marker.id = "note-"+EPUBJS.core.uuid();
mark.innerHTML = annotations.indexOf(annotation) + 1 + "[Reader]";
marker.appendChild(mark);
epubcfi.addMarker(annotation.anchor, doc, marker);
markerEvents(marker, annotation.body);
}
var markerEvents = function(item, txt){
var id = item.id;
var showPop = function(){
var poppos,
iheight = renderer.height,
iwidth = renderer.width,
tip,
pop,
pop_content, // declared locally instead of leaking a global
popRect,     // same for the popup's bounding rect
maxHeight = 225,
itemRect,
left,
top,
pos;
//-- create a popup with endnote inside of it
if(!popups[id]) {
popups[id] = document.createElement("div");
popups[id].setAttribute("class", "popup");
pop_content = document.createElement("div");
popups[id].appendChild(pop_content);
pop_content.innerHTML = txt;
pop_content.setAttribute("class", "pop_content");
renderer.render.document.body.appendChild(popups[id]);
//-- TODO: will these leak memory? - Fred
popups[id].addEventListener("mouseover", onPop, false);
popups[id].addEventListener("mouseout", offPop, false);
//-- Add hide on page change
rendition.on("locationChanged", hidePop, this);
rendition.on("locationChanged", offPop, this);
// chapter.book.on("renderer:chapterDestroy", hidePop, this);
}
pop = popups[id];
pop_content = pop.firstChild; // re-resolve the content div for popups created on an earlier call
//-- get location of item
itemRect = item.getBoundingClientRect();
left = itemRect.left;
top = itemRect.top;
//-- show the popup
pop.classList.add("show");
//-- locations of popup
popRect = pop.getBoundingClientRect();
//-- position the popup
pop.style.left = left - popRect.width / 2 + "px";
pop.style.top = top + "px";
//-- Adjust max height
if(maxHeight > iheight / 2.5) {
maxHeight = iheight / 2.5;
pop_content.style.maxHeight = maxHeight + "px";
}
//-- switch above / below
if(popRect.height + top >= iheight - 25) {
pop.style.top = top - popRect.height + "px";
pop.classList.add("above");
}else{
pop.classList.remove("above");
}
//-- switch left
if(left - popRect.width <= 0) {
pop.style.left = left + "px";
pop.classList.add("left");
}else{
pop.classList.remove("left");
}
//-- switch right
if(left + popRect.width / 2 >= iwidth) {
//-- TEMP MOVE: 300
pop.style.left = left - 300 + "px";
popRect = pop.getBoundingClientRect();
pop.style.left = left - popRect.width + "px";
//-- switch above / below again
if(popRect.height + top >= iheight - 25) {
pop.style.top = top - popRect.height + "px";
pop.classList.add("above");
}else{
pop.classList.remove("above");
}
pop.classList.add("right");
}else{
pop.classList.remove("right");
}
}
var onPop = function(){
popups[id].classList.add("on");
}
var offPop = function(){
popups[id].classList.remove("on");
}
var hidePop = function(){
setTimeout(function(){
popups[id].classList.remove("show");
}, 100);
}
var openSidebar = function(){
reader.ReaderController.slideOut();
show();
};
item.addEventListener("mouseover", showPop, false);
item.addEventListener("mouseout", hidePop, false);
item.addEventListener("click", openSidebar, false);
}
$anchor.on("click", function(e){
$anchor.text("Cancel");
$text.prop("disabled", true); // boolean, not the string "true"
// listen for selection
rendition.on("click", insertAtPoint);
});
annotations.forEach(function(note) {
addAnnotation(note);
});
/*
renderer.registerHook("beforeChapterDisplay", function(callback, renderer){
var chapter = renderer.currentChapter;
annotations.forEach(function(note) {
var cfi = epubcfi.parse(note.anchor);
if(cfi.spinePos === chapter.spinePos) {
try {
placeMarker(note);
} catch(e) {
console.log("anchoring failed", note.anchor);
}
}
});
callback();
}, true);
*/
return {
"show" : show,
"hide" : hide
};
};
EPUBJS.reader.ReaderController = function(book) {
var $main = $("#main"),
$divider = $("#divider"),
$loader = $("#loader"),
$next = $("#next"),
$prev = $("#prev");
var reader = this;
var book = this.book;
var rendition = this.rendition;
var slideIn = function() {
var currentPosition = rendition.currentLocation().start.cfi;
if (reader.settings.sidebarReflow){
$main.removeClass('single');
$main.one("transitionend", function(){
rendition.resize();
});
} else {
$main.removeClass("closed");
}
};
var slideOut = function() {
var location = rendition.currentLocation();
if (!location) {
return;
}
var currentPosition = location.start.cfi;
if (reader.settings.sidebarReflow){
$main.addClass('single');
$main.one("transitionend", function(){
rendition.resize();
});
} else {
$main.addClass("closed");
}
};
var showLoader = function() {
$loader.show();
hideDivider();
};
var hideLoader = function() {
$loader.hide();
//-- If the book is using spreads, show the divider
// if(book.settings.spreads) {
// showDivider();
// }
};
var showDivider = function() {
$divider.addClass("show");
};
var hideDivider = function() {
$divider.removeClass("show");
};
var keylock = false;
var arrowKeys = function(e) {
if(keylock) return; // ignore keys while the page-turn lock is held
if(e.keyCode == 37) {
if(book.package.metadata.direction === "rtl") {
rendition.next();
} else {
rendition.prev();
}
$prev.addClass("active");
keylock = true;
setTimeout(function(){
keylock = false;
$prev.removeClass("active");
}, 100);
e.preventDefault();
}
if(e.keyCode == 39) {
if(book.package.metadata.direction === "rtl") {
rendition.prev();
} else {
rendition.next();
}
$next.addClass("active");
keylock = true;
setTimeout(function(){
keylock = false;
$next.removeClass("active");
}, 100);
e.preventDefault();
}
}
document.addEventListener('keydown', arrowKeys, false);
$next.on("click", function(e){
if(book.package.metadata.direction === "rtl") {
rendition.prev();
} else {
rendition.next();
}
e.preventDefault();
});
$prev.on("click", function(e){
if(book.package.metadata.direction === "rtl") {
rendition.next();
} else {
rendition.prev();
}
e.preventDefault();
});
rendition.on("layout", function(props){
if(props.spread === true) {
showDivider();
} else {
hideDivider();
}
});
rendition.on('relocated', function(location){
if (location.atStart) {
$prev.addClass("disabled");
}
if (location.atEnd) {
$next.addClass("disabled");
}
});
return {
"slideOut" : slideOut,
"slideIn" : slideIn,
"showLoader" : showLoader,
"hideLoader" : hideLoader,
"showDivider" : showDivider,
"hideDivider" : hideDivider,
"arrowKeys" : arrowKeys
};
};
EPUBJS.reader.SettingsController = function() {
var book = this.book;
var reader = this;
var $settings = $("#settings-modal"),
$overlay = $(".overlay");
var show = function() {
$settings.addClass("md-show");
};
var hide = function() {
$settings.removeClass("md-show");
};
var $sidebarReflowSetting = $('#sidebarReflow');
$sidebarReflowSetting.on('click', function() {
reader.settings.sidebarReflow = !reader.settings.sidebarReflow;
});
$settings.find(".closer").on("click", function() {
hide();
});
$overlay.on("click", function() {
hide();
});
return {
"show" : show,
"hide" : hide
};
};
EPUBJS.reader.SidebarController = function(book) {
var reader = this;
var $sidebar = $("#sidebar"),
$panels = $("#panels");
var activePanel = "Toc";
var changePanelTo = function(viewName) {
var controllerName = viewName + "Controller";
if(activePanel == viewName || typeof reader[controllerName] === 'undefined' ) return;
reader[activePanel+ "Controller"].hide();
reader[controllerName].show();
activePanel = viewName;
$panels.find('.active').removeClass("active");
$panels.find("#show-" + viewName ).addClass("active");
};
var getActivePanel = function() {
return activePanel;
};
var show = function() {
reader.sidebarOpen = true;
reader.ReaderController.slideOut();
$sidebar.addClass("open");
}
var hide = function() {
reader.sidebarOpen = false;
reader.ReaderController.slideIn();
$sidebar.removeClass("open");
}
$panels.find(".show_view").on("click", function(event) {
var view = $(this).data("view");
changePanelTo(view);
event.preventDefault();
});
return {
'show' : show,
'hide' : hide,
'getActivePanel' : getActivePanel,
'changePanelTo' : changePanelTo
};
};
EPUBJS.reader.TocController = function(toc) {
var book = this.book;
var rendition = this.rendition;
var $list = $("#tocView"),
docfrag = document.createDocumentFragment();
var currentChapter = false;
var generateTocItems = function(toc, level) {
var container = document.createElement("ul");
if(!level) level = 1;
toc.forEach(function(chapter) {
var listitem = document.createElement("li"),
link = document.createElement("a"),
toggle = document.createElement("a"); // comma keeps toggle in the var list instead of leaking a global
var subitems;
listitem.id = "toc-"+chapter.id;
listitem.classList.add('list_item');
link.textContent = chapter.label;
link.href = chapter.href;
link.classList.add('toc_link');
listitem.appendChild(link);
if(chapter.subitems && chapter.subitems.length > 0) {
level++;
subitems = generateTocItems(chapter.subitems, level);
toggle.classList.add('toc_toggle');
listitem.insertBefore(toggle, link);
listitem.appendChild(subitems);
}
container.appendChild(listitem);
});
return container;
};
var onShow = function() {
$list.show();
};
var onHide = function() {
$list.hide();
};
var chapterChange = function(e) {
var id = e.id,
$item = $list.find("#toc-"+id),
$current = $list.find(".currentChapter"),
$open = $list.find('.openChapter');
if($item.length){
if($item != $current && $item.has(currentChapter).length > 0) {
$current.removeClass("currentChapter");
}
$item.addClass("currentChapter");
// $open.removeClass("openChapter");
$item.parents('li').addClass("openChapter");
}
};
rendition.on('rendered', chapterChange); // event name was misspelled as 'renderered'
var tocitems = generateTocItems(toc);
docfrag.appendChild(tocitems);
$list.append(docfrag);
$list.find(".toc_link").on("click", function(event){
var url = this.getAttribute('href');
event.preventDefault();
//-- Provide the Book with the url to show
// The Url must be found in the books manifest
rendition.display(url);
$list.find(".currentChapter")
.addClass("openChapter")
.removeClass("currentChapter");
$(this).parent('li').addClass("currentChapter");
});
$list.find(".toc_toggle").on("click", function(event){
var $el = $(this).parent('li'),
open = $el.hasClass("openChapter");
event.preventDefault();
if(open){
$el.removeClass("openChapter");
} else {
$el.addClass("openChapter");
}
});
return {
"show" : onShow,
"hide" : onHide
};
};
//# sourceMappingURL=reader.js.map
window.hypothesisConfig = function() {
var Annotator = window.Annotator;
var $main = $("#main");
function EpubAnnotationSidebar(elem, options) {
options = {
server: true,
origin: true,
showHighlights: true,
Toolbar: {container: '#annotation-controls'}
}
Annotator.Host.call(this, elem, options);
}
EpubAnnotationSidebar.prototype = Object.create(Annotator.Host.prototype);
EpubAnnotationSidebar.prototype.show = function() {
this.frame.css({
'margin-left': (-1 * this.frame.width()) + "px"
});
this.frame.removeClass('annotator-collapsed');
if (!$main.hasClass('single')) {
$main.addClass("single");
this.toolbar.find('[name=sidebar-toggle]').removeClass('h-icon-chevron-left').addClass('h-icon-chevron-right');
this.setVisibleHighlights(true);
}
};
EpubAnnotationSidebar.prototype.hide = function() {
this.frame.css({
'margin-left': ''
});
this.frame.addClass('annotator-collapsed');
if ($main.hasClass('single')) {
$main.removeClass("single");
this.toolbar.find('[name=sidebar-toggle]').removeClass('h-icon-chevron-right').addClass('h-icon-chevron-left');
this.setVisibleHighlights(false);
}
};
return {
constructor: EpubAnnotationSidebar,
}
};
// This is the Epub.js plugin. Annotations are updated on location change.
EPUBJS.reader.plugins.HypothesisController = function (Book) {
var reader = this;
var $main = $("#main");
var updateAnnotations = function () {
var annotator = Book.renderer.render.window.annotator;
if (annotator && annotator.constructor.$) {
var annotations = getVisibleAnnotations(annotator.constructor.$);
annotator.showAnnotations(annotations)
}
};
var getVisibleAnnotations = function ($) {
var width = Book.renderer.render.iframe.clientWidth;
return $('.annotator-hl').map(function() {
var $this = $(this),
left = this.getBoundingClientRect().left;
if (left >= 0 && left <= width) {
return $this.data('annotation');
}
}).get();
};
Book.on("renderer:locationChanged", updateAnnotations);
return {}
};
EPUBJS.reader.search = {};
// Search Server -- https://github.com/futurepress/epubjs-search
EPUBJS.reader.search.SERVER = "https://pacific-cliffs-3579.herokuapp.com";
EPUBJS.reader.search.request = function(q, callback) {
var fetch = $.ajax({
dataType: "json",
url: EPUBJS.reader.search.SERVER + "/search?q=" + encodeURIComponent(q)
});
fetch.fail(function(err) {
console.error(err);
});
fetch.done(function(results) {
callback(results);
});
};
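// Usage sketch for the search API (the query is hypothetical; the server
// must expose /search?q=...):
function exampleSearchRequest() {
  EPUBJS.reader.search.request("white whale", function(data) {
    // data.results is an array of { title, href, cfi, highlight } entries,
    // as consumed by the SearchController below
    data.results.forEach(function(result) {
      console.log(result.title, result.cfi);
    });
  });
}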
EPUBJS.reader.plugins.SearchController = function(Book) {
var reader = this;
var $searchBox = $("#searchBox"),
$searchResults = $("#searchResults"),
$searchView = $("#searchView"),
iframeDoc;
var searchShown = false;
var onShow = function() {
query();
searchShown = true;
$searchView.addClass("shown");
};
var onHide = function() {
searchShown = false;
$searchView.removeClass("shown");
};
var query = function() {
var q = $searchBox.val();
if(q == '') {
return;
}
$searchResults.empty();
$searchResults.append("<li><p>Searching...</p></li>");
EPUBJS.reader.search.request(q, function(data) {
var results = data.results;
$searchResults.empty();
if(iframeDoc) {
$(iframeDoc).find('body').unhighlight();
}
if(results.length == 0) {
$searchResults.append("<li><p>No Results Found</p></li>");
return;
}
iframeDoc = $("#viewer iframe")[0].contentDocument;
$(iframeDoc).find('body').highlight(q, { element: 'span' });
results.forEach(function(result) {
var $li = $("<li></li>");
var $item = $("<a href='"+result.href+"' data-cfi='"+result.cfi+"'><span>"+result.title+"</span><p>"+result.highlight+"</p></a>");
$item.on("click", function(e) {
var $this = $(this),
cfi = $this.data("cfi");
e.preventDefault();
Book.gotoCfi(cfi+"/1:0");
Book.on("renderer:chapterDisplayed", function() {
iframeDoc = $("#viewer iframe")[0].contentDocument;
$(iframeDoc).find('body').highlight(q, { element: 'span' });
})
});
$li.append($item);
$searchResults.append($li);
});
});
};
$searchBox.on("search", function(e) {
var q = $searchBox.val();
//-- SearchBox is empty or cleared
if(q == '') {
$searchResults.empty();
if(reader.SidebarController.getActivePanel() == "Search") {
reader.SidebarController.changePanelTo("Toc");
}
$(iframeDoc).find('body').unhighlight();
iframeDoc = false;
return;
}
reader.SidebarController.changePanelTo("Search");
e.preventDefault();
});
return {
"show" : onShow,
"hide" : onHide
};
};
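// Minimal plugin sketch (not registered; the name and body are
// hypothetical). Plugins attached to EPUBJS.reader.plugins are called with
// the reader as `this` and the Book instance as the argument; their return
// value is exposed as reader.<PluginName>.
function ExampleControllerPlugin(Book) {
  var reader = this;
  Book.on("renderer:locationChanged", function(location) {
    // react to page turns here, e.g. update reader UI
  });
  return {};
}
// To enable it: EPUBJS.reader.plugins.ExampleController = ExampleControllerPlugin;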
!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;b="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this,b.JSZip=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);var j=new Error("Cannot find module '"+g+"'");throw j.code="MODULE_NOT_FOUND",j}var k=c[g]={exports:{}};b[g][0].call(k.exports,function(a){var c=b[g][1][a];return e(c?c:a)},k,k.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){"use strict";var d=a("./utils"),e=a("./support"),f="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";c.encode=function(a){for(var b,c,e,g,h,i,j,k=[],l=0,m=a.length,n=m,o="string"!==d.getTypeOf(a);l<a.length;)n=m-l,o?(b=a[l++],c=l<m?a[l++]:0,e=l<m?a[l++]:0):(b=a.charCodeAt(l++),c=l<m?a.charCodeAt(l++):0,e=l<m?a.charCodeAt(l++):0),g=b>>2,h=(3&b)<<4|c>>4,i=n>1?(15&c)<<2|e>>6:64,j=n>2?63&e:64,k.push(f.charAt(g)+f.charAt(h)+f.charAt(i)+f.charAt(j));return k.join("")},c.decode=function(a){var b,c,d,g,h,i,j,k=0,l=0,m="data:";if(a.substr(0,m.length)===m)throw new Error("Invalid base64 input, it looks like a data url.");a=a.replace(/[^A-Za-z0-9\+\/\=]/g,"");var n=3*a.length/4;if(a.charAt(a.length-1)===f.charAt(64)&&n--,a.charAt(a.length-2)===f.charAt(64)&&n--,n%1!==0)throw new Error("Invalid base64 input, bad content length.");var o;for(o=e.uint8array?new Uint8Array(0|n):new Array(0|n);k<a.length;)g=f.indexOf(a.charAt(k++)),h=f.indexOf(a.charAt(k++)),i=f.indexOf(a.charAt(k++)),j=f.indexOf(a.charAt(k++)),b=g<<2|h>>4,c=(15&h)<<4|i>>2,d=(3&i)<<6|j,o[l++]=b,64!==i&&(o[l++]=c),64!==j&&(o[l++]=d);return o}},{"./support":30,"./utils":32}],2:[function(a,b,c){"use strict";function d(a,b,c,d,e){this.compressedSize=a,this.uncompressedSize=b,this.crc32=c,this.compression=d,this.compressedContent=e}var e=a("./external"),f=a("./stream/DataWorker"),g=a("./stream/DataLengthProbe"),h=a("./stream/Crc32Probe"),g=a("./stream/DataLengthProbe");d.prototype={getContentWorker:function(){var a=new f(e.Promise.resolve(this.compressedContent)).pipe(this.compression.uncompressWorker()).pipe(new g("data_length")),b=this;return a.on("end",function(){if(this.streamInfo.data_length!==b.uncompressedSize)throw new Error("Bug : uncompressed data size mismatch")}),a},getCompressedWorker:function(){return new f(e.Promise.resolve(this.compressedContent)).withStreamInfo("compressedSize",this.compressedSize).withStreamInfo("uncompressedSize",this.uncompressedSize).withStreamInfo("crc32",this.crc32).withStreamInfo("compression",this.compression)}},d.createWorkerFrom=function(a,b,c){return a.pipe(new h).pipe(new g("uncompressedSize")).pipe(b.compressWorker(c)).pipe(new g("compressedSize")).withStreamInfo("compression",b)},b.exports=d},{"./external":6,"./stream/Crc32Probe":25,"./stream/DataLengthProbe":26,"./stream/DataWorker":27}],3:[function(a,b,c){"use strict";var d=a("./stream/GenericWorker");c.STORE={magic:"\0\0",compressWorker:function(a){return new d("STORE compression")},uncompressWorker:function(){return new d("STORE decompression")}},c.DEFLATE=a("./flate")},{"./flate":7,"./stream/GenericWorker":28}],4:[function(a,b,c){"use strict";function d(){for(var a,b=[],c=0;c<256;c++){a=c;for(var d=0;d<8;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function e(a,b,c,d){var 
e=h,f=d+c;a^=-1;for(var g=d;g<f;g++)a=a>>>8^e[255&(a^b[g])];return a^-1}function f(a,b,c,d){var e=h,f=d+c;a^=-1;for(var g=d;g<f;g++)a=a>>>8^e[255&(a^b.charCodeAt(g))];return a^-1}var g=a("./utils"),h=d();b.exports=function(a,b){if("undefined"==typeof a||!a.length)return 0;var c="string"!==g.getTypeOf(a);return c?e(0|b,a,a.length,0):f(0|b,a,a.length,0)}},{"./utils":32}],5:[function(a,b,c){"use strict";c.base64=!1,c.binary=!1,c.dir=!1,c.createFolders=!0,c.date=null,c.compression=null,c.compressionOptions=null,c.comment=null,c.unixPermissions=null,c.dosPermissions=null},{}],6:[function(a,b,c){"use strict";var d=null;d="undefined"!=typeof Promise?Promise:a("lie"),b.exports={Promise:d}},{lie:58}],7:[function(a,b,c){"use strict";function d(a,b){h.call(this,"FlateWorker/"+a),this._pako=null,this._pakoAction=a,this._pakoOptions=b,this.meta={}}var e="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Uint32Array,f=a("pako"),g=a("./utils"),h=a("./stream/GenericWorker"),i=e?"uint8array":"array";c.magic="\b\0",g.inherits(d,h),d.prototype.processChunk=function(a){this.meta=a.meta,null===this._pako&&this._createPako(),this._pako.push(g.transformTo(i,a.data),!1)},d.prototype.flush=function(){h.prototype.flush.call(this),null===this._pako&&this._createPako(),this._pako.push([],!0)},d.prototype.cleanUp=function(){h.prototype.cleanUp.call(this),this._pako=null},d.prototype._createPako=function(){this._pako=new f[this._pakoAction]({raw:!0,level:this._pakoOptions.level||-1});var a=this;this._pako.onData=function(b){a.push({data:b,meta:a.meta})}},c.compressWorker=function(a){return new d("Deflate",a)},c.uncompressWorker=function(){return new d("Inflate",{})}},{"./stream/GenericWorker":28,"./utils":32,pako:59}],8:[function(a,b,c){"use strict";function d(a,b,c,d){f.call(this,"ZipFileWorker"),this.bytesWritten=0,this.zipComment=b,this.zipPlatform=c,this.encodeFileName=d,this.streamFiles=a,this.accumulate=!1,this.contentBuffer=[],this.dirRecords=[],this.currentSourceOffset=0,this.entriesCount=0,this.currentFile=null,this._sources=[]}var e=a("../utils"),f=a("../stream/GenericWorker"),g=a("../utf8"),h=a("../crc32"),i=a("../signature"),j=function(a,b){var c,d="";for(c=0;c<b;c++)d+=String.fromCharCode(255&a),a>>>=8;return d},k=function(a,b){var c=a;return a||(c=b?16893:33204),(65535&c)<<16},l=function(a,b){return 63&(a||0)},m=function(a,b,c,d,f,m){var n,o,p=a.file,q=a.compression,r=m!==g.utf8encode,s=e.transformTo("string",m(p.name)),t=e.transformTo("string",g.utf8encode(p.name)),u=p.comment,v=e.transformTo("string",m(u)),w=e.transformTo("string",g.utf8encode(u)),x=t.length!==p.name.length,y=w.length!==u.length,z="",A="",B="",C=p.dir,D=p.date,E={crc32:0,compressedSize:0,uncompressedSize:0};b&&!c||(E.crc32=a.crc32,E.compressedSize=a.compressedSize,E.uncompressedSize=a.uncompressedSize);var F=0;b&&(F|=8),r||!x&&!y||(F|=2048);var G=0,H=0;C&&(G|=16),"UNIX"===f?(H=798,G|=k(p.unixPermissions,C)):(H=20,G|=l(p.dosPermissions,C)),n=D.getUTCHours(),n<<=6,n|=D.getUTCMinutes(),n<<=5,n|=D.getUTCSeconds()/2,o=D.getUTCFullYear()-1980,o<<=4,o|=D.getUTCMonth()+1,o<<=5,o|=D.getUTCDate(),x&&(A=j(1,1)+j(h(s),4)+t,z+="up"+j(A.length,2)+A),y&&(B=j(1,1)+j(h(v),4)+w,z+="uc"+j(B.length,2)+B);var I="";I+="\n\0",I+=j(F,2),I+=q.magic,I+=j(n,2),I+=j(o,2),I+=j(E.crc32,4),I+=j(E.compressedSize,4),I+=j(E.uncompressedSize,4),I+=j(s.length,2),I+=j(z.length,2);var 
J=i.LOCAL_FILE_HEADER+I+s+z,K=i.CENTRAL_FILE_HEADER+j(H,2)+I+j(v.length,2)+"\0\0\0\0"+j(G,4)+j(d,4)+s+z+v;return{fileRecord:J,dirRecord:K}},n=function(a,b,c,d,f){var g="",h=e.transformTo("string",f(d));return g=i.CENTRAL_DIRECTORY_END+"\0\0\0\0"+j(a,2)+j(a,2)+j(b,4)+j(c,4)+j(h.length,2)+h},o=function(a){var b="";return b=i.DATA_DESCRIPTOR+j(a.crc32,4)+j(a.compressedSize,4)+j(a.uncompressedSize,4)};e.inherits(d,f),d.prototype.push=function(a){var b=a.meta.percent||0,c=this.entriesCount,d=this._sources.length;this.accumulate?this.contentBuffer.push(a):(this.bytesWritten+=a.data.length,f.prototype.push.call(this,{data:a.data,meta:{currentFile:this.currentFile,percent:c?(b+100*(c-d-1))/c:100}}))},d.prototype.openedSource=function(a){this.currentSourceOffset=this.bytesWritten,this.currentFile=a.file.name;var b=this.streamFiles&&!a.file.dir;if(b){var c=m(a,b,!1,this.currentSourceOffset,this.zipPlatform,this.encodeFileName);this.push({data:c.fileRecord,meta:{percent:0}})}else this.accumulate=!0},d.prototype.closedSource=function(a){this.accumulate=!1;var b=this.streamFiles&&!a.file.dir,c=m(a,b,!0,this.currentSourceOffset,this.zipPlatform,this.encodeFileName);if(this.dirRecords.push(c.dirRecord),b)this.push({data:o(a),meta:{percent:100}});else for(this.push({data:c.fileRecord,meta:{percent:0}});this.contentBuffer.length;)this.push(this.contentBuffer.shift());this.currentFile=null},d.prototype.flush=function(){for(var a=this.bytesWritten,b=0;b<this.dirRecords.length;b++)this.push({data:this.dirRecords[b],meta:{percent:100}});var c=this.bytesWritten-a,d=n(this.dirRecords.length,c,a,this.zipComment,this.encodeFileName);this.push({data:d,meta:{percent:100}})},d.prototype.prepareNextSource=function(){this.previous=this._sources.shift(),this.openedSource(this.previous.streamInfo),this.isPaused?this.previous.pause():this.previous.resume()},d.prototype.registerPrevious=function(a){this._sources.push(a);var b=this;return a.on("data",function(a){b.processChunk(a)}),a.on("end",function(){b.closedSource(b.previous.streamInfo),b._sources.length?b.prepareNextSource():b.end()}),a.on("error",function(a){b.error(a)}),this},d.prototype.resume=function(){return!!f.prototype.resume.call(this)&&(!this.previous&&this._sources.length?(this.prepareNextSource(),!0):this.previous||this._sources.length||this.generatedError?void 0:(this.end(),!0))},d.prototype.error=function(a){var b=this._sources;if(!f.prototype.error.call(this,a))return!1;for(var c=0;c<b.length;c++)try{b[c].error(a)}catch(a){}return!0},d.prototype.lock=function(){f.prototype.lock.call(this);for(var a=this._sources,b=0;b<a.length;b++)a[b].lock()},b.exports=d},{"../crc32":4,"../signature":23,"../stream/GenericWorker":28,"../utf8":31,"../utils":32}],9:[function(a,b,c){"use strict";var d=a("../compressions"),e=a("./ZipFileWorker"),f=function(a,b){var c=a||b,e=d[c];if(!e)throw new Error(c+" is not a valid compression method !");return e};c.generateWorker=function(a,b,c){var d=new e(b.streamFiles,c,b.platform,b.encodeFileName),g=0;try{a.forEach(function(a,c){g++;var e=f(c.options.compression,b.compression),h=c.options.compressionOptions||b.compressionOptions||{},i=c.dir,j=c.date;c._compressWorker(e,h).withStreamInfo("file",{name:a,dir:i,date:j,comment:c.comment||"",unixPermissions:c.unixPermissions,dosPermissions:c.dosPermissions}).pipe(d)}),d.entriesCount=g}catch(h){d.error(h)}return d}},{"../compressions":3,"./ZipFileWorker":8}],10:[function(a,b,c){"use strict";function d(){if(!(this instanceof d))return new d;if(arguments.length)throw new Error("The 
constructor with parameters has been removed in JSZip 3.0, please check the upgrade guide.");this.files={},this.comment=null,this.root="",this.clone=function(){var a=new d;for(var b in this)"function"!=typeof this[b]&&(a[b]=this[b]);return a}}d.prototype=a("./object"),d.prototype.loadAsync=a("./load"),d.support=a("./support"),d.defaults=a("./defaults"),d.version="3.1.5",d.loadAsync=function(a,b){return(new d).loadAsync(a,b)},d.external=a("./external"),b.exports=d},{"./defaults":5,"./external":6,"./load":11,"./object":15,"./support":30}],11:[function(a,b,c){"use strict";function d(a){return new f.Promise(function(b,c){var d=a.decompressed.getContentWorker().pipe(new i);d.on("error",function(a){c(a)}).on("end",function(){d.streamInfo.crc32!==a.decompressed.crc32?c(new Error("Corrupted zip : CRC32 mismatch")):b()}).resume()})}var e=a("./utils"),f=a("./external"),g=a("./utf8"),e=a("./utils"),h=a("./zipEntries"),i=a("./stream/Crc32Probe"),j=a("./nodejsUtils");b.exports=function(a,b){var c=this;return b=e.extend(b||{},{base64:!1,checkCRC32:!1,optimizedBinaryString:!1,createFolders:!1,decodeFileName:g.utf8decode}),j.isNode&&j.isStream(a)?f.Promise.reject(new Error("JSZip can't accept a stream when loading a zip file.")):e.prepareContent("the loaded zip file",a,!0,b.optimizedBinaryString,b.base64).then(function(a){var c=new h(b);return c.load(a),c}).then(function(a){var c=[f.Promise.resolve(a)],e=a.files;if(b.checkCRC32)for(var g=0;g<e.length;g++)c.push(d(e[g]));return f.Promise.all(c)}).then(function(a){for(var d=a.shift(),e=d.files,f=0;f<e.length;f++){var g=e[f];c.file(g.fileNameStr,g.decompressed,{binary:!0,optimizedBinaryString:!0,date:g.date,dir:g.dir,comment:g.fileCommentStr.length?g.fileCommentStr:null,unixPermissions:g.unixPermissions,dosPermissions:g.dosPermissions,createFolders:b.createFolders})}return d.zipComment.length&&(c.comment=d.zipComment),c})}},{"./external":6,"./nodejsUtils":14,"./stream/Crc32Probe":25,"./utf8":31,"./utils":32,"./zipEntries":33}],12:[function(a,b,c){"use strict";function d(a,b){f.call(this,"Nodejs stream input adapter for "+a),this._upstreamEnded=!1,this._bindStream(b)}var e=a("../utils"),f=a("../stream/GenericWorker");e.inherits(d,f),d.prototype._bindStream=function(a){var b=this;this._stream=a,a.pause(),a.on("data",function(a){b.push({data:a,meta:{percent:0}})}).on("error",function(a){b.isPaused?this.generatedError=a:b.error(a)}).on("end",function(){b.isPaused?b._upstreamEnded=!0:b.end()})},d.prototype.pause=function(){return!!f.prototype.pause.call(this)&&(this._stream.pause(),!0)},d.prototype.resume=function(){return!!f.prototype.resume.call(this)&&(this._upstreamEnded?this.end():this._stream.resume(),!0)},b.exports=d},{"../stream/GenericWorker":28,"../utils":32}],13:[function(a,b,c){"use strict";function d(a,b,c){e.call(this,b),this._helper=a;var d=this;a.on("data",function(a,b){d.push(a)||d._helper.pause(),c&&c(b)}).on("error",function(a){d.emit("error",a)}).on("end",function(){d.push(null)})}var e=a("readable-stream").Readable,f=a("../utils");f.inherits(d,e),d.prototype._read=function(){this._helper.resume()},b.exports=d},{"../utils":32,"readable-stream":16}],14:[function(a,b,c){"use strict";b.exports={isNode:"undefined"!=typeof Buffer,newBufferFrom:function(a,b){return new Buffer(a,b)},allocBuffer:function(a){return Buffer.alloc?Buffer.alloc(a):new Buffer(a)},isBuffer:function(a){return Buffer.isBuffer(a)},isStream:function(a){return a&&"function"==typeof a.on&&"function"==typeof a.pause&&"function"==typeof a.resume}}},{}],15:[function(a,b,c){"use 
strict";function d(a){return"[object RegExp]"===Object.prototype.toString.call(a)}var e=a("./utf8"),f=a("./utils"),g=a("./stream/GenericWorker"),h=a("./stream/StreamHelper"),i=a("./defaults"),j=a("./compressedObject"),k=a("./zipObject"),l=a("./generate"),m=a("./nodejsUtils"),n=a("./nodejs/NodejsStreamInputAdapter"),o=function(a,b,c){var d,e=f.getTypeOf(b),h=f.extend(c||{},i);h.date=h.date||new Date,null!==h.compression&&(h.compression=h.compression.toUpperCase()),"string"==typeof h.unixPermissions&&(h.unixPermissions=parseInt(h.unixPermissions,8)),h.unixPermissions&&16384&h.unixPermissions&&(h.dir=!0),h.dosPermissions&&16&h.dosPermissions&&(h.dir=!0),h.dir&&(a=q(a)),h.createFolders&&(d=p(a))&&r.call(this,d,!0);var l="string"===e&&h.binary===!1&&h.base64===!1;c&&"undefined"!=typeof c.binary||(h.binary=!l);var o=b instanceof j&&0===b.uncompressedSize;(o||h.dir||!b||0===b.length)&&(h.base64=!1,h.binary=!0,b="",h.compression="STORE",e="string");var s=null;s=b instanceof j||b instanceof g?b:m.isNode&&m.isStream(b)?new n(a,b):f.prepareContent(a,b,h.binary,h.optimizedBinaryString,h.base64);var t=new k(a,s,h);this.files[a]=t},p=function(a){"/"===a.slice(-1)&&(a=a.substring(0,a.length-1));var b=a.lastIndexOf("/");return b>0?a.substring(0,b):""},q=function(a){return"/"!==a.slice(-1)&&(a+="/"),a},r=function(a,b){return b="undefined"!=typeof b?b:i.createFolders,a=q(a),this.files[a]||o.call(this,a,null,{dir:!0,createFolders:b}),this.files[a]},s={load:function(){throw new Error("This method has been removed in JSZip 3.0, please check the upgrade guide.")},forEach:function(a){var b,c,d;for(b in this.files)this.files.hasOwnProperty(b)&&(d=this.files[b],c=b.slice(this.root.length,b.length),c&&b.slice(0,this.root.length)===this.root&&a(c,d))},filter:function(a){var b=[];return this.forEach(function(c,d){a(c,d)&&b.push(d)}),b},file:function(a,b,c){if(1===arguments.length){if(d(a)){var e=a;return this.filter(function(a,b){return!b.dir&&e.test(a)})}var f=this.files[this.root+a];return f&&!f.dir?f:null}return a=this.root+a,o.call(this,a,b,c),this},folder:function(a){if(!a)return this;if(d(a))return this.filter(function(b,c){return c.dir&&a.test(b)});var b=this.root+a,c=r.call(this,b),e=this.clone();return e.root=c.name,e},remove:function(a){a=this.root+a;var b=this.files[a];if(b||("/"!==a.slice(-1)&&(a+="/"),b=this.files[a]),b&&!b.dir)delete this.files[a];else for(var c=this.filter(function(b,c){return c.name.slice(0,a.length)===a}),d=0;d<c.length;d++)delete this.files[c[d].name];return this},generate:function(a){throw new Error("This method has been removed in JSZip 3.0, please check the upgrade guide.")},generateInternalStream:function(a){var b,c={};try{if(c=f.extend(a||{},{streamFiles:!1,compression:"STORE",compressionOptions:null,type:"",platform:"DOS",comment:null,mimeType:"application/zip",encodeFileName:e.utf8encode}),c.type=c.type.toLowerCase(),c.compression=c.compression.toUpperCase(),"binarystring"===c.type&&(c.type="string"),!c.type)throw new Error("No output type specified.");f.checkSupport(c.type),"darwin"!==c.platform&&"freebsd"!==c.platform&&"linux"!==c.platform&&"sunos"!==c.platform||(c.platform="UNIX"),"win32"===c.platform&&(c.platform="DOS");var d=c.comment||this.comment||"";b=l.generateWorker(this,c,d)}catch(i){b=new g("error"),b.error(i)}return new h(b,c.type||"string",c.mimeType)},generateAsync:function(a,b){return this.generateInternalStream(a).accumulate(b)},generateNodeStream:function(a,b){return 
a=a||{},a.type||(a.type="nodebuffer"),this.generateInternalStream(a).toNodejsStream(b)}};b.exports=s},{"./compressedObject":2,"./defaults":5,"./generate":9,"./nodejs/NodejsStreamInputAdapter":12,"./nodejsUtils":14,"./stream/GenericWorker":28,"./stream/StreamHelper":29,"./utf8":31,"./utils":32,"./zipObject":35}],16:[function(a,b,c){b.exports=a("stream")},{stream:void 0}],17:[function(a,b,c){"use strict";function d(a){e.call(this,a);for(var b=0;b<this.data.length;b++)a[b]=255&a[b]}var e=a("./DataReader"),f=a("../utils");f.inherits(d,e),d.prototype.byteAt=function(a){return this.data[this.zero+a]},d.prototype.lastIndexOfSignature=function(a){for(var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.length-4;f>=0;--f)if(this.data[f]===b&&this.data[f+1]===c&&this.data[f+2]===d&&this.data[f+3]===e)return f-this.zero;return-1},d.prototype.readAndCheckSignature=function(a){var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.readData(4);return b===f[0]&&c===f[1]&&d===f[2]&&e===f[3]},d.prototype.readData=function(a){if(this.checkOffset(a),0===a)return[];var b=this.data.slice(this.zero+this.index,this.zero+this.index+a);return this.index+=a,b},b.exports=d},{"../utils":32,"./DataReader":18}],18:[function(a,b,c){"use strict";function d(a){this.data=a,this.length=a.length,this.index=0,this.zero=0}var e=a("../utils");d.prototype={checkOffset:function(a){this.checkIndex(this.index+a)},checkIndex:function(a){if(this.length<this.zero+a||a<0)throw new Error("End of data reached (data length = "+this.length+", asked index = "+a+"). Corrupted zip ?")},setIndex:function(a){this.checkIndex(a),this.index=a},skip:function(a){this.setIndex(this.index+a)},byteAt:function(a){},readInt:function(a){var b,c=0;for(this.checkOffset(a),b=this.index+a-1;b>=this.index;b--)c=(c<<8)+this.byteAt(b);return this.index+=a,c},readString:function(a){return e.transformTo("string",this.readData(a))},readData:function(a){},lastIndexOfSignature:function(a){},readAndCheckSignature:function(a){},readDate:function(){var a=this.readInt(4);return new Date(Date.UTC((a>>25&127)+1980,(a>>21&15)-1,a>>16&31,a>>11&31,a>>5&63,(31&a)<<1))}},b.exports=d},{"../utils":32}],19:[function(a,b,c){"use strict";function d(a){e.call(this,a)}var e=a("./Uint8ArrayReader"),f=a("../utils");f.inherits(d,e),d.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.zero+this.index,this.zero+this.index+a);return this.index+=a,b},b.exports=d},{"../utils":32,"./Uint8ArrayReader":21}],20:[function(a,b,c){"use strict";function d(a){e.call(this,a)}var e=a("./DataReader"),f=a("../utils");f.inherits(d,e),d.prototype.byteAt=function(a){return this.data.charCodeAt(this.zero+a)},d.prototype.lastIndexOfSignature=function(a){return this.data.lastIndexOf(a)-this.zero},d.prototype.readAndCheckSignature=function(a){var b=this.readData(4);return a===b},d.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.zero+this.index,this.zero+this.index+a);return this.index+=a,b},b.exports=d},{"../utils":32,"./DataReader":18}],21:[function(a,b,c){"use strict";function d(a){e.call(this,a)}var e=a("./ArrayReader"),f=a("../utils");f.inherits(d,e),d.prototype.readData=function(a){if(this.checkOffset(a),0===a)return new Uint8Array(0);var b=this.data.subarray(this.zero+this.index,this.zero+this.index+a);return this.index+=a,b},b.exports=d},{"../utils":32,"./ArrayReader":17}],22:[function(a,b,c){"use strict";var 
d=a("../utils"),e=a("../support"),f=a("./ArrayReader"),g=a("./StringReader"),h=a("./NodeBufferReader"),i=a("./Uint8ArrayReader");b.exports=function(a){var b=d.getTypeOf(a);return d.checkSupport(b),"string"!==b||e.uint8array?"nodebuffer"===b?new h(a):e.uint8array?new i(d.transformTo("uint8array",a)):new f(d.transformTo("array",a)):new g(a)}},{"../support":30,"../utils":32,"./ArrayReader":17,"./NodeBufferReader":19,"./StringReader":20,"./Uint8ArrayReader":21}],23:[function(a,b,c){"use strict";c.LOCAL_FILE_HEADER="PK",c.CENTRAL_FILE_HEADER="PK",c.CENTRAL_DIRECTORY_END="PK",c.ZIP64_CENTRAL_DIRECTORY_LOCATOR="PK",c.ZIP64_CENTRAL_DIRECTORY_END="PK",c.DATA_DESCRIPTOR="PK\b"},{}],24:[function(a,b,c){"use strict";function d(a){e.call(this,"ConvertWorker to "+a),this.destType=a}var e=a("./GenericWorker"),f=a("../utils");f.inherits(d,e),d.prototype.processChunk=function(a){this.push({data:f.transformTo(this.destType,a.data),meta:a.meta})},b.exports=d},{"../utils":32,"./GenericWorker":28}],25:[function(a,b,c){"use strict";function d(){e.call(this,"Crc32Probe"),this.withStreamInfo("crc32",0)}var e=a("./GenericWorker"),f=a("../crc32"),g=a("../utils");g.inherits(d,e),d.prototype.processChunk=function(a){this.streamInfo.crc32=f(a.data,this.streamInfo.crc32||0),this.push(a)},b.exports=d},{"../crc32":4,"../utils":32,"./GenericWorker":28}],26:[function(a,b,c){"use strict";function d(a){f.call(this,"DataLengthProbe for "+a),this.propName=a,this.withStreamInfo(a,0)}var e=a("../utils"),f=a("./GenericWorker");e.inherits(d,f),d.prototype.processChunk=function(a){if(a){var b=this.streamInfo[this.propName]||0;this.streamInfo[this.propName]=b+a.data.length}f.prototype.processChunk.call(this,a)},b.exports=d},{"../utils":32,"./GenericWorker":28}],27:[function(a,b,c){"use strict";function d(a){f.call(this,"DataWorker");var b=this;this.dataIsReady=!1,this.index=0,this.max=0,this.data=null,this.type="",this._tickScheduled=!1,a.then(function(a){b.dataIsReady=!0,b.data=a,b.max=a&&a.length||0,b.type=e.getTypeOf(a),b.isPaused||b._tickAndRepeat()},function(a){b.error(a)})}var e=a("../utils"),f=a("./GenericWorker"),g=16384;e.inherits(d,f),d.prototype.cleanUp=function(){f.prototype.cleanUp.call(this),this.data=null},d.prototype.resume=function(){return!!f.prototype.resume.call(this)&&(!this._tickScheduled&&this.dataIsReady&&(this._tickScheduled=!0,e.delay(this._tickAndRepeat,[],this)),!0)},d.prototype._tickAndRepeat=function(){this._tickScheduled=!1,this.isPaused||this.isFinished||(this._tick(),this.isFinished||(e.delay(this._tickAndRepeat,[],this),this._tickScheduled=!0))},d.prototype._tick=function(){if(this.isPaused||this.isFinished)return!1;var a=g,b=null,c=Math.min(this.max,this.index+a);if(this.index>=this.max)return this.end();switch(this.type){case"string":b=this.data.substring(this.index,c);break;case"uint8array":b=this.data.subarray(this.index,c);break;case"array":case"nodebuffer":b=this.data.slice(this.index,c)}return this.index=c,this.push({data:b,meta:{percent:this.max?this.index/this.max*100:0}})},b.exports=d},{"../utils":32,"./GenericWorker":28}],28:[function(a,b,c){"use strict";function 
d(a){this.name=a||"default",this.streamInfo={},this.generatedError=null,this.extraStreamInfo={},this.isPaused=!0,this.isFinished=!1,this.isLocked=!1,this._listeners={data:[],end:[],error:[]},this.previous=null}d.prototype={push:function(a){this.emit("data",a)},end:function(){if(this.isFinished)return!1;this.flush();try{this.emit("end"),this.cleanUp(),this.isFinished=!0}catch(a){this.emit("error",a)}return!0},error:function(a){return!this.isFinished&&(this.isPaused?this.generatedError=a:(this.isFinished=!0,this.emit("error",a),this.previous&&this.previous.error(a),this.cleanUp()),!0)},on:function(a,b){return this._listeners[a].push(b),this},cleanUp:function(){this.streamInfo=this.generatedError=this.extraStreamInfo=null,this._listeners=[]},emit:function(a,b){if(this._listeners[a])for(var c=0;c<this._listeners[a].length;c++)this._listeners[a][c].call(this,b)},pipe:function(a){return a.registerPrevious(this)},registerPrevious:function(a){if(this.isLocked)throw new Error("The stream '"+this+"' has already been used.");this.streamInfo=a.streamInfo,this.mergeStreamInfo(),this.previous=a;var b=this;return a.on("data",function(a){b.processChunk(a)}),a.on("end",function(){b.end()}),a.on("error",function(a){b.error(a)}),this},pause:function(){return!this.isPaused&&!this.isFinished&&(this.isPaused=!0,this.previous&&this.previous.pause(),!0)},resume:function(){if(!this.isPaused||this.isFinished)return!1;this.isPaused=!1;var a=!1;return this.generatedError&&(this.error(this.generatedError),a=!0),this.previous&&this.previous.resume(),!a},flush:function(){},processChunk:function(a){this.push(a)},withStreamInfo:function(a,b){return this.extraStreamInfo[a]=b,this.mergeStreamInfo(),this},mergeStreamInfo:function(){for(var a in this.extraStreamInfo)this.extraStreamInfo.hasOwnProperty(a)&&(this.streamInfo[a]=this.extraStreamInfo[a])},lock:function(){if(this.isLocked)throw new Error("The stream '"+this+"' has already been used.");this.isLocked=!0,this.previous&&this.previous.lock()},toString:function(){var a="Worker "+this.name;return this.previous?this.previous+" -> "+a:a}},b.exports=d},{}],29:[function(a,b,c){"use strict";function d(a,b,c){switch(a){case"blob":return h.newBlob(h.transformTo("arraybuffer",b),c);case"base64":return k.encode(b);default:return h.transformTo(a,b)}}function e(a,b){var c,d=0,e=null,f=0;for(c=0;c<b.length;c++)f+=b[c].length;switch(a){case"string":return b.join("");case"array":return Array.prototype.concat.apply([],b);case"uint8array":for(e=new Uint8Array(f),c=0;c<b.length;c++)e.set(b[c],d),d+=b[c].length;return e;case"nodebuffer":return Buffer.concat(b);default:throw new Error("concat : unsupported type '"+a+"'")}}function f(a,b){return new m.Promise(function(c,f){var g=[],h=a._internalType,i=a._outputType,j=a._mimeType;a.on("data",function(a,c){g.push(a),b&&b(c)}).on("error",function(a){g=[],f(a)}).on("end",function(){try{var a=d(i,e(h,g),j);c(a)}catch(b){f(b)}g=[]}).resume()})}function g(a,b,c){var d=b;switch(b){case"blob":case"arraybuffer":d="uint8array";break;case"base64":d="string"}try{this._internalType=d,this._outputType=b,this._mimeType=c,h.checkSupport(d),this._worker=a.pipe(new i(d)),a.lock()}catch(e){this._worker=new j("error"),this._worker.error(e)}}var h=a("../utils"),i=a("./ConvertWorker"),j=a("./GenericWorker"),k=a("../base64"),l=a("../support"),m=a("../external"),n=null;if(l.nodestream)try{n=a("../nodejs/NodejsStreamOutputAdapter")}catch(o){}g.prototype={accumulate:function(a){return f(this,a)},on:function(a,b){var 
c=this;return"data"===a?this._worker.on(a,function(a){b.call(c,a.data,a.meta)}):this._worker.on(a,function(){h.delay(b,arguments,c)}),this},resume:function(){return h.delay(this._worker.resume,[],this._worker),this},pause:function(){return this._worker.pause(),this},toNodejsStream:function(a){if(h.checkSupport("nodestream"),"nodebuffer"!==this._outputType)throw new Error(this._outputType+" is not supported by this method");return new n(this,{objectMode:"nodebuffer"!==this._outputType},a)}},b.exports=g},{"../base64":1,"../external":6,"../nodejs/NodejsStreamOutputAdapter":13,"../support":30,"../utils":32,"./ConvertWorker":24,"./GenericWorker":28}],30:[function(a,b,c){"use strict";if(c.base64=!0,c.array=!0,c.string=!0,c.arraybuffer="undefined"!=typeof ArrayBuffer&&"undefined"!=typeof Uint8Array,c.nodebuffer="undefined"!=typeof Buffer,c.uint8array="undefined"!=typeof Uint8Array,"undefined"==typeof ArrayBuffer)c.blob=!1;else{var d=new ArrayBuffer(0);try{c.blob=0===new Blob([d],{type:"application/zip"}).size}catch(e){try{var f=self.BlobBuilder||self.WebKitBlobBuilder||self.MozBlobBuilder||self.MSBlobBuilder,g=new f;g.append(d),c.blob=0===g.getBlob("application/zip").size}catch(e){c.blob=!1}}}try{c.nodestream=!!a("readable-stream").Readable}catch(e){c.nodestream=!1}},{"readable-stream":16}],31:[function(a,b,c){"use strict";function d(){i.call(this,"utf-8 decode"),this.leftOver=null}function e(){i.call(this,"utf-8 encode")}for(var f=a("./utils"),g=a("./support"),h=a("./nodejsUtils"),i=a("./stream/GenericWorker"),j=new Array(256),k=0;k<256;k++)j[k]=k>=252?6:k>=248?5:k>=240?4:k>=224?3:k>=192?2:1;j[254]=j[254]=1;var l=function(a){var b,c,d,e,f,h=a.length,i=0;for(e=0;e<h;e++)c=a.charCodeAt(e),55296===(64512&c)&&e+1<h&&(d=a.charCodeAt(e+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),e++)),i+=c<128?1:c<2048?2:c<65536?3:4;for(b=g.uint8array?new Uint8Array(i):new Array(i),f=0,e=0;f<i;e++)c=a.charCodeAt(e),55296===(64512&c)&&e+1<h&&(d=a.charCodeAt(e+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),e++)),c<128?b[f++]=c:c<2048?(b[f++]=192|c>>>6,b[f++]=128|63&c):c<65536?(b[f++]=224|c>>>12,b[f++]=128|c>>>6&63,b[f++]=128|63&c):(b[f++]=240|c>>>18,b[f++]=128|c>>>12&63,b[f++]=128|c>>>6&63,b[f++]=128|63&c);return b},m=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return c<0?b:0===c?b:c+j[a[c]]>b?c:b},n=function(a){var b,c,d,e,g=a.length,h=new Array(2*g);for(c=0,b=0;b<g;)if(d=a[b++],d<128)h[c++]=d;else if(e=j[d],e>4)h[c++]=65533,b+=e-1;else{for(d&=2===e?31:3===e?15:7;e>1&&b<g;)d=d<<6|63&a[b++],e--;e>1?h[c++]=65533:d<65536?h[c++]=d:(d-=65536,h[c++]=55296|d>>10&1023,h[c++]=56320|1023&d)}return h.length!==c&&(h.subarray?h=h.subarray(0,c):h.length=c),f.applyFromCharCode(h)};c.utf8encode=function(a){return g.nodebuffer?h.newBufferFrom(a,"utf-8"):l(a)},c.utf8decode=function(a){return g.nodebuffer?f.transformTo("nodebuffer",a).toString("utf-8"):(a=f.transformTo(g.uint8array?"uint8array":"array",a),n(a))},f.inherits(d,i),d.prototype.processChunk=function(a){var b=f.transformTo(g.uint8array?"uint8array":"array",a.data);if(this.leftOver&&this.leftOver.length){if(g.uint8array){var d=b;b=new Uint8Array(d.length+this.leftOver.length),b.set(this.leftOver,0),b.set(d,this.leftOver.length)}else b=this.leftOver.concat(b);this.leftOver=null}var 
e=m(b),h=b;e!==b.length&&(g.uint8array?(h=b.subarray(0,e),this.leftOver=b.subarray(e,b.length)):(h=b.slice(0,e),this.leftOver=b.slice(e,b.length))),this.push({data:c.utf8decode(h),meta:a.meta})},d.prototype.flush=function(){this.leftOver&&this.leftOver.length&&(this.push({data:c.utf8decode(this.leftOver),meta:{}}),this.leftOver=null)},c.Utf8DecodeWorker=d,f.inherits(e,i),e.prototype.processChunk=function(a){this.push({data:c.utf8encode(a.data),meta:a.meta})},c.Utf8EncodeWorker=e},{"./nodejsUtils":14,"./stream/GenericWorker":28,"./support":30,"./utils":32}],32:[function(a,b,c){"use strict";function d(a){var b=null;return b=i.uint8array?new Uint8Array(a.length):new Array(a.length),f(a,b)}function e(a){return a}function f(a,b){for(var c=0;c<a.length;++c)b[c]=255&a.charCodeAt(c);return b}function g(a){var b=65536,d=c.getTypeOf(a),e=!0;if("uint8array"===d?e=n.applyCanBeUsed.uint8array:"nodebuffer"===d&&(e=n.applyCanBeUsed.nodebuffer),e)for(;b>1;)try{return n.stringifyByChunk(a,d,b)}catch(f){b=Math.floor(b/2)}return n.stringifyByChar(a)}function h(a,b){for(var c=0;c<a.length;c++)b[c]=a[c];
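/* The table o built below is utils.transformTo's conversion matrix:
   o[sourceType][targetType] converts between "string", "array",
   "arraybuffer", "uint8array" and "nodebuffer", with e as the identity and
   f/g/h as the byte-copy and chunked stringification helpers above. */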
return b}var i=a("./support"),j=a("./base64"),k=a("./nodejsUtils"),l=a("core-js/library/fn/set-immediate"),m=a("./external");c.newBlob=function(a,b){c.checkSupport("blob");try{return new Blob([a],{type:b})}catch(d){try{var e=self.BlobBuilder||self.WebKitBlobBuilder||self.MozBlobBuilder||self.MSBlobBuilder,f=new e;return f.append(a),f.getBlob(b)}catch(d){throw new Error("Bug : can't construct the Blob.")}}};var n={stringifyByChunk:function(a,b,c){var d=[],e=0,f=a.length;if(f<=c)return String.fromCharCode.apply(null,a);for(;e<f;)"array"===b||"nodebuffer"===b?d.push(String.fromCharCode.apply(null,a.slice(e,Math.min(e+c,f)))):d.push(String.fromCharCode.apply(null,a.subarray(e,Math.min(e+c,f)))),e+=c;return d.join("")},stringifyByChar:function(a){for(var b="",c=0;c<a.length;c++)b+=String.fromCharCode(a[c]);return b},applyCanBeUsed:{uint8array:function(){try{return i.uint8array&&1===String.fromCharCode.apply(null,new Uint8Array(1)).length}catch(a){return!1}}(),nodebuffer:function(){try{return i.nodebuffer&&1===String.fromCharCode.apply(null,k.allocBuffer(1)).length}catch(a){return!1}}()}};c.applyFromCharCode=g;var o={};o.string={string:e,array:function(a){return f(a,new Array(a.length))},arraybuffer:function(a){return o.string.uint8array(a).buffer},uint8array:function(a){return f(a,new Uint8Array(a.length))},nodebuffer:function(a){return f(a,k.allocBuffer(a.length))}},o.array={string:g,array:e,arraybuffer:function(a){return new Uint8Array(a).buffer},uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return k.newBufferFrom(a)}},o.arraybuffer={string:function(a){return g(new Uint8Array(a))},array:function(a){return h(new Uint8Array(a),new Array(a.byteLength))},arraybuffer:e,uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return k.newBufferFrom(new Uint8Array(a))}},o.uint8array={string:g,array:function(a){return h(a,new Array(a.length))},arraybuffer:function(a){return a.buffer},uint8array:e,nodebuffer:function(a){return k.newBufferFrom(a)}},o.nodebuffer={string:g,array:function(a){return h(a,new Array(a.length))},arraybuffer:function(a){return o.nodebuffer.uint8array(a).buffer},uint8array:function(a){return h(a,new Uint8Array(a.length))},nodebuffer:e},c.transformTo=function(a,b){if(b||(b=""),!a)return b;c.checkSupport(a);var d=c.getTypeOf(b),e=o[d][a](b);return e},c.getTypeOf=function(a){return"string"==typeof a?"string":"[object Array]"===Object.prototype.toString.call(a)?"array":i.nodebuffer&&k.isBuffer(a)?"nodebuffer":i.uint8array&&a instanceof Uint8Array?"uint8array":i.arraybuffer&&a instanceof ArrayBuffer?"arraybuffer":void 0},c.checkSupport=function(a){var b=i[a.toLowerCase()];if(!b)throw new Error(a+" is not supported by this platform")},c.MAX_VALUE_16BITS=65535,c.MAX_VALUE_32BITS=-1,c.pretty=function(a){var b,c,d="";for(c=0;c<(a||"").length;c++)b=a.charCodeAt(c),d+="\\x"+(b<16?"0":"")+b.toString(16).toUpperCase();return d},c.delay=function(a,b,c){l(function(){a.apply(c||null,b||[])})},c.inherits=function(a,b){var c=function(){};c.prototype=b.prototype,a.prototype=new c},c.extend=function(){var a,b,c={};for(a=0;a<arguments.length;a++)for(b in arguments[a])arguments[a].hasOwnProperty(b)&&"undefined"==typeof c[b]&&(c[b]=arguments[a][b]);return c},c.prepareContent=function(a,b,e,f,g){var h=m.Promise.resolve(b).then(function(a){var b=i.blob&&(a instanceof Blob||["[object File]","[object Blob]"].indexOf(Object.prototype.toString.call(a))!==-1);return b&&"undefined"!=typeof FileReader?new m.Promise(function(b,c){var d=new 
FileReader;d.onload=function(a){b(a.target.result)},d.onerror=function(a){c(a.target.error)},d.readAsArrayBuffer(a)}):a});return h.then(function(b){var h=c.getTypeOf(b);return h?("arraybuffer"===h?b=c.transformTo("uint8array",b):"string"===h&&(g?b=j.decode(b):e&&f!==!0&&(b=d(b))),b):m.Promise.reject(new Error("Can't read the data of '"+a+"'. Is it in a supported JavaScript type (String, Blob, ArrayBuffer, etc) ?"))})}},{"./base64":1,"./external":6,"./nodejsUtils":14,"./support":30,"core-js/library/fn/set-immediate":36}],33:[function(a,b,c){"use strict";function d(a){this.files=[],this.loadOptions=a}var e=a("./reader/readerFor"),f=a("./utils"),g=a("./signature"),h=a("./zipEntry"),i=(a("./utf8"),a("./support"));d.prototype={checkSignature:function(a){if(!this.reader.readAndCheckSignature(a)){this.reader.index-=4;var b=this.reader.readString(4);throw new Error("Corrupted zip or bug: unexpected signature ("+f.pretty(b)+", expected "+f.pretty(a)+")")}},isSignature:function(a,b){var c=this.reader.index;this.reader.setIndex(a);var d=this.reader.readString(4),e=d===b;return this.reader.setIndex(c),e},readBlockEndOfCentral:function(){this.diskNumber=this.reader.readInt(2),this.diskWithCentralDirStart=this.reader.readInt(2),this.centralDirRecordsOnThisDisk=this.reader.readInt(2),this.centralDirRecords=this.reader.readInt(2),this.centralDirSize=this.reader.readInt(4),this.centralDirOffset=this.reader.readInt(4),this.zipCommentLength=this.reader.readInt(2);var a=this.reader.readData(this.zipCommentLength),b=i.uint8array?"uint8array":"array",c=f.transformTo(b,a);this.zipComment=this.loadOptions.decodeFileName(c)},readBlockZip64EndOfCentral:function(){this.zip64EndOfCentralSize=this.reader.readInt(8),this.reader.skip(4),this.diskNumber=this.reader.readInt(4),this.diskWithCentralDirStart=this.reader.readInt(4),this.centralDirRecordsOnThisDisk=this.reader.readInt(8),this.centralDirRecords=this.reader.readInt(8),this.centralDirSize=this.reader.readInt(8),this.centralDirOffset=this.reader.readInt(8),this.zip64ExtensibleData={};for(var a,b,c,d=this.zip64EndOfCentralSize-44,e=0;e<d;)a=this.reader.readInt(2),b=this.reader.readInt(4),c=this.reader.readData(b),this.zip64ExtensibleData[a]={id:a,length:b,value:c}},readBlockZip64EndOfCentralLocator:function(){if(this.diskWithZip64CentralDirStart=this.reader.readInt(4),this.relativeOffsetEndOfZip64CentralDir=this.reader.readInt(8),this.disksCount=this.reader.readInt(4),this.disksCount>1)throw new Error("Multi-volumes zip are not supported")},readLocalFiles:function(){var a,b;for(a=0;a<this.files.length;a++)b=this.files[a],this.reader.setIndex(b.localHeaderOffset),this.checkSignature(g.LOCAL_FILE_HEADER),b.readLocalPart(this.reader),b.handleUTF8(),b.processAttributes()},readCentralDir:function(){var a;for(this.reader.setIndex(this.centralDirOffset);this.reader.readAndCheckSignature(g.CENTRAL_FILE_HEADER);)a=new h({zip64:this.zip64},this.loadOptions),a.readCentralPart(this.reader),this.files.push(a);if(this.centralDirRecords!==this.files.length&&0!==this.centralDirRecords&&0===this.files.length)throw new Error("Corrupted zip or bug: expected "+this.centralDirRecords+" records in central dir, got "+this.files.length)},readEndOfCentral:function(){var a=this.reader.lastIndexOfSignature(g.CENTRAL_DIRECTORY_END);if(a<0){var b=!this.isSignature(0,g.LOCAL_FILE_HEADER);throw b?new Error("Can't find end of central directory : is this a zip file ? 
If it is, see https://stuk.github.io/jszip/documentation/howto/read_zip.html"):new Error("Corrupted zip: can't find end of central directory")}this.reader.setIndex(a);var c=a;if(this.checkSignature(g.CENTRAL_DIRECTORY_END),this.readBlockEndOfCentral(),this.diskNumber===f.MAX_VALUE_16BITS||this.diskWithCentralDirStart===f.MAX_VALUE_16BITS||this.centralDirRecordsOnThisDisk===f.MAX_VALUE_16BITS||this.centralDirRecords===f.MAX_VALUE_16BITS||this.centralDirSize===f.MAX_VALUE_32BITS||this.centralDirOffset===f.MAX_VALUE_32BITS){if(this.zip64=!0,a=this.reader.lastIndexOfSignature(g.ZIP64_CENTRAL_DIRECTORY_LOCATOR),a<0)throw new Error("Corrupted zip: can't find the ZIP64 end of central directory locator");if(this.reader.setIndex(a),this.checkSignature(g.ZIP64_CENTRAL_DIRECTORY_LOCATOR),this.readBlockZip64EndOfCentralLocator(),!this.isSignature(this.relativeOffsetEndOfZip64CentralDir,g.ZIP64_CENTRAL_DIRECTORY_END)&&(this.relativeOffsetEndOfZip64CentralDir=this.reader.lastIndexOfSignature(g.ZIP64_CENTRAL_DIRECTORY_END),this.relativeOffsetEndOfZip64CentralDir<0))throw new Error("Corrupted zip: can't find the ZIP64 end of central directory");this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir),this.checkSignature(g.ZIP64_CENTRAL_DIRECTORY_END),this.readBlockZip64EndOfCentral()}var d=this.centralDirOffset+this.centralDirSize;this.zip64&&(d+=20,d+=12+this.zip64EndOfCentralSize);var e=c-d;if(e>0)this.isSignature(c,g.CENTRAL_FILE_HEADER)||(this.reader.zero=e);else if(e<0)throw new Error("Corrupted zip: missing "+Math.abs(e)+" bytes.")},prepareReader:function(a){this.reader=e(a)},load:function(a){this.prepareReader(a),this.readEndOfCentral(),this.readCentralDir(),this.readLocalFiles()}},b.exports=d},{"./reader/readerFor":22,"./signature":23,"./support":30,"./utf8":31,"./utils":32,"./zipEntry":34}],34:[function(a,b,c){"use strict";function d(a,b){this.options=a,this.loadOptions=b}var e=a("./reader/readerFor"),f=a("./utils"),g=a("./compressedObject"),h=a("./crc32"),i=a("./utf8"),j=a("./compressions"),k=a("./support"),l=0,m=3,n=function(a){for(var b in j)if(j.hasOwnProperty(b)&&j[b].magic===a)return j[b];return null};d.prototype={isEncrypted:function(){return 1===(1&this.bitFlag)},useUTF8:function(){return 2048===(2048&this.bitFlag)},readLocalPart:function(a){var b,c;if(a.skip(22),this.fileNameLength=a.readInt(2),c=a.readInt(2),this.fileName=a.readData(this.fileNameLength),a.skip(c),this.compressedSize===-1||this.uncompressedSize===-1)throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory (compressedSize === -1 || uncompressedSize === -1)");if(b=n(this.compressionMethod),null===b)throw new Error("Corrupted zip : compression "+f.pretty(this.compressionMethod)+" unknown (inner file : "+f.transformTo("string",this.fileName)+")");this.decompressed=new g(this.compressedSize,this.uncompressedSize,this.crc32,b,a.readData(this.compressedSize))},readCentralPart:function(a){this.versionMadeBy=a.readInt(2),a.skip(2),this.bitFlag=a.readInt(2),this.compressionMethod=a.readString(2),this.date=a.readDate(),this.crc32=a.readInt(4),this.compressedSize=a.readInt(4),this.uncompressedSize=a.readInt(4);var b=a.readInt(2);if(this.extraFieldsLength=a.readInt(2),this.fileCommentLength=a.readInt(2),this.diskNumberStart=a.readInt(2),this.internalFileAttributes=a.readInt(2),this.externalFileAttributes=a.readInt(4),this.localHeaderOffset=a.readInt(4),this.isEncrypted())throw new Error("Encrypted zip are not 
supported");a.skip(b),this.readExtraFields(a),this.parseZIP64ExtraField(a),this.fileComment=a.readData(this.fileCommentLength)},processAttributes:function(){this.unixPermissions=null,this.dosPermissions=null;var a=this.versionMadeBy>>8;this.dir=!!(16&this.externalFileAttributes),a===l&&(this.dosPermissions=63&this.externalFileAttributes),a===m&&(this.unixPermissions=this.externalFileAttributes>>16&65535),this.dir||"/"!==this.fileNameStr.slice(-1)||(this.dir=!0)},parseZIP64ExtraField:function(a){if(this.extraFields[1]){var b=e(this.extraFields[1].value);this.uncompressedSize===f.MAX_VALUE_32BITS&&(this.uncompressedSize=b.readInt(8)),this.compressedSize===f.MAX_VALUE_32BITS&&(this.compressedSize=b.readInt(8)),this.localHeaderOffset===f.MAX_VALUE_32BITS&&(this.localHeaderOffset=b.readInt(8)),this.diskNumberStart===f.MAX_VALUE_32BITS&&(this.diskNumberStart=b.readInt(4))}},readExtraFields:function(a){var b,c,d,e=a.index+this.extraFieldsLength;for(this.extraFields||(this.extraFields={});a.index<e;)b=a.readInt(2),c=a.readInt(2),d=a.readData(c),this.extraFields[b]={id:b,length:c,value:d}},handleUTF8:function(){var a=k.uint8array?"uint8array":"array";if(this.useUTF8())this.fileNameStr=i.utf8decode(this.fileName),this.fileCommentStr=i.utf8decode(this.fileComment);else{var b=this.findExtraFieldUnicodePath();if(null!==b)this.fileNameStr=b;else{var c=f.transformTo(a,this.fileName);this.fileNameStr=this.loadOptions.decodeFileName(c)}var d=this.findExtraFieldUnicodeComment();if(null!==d)this.fileCommentStr=d;else{var e=f.transformTo(a,this.fileComment);this.fileCommentStr=this.loadOptions.decodeFileName(e)}}},findExtraFieldUnicodePath:function(){var a=this.extraFields[28789];if(a){var b=e(a.value);return 1!==b.readInt(1)?null:h(this.fileName)!==b.readInt(4)?null:i.utf8decode(b.readData(a.length-5))}return null},findExtraFieldUnicodeComment:function(){var a=this.extraFields[25461];if(a){var b=e(a.value);return 1!==b.readInt(1)?null:h(this.fileComment)!==b.readInt(4)?null:i.utf8decode(b.readData(a.length-5))}return null}},b.exports=d},{"./compressedObject":2,"./compressions":3,"./crc32":4,"./reader/readerFor":22,"./support":30,"./utf8":31,"./utils":32}],35:[function(a,b,c){"use strict";var d=a("./stream/StreamHelper"),e=a("./stream/DataWorker"),f=a("./utf8"),g=a("./compressedObject"),h=a("./stream/GenericWorker"),i=function(a,b,c){this.name=a,this.dir=c.dir,this.date=c.date,this.comment=c.comment,this.unixPermissions=c.unixPermissions,this.dosPermissions=c.dosPermissions,this._data=b,this._dataBinary=c.binary,this.options={compression:c.compression,compressionOptions:c.compressionOptions}};i.prototype={internalStream:function(a){var b=null,c="string";try{if(!a)throw new Error("No output type specified.");c=a.toLowerCase();var e="string"===c||"text"===c;"binarystring"!==c&&"text"!==c||(c="string"),b=this._decompressWorker();var g=!this._dataBinary;g&&!e&&(b=b.pipe(new f.Utf8EncodeWorker)),!g&&e&&(b=b.pipe(new f.Utf8DecodeWorker))}catch(i){b=new h("error"),b.error(i)}return new d(b,c,"")},async:function(a,b){return this.internalStream(a).accumulate(b)},nodeStream:function(a,b){return this.internalStream(a||"nodebuffer").toNodejsStream(b)},_compressWorker:function(a,b){if(this._data instanceof g&&this._data.compression.magic===a.magic)return this._data.getCompressedWorker();var c=this._decompressWorker();return this._dataBinary||(c=c.pipe(new f.Utf8EncodeWorker)),g.createWorkerFrom(c,a,b)},_decompressWorker:function(){return this._data instanceof g?this._data.getContentWorker():this._data instanceof 
h?this._data:new e(this._data)}};for(var j=["asText","asBinary","asNodeBuffer","asUint8Array","asArrayBuffer"],k=function(){throw new Error("This method has been removed in JSZip 3.0, please check the upgrade guide.")},l=0;l<j.length;l++)i.prototype[j[l]]=k;b.exports=i},{"./compressedObject":2,"./stream/DataWorker":27,"./stream/GenericWorker":28,"./stream/StreamHelper":29,"./utf8":31}],36:[function(a,b,c){a("../modules/web.immediate"),b.exports=a("../modules/_core").setImmediate},{"../modules/_core":40,"../modules/web.immediate":56}],37:[function(a,b,c){b.exports=function(a){if("function"!=typeof a)throw TypeError(a+" is not a function!");return a}},{}],38:[function(a,b,c){var d=a("./_is-object");b.exports=function(a){if(!d(a))throw TypeError(a+" is not an object!");return a}},{"./_is-object":51}],39:[function(a,b,c){var d={}.toString;b.exports=function(a){return d.call(a).slice(8,-1)}},{}],40:[function(a,b,c){var d=b.exports={version:"2.3.0"};"number"==typeof __e&&(__e=d)},{}],41:[function(a,b,c){var d=a("./_a-function");b.exports=function(a,b,c){if(d(a),void 0===b)return a;switch(c){case 1:return function(c){return a.call(b,c)};case 2:return function(c,d){return a.call(b,c,d)};case 3:return function(c,d,e){return a.call(b,c,d,e)}}return function(){return a.apply(b,arguments)}}},{"./_a-function":37}],42:[function(a,b,c){b.exports=!a("./_fails")(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},{"./_fails":45}],43:[function(a,b,c){var d=a("./_is-object"),e=a("./_global").document,f=d(e)&&d(e.createElement);b.exports=function(a){return f?e.createElement(a):{}}},{"./_global":46,"./_is-object":51}],44:[function(a,b,c){var d=a("./_global"),e=a("./_core"),f=a("./_ctx"),g=a("./_hide"),h="prototype",i=function(a,b,c){var j,k,l,m=a&i.F,n=a&i.G,o=a&i.S,p=a&i.P,q=a&i.B,r=a&i.W,s=n?e:e[b]||(e[b]={}),t=s[h],u=n?d:o?d[b]:(d[b]||{})[h];n&&(c=b);for(j in c)k=!m&&u&&void 0!==u[j],k&&j in s||(l=k?u[j]:c[j],s[j]=n&&"function"!=typeof u[j]?c[j]:q&&k?f(l,d):r&&u[j]==l?function(a){var b=function(b,c,d){if(this instanceof a){switch(arguments.length){case 0:return new a;case 1:return new a(b);case 2:return new a(b,c)}return new a(b,c,d)}return a.apply(this,arguments)};return b[h]=a[h],b}(l):p&&"function"==typeof l?f(Function.call,l):l,p&&((s.virtual||(s.virtual={}))[j]=l,a&i.R&&t&&!t[j]&&g(t,j,l)))};i.F=1,i.G=2,i.S=4,i.P=8,i.B=16,i.W=32,i.U=64,i.R=128,b.exports=i},{"./_core":40,"./_ctx":41,"./_global":46,"./_hide":47}],45:[function(a,b,c){b.exports=function(a){try{return!!a()}catch(b){return!0}}},{}],46:[function(a,b,c){var d=b.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=d)},{}],47:[function(a,b,c){var d=a("./_object-dp"),e=a("./_property-desc");b.exports=a("./_descriptors")?function(a,b,c){return d.f(a,b,e(1,c))}:function(a,b,c){return a[b]=c,a}},{"./_descriptors":42,"./_object-dp":52,"./_property-desc":53}],48:[function(a,b,c){b.exports=a("./_global").document&&document.documentElement},{"./_global":46}],49:[function(a,b,c){b.exports=!a("./_descriptors")&&!a("./_fails")(function(){return 7!=Object.defineProperty(a("./_dom-create")("div"),"a",{get:function(){return 7}}).a})},{"./_descriptors":42,"./_dom-create":43,"./_fails":45}],50:[function(a,b,c){b.exports=function(a,b,c){var d=void 0===c;switch(b.length){case 0:return d?a():a.call(c);case 1:return d?a(b[0]):a.call(c,b[0]);case 2:return d?a(b[0],b[1]):a.call(c,b[0],b[1]);case 3:return 
d?a(b[0],b[1],b[2]):a.call(c,b[0],b[1],b[2]);case 4:return d?a(b[0],b[1],b[2],b[3]):a.call(c,b[0],b[1],b[2],b[3])}return a.apply(c,b)}},{}],51:[function(a,b,c){b.exports=function(a){return"object"==typeof a?null!==a:"function"==typeof a}},{}],52:[function(a,b,c){var d=a("./_an-object"),e=a("./_ie8-dom-define"),f=a("./_to-primitive"),g=Object.defineProperty;c.f=a("./_descriptors")?Object.defineProperty:function(a,b,c){if(d(a),b=f(b,!0),d(c),e)try{return g(a,b,c)}catch(h){}if("get"in c||"set"in c)throw TypeError("Accessors not supported!");return"value"in c&&(a[b]=c.value),a}},{"./_an-object":38,"./_descriptors":42,"./_ie8-dom-define":49,"./_to-primitive":55}],53:[function(a,b,c){b.exports=function(a,b){return{enumerable:!(1&a),configurable:!(2&a),writable:!(4&a),value:b}}},{}],54:[function(a,b,c){var d,e,f,g=a("./_ctx"),h=a("./_invoke"),i=a("./_html"),j=a("./_dom-create"),k=a("./_global"),l=k.process,m=k.setImmediate,n=k.clearImmediate,o=k.MessageChannel,p=0,q={},r="onreadystatechange",s=function(){var a=+this;if(q.hasOwnProperty(a)){var b=q[a];delete q[a],b()}},t=function(a){s.call(a.data)};m&&n||(m=function(a){for(var b=[],c=1;arguments.length>c;)b.push(arguments[c++]);return q[++p]=function(){h("function"==typeof a?a:Function(a),b)},d(p),p},n=function(a){delete q[a]},"process"==a("./_cof")(l)?d=function(a){l.nextTick(g(s,a,1))}:o?(e=new o,f=e.port2,e.port1.onmessage=t,d=g(f.postMessage,f,1)):k.addEventListener&&"function"==typeof postMessage&&!k.importScripts?(d=function(a){k.postMessage(a+"","*")},k.addEventListener("message",t,!1)):d=r in j("script")?function(a){i.appendChild(j("script"))[r]=function(){i.removeChild(this),s.call(a)}}:function(a){setTimeout(g(s,a,1),0)}),b.exports={set:m,clear:n}},{"./_cof":39,"./_ctx":41,"./_dom-create":43,"./_global":46,"./_html":48,"./_invoke":50}],55:[function(a,b,c){var d=a("./_is-object");b.exports=function(a,b){if(!d(a))return a;var c,e;if(b&&"function"==typeof(c=a.toString)&&!d(e=c.call(a)))return e;if("function"==typeof(c=a.valueOf)&&!d(e=c.call(a)))return e;if(!b&&"function"==typeof(c=a.toString)&&!d(e=c.call(a)))return e;throw TypeError("Can't convert object to primitive value")}},{"./_is-object":51}],56:[function(a,b,c){var d=a("./_export"),e=a("./_task");d(d.G+d.B,{setImmediate:e.set,clearImmediate:e.clear})},{"./_export":44,"./_task":54}],57:[function(a,b,c){(function(a){"use strict";function c(){k=!0;for(var a,b,c=l.length;c;){for(b=l,l=[],a=-1;++a<c;)b[a]();c=l.length}k=!1}function d(a){1!==l.push(a)||k||e()}var e,f=a.MutationObserver||a.WebKitMutationObserver;if(f){var g=0,h=new f(c),i=a.document.createTextNode("");h.observe(i,{characterData:!0}),e=function(){i.data=g=++g%2}}else if(a.setImmediate||"undefined"==typeof a.MessageChannel)e="document"in a&&"onreadystatechange"in a.document.createElement("script")?function(){var b=a.document.createElement("script");b.onreadystatechange=function(){c(),b.onreadystatechange=null,b.parentNode.removeChild(b),b=null},a.document.documentElement.appendChild(b)}:function(){setTimeout(c,0)};else{var j=new a.MessageChannel;j.port1.onmessage=c,e=function(){j.port2.postMessage(0)}}var k,l=[];b.exports=d}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{}],58:[function(a,b,c){"use strict";function d(){}function e(a){if("function"!=typeof a)throw new TypeError("resolver must be a function");this.state=s,this.queue=[],this.outcome=void 0,a!==d&&i(this,a)}function f(a,b,c){this.promise=a,"function"==typeof 
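/* Module 58 is "lie", the Promise fallback selected in module 6 when no
   native Promise exists. f is its queue item: the onFulfilled/onRejected
   callbacks registered by then() while the promise is still pending. */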
b&&(this.onFulfilled=b,this.callFulfilled=this.otherCallFulfilled),"function"==typeof c&&(this.onRejected=c,this.callRejected=this.otherCallRejected)}function g(a,b,c){o(function(){var d;try{d=b(c)}catch(e){return p.reject(a,e)}d===a?p.reject(a,new TypeError("Cannot resolve promise with itself")):p.resolve(a,d)})}function h(a){var b=a&&a.then;if(a&&("object"==typeof a||"function"==typeof a)&&"function"==typeof b)return function(){b.apply(a,arguments)}}function i(a,b){function c(b){f||(f=!0,p.reject(a,b))}function d(b){f||(f=!0,p.resolve(a,b))}function e(){b(d,c)}var f=!1,g=j(e);"error"===g.status&&c(g.value)}function j(a,b){var c={};try{c.value=a(b),c.status="success"}catch(d){c.status="error",c.value=d}return c}function k(a){return a instanceof this?a:p.resolve(new this(d),a)}function l(a){var b=new this(d);return p.reject(b,a)}function m(a){function b(a,b){function d(a){g[b]=a,++h!==e||f||(f=!0,p.resolve(j,g))}c.resolve(a).then(d,function(a){f||(f=!0,p.reject(j,a))})}var c=this;if("[object Array]"!==Object.prototype.toString.call(a))return this.reject(new TypeError("must be an array"));var e=a.length,f=!1;if(!e)return this.resolve([]);for(var g=new Array(e),h=0,i=-1,j=new this(d);++i<e;)b(a[i],i);return j}function n(a){function b(a){c.resolve(a).then(function(a){f||(f=!0,p.resolve(h,a))},function(a){f||(f=!0,p.reject(h,a))})}var c=this;if("[object Array]"!==Object.prototype.toString.call(a))return this.reject(new TypeError("must be an array"));var e=a.length,f=!1;if(!e)return this.resolve([]);for(var g=-1,h=new this(d);++g<e;)b(a[g]);return h}var o=a("immediate"),p={},q=["REJECTED"],r=["FULFILLED"],s=["PENDING"];b.exports=e,e.prototype["catch"]=function(a){return this.then(null,a)},e.prototype.then=function(a,b){if("function"!=typeof a&&this.state===r||"function"!=typeof b&&this.state===q)return this;var c=new this.constructor(d);if(this.state!==s){var e=this.state===r?a:b;g(c,e,this.outcome)}else this.queue.push(new f(c,a,b));return c},f.prototype.callFulfilled=function(a){p.resolve(this.promise,a)},f.prototype.otherCallFulfilled=function(a){g(this.promise,this.onFulfilled,a)},f.prototype.callRejected=function(a){p.reject(this.promise,a)},f.prototype.otherCallRejected=function(a){g(this.promise,this.onRejected,a)},p.resolve=function(a,b){var c=j(h,b);if("error"===c.status)return p.reject(a,c.value);var d=c.value;if(d)i(a,d);else{a.state=r,a.outcome=b;for(var e=-1,f=a.queue.length;++e<f;)a.queue[e].callFulfilled(b)}return a},p.reject=function(a,b){a.state=q,a.outcome=b;for(var c=-1,d=a.queue.length;++c<d;)a.queue[c].callRejected(b);return a},e.resolve=k,e.reject=l,e.all=m,e.race=n},{immediate:57}],59:[function(a,b,c){"use strict";var d=a("./lib/utils/common").assign,e=a("./lib/deflate"),f=a("./lib/inflate"),g=a("./lib/zlib/constants"),h={};d(h,e,f,g),b.exports=h},{"./lib/deflate":60,"./lib/inflate":61,"./lib/utils/common":62,"./lib/zlib/constants":65}],60:[function(a,b,c){"use strict";function d(a){if(!(this instanceof d))return new d(a);this.options=i.assign({level:s,method:u,chunkSize:16384,windowBits:15,memLevel:8,strategy:t,to:""},a||{});var b=this.options;b.raw&&b.windowBits>0?b.windowBits=-b.windowBits:b.gzip&&b.windowBits>0&&b.windowBits<16&&(b.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new l,this.strm.avail_out=0;var c=h.deflateInit2(this.strm,b.level,b.method,b.windowBits,b.memLevel,b.strategy);if(c!==p)throw new Error(k[c]);if(b.header&&h.deflateSetHeader(this.strm,b.header),b.dictionary){var e;if(e="string"==typeof 
b.dictionary?j.string2buf(b.dictionary):"[object ArrayBuffer]"===m.call(b.dictionary)?new Uint8Array(b.dictionary):b.dictionary,c=h.deflateSetDictionary(this.strm,e),c!==p)throw new Error(k[c]);this._dict_set=!0}}function e(a,b){var c=new d(b);if(c.push(a,!0),c.err)throw c.msg||k[c.err];return c.result}function f(a,b){return b=b||{},b.raw=!0,e(a,b)}function g(a,b){return b=b||{},b.gzip=!0,e(a,b)}var h=a("./zlib/deflate"),i=a("./utils/common"),j=a("./utils/strings"),k=a("./zlib/messages"),l=a("./zlib/zstream"),m=Object.prototype.toString,n=0,o=4,p=0,q=1,r=2,s=-1,t=0,u=8;d.prototype.push=function(a,b){var c,d,e=this.strm,f=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?o:n,"string"==typeof a?e.input=j.string2buf(a):"[object ArrayBuffer]"===m.call(a)?e.input=new Uint8Array(a):e.input=a,e.next_in=0,e.avail_in=e.input.length;do{if(0===e.avail_out&&(e.output=new i.Buf8(f),e.next_out=0,e.avail_out=f),c=h.deflate(e,d),c!==q&&c!==p)return this.onEnd(c),this.ended=!0,!1;0!==e.avail_out&&(0!==e.avail_in||d!==o&&d!==r)||("string"===this.options.to?this.onData(j.buf2binstring(i.shrinkBuf(e.output,e.next_out))):this.onData(i.shrinkBuf(e.output,e.next_out)))}while((e.avail_in>0||0===e.avail_out)&&c!==q);return d===o?(c=h.deflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===p):d!==r||(this.onEnd(p),e.avail_out=0,!0)},d.prototype.onData=function(a){this.chunks.push(a)},d.prototype.onEnd=function(a){a===p&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=i.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Deflate=d,c.deflate=e,c.deflateRaw=f,c.gzip=g},{"./utils/common":62,"./utils/strings":63,"./zlib/deflate":67,"./zlib/messages":72,"./zlib/zstream":74}],61:[function(a,b,c){"use strict";function d(a){if(!(this instanceof d))return new d(a);this.options=h.assign({chunkSize:16384,windowBits:0,to:""},a||{});var b=this.options;b.raw&&b.windowBits>=0&&b.windowBits<16&&(b.windowBits=-b.windowBits,0===b.windowBits&&(b.windowBits=-15)),!(b.windowBits>=0&&b.windowBits<16)||a&&a.windowBits||(b.windowBits+=32),b.windowBits>15&&b.windowBits<48&&0===(15&b.windowBits)&&(b.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new l,this.strm.avail_out=0;var c=g.inflateInit2(this.strm,b.windowBits);if(c!==j.Z_OK)throw new Error(k[c]);this.header=new m,g.inflateGetHeader(this.strm,this.header)}function e(a,b){var c=new d(b);if(c.push(a,!0),c.err)throw c.msg||k[c.err];return c.result}function f(a,b){return b=b||{},b.raw=!0,e(a,b)}var g=a("./zlib/inflate"),h=a("./utils/common"),i=a("./utils/strings"),j=a("./zlib/constants"),k=a("./zlib/messages"),l=a("./zlib/zstream"),m=a("./zlib/gzheader"),n=Object.prototype.toString;d.prototype.push=function(a,b){var c,d,e,f,k,l,m=this.strm,o=this.options.chunkSize,p=this.options.dictionary,q=!1;if(this.ended)return!1;d=b===~~b?b:b===!0?j.Z_FINISH:j.Z_NO_FLUSH,"string"==typeof a?m.input=i.binstring2buf(a):"[object ArrayBuffer]"===n.call(a)?m.input=new Uint8Array(a):m.input=a,m.next_in=0,m.avail_in=m.input.length;do{if(0===m.avail_out&&(m.output=new h.Buf8(o),m.next_out=0,m.avail_out=o),c=g.inflate(m,j.Z_NO_FLUSH),c===j.Z_NEED_DICT&&p&&(l="string"==typeof p?i.string2buf(p):"[object ArrayBuffer]"===n.call(p)?new Uint8Array(p):p,c=g.inflateSetDictionary(this.strm,l)),c===j.Z_BUF_ERROR&&q===!0&&(c=j.Z_OK,q=!1),c!==j.Z_STREAM_END&&c!==j.Z_OK)return 
this.onEnd(c),this.ended=!0,!1;m.next_out&&(0!==m.avail_out&&c!==j.Z_STREAM_END&&(0!==m.avail_in||d!==j.Z_FINISH&&d!==j.Z_SYNC_FLUSH)||("string"===this.options.to?(e=i.utf8border(m.output,m.next_out),f=m.next_out-e,k=i.buf2string(m.output,e),m.next_out=f,m.avail_out=o-f,f&&h.arraySet(m.output,m.output,e,f,0),this.onData(k)):this.onData(h.shrinkBuf(m.output,m.next_out)))),0===m.avail_in&&0===m.avail_out&&(q=!0)}while((m.avail_in>0||0===m.avail_out)&&c!==j.Z_STREAM_END);return c===j.Z_STREAM_END&&(d=j.Z_FINISH),d===j.Z_FINISH?(c=g.inflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===j.Z_OK):d!==j.Z_SYNC_FLUSH||(this.onEnd(j.Z_OK),m.avail_out=0,!0)},d.prototype.onData=function(a){this.chunks.push(a)},d.prototype.onEnd=function(a){a===j.Z_OK&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=h.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Inflate=d,c.inflate=e,c.inflateRaw=f,c.ungzip=e},{"./utils/common":62,"./utils/strings":63,"./zlib/constants":65,"./zlib/gzheader":68,"./zlib/inflate":70,"./zlib/messages":72,"./zlib/zstream":74}],62:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;c.assign=function(a){for(var b=Array.prototype.slice.call(arguments,1);b.length;){var c=b.shift();if(c){if("object"!=typeof c)throw new TypeError(c+"must be non-object");for(var d in c)c.hasOwnProperty(d)&&(a[d]=c[d])}}return a},c.shrinkBuf=function(a,b){return a.length===b?a:a.subarray?a.subarray(0,b):(a.length=b,a)};var e={arraySet:function(a,b,c,d,e){if(b.subarray&&a.subarray)return void a.set(b.subarray(c,c+d),e);for(var f=0;f<d;f++)a[e+f]=b[c+f]},flattenChunks:function(a){var b,c,d,e,f,g;for(d=0,b=0,c=a.length;b<c;b++)d+=a[b].length;for(g=new Uint8Array(d),e=0,b=0,c=a.length;b<c;b++)f=a[b],g.set(f,e),e+=f.length;return g}},f={arraySet:function(a,b,c,d,e){for(var f=0;f<d;f++)a[e+f]=b[c+f]},flattenChunks:function(a){return[].concat.apply([],a)}};c.setTyped=function(a){a?(c.Buf8=Uint8Array,c.Buf16=Uint16Array,c.Buf32=Int32Array,c.assign(c,e)):(c.Buf8=Array,c.Buf16=Array,c.Buf32=Array,c.assign(c,f))},c.setTyped(d)},{}],63:[function(a,b,c){"use strict";function d(a,b){if(b<65537&&(a.subarray&&g||!a.subarray&&f))return String.fromCharCode.apply(null,e.shrinkBuf(a,b));for(var c="",d=0;d<b;d++)c+=String.fromCharCode(a[d]);return c}var e=a("./common"),f=!0,g=!0;try{String.fromCharCode.apply(null,[0])}catch(h){f=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(h){g=!1}for(var i=new e.Buf8(256),j=0;j<256;j++)i[j]=j>=252?6:j>=248?5:j>=240?4:j>=224?3:j>=192?2:1;i[254]=i[254]=1,c.string2buf=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;f<h;f++)c=a.charCodeAt(f),55296===(64512&c)&&f+1<h&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=c<128?1:c<2048?2:c<65536?3:4;for(b=new e.Buf8(i),g=0,f=0;g<i;f++)c=a.charCodeAt(f),55296===(64512&c)&&f+1<h&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),c<128?b[g++]=c:c<2048?(b[g++]=192|c>>>6,b[g++]=128|63&c):c<65536?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},c.buf2binstring=function(a){return d(a,a.length)},c.binstring2buf=function(a){for(var b=new e.Buf8(a.length),c=0,d=b.length;c<d;c++)b[c]=a.charCodeAt(c);return b},c.buf2string=function(a,b){var c,e,f,g,h=b||a.length,j=new Array(2*h);for(e=0,c=0;c<h;)if(f=a[c++],f<128)j[e++]=f;else 
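/* buf2string decodes UTF-8 using the per-lead-byte length table i; invalid
   or truncated sequences become U+FFFD (65533), and code points above
   0xFFFF are re-emitted as surrogate pairs. */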
if(g=i[f],g>4)j[e++]=65533,c+=g-1;else{for(f&=2===g?31:3===g?15:7;g>1&&c<h;)f=f<<6|63&a[c++],g--;g>1?j[e++]=65533:f<65536?j[e++]=f:(f-=65536,j[e++]=55296|f>>10&1023,j[e++]=56320|1023&f)}return d(j,e)},c.utf8border=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return c<0?b:0===c?b:c+i[a[c]]>b?c:b}},{"./common":62}],64:[function(a,b,c){"use strict";function d(a,b,c,d){for(var e=65535&a|0,f=a>>>16&65535|0,g=0;0!==c;){g=c>2e3?2e3:c,c-=g;do e=e+b[d++]|0,f=f+e|0;while(--g);e%=65521,f%=65521}return e|f<<16|0;
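/* Adler-32: e and f are the running sums s1 = 1 + sum(data) mod 65521 and
   s2 = sum(s1) mod 65521, updated in blocks of at most 2000 bytes so the
   modulo can be deferred, then packed as (s2 << 16) | s1. */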
}b.exports=d},{}],65:[function(a,b,c){"use strict";b.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],66:[function(a,b,c){"use strict";function d(){for(var a,b=[],c=0;c<256;c++){a=c;for(var d=0;d<8;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function e(a,b,c,d){var e=f,g=d+c;a^=-1;for(var h=d;h<g;h++)a=a>>>8^e[255&(a^b[h])];return a^-1}var f=d();b.exports=e},{}],67:[function(a,b,c){"use strict";function d(a,b){return a.msg=I[b],b}function e(a){return(a<<1)-(a>4?9:0)}function f(a){for(var b=a.length;--b>=0;)a[b]=0}function g(a){var b=a.state,c=b.pending;c>a.avail_out&&(c=a.avail_out),0!==c&&(E.arraySet(a.output,b.pending_buf,b.pending_out,c,a.next_out),a.next_out+=c,b.pending_out+=c,a.total_out+=c,a.avail_out-=c,b.pending-=c,0===b.pending&&(b.pending_out=0))}function h(a,b){F._tr_flush_block(a,a.block_start>=0?a.block_start:-1,a.strstart-a.block_start,b),a.block_start=a.strstart,g(a.strm)}function i(a,b){a.pending_buf[a.pending++]=b}function j(a,b){a.pending_buf[a.pending++]=b>>>8&255,a.pending_buf[a.pending++]=255&b}function k(a,b,c,d){var e=a.avail_in;return e>d&&(e=d),0===e?0:(a.avail_in-=e,E.arraySet(b,a.input,a.next_in,e,c),1===a.state.wrap?a.adler=G(a.adler,b,e,c):2===a.state.wrap&&(a.adler=H(a.adler,b,e,c)),a.next_in+=e,a.total_in+=e,e)}function l(a,b){var c,d,e=a.max_chain_length,f=a.strstart,g=a.prev_length,h=a.nice_match,i=a.strstart>a.w_size-la?a.strstart-(a.w_size-la):0,j=a.window,k=a.w_mask,l=a.prev,m=a.strstart+ka,n=j[f+g-1],o=j[f+g];a.prev_length>=a.good_match&&(e>>=2),h>a.lookahead&&(h=a.lookahead);do if(c=b,j[c+g]===o&&j[c+g-1]===n&&j[c]===j[f]&&j[++c]===j[f+1]){f+=2,c++;do;while(j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&f<m);if(d=ka-(m-f),f=m-ka,d>g){if(a.match_start=b,g=d,d>=h)break;n=j[f+g-1],o=j[f+g]}}while((b=l[b&k])>i&&0!==--e);return g<=a.lookahead?g:a.lookahead}function m(a){var b,c,d,e,f,g=a.w_size;do{if(e=a.window_size-a.lookahead-a.strstart,a.strstart>=g+(g-la)){E.arraySet(a.window,a.window,g,g,0),a.match_start-=g,a.strstart-=g,a.block_start-=g,c=a.hash_size,b=c;do d=a.head[--b],a.head[b]=d>=g?d-g:0;while(--c);c=g,b=c;do d=a.prev[--b],a.prev[b]=d>=g?d-g:0;while(--c);e+=g}if(0===a.strm.avail_in)break;if(c=k(a.strm,a.window,a.strstart+a.lookahead,e),a.lookahead+=c,a.lookahead+a.insert>=ja)for(f=a.strstart-a.insert,a.ins_h=a.window[f],a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+1])&a.hash_mask;a.insert&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+ja-1])&a.hash_mask,a.prev[f&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=f,f++,a.insert--,!(a.lookahead+a.insert<ja)););}while(a.lookahead<la&&0!==a.strm.avail_in)}function n(a,b){var c=65535;for(c>a.pending_buf_size-5&&(c=a.pending_buf_size-5);;){if(a.lookahead<=1){if(m(a),0===a.lookahead&&b===J)return ua;if(0===a.lookahead)break}a.strstart+=a.lookahead,a.lookahead=0;var d=a.block_start+c;if((0===a.strstart||a.strstart>=d)&&(a.lookahead=a.strstart-d,a.strstart=d,h(a,!1),0===a.strm.avail_out))return ua;if(a.strstart-a.block_start>=a.w_size-la&&(h(a,!1),0===a.strm.avail_out))return ua}return 
a.insert=0,b===M?(h(a,!0),0===a.strm.avail_out?wa:xa):a.strstart>a.block_start&&(h(a,!1),0===a.strm.avail_out)?ua:ua}function o(a,b){for(var c,d;;){if(a.lookahead<la){if(m(a),a.lookahead<la&&b===J)return ua;if(0===a.lookahead)break}if(c=0,a.lookahead>=ja&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+ja-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),0!==c&&a.strstart-c<=a.w_size-la&&(a.match_length=l(a,c)),a.match_length>=ja)if(d=F._tr_tally(a,a.strstart-a.match_start,a.match_length-ja),a.lookahead-=a.match_length,a.match_length<=a.max_lazy_match&&a.lookahead>=ja){a.match_length--;do a.strstart++,a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+ja-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart;while(0!==--a.match_length);a.strstart++}else a.strstart+=a.match_length,a.match_length=0,a.ins_h=a.window[a.strstart],a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+1])&a.hash_mask;else d=F._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++;if(d&&(h(a,!1),0===a.strm.avail_out))return ua}return a.insert=a.strstart<ja-1?a.strstart:ja-1,b===M?(h(a,!0),0===a.strm.avail_out?wa:xa):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?ua:va}function p(a,b){for(var c,d,e;;){if(a.lookahead<la){if(m(a),a.lookahead<la&&b===J)return ua;if(0===a.lookahead)break}if(c=0,a.lookahead>=ja&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+ja-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),a.prev_length=a.match_length,a.prev_match=a.match_start,a.match_length=ja-1,0!==c&&a.prev_length<a.max_lazy_match&&a.strstart-c<=a.w_size-la&&(a.match_length=l(a,c),a.match_length<=5&&(a.strategy===U||a.match_length===ja&&a.strstart-a.match_start>4096)&&(a.match_length=ja-1)),a.prev_length>=ja&&a.match_length<=a.prev_length){e=a.strstart+a.lookahead-ja,d=F._tr_tally(a,a.strstart-1-a.prev_match,a.prev_length-ja),a.lookahead-=a.prev_length-1,a.prev_length-=2;do++a.strstart<=e&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+ja-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart);while(0!==--a.prev_length);if(a.match_available=0,a.match_length=ja-1,a.strstart++,d&&(h(a,!1),0===a.strm.avail_out))return ua}else if(a.match_available){if(d=F._tr_tally(a,0,a.window[a.strstart-1]),d&&h(a,!1),a.strstart++,a.lookahead--,0===a.strm.avail_out)return ua}else a.match_available=1,a.strstart++,a.lookahead--}return a.match_available&&(d=F._tr_tally(a,0,a.window[a.strstart-1]),a.match_available=0),a.insert=a.strstart<ja-1?a.strstart:ja-1,b===M?(h(a,!0),0===a.strm.avail_out?wa:xa):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?ua:va}function q(a,b){for(var c,d,e,f,g=a.window;;){if(a.lookahead<=ka){if(m(a),a.lookahead<=ka&&b===J)return ua;if(0===a.lookahead)break}if(a.match_length=0,a.lookahead>=ja&&a.strstart>0&&(e=a.strstart-1,d=g[e],d===g[++e]&&d===g[++e]&&d===g[++e])){f=a.strstart+ka;do;while(d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&e<f);a.match_length=ka-(f-e),a.match_length>a.lookahead&&(a.match_length=a.lookahead)}if(a.match_length>=ja?(c=F._tr_tally(a,1,a.match_length-ja),a.lookahead-=a.match_length,a.strstart+=a.match_length,a.match_length=0):(c=F._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++),c&&(h(a,!1),0===a.strm.avail_out))return ua}return a.insert=0,b===M?(h(a,!0),0===a.strm.avail_out?wa:xa):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?ua:va}function r(a,b){for(var 
c;;){if(0===a.lookahead&&(m(a),0===a.lookahead)){if(b===J)return ua;break}if(a.match_length=0,c=F._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++,c&&(h(a,!1),0===a.strm.avail_out))return ua}return a.insert=0,b===M?(h(a,!0),0===a.strm.avail_out?wa:xa):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?ua:va}function s(a,b,c,d,e){this.good_length=a,this.max_lazy=b,this.nice_length=c,this.max_chain=d,this.func=e}function t(a){a.window_size=2*a.w_size,f(a.head),a.max_lazy_match=D[a.level].max_lazy,a.good_match=D[a.level].good_length,a.nice_match=D[a.level].nice_length,a.max_chain_length=D[a.level].max_chain,a.strstart=0,a.block_start=0,a.lookahead=0,a.insert=0,a.match_length=a.prev_length=ja-1,a.match_available=0,a.ins_h=0}function u(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=$,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new E.Buf16(2*ha),this.dyn_dtree=new E.Buf16(2*(2*fa+1)),this.bl_tree=new E.Buf16(2*(2*ga+1)),f(this.dyn_ltree),f(this.dyn_dtree),f(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new E.Buf16(ia+1),this.heap=new E.Buf16(2*ea+1),f(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new E.Buf16(2*ea+1),f(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function v(a){var b;return a&&a.state?(a.total_in=a.total_out=0,a.data_type=Z,b=a.state,b.pending=0,b.pending_out=0,b.wrap<0&&(b.wrap=-b.wrap),b.status=b.wrap?na:sa,a.adler=2===b.wrap?0:1,b.last_flush=J,F._tr_init(b),O):d(a,Q)}function w(a){var b=v(a);return b===O&&t(a.state),b}function x(a,b){return a&&a.state?2!==a.state.wrap?Q:(a.state.gzhead=b,O):Q}function y(a,b,c,e,f,g){if(!a)return Q;var h=1;if(b===T&&(b=6),e<0?(h=0,e=-e):e>15&&(h=2,e-=16),f<1||f>_||c!==$||e<8||e>15||b<0||b>9||g<0||g>X)return d(a,Q);8===e&&(e=9);var i=new u;return a.state=i,i.strm=a,i.wrap=h,i.gzhead=null,i.w_bits=e,i.w_size=1<<i.w_bits,i.w_mask=i.w_size-1,i.hash_bits=f+7,i.hash_size=1<<i.hash_bits,i.hash_mask=i.hash_size-1,i.hash_shift=~~((i.hash_bits+ja-1)/ja),i.window=new E.Buf8(2*i.w_size),i.head=new E.Buf16(i.hash_size),i.prev=new E.Buf16(i.w_size),i.lit_bufsize=1<<f+6,i.pending_buf_size=4*i.lit_bufsize,i.pending_buf=new E.Buf8(i.pending_buf_size),i.d_buf=1*i.lit_bufsize,i.l_buf=3*i.lit_bufsize,i.level=b,i.strategy=g,i.method=c,w(a)}function z(a,b){return y(a,b,$,aa,ba,Y)}function A(a,b){var c,h,k,l;if(!a||!a.state||b>N||b<0)return a?d(a,Q):Q;if(h=a.state,!a.output||!a.input&&0!==a.avail_in||h.status===ta&&b!==M)return 
d(a,0===a.avail_out?S:Q);if(h.strm=a,c=h.last_flush,h.last_flush=b,h.status===na)if(2===h.wrap)a.adler=0,i(h,31),i(h,139),i(h,8),h.gzhead?(i(h,(h.gzhead.text?1:0)+(h.gzhead.hcrc?2:0)+(h.gzhead.extra?4:0)+(h.gzhead.name?8:0)+(h.gzhead.comment?16:0)),i(h,255&h.gzhead.time),i(h,h.gzhead.time>>8&255),i(h,h.gzhead.time>>16&255),i(h,h.gzhead.time>>24&255),i(h,9===h.level?2:h.strategy>=V||h.level<2?4:0),i(h,255&h.gzhead.os),h.gzhead.extra&&h.gzhead.extra.length&&(i(h,255&h.gzhead.extra.length),i(h,h.gzhead.extra.length>>8&255)),h.gzhead.hcrc&&(a.adler=H(a.adler,h.pending_buf,h.pending,0)),h.gzindex=0,h.status=oa):(i(h,0),i(h,0),i(h,0),i(h,0),i(h,0),i(h,9===h.level?2:h.strategy>=V||h.level<2?4:0),i(h,ya),h.status=sa);else{var m=$+(h.w_bits-8<<4)<<8,n=-1;n=h.strategy>=V||h.level<2?0:h.level<6?1:6===h.level?2:3,m|=n<<6,0!==h.strstart&&(m|=ma),m+=31-m%31,h.status=sa,j(h,m),0!==h.strstart&&(j(h,a.adler>>>16),j(h,65535&a.adler)),a.adler=1}if(h.status===oa)if(h.gzhead.extra){for(k=h.pending;h.gzindex<(65535&h.gzhead.extra.length)&&(h.pending!==h.pending_buf_size||(h.gzhead.hcrc&&h.pending>k&&(a.adler=H(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending!==h.pending_buf_size));)i(h,255&h.gzhead.extra[h.gzindex]),h.gzindex++;h.gzhead.hcrc&&h.pending>k&&(a.adler=H(a.adler,h.pending_buf,h.pending-k,k)),h.gzindex===h.gzhead.extra.length&&(h.gzindex=0,h.status=pa)}else h.status=pa;if(h.status===pa)if(h.gzhead.name){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=H(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.name.length?255&h.gzhead.name.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=H(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.gzindex=0,h.status=qa)}else h.status=qa;if(h.status===qa)if(h.gzhead.comment){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=H(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.comment.length?255&h.gzhead.comment.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=H(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.status=ra)}else h.status=ra;if(h.status===ra&&(h.gzhead.hcrc?(h.pending+2>h.pending_buf_size&&g(a),h.pending+2<=h.pending_buf_size&&(i(h,255&a.adler),i(h,a.adler>>8&255),a.adler=0,h.status=sa)):h.status=sa),0!==h.pending){if(g(a),0===a.avail_out)return h.last_flush=-1,O}else if(0===a.avail_in&&e(b)<=e(c)&&b!==M)return d(a,S);if(h.status===ta&&0!==a.avail_in)return d(a,S);if(0!==a.avail_in||0!==h.lookahead||b!==J&&h.status!==ta){var o=h.strategy===V?r(h,b):h.strategy===W?q(h,b):D[h.level].func(h,b);if(o!==wa&&o!==xa||(h.status=ta),o===ua||o===wa)return 0===a.avail_out&&(h.last_flush=-1),O;if(o===va&&(b===K?F._tr_align(h):b!==N&&(F._tr_stored_block(h,0,0,!1),b===L&&(f(h.head),0===h.lookahead&&(h.strstart=0,h.block_start=0,h.insert=0))),g(a),0===a.avail_out))return h.last_flush=-1,O}return b!==M?O:h.wrap<=0?P:(2===h.wrap?(i(h,255&a.adler),i(h,a.adler>>8&255),i(h,a.adler>>16&255),i(h,a.adler>>24&255),i(h,255&a.total_in),i(h,a.total_in>>8&255),i(h,a.total_in>>16&255),i(h,a.total_in>>24&255)):(j(h,a.adler>>>16),j(h,65535&a.adler)),g(a),h.wrap>0&&(h.wrap=-h.wrap),0!==h.pending?O:P)}function B(a){var b;return a&&a.state?(b=a.state.status,b!==na&&b!==oa&&b!==pa&&b!==qa&&b!==ra&&b!==sa&&b!==ta?d(a,Q):(a.state=null,b===sa?d(a,R):O)):Q}function C(a,b){var 
c,d,e,g,h,i,j,k,l=b.length;if(!a||!a.state)return Q;if(c=a.state,g=c.wrap,2===g||1===g&&c.status!==na||c.lookahead)return Q;for(1===g&&(a.adler=G(a.adler,b,l,0)),c.wrap=0,l>=c.w_size&&(0===g&&(f(c.head),c.strstart=0,c.block_start=0,c.insert=0),k=new E.Buf8(c.w_size),E.arraySet(k,b,l-c.w_size,c.w_size,0),b=k,l=c.w_size),h=a.avail_in,i=a.next_in,j=a.input,a.avail_in=l,a.next_in=0,a.input=b,m(c);c.lookahead>=ja;){d=c.strstart,e=c.lookahead-(ja-1);do c.ins_h=(c.ins_h<<c.hash_shift^c.window[d+ja-1])&c.hash_mask,c.prev[d&c.w_mask]=c.head[c.ins_h],c.head[c.ins_h]=d,d++;while(--e);c.strstart=d,c.lookahead=ja-1,m(c)}return c.strstart+=c.lookahead,c.block_start=c.strstart,c.insert=c.lookahead,c.lookahead=0,c.match_length=c.prev_length=ja-1,c.match_available=0,a.next_in=i,a.input=j,a.avail_in=h,c.wrap=g,O}var D,E=a("../utils/common"),F=a("./trees"),G=a("./adler32"),H=a("./crc32"),I=a("./messages"),J=0,K=1,L=3,M=4,N=5,O=0,P=1,Q=-2,R=-3,S=-5,T=-1,U=1,V=2,W=3,X=4,Y=0,Z=2,$=8,_=9,aa=15,ba=8,ca=29,da=256,ea=da+1+ca,fa=30,ga=19,ha=2*ea+1,ia=15,ja=3,ka=258,la=ka+ja+1,ma=32,na=42,oa=69,pa=73,qa=91,ra=103,sa=113,ta=666,ua=1,va=2,wa=3,xa=4,ya=3;D=[new s(0,0,0,0,n),new s(4,4,8,4,o),new s(4,5,16,8,o),new s(4,6,32,32,o),new s(4,4,16,16,p),new s(8,16,32,32,p),new s(8,16,128,128,p),new s(8,32,128,256,p),new s(32,128,258,1024,p),new s(32,258,258,4096,p)],c.deflateInit=z,c.deflateInit2=y,c.deflateReset=w,c.deflateResetKeep=v,c.deflateSetHeader=x,c.deflate=A,c.deflateEnd=B,c.deflateSetDictionary=C,c.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":62,"./adler32":64,"./crc32":66,"./messages":72,"./trees":73}],68:[function(a,b,c){"use strict";function d(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}b.exports=d},{}],69:[function(a,b,c){"use strict";var d=30,e=12;b.exports=function(a,b){var c,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C;c=a.state,f=a.next_in,B=a.input,g=f+(a.avail_in-5),h=a.next_out,C=a.output,i=h-(b-a.avail_out),j=h+(a.avail_out-257),k=c.dmax,l=c.wsize,m=c.whave,n=c.wnext,o=c.window,p=c.hold,q=c.bits,r=c.lencode,s=c.distcode,t=(1<<c.lenbits)-1,u=(1<<c.distbits)-1;a:do{q<15&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=r[p&t];b:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,0===w)C[h++]=65535&v;else{if(!(16&w)){if(0===(64&w)){v=r[(65535&v)+(p&(1<<w)-1)];continue b}if(32&w){c.mode=e;break a}a.msg="invalid literal/length code",c.mode=d;break a}x=65535&v,w&=15,w&&(q<w&&(p+=B[f++]<<q,q+=8),x+=p&(1<<w)-1,p>>>=w,q-=w),q<15&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=s[p&u];c:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,!(16&w)){if(0===(64&w)){v=s[(65535&v)+(p&(1<<w)-1)];continue c}a.msg="invalid distance code",c.mode=d;break a}if(y=65535&v,w&=15,q<w&&(p+=B[f++]<<q,q+=8,q<w&&(p+=B[f++]<<q,q+=8)),y+=p&(1<<w)-1,y>k){a.msg="invalid distance too far back",c.mode=d;break a}if(p>>>=w,q-=w,w=h-i,y>w){if(w=y-w,w>m&&c.sane){a.msg="invalid distance too far back",c.mode=d;break a}if(z=0,A=o,0===n){if(z+=l-w,w<x){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}else if(n<w){if(z+=l+n-w,w-=n,w<x){x-=w;do C[h++]=o[z++];while(--w);if(z=0,n<x){w=n,x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}}else if(z+=n-w,w<x){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}for(;x>2;)C[h++]=A[z++],C[h++]=A[z++],C[h++]=A[z++],x-=3;x&&(C[h++]=A[z++],x>1&&(C[h++]=A[z++]))}else{z=h-y;do 
C[h++]=C[z++],C[h++]=C[z++],C[h++]=C[z++],x-=3;while(x>2);x&&(C[h++]=C[z++],x>1&&(C[h++]=C[z++]))}break}}break}}while(f<g&&h<j);x=q>>3,f-=x,q-=x<<3,p&=(1<<q)-1,a.next_in=f,a.next_out=h,a.avail_in=f<g?5+(g-f):5-(f-g),a.avail_out=h<j?257+(j-h):257-(h-j),c.hold=p,c.bits=q}},{}],70:[function(a,b,c){"use strict";function d(a){return(a>>>24&255)+(a>>>8&65280)+((65280&a)<<8)+((255&a)<<24)}function e(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new s.Buf16(320),this.work=new s.Buf16(288),this.lendyn=null,this.distdyn=null,this.sane=0,this.back=0,this.was=0}function f(a){var b;return a&&a.state?(b=a.state,a.total_in=a.total_out=b.total=0,a.msg="",b.wrap&&(a.adler=1&b.wrap),b.mode=L,b.last=0,b.havedict=0,b.dmax=32768,b.head=null,b.hold=0,b.bits=0,b.lencode=b.lendyn=new s.Buf32(pa),b.distcode=b.distdyn=new s.Buf32(qa),b.sane=1,b.back=-1,D):G}function g(a){var b;return a&&a.state?(b=a.state,b.wsize=0,b.whave=0,b.wnext=0,f(a)):G}function h(a,b){var c,d;return a&&a.state?(d=a.state,b<0?(c=0,b=-b):(c=(b>>4)+1,b<48&&(b&=15)),b&&(b<8||b>15)?G:(null!==d.window&&d.wbits!==b&&(d.window=null),d.wrap=c,d.wbits=b,g(a))):G}function i(a,b){var c,d;return a?(d=new e,a.state=d,d.window=null,c=h(a,b),c!==D&&(a.state=null),c):G}function j(a){return i(a,sa)}function k(a){if(ta){var b;for(q=new s.Buf32(512),r=new s.Buf32(32),b=0;b<144;)a.lens[b++]=8;for(;b<256;)a.lens[b++]=9;for(;b<280;)a.lens[b++]=7;for(;b<288;)a.lens[b++]=8;for(w(y,a.lens,0,288,q,0,a.work,{bits:9}),b=0;b<32;)a.lens[b++]=5;w(z,a.lens,0,32,r,0,a.work,{bits:5}),ta=!1}a.lencode=q,a.lenbits=9,a.distcode=r,a.distbits=5}function l(a,b,c,d){var e,f=a.state;return null===f.window&&(f.wsize=1<<f.wbits,f.wnext=0,f.whave=0,f.window=new s.Buf8(f.wsize)),d>=f.wsize?(s.arraySet(f.window,b,c-f.wsize,f.wsize,0),f.wnext=0,f.whave=f.wsize):(e=f.wsize-f.wnext,e>d&&(e=d),s.arraySet(f.window,b,c-d,e,f.wnext),d-=e,d?(s.arraySet(f.window,b,c-d,d,0),f.wnext=d,f.whave=f.wsize):(f.wnext+=e,f.wnext===f.wsize&&(f.wnext=0),f.whave<f.wsize&&(f.whave+=e))),0}function m(a,b){var c,e,f,g,h,i,j,m,n,o,p,q,r,pa,qa,ra,sa,ta,ua,va,wa,xa,ya,za,Aa=0,Ba=new s.Buf8(4),Ca=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!a||!a.state||!a.output||!a.input&&0!==a.avail_in)return G;c=a.state,c.mode===W&&(c.mode=X),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,o=i,p=j,xa=D;a:for(;;)switch(c.mode){case L:if(0===c.wrap){c.mode=X;break}for(;n<16;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(2&c.wrap&&35615===m){c.check=0,Ba[0]=255&m,Ba[1]=m>>>8&255,c.check=u(c.check,Ba,2,0),m=0,n=0,c.mode=M;break}if(c.flags=0,c.head&&(c.head.done=!1),!(1&c.wrap)||(((255&m)<<8)+(m>>8))%31){a.msg="incorrect header check",c.mode=ma;break}if((15&m)!==K){a.msg="unknown compression method",c.mode=ma;break}if(m>>>=4,n-=4,wa=(15&m)+8,0===c.wbits)c.wbits=wa;else if(wa>c.wbits){a.msg="invalid window size",c.mode=ma;break}c.dmax=1<<wa,a.adler=c.check=1,c.mode=512&m?U:W,m=0,n=0;break;case M:for(;n<16;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.flags=m,(255&c.flags)!==K){a.msg="unknown compression method",c.mode=ma;break}if(57344&c.flags){a.msg="unknown header flags 
set",c.mode=ma;break}c.head&&(c.head.text=m>>8&1),512&c.flags&&(Ba[0]=255&m,Ba[1]=m>>>8&255,c.check=u(c.check,Ba,2,0)),m=0,n=0,c.mode=N;case N:for(;n<32;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.time=m),512&c.flags&&(Ba[0]=255&m,Ba[1]=m>>>8&255,Ba[2]=m>>>16&255,Ba[3]=m>>>24&255,c.check=u(c.check,Ba,4,0)),m=0,n=0,c.mode=O;case O:for(;n<16;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.xflags=255&m,c.head.os=m>>8),512&c.flags&&(Ba[0]=255&m,Ba[1]=m>>>8&255,c.check=u(c.check,Ba,2,0)),m=0,n=0,c.mode=P;case P:if(1024&c.flags){for(;n<16;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length=m,c.head&&(c.head.extra_len=m),512&c.flags&&(Ba[0]=255&m,Ba[1]=m>>>8&255,c.check=u(c.check,Ba,2,0)),m=0,n=0}else c.head&&(c.head.extra=null);c.mode=Q;case Q:if(1024&c.flags&&(q=c.length,q>i&&(q=i),q&&(c.head&&(wa=c.head.extra_len-c.length,c.head.extra||(c.head.extra=new Array(c.head.extra_len)),s.arraySet(c.head.extra,e,g,q,wa)),512&c.flags&&(c.check=u(c.check,e,q,g)),i-=q,g+=q,c.length-=q),c.length))break a;c.length=0,c.mode=R;case R:if(2048&c.flags){if(0===i)break a;q=0;do wa=e[g+q++],c.head&&wa&&c.length<65536&&(c.head.name+=String.fromCharCode(wa));while(wa&&q<i);if(512&c.flags&&(c.check=u(c.check,e,q,g)),i-=q,g+=q,wa)break a}else c.head&&(c.head.name=null);c.length=0,c.mode=S;case S:if(4096&c.flags){if(0===i)break a;q=0;do wa=e[g+q++],c.head&&wa&&c.length<65536&&(c.head.comment+=String.fromCharCode(wa));while(wa&&q<i);if(512&c.flags&&(c.check=u(c.check,e,q,g)),i-=q,g+=q,wa)break a}else c.head&&(c.head.comment=null);c.mode=T;case T:if(512&c.flags){for(;n<16;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(65535&c.check)){a.msg="header crc mismatch",c.mode=ma;break}m=0,n=0}c.head&&(c.head.hcrc=c.flags>>9&1,c.head.done=!0),a.adler=c.check=0,c.mode=W;break;case U:for(;n<32;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}a.adler=c.check=d(m),m=0,n=0,c.mode=V;case V:if(0===c.havedict)return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,F;a.adler=c.check=1,c.mode=W;case W:if(b===B||b===C)break a;case X:if(c.last){m>>>=7&n,n-=7&n,c.mode=ja;break}for(;n<3;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}switch(c.last=1&m,m>>>=1,n-=1,3&m){case 0:c.mode=Y;break;case 1:if(k(c),c.mode=ca,b===C){m>>>=2,n-=2;break a}break;case 2:c.mode=_;break;case 3:a.msg="invalid block type",c.mode=ma}m>>>=2,n-=2;break;case Y:for(m>>>=7&n,n-=7&n;n<32;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if((65535&m)!==(m>>>16^65535)){a.msg="invalid stored block lengths",c.mode=ma;break}if(c.length=65535&m,m=0,n=0,c.mode=Z,b===C)break a;case Z:c.mode=$;case $:if(q=c.length){if(q>i&&(q=i),q>j&&(q=j),0===q)break a;s.arraySet(f,e,g,q,h),i-=q,g+=q,j-=q,h+=q,c.length-=q;break}c.mode=W;break;case _:for(;n<14;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.nlen=(31&m)+257,m>>>=5,n-=5,c.ndist=(31&m)+1,m>>>=5,n-=5,c.ncode=(15&m)+4,m>>>=4,n-=4,c.nlen>286||c.ndist>30){a.msg="too many length or distance symbols",c.mode=ma;break}c.have=0,c.mode=aa;case aa:for(;c.have<c.ncode;){for(;n<3;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.lens[Ca[c.have++]]=7&m,m>>>=3,n-=3}for(;c.have<19;)c.lens[Ca[c.have++]]=0;if(c.lencode=c.lendyn,c.lenbits=7,ya={bits:c.lenbits},xa=w(x,c.lens,0,19,c.lencode,0,c.work,ya),c.lenbits=ya.bits,xa){a.msg="invalid code lengths set",c.mode=ma;break}c.have=0,c.mode=ba;case ba:for(;c.have<c.nlen+c.ndist;){for(;Aa=c.lencode[m&(1<<c.lenbits)-1],qa=Aa>>>24,ra=Aa>>>16&255,sa=65535&Aa,!(qa<=n);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(sa<16)m>>>=qa,n-=qa,c.lens[c.have++]=sa;else{if(16===sa){for(za=qa+2;n<za;){if(0===i)break 
a;i--,m+=e[g++]<<n,n+=8}if(m>>>=qa,n-=qa,0===c.have){a.msg="invalid bit length repeat",c.mode=ma;break}wa=c.lens[c.have-1],q=3+(3&m),m>>>=2,n-=2}else if(17===sa){for(za=qa+3;n<za;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qa,n-=qa,wa=0,q=3+(7&m),m>>>=3,n-=3}else{for(za=qa+7;n<za;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qa,n-=qa,wa=0,q=11+(127&m),m>>>=7,n-=7}if(c.have+q>c.nlen+c.ndist){a.msg="invalid bit length repeat",c.mode=ma;break}for(;q--;)c.lens[c.have++]=wa}}if(c.mode===ma)break;if(0===c.lens[256]){a.msg="invalid code -- missing end-of-block",c.mode=ma;break}if(c.lenbits=9,ya={bits:c.lenbits},xa=w(y,c.lens,0,c.nlen,c.lencode,0,c.work,ya),c.lenbits=ya.bits,xa){a.msg="invalid literal/lengths set",c.mode=ma;break}if(c.distbits=6,c.distcode=c.distdyn,ya={bits:c.distbits},xa=w(z,c.lens,c.nlen,c.ndist,c.distcode,0,c.work,ya),c.distbits=ya.bits,xa){a.msg="invalid distances set",c.mode=ma;break}if(c.mode=ca,b===C)break a;case ca:c.mode=da;case da:if(i>=6&&j>=258){a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,v(a,p),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,c.mode===W&&(c.back=-1);break}for(c.back=0;Aa=c.lencode[m&(1<<c.lenbits)-1],qa=Aa>>>24,ra=Aa>>>16&255,sa=65535&Aa,!(qa<=n);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(ra&&0===(240&ra)){for(ta=qa,ua=ra,va=sa;Aa=c.lencode[va+((m&(1<<ta+ua)-1)>>ta)],qa=Aa>>>24,ra=Aa>>>16&255,sa=65535&Aa,!(ta+qa<=n);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=ta,n-=ta,c.back+=ta}if(m>>>=qa,n-=qa,c.back+=qa,c.length=sa,0===ra){c.mode=ia;break}if(32&ra){c.back=-1,c.mode=W;break}if(64&ra){a.msg="invalid literal/length code",c.mode=ma;break}c.extra=15&ra,c.mode=ea;case ea:if(c.extra){for(za=c.extra;n<za;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}c.was=c.length,c.mode=fa;case fa:for(;Aa=c.distcode[m&(1<<c.distbits)-1],qa=Aa>>>24,ra=Aa>>>16&255,sa=65535&Aa,!(qa<=n);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(0===(240&ra)){for(ta=qa,ua=ra,va=sa;Aa=c.distcode[va+((m&(1<<ta+ua)-1)>>ta)],qa=Aa>>>24,ra=Aa>>>16&255,sa=65535&Aa,!(ta+qa<=n);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=ta,n-=ta,c.back+=ta}if(m>>>=qa,n-=qa,c.back+=qa,64&ra){a.msg="invalid distance code",c.mode=ma;break}c.offset=sa,c.extra=15&ra,c.mode=ga;case ga:if(c.extra){for(za=c.extra;n<za;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.offset+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}if(c.offset>c.dmax){a.msg="invalid distance too far back",c.mode=ma;break}c.mode=ha;case ha:if(0===j)break a;if(q=p-j,c.offset>q){if(q=c.offset-q,q>c.whave&&c.sane){a.msg="invalid distance too far back",c.mode=ma;break}q>c.wnext?(q-=c.wnext,r=c.wsize-q):r=c.wnext-q,q>c.length&&(q=c.length),pa=c.window}else pa=f,r=h-c.offset,q=c.length;q>j&&(q=j),j-=q,c.length-=q;do f[h++]=pa[r++];while(--q);0===c.length&&(c.mode=da);break;case ia:if(0===j)break a;f[h++]=c.length,j--,c.mode=da;break;case ja:if(c.wrap){for(;n<32;){if(0===i)break a;i--,m|=e[g++]<<n,n+=8}if(p-=j,a.total_out+=p,c.total+=p,p&&(a.adler=c.check=c.flags?u(c.check,f,p,h-p):t(c.check,f,p,h-p)),p=j,(c.flags?m:d(m))!==c.check){a.msg="incorrect data check",c.mode=ma;break}m=0,n=0}c.mode=ka;case ka:if(c.wrap&&c.flags){for(;n<32;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(4294967295&c.total)){a.msg="incorrect length check",c.mode=ma;break}m=0,n=0}c.mode=la;case la:xa=E;break a;case ma:xa=H;break a;case na:return I;case oa:default:return G}return 
a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,(c.wsize||p!==a.avail_out&&c.mode<ma&&(c.mode<ja||b!==A))&&l(a,a.output,a.next_out,p-a.avail_out)?(c.mode=na,I):(o-=a.avail_in,p-=a.avail_out,a.total_in+=o,a.total_out+=p,c.total+=p,c.wrap&&p&&(a.adler=c.check=c.flags?u(c.check,f,p,a.next_out-p):t(c.check,f,p,a.next_out-p)),a.data_type=c.bits+(c.last?64:0)+(c.mode===W?128:0)+(c.mode===ca||c.mode===Z?256:0),(0===o&&0===p||b===A)&&xa===D&&(xa=J),xa)}function n(a){if(!a||!a.state)return G;var b=a.state;return b.window&&(b.window=null),a.state=null,D}function o(a,b){var c;return a&&a.state?(c=a.state,0===(2&c.wrap)?G:(c.head=b,b.done=!1,D)):G}function p(a,b){var c,d,e,f=b.length;return a&&a.state?(c=a.state,0!==c.wrap&&c.mode!==V?G:c.mode===V&&(d=1,d=t(d,b,f,0),d!==c.check)?H:(e=l(a,b,f,f))?(c.mode=na,I):(c.havedict=1,D)):G}var q,r,s=a("../utils/common"),t=a("./adler32"),u=a("./crc32"),v=a("./inffast"),w=a("./inftrees"),x=0,y=1,z=2,A=4,B=5,C=6,D=0,E=1,F=2,G=-2,H=-3,I=-4,J=-5,K=8,L=1,M=2,N=3,O=4,P=5,Q=6,R=7,S=8,T=9,U=10,V=11,W=12,X=13,Y=14,Z=15,$=16,_=17,aa=18,ba=19,ca=20,da=21,ea=22,fa=23,ga=24,ha=25,ia=26,ja=27,ka=28,la=29,ma=30,na=31,oa=32,pa=852,qa=592,ra=15,sa=ra,ta=!0;c.inflateReset=g,c.inflateReset2=h,c.inflateResetKeep=f,c.inflateInit=j,c.inflateInit2=i,c.inflate=m,c.inflateEnd=n,c.inflateGetHeader=o,c.inflateSetDictionary=p,c.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":62,"./adler32":64,"./crc32":66,"./inffast":69,"./inftrees":71}],71:[function(a,b,c){"use strict";var d=a("../utils/common"),e=15,f=852,g=592,h=0,i=1,j=2,k=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],l=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],m=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],n=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];b.exports=function(a,b,c,o,p,q,r,s){var t,u,v,w,x,y,z,A,B,C=s.bits,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=null,O=0,P=new d.Buf16(e+1),Q=new d.Buf16(e+1),R=null,S=0;for(D=0;D<=e;D++)P[D]=0;for(E=0;E<o;E++)P[b[c+E]]++;for(H=C,G=e;G>=1&&0===P[G];G--);if(H>G&&(H=G),0===G)return p[q++]=20971520,p[q++]=20971520,s.bits=1,0;for(F=1;F<G&&0===P[F];F++);for(H<F&&(H=F),K=1,D=1;D<=e;D++)if(K<<=1,K-=P[D],K<0)return-1;if(K>0&&(a===h||1!==G))return-1;for(Q[1]=0,D=1;D<e;D++)Q[D+1]=Q[D]+P[D];for(E=0;E<o;E++)0!==b[c+E]&&(r[Q[b[c+E]]++]=E);if(a===h?(N=R=r,y=19):a===i?(N=k,O-=257,R=l,S-=257,y=256):(N=m,R=n,y=-1),M=0,E=0,D=F,x=q,I=H,J=0,v=-1,L=1<<H,w=L-1,a===i&&L>f||a===j&&L>g)return 1;for(;;){z=D-J,r[E]<y?(A=0,B=r[E]):r[E]>y?(A=R[S+r[E]],B=N[O+r[E]]):(A=96,B=0),t=1<<D-J,u=1<<I,F=u;do u-=t,p[x+(M>>J)+u]=z<<24|A<<16|B|0;while(0!==u);for(t=1<<D-1;M&t;)t>>=1;if(0!==t?(M&=t-1,M+=t):M=0,E++,0===--P[D]){if(D===G)break;D=b[c+r[E]]}if(D>H&&(M&w)!==v){for(0===J&&(J=H),x+=F,I=D-J,K=1<<I;I+J<G&&(K-=P[I+J],!(K<=0));)I++,K<<=1;if(L+=1<<I,a===i&&L>f||a===j&&L>g)return 1;v=M&w,p[v]=H<<24|I<<16|x-q|0}}return 0!==M&&(p[x+M]=D-J<<24|64<<16|0),s.bits=H,0}},{"../utils/common":62}],72:[function(a,b,c){"use strict";b.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],73:[function(a,b,c){"use strict";function d(a){for(var b=a.length;--b>=0;)a[b]=0}function 
e(a,b,c,d,e){this.static_tree=a,this.extra_bits=b,this.extra_base=c,this.elems=d,this.max_length=e,this.has_stree=a&&a.length}function f(a,b){this.dyn_tree=a,this.max_code=0,this.stat_desc=b}function g(a){return a<256?ia[a]:ia[256+(a>>>7)]}function h(a,b){a.pending_buf[a.pending++]=255&b,a.pending_buf[a.pending++]=b>>>8&255}function i(a,b,c){a.bi_valid>X-c?(a.bi_buf|=b<<a.bi_valid&65535,h(a,a.bi_buf),a.bi_buf=b>>X-a.bi_valid,a.bi_valid+=c-X):(a.bi_buf|=b<<a.bi_valid&65535,a.bi_valid+=c)}function j(a,b,c){i(a,c[2*b],c[2*b+1])}function k(a,b){var c=0;do c|=1&a,a>>>=1,c<<=1;while(--b>0);return c>>>1}function l(a){16===a.bi_valid?(h(a,a.bi_buf),a.bi_buf=0,a.bi_valid=0):a.bi_valid>=8&&(a.pending_buf[a.pending++]=255&a.bi_buf,a.bi_buf>>=8,a.bi_valid-=8)}function m(a,b){var c,d,e,f,g,h,i=b.dyn_tree,j=b.max_code,k=b.stat_desc.static_tree,l=b.stat_desc.has_stree,m=b.stat_desc.extra_bits,n=b.stat_desc.extra_base,o=b.stat_desc.max_length,p=0;for(f=0;f<=W;f++)a.bl_count[f]=0;for(i[2*a.heap[a.heap_max]+1]=0,
c=a.heap_max+1;c<V;c++)d=a.heap[c],f=i[2*i[2*d+1]+1]+1,f>o&&(f=o,p++),i[2*d+1]=f,d>j||(a.bl_count[f]++,g=0,d>=n&&(g=m[d-n]),h=i[2*d],a.opt_len+=h*(f+g),l&&(a.static_len+=h*(k[2*d+1]+g)));if(0!==p){do{for(f=o-1;0===a.bl_count[f];)f--;a.bl_count[f]--,a.bl_count[f+1]+=2,a.bl_count[o]--,p-=2}while(p>0);for(f=o;0!==f;f--)for(d=a.bl_count[f];0!==d;)e=a.heap[--c],e>j||(i[2*e+1]!==f&&(a.opt_len+=(f-i[2*e+1])*i[2*e],i[2*e+1]=f),d--)}}function n(a,b,c){var d,e,f=new Array(W+1),g=0;for(d=1;d<=W;d++)f[d]=g=g+c[d-1]<<1;for(e=0;e<=b;e++){var h=a[2*e+1];0!==h&&(a[2*e]=k(f[h]++,h))}}function o(){var a,b,c,d,f,g=new Array(W+1);for(c=0,d=0;d<Q-1;d++)for(ka[d]=c,a=0;a<1<<ba[d];a++)ja[c++]=d;for(ja[c-1]=d,f=0,d=0;d<16;d++)for(la[d]=f,a=0;a<1<<ca[d];a++)ia[f++]=d;for(f>>=7;d<T;d++)for(la[d]=f<<7,a=0;a<1<<ca[d]-7;a++)ia[256+f++]=d;for(b=0;b<=W;b++)g[b]=0;for(a=0;a<=143;)ga[2*a+1]=8,a++,g[8]++;for(;a<=255;)ga[2*a+1]=9,a++,g[9]++;for(;a<=279;)ga[2*a+1]=7,a++,g[7]++;for(;a<=287;)ga[2*a+1]=8,a++,g[8]++;for(n(ga,S+1,g),a=0;a<T;a++)ha[2*a+1]=5,ha[2*a]=k(a,5);ma=new e(ga,ba,R+1,S,W),na=new e(ha,ca,0,T,W),oa=new e(new Array(0),da,0,U,Y)}function p(a){var b;for(b=0;b<S;b++)a.dyn_ltree[2*b]=0;for(b=0;b<T;b++)a.dyn_dtree[2*b]=0;for(b=0;b<U;b++)a.bl_tree[2*b]=0;a.dyn_ltree[2*Z]=1,a.opt_len=a.static_len=0,a.last_lit=a.matches=0}function q(a){a.bi_valid>8?h(a,a.bi_buf):a.bi_valid>0&&(a.pending_buf[a.pending++]=a.bi_buf),a.bi_buf=0,a.bi_valid=0}function r(a,b,c,d){q(a),d&&(h(a,c),h(a,~c)),G.arraySet(a.pending_buf,a.window,b,c,a.pending),a.pending+=c}function s(a,b,c,d){var e=2*b,f=2*c;return a[e]<a[f]||a[e]===a[f]&&d[b]<=d[c]}function t(a,b,c){for(var d=a.heap[c],e=c<<1;e<=a.heap_len&&(e<a.heap_len&&s(b,a.heap[e+1],a.heap[e],a.depth)&&e++,!s(b,d,a.heap[e],a.depth));)a.heap[c]=a.heap[e],c=e,e<<=1;a.heap[c]=d}function u(a,b,c){var d,e,f,h,k=0;if(0!==a.last_lit)do d=a.pending_buf[a.d_buf+2*k]<<8|a.pending_buf[a.d_buf+2*k+1],e=a.pending_buf[a.l_buf+k],k++,0===d?j(a,e,b):(f=ja[e],j(a,f+R+1,b),h=ba[f],0!==h&&(e-=ka[f],i(a,e,h)),d--,f=g(d),j(a,f,c),h=ca[f],0!==h&&(d-=la[f],i(a,d,h)));while(k<a.last_lit);j(a,Z,b)}function v(a,b){var c,d,e,f=b.dyn_tree,g=b.stat_desc.static_tree,h=b.stat_desc.has_stree,i=b.stat_desc.elems,j=-1;for(a.heap_len=0,a.heap_max=V,c=0;c<i;c++)0!==f[2*c]?(a.heap[++a.heap_len]=j=c,a.depth[c]=0):f[2*c+1]=0;for(;a.heap_len<2;)e=a.heap[++a.heap_len]=j<2?++j:0,f[2*e]=1,a.depth[e]=0,a.opt_len--,h&&(a.static_len-=g[2*e+1]);for(b.max_code=j,c=a.heap_len>>1;c>=1;c--)t(a,f,c);e=i;do c=a.heap[1],a.heap[1]=a.heap[a.heap_len--],t(a,f,1),d=a.heap[1],a.heap[--a.heap_max]=c,a.heap[--a.heap_max]=d,f[2*e]=f[2*c]+f[2*d],a.depth[e]=(a.depth[c]>=a.depth[d]?a.depth[c]:a.depth[d])+1,f[2*c+1]=f[2*d+1]=e,a.heap[1]=e++,t(a,f,1);while(a.heap_len>=2);a.heap[--a.heap_max]=a.heap[1],m(a,b),n(f,j,a.bl_count)}function w(a,b,c){var d,e,f=-1,g=b[1],h=0,i=7,j=4;for(0===g&&(i=138,j=3),b[2*(c+1)+1]=65535,d=0;d<=c;d++)e=g,g=b[2*(d+1)+1],++h<i&&e===g||(h<j?a.bl_tree[2*e]+=h:0!==e?(e!==f&&a.bl_tree[2*e]++,a.bl_tree[2*$]++):h<=10?a.bl_tree[2*_]++:a.bl_tree[2*aa]++,h=0,f=e,0===g?(i=138,j=3):e===g?(i=6,j=3):(i=7,j=4))}function x(a,b,c){var d,e,f=-1,g=b[1],h=0,k=7,l=4;for(0===g&&(k=138,l=3),d=0;d<=c;d++)if(e=g,g=b[2*(d+1)+1],!(++h<k&&e===g)){if(h<l){do j(a,e,a.bl_tree);while(0!==--h)}else 0!==e?(e!==f&&(j(a,e,a.bl_tree),h--),j(a,$,a.bl_tree),i(a,h-3,2)):h<=10?(j(a,_,a.bl_tree),i(a,h-3,3)):(j(a,aa,a.bl_tree),i(a,h-11,7));h=0,f=e,0===g?(k=138,l=3):e===g?(k=6,l=3):(k=7,l=4)}}function y(a){var 
b;for(w(a,a.dyn_ltree,a.l_desc.max_code),w(a,a.dyn_dtree,a.d_desc.max_code),v(a,a.bl_desc),b=U-1;b>=3&&0===a.bl_tree[2*ea[b]+1];b--);return a.opt_len+=3*(b+1)+5+5+4,b}function z(a,b,c,d){var e;for(i(a,b-257,5),i(a,c-1,5),i(a,d-4,4),e=0;e<d;e++)i(a,a.bl_tree[2*ea[e]+1],3);x(a,a.dyn_ltree,b-1),x(a,a.dyn_dtree,c-1)}function A(a){var b,c=4093624447;for(b=0;b<=31;b++,c>>>=1)if(1&c&&0!==a.dyn_ltree[2*b])return I;if(0!==a.dyn_ltree[18]||0!==a.dyn_ltree[20]||0!==a.dyn_ltree[26])return J;for(b=32;b<R;b++)if(0!==a.dyn_ltree[2*b])return J;return I}function B(a){pa||(o(),pa=!0),a.l_desc=new f(a.dyn_ltree,ma),a.d_desc=new f(a.dyn_dtree,na),a.bl_desc=new f(a.bl_tree,oa),a.bi_buf=0,a.bi_valid=0,p(a)}function C(a,b,c,d){i(a,(L<<1)+(d?1:0),3),r(a,b,c,!0)}function D(a){i(a,M<<1,3),j(a,Z,ga),l(a)}function E(a,b,c,d){var e,f,g=0;a.level>0?(a.strm.data_type===K&&(a.strm.data_type=A(a)),v(a,a.l_desc),v(a,a.d_desc),g=y(a),e=a.opt_len+3+7>>>3,f=a.static_len+3+7>>>3,f<=e&&(e=f)):e=f=c+5,c+4<=e&&b!==-1?C(a,b,c,d):a.strategy===H||f===e?(i(a,(M<<1)+(d?1:0),3),u(a,ga,ha)):(i(a,(N<<1)+(d?1:0),3),z(a,a.l_desc.max_code+1,a.d_desc.max_code+1,g+1),u(a,a.dyn_ltree,a.dyn_dtree)),p(a),d&&q(a)}function F(a,b,c){return a.pending_buf[a.d_buf+2*a.last_lit]=b>>>8&255,a.pending_buf[a.d_buf+2*a.last_lit+1]=255&b,a.pending_buf[a.l_buf+a.last_lit]=255&c,a.last_lit++,0===b?a.dyn_ltree[2*c]++:(a.matches++,b--,a.dyn_ltree[2*(ja[c]+R+1)]++,a.dyn_dtree[2*g(b)]++),a.last_lit===a.lit_bufsize-1}var G=a("../utils/common"),H=4,I=0,J=1,K=2,L=0,M=1,N=2,O=3,P=258,Q=29,R=256,S=R+1+Q,T=30,U=19,V=2*S+1,W=15,X=16,Y=7,Z=256,$=16,_=17,aa=18,ba=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],ca=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],da=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],ea=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],fa=512,ga=new Array(2*(S+2));d(ga);var ha=new Array(2*T);d(ha);var ia=new Array(fa);d(ia);var ja=new Array(P-O+1);d(ja);var ka=new Array(Q);d(ka);var la=new Array(T);d(la);var ma,na,oa,pa=!1;c._tr_init=B,c._tr_stored_block=C,c._tr_flush_block=E,c._tr_tally=F,c._tr_align=D},{"../utils/common":62}],74:[function(a,b,c){"use strict";function d(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}b.exports=d},{}]},{},[10])(10)});
// end of zip.min.js (ZikongliJingdianTaozhuangX3/js/libs/zip.min.js)
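// The bundled modules above are pako's zlib port (common utils, strings,
// adler32, constants, crc32, deflate, gzheader, inffast, inflate, inftrees,
// messages, trees, zstream — module ids 62-74). A minimal sketch of the
// low-level deflate API they implement, written against the equivalent
// standalone pako modules; the require paths are assumptions about the
// unbundled package, since this minified build keeps the modules internal:
//
//   var zlib_deflate = require('pako/lib/zlib/deflate');
//   var ZStream = require('pako/lib/zlib/zstream');
//
//   var strm = new ZStream();
//   strm.input = new Uint8Array([1, 2, 3]);   // bytes to compress
//   strm.avail_in = strm.input.length;
//   strm.output = new Uint8Array(1024);       // room for the result
//   strm.avail_out = strm.output.length;
//
//   zlib_deflate.deflateInit(strm, 6);        // compression level 6
//   zlib_deflate.deflate(strm, 4);            // 4 === Z_FINISH (see module 65)
//   zlib_deflate.deflateEnd(strm);
//   // compressed bytes: strm.output.subarray(0, strm.next_out)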
(function () {
'use strict';
var isCommonjs = typeof module !== 'undefined' && module.exports;
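	// Old WebKit requires Element.ALLOW_KEYBOARD_INPUT to be passed to
	// webkitRequestFullScreen before keyboard input works in fullscreen;
	// detect whether that flag exists.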
var keyboardAllowed = typeof Element !== 'undefined' && 'ALLOW_KEYBOARD_INPUT' in Element;
var fn = (function () {
var val;
var valLength;
var fnMap = [
[
'requestFullscreen',
'exitFullscreen',
'fullscreenElement',
'fullscreenEnabled',
'fullscreenchange',
'fullscreenerror'
],
// new WebKit
[
'webkitRequestFullscreen',
'webkitExitFullscreen',
'webkitFullscreenElement',
'webkitFullscreenEnabled',
'webkitfullscreenchange',
'webkitfullscreenerror'
],
// old WebKit (Safari 5.1)
[
'webkitRequestFullScreen',
'webkitCancelFullScreen',
'webkitCurrentFullScreenElement',
'webkitCancelFullScreen',
'webkitfullscreenchange',
'webkitfullscreenerror'
],
[
'mozRequestFullScreen',
'mozCancelFullScreen',
'mozFullScreenElement',
'mozFullScreenEnabled',
'mozfullscreenchange',
'mozfullscreenerror'
],
[
'msRequestFullscreen',
'msExitFullscreen',
'msFullscreenElement',
'msFullscreenEnabled',
'MSFullscreenChange',
'MSFullscreenError'
]
];
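		// Pick the first vendor variant whose exit method (index 1) exists on
		// document, then alias the standard names from row 0 to that vendor's
		// method and event names.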
var i = 0;
var l = fnMap.length;
var ret = {};
for (; i < l; i++) {
val = fnMap[i];
if (val && val[1] in document) {
for (i = 0, valLength = val.length; i < valLength; i++) {
ret[fnMap[0][i]] = val[i];
}
return ret;
}
}
return false;
})();
var screenfull = {
request: function (elem) {
var request = fn.requestFullscreen;
elem = elem || document.documentElement;
// Work around Safari 5.1 bug: reports support for
// keyboard in fullscreen even though it doesn't.
// Browser sniffing, since the alternative with
// setTimeout is even worse.
if (/5\.1[\.\d]* Safari/.test(navigator.userAgent)) {
elem[request]();
} else {
elem[request](keyboardAllowed && Element.ALLOW_KEYBOARD_INPUT);
}
},
exit: function () {
document[fn.exitFullscreen]();
},
toggle: function (elem) {
if (this.isFullscreen) {
this.exit();
} else {
this.request(elem);
}
},
raw: fn
};
if (!fn) {
if (isCommonjs) {
module.exports = false;
} else {
window.screenfull = false;
}
return;
}
Object.defineProperties(screenfull, {
isFullscreen: {
get: function () {
return !!document[fn.fullscreenElement];
}
},
element: {
enumerable: true,
get: function () {
return document[fn.fullscreenElement];
}
},
enabled: {
enumerable: true,
get: function () {
// Coerce to boolean in case of old WebKit
return !!document[fn.fullscreenEnabled];
}
}
});
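	// A minimal usage sketch (illustrative only; the log text is an
	// assumption, not part of this file):
	//
	//   if (screenfull.enabled) {
	//       document.addEventListener(screenfull.raw.fullscreenchange, function () {
	//           console.log('fullscreen:', screenfull.isFullscreen);
	//       });
	//       screenfull.toggle(document.documentElement);
	//   }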
if (isCommonjs) {
module.exports = screenfull;
} else {
window.screenfull = screenfull;
}
})();
// end of screenfull.js (ZikongliJingdianTaozhuangX3/js/libs/screenfull.js)
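// localforage.min.js — minified build of Mozilla's localForage, an async
// key-value store that wraps IndexedDB, WebSQL, and localStorage behind one
// Promise-based API (module 1: microtask shim, 2: Promise implementation,
// 3: global Promise install, 4: localForage itself).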
!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;b="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this,b.localforage=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);var j=new Error("Cannot find module '"+g+"'");throw j.code="MODULE_NOT_FOUND",j}var k=c[g]={exports:{}};b[g][0].call(k.exports,function(a){var c=b[g][1][a];return e(c?c:a)},k,k.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){(function(a){"use strict";function c(){k=!0;for(var a,b,c=l.length;c;){for(b=l,l=[],a=-1;++a<c;)b[a]();c=l.length}k=!1}function d(a){1!==l.push(a)||k||e()}var e,f=a.MutationObserver||a.WebKitMutationObserver;if(f){var g=0,h=new f(c),i=a.document.createTextNode("");h.observe(i,{characterData:!0}),e=function(){i.data=g=++g%2}}else if(a.setImmediate||"undefined"==typeof a.MessageChannel)e="document"in a&&"onreadystatechange"in a.document.createElement("script")?function(){var b=a.document.createElement("script");b.onreadystatechange=function(){c(),b.onreadystatechange=null,b.parentNode.removeChild(b),b=null},a.document.documentElement.appendChild(b)}:function(){setTimeout(c,0)};else{var j=new a.MessageChannel;j.port1.onmessage=c,e=function(){j.port2.postMessage(0)}}var k,l=[];b.exports=d}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{}],2:[function(a,b,c){"use strict";function d(){}function e(a){if("function"!=typeof a)throw new TypeError("resolver must be a function");this.state=s,this.queue=[],this.outcome=void 0,a!==d&&i(this,a)}function f(a,b,c){this.promise=a,"function"==typeof b&&(this.onFulfilled=b,this.callFulfilled=this.otherCallFulfilled),"function"==typeof c&&(this.onRejected=c,this.callRejected=this.otherCallRejected)}function g(a,b,c){o(function(){var d;try{d=b(c)}catch(b){return p.reject(a,b)}d===a?p.reject(a,new TypeError("Cannot resolve promise with itself")):p.resolve(a,d)})}function h(a){var b=a&&a.then;if(a&&"object"==typeof a&&"function"==typeof b)return function(){b.apply(a,arguments)}}function i(a,b){function c(b){f||(f=!0,p.reject(a,b))}function d(b){f||(f=!0,p.resolve(a,b))}function e(){b(d,c)}var f=!1,g=j(e);"error"===g.status&&c(g.value)}function j(a,b){var c={};try{c.value=a(b),c.status="success"}catch(a){c.status="error",c.value=a}return c}function k(a){return a instanceof this?a:p.resolve(new this(d),a)}function l(a){var b=new this(d);return p.reject(b,a)}function m(a){function b(a,b){function d(a){g[b]=a,++h!==e||f||(f=!0,p.resolve(j,g))}c.resolve(a).then(d,function(a){f||(f=!0,p.reject(j,a))})}var c=this;if("[object Array]"!==Object.prototype.toString.call(a))return this.reject(new TypeError("must be an array"));var e=a.length,f=!1;if(!e)return this.resolve([]);for(var g=new Array(e),h=0,i=-1,j=new this(d);++i<e;)b(a[i],i);return j}function n(a){function b(a){c.resolve(a).then(function(a){f||(f=!0,p.resolve(h,a))},function(a){f||(f=!0,p.reject(h,a))})}var c=this;if("[object Array]"!==Object.prototype.toString.call(a))return this.reject(new TypeError("must be an array"));var e=a.length,f=!1;if(!e)return this.resolve([]);for(var g=-1,h=new this(d);++g<e;)b(a[g]);return h}var 
o=a(1),p={},q=["REJECTED"],r=["FULFILLED"],s=["PENDING"];b.exports=c=e,e.prototype.catch=function(a){return this.then(null,a)},e.prototype.then=function(a,b){if("function"!=typeof a&&this.state===r||"function"!=typeof b&&this.state===q)return this;var c=new this.constructor(d);if(this.state!==s){var e=this.state===r?a:b;g(c,e,this.outcome)}else this.queue.push(new f(c,a,b));return c},f.prototype.callFulfilled=function(a){p.resolve(this.promise,a)},f.prototype.otherCallFulfilled=function(a){g(this.promise,this.onFulfilled,a)},f.prototype.callRejected=function(a){p.reject(this.promise,a)},f.prototype.otherCallRejected=function(a){g(this.promise,this.onRejected,a)},p.resolve=function(a,b){var c=j(h,b);if("error"===c.status)return p.reject(a,c.value);var d=c.value;if(d)i(a,d);else{a.state=r,a.outcome=b;for(var e=-1,f=a.queue.length;++e<f;)a.queue[e].callFulfilled(b)}return a},p.reject=function(a,b){a.state=q,a.outcome=b;for(var c=-1,d=a.queue.length;++c<d;)a.queue[c].callRejected(b);return a},c.resolve=k,c.reject=l,c.all=m,c.race=n},{1:1}],3:[function(a,b,c){(function(b){"use strict";"function"!=typeof b.Promise&&(b.Promise=a(2))}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{2:2}],4:[function(a,b,c){"use strict";function d(a,b){if(!(a instanceof b))throw new TypeError("Cannot call a class as a function")}function e(){try{if("undefined"!=typeof indexedDB)return indexedDB;if("undefined"!=typeof webkitIndexedDB)return webkitIndexedDB;if("undefined"!=typeof mozIndexedDB)return mozIndexedDB;if("undefined"!=typeof OIndexedDB)return OIndexedDB;if("undefined"!=typeof msIndexedDB)return msIndexedDB}catch(a){}}function f(){try{if(!ga)return!1;var a="undefined"!=typeof openDatabase&&/(Safari|iPhone|iPad|iPod)/.test(navigator.userAgent)&&!/Chrome/.test(navigator.userAgent)&&!/BlackBerry/.test(navigator.platform),b="function"==typeof fetch&&fetch.toString().indexOf("[native code")!==-1;return(!a||b)&&"undefined"!=typeof indexedDB&&"undefined"!=typeof IDBKeyRange}catch(a){return!1}}function g(){return"function"==typeof openDatabase}function h(){try{return"undefined"!=typeof localStorage&&"setItem"in localStorage&&localStorage.setItem}catch(a){return!1}}function i(a,b){a=a||[],b=b||{};try{return new Blob(a,b)}catch(f){if("TypeError"!==f.name)throw f;for(var c="undefined"!=typeof BlobBuilder?BlobBuilder:"undefined"!=typeof MSBlobBuilder?MSBlobBuilder:"undefined"!=typeof MozBlobBuilder?MozBlobBuilder:WebKitBlobBuilder,d=new c,e=0;e<a.length;e+=1)d.append(a[e]);return d.getBlob(b.type)}}function j(a,b){b&&a.then(function(a){b(null,a)},function(a){b(a)})}function k(a,b,c){"function"==typeof b&&a.then(b),"function"==typeof c&&a.catch(c)}function l(a){for(var b=a.length,c=new ArrayBuffer(b),d=new Uint8Array(c),e=0;e<b;e++)d[e]=a.charCodeAt(e);return c}function m(a){return new ja(function(b){var c=a.transaction(ka,"readwrite"),d=i([""]);c.objectStore(ka).put(d,"key"),c.onabort=function(a){a.preventDefault(),a.stopPropagation(),b(!1)},c.oncomplete=function(){var a=navigator.userAgent.match(/Chrome\/(\d+)/),c=navigator.userAgent.match(/Edge\//);b(c||!a||parseInt(a[1],10)>=43)}}).catch(function(){return!1})}function n(a){return"boolean"==typeof ha?ja.resolve(ha):m(a).then(function(a){return ha=a})}function o(a){var b=ia[a.name],c={};c.promise=new ja(function(a){c.resolve=a}),b.deferredOperations.push(c),b.dbReady?b.dbReady=b.dbReady.then(function(){return c.promise}):b.dbReady=c.promise}function p(a){var 
b=ia[a.name],c=b.deferredOperations.pop();c&&c.resolve()}function q(a,b){return new ja(function(c,d){if(a.db){if(!b)return c(a.db);o(a),a.db.close()}var e=[a.name];b&&e.push(a.version);var f=ga.open.apply(ga,e);b&&(f.onupgradeneeded=function(b){var c=f.result;try{c.createObjectStore(a.storeName),b.oldVersion<=1&&c.createObjectStore(ka)}catch(c){if("ConstraintError"!==c.name)throw c;console.warn('The database "'+a.name+'" has been upgraded from version '+b.oldVersion+" to version "+b.newVersion+', but the storage "'+a.storeName+'" already exists.')}}),f.onerror=function(a){a.preventDefault(),d(f.error)},f.onsuccess=function(){c(f.result),p(a)}})}function r(a){return q(a,!1)}function s(a){return q(a,!0)}function t(a,b){if(!a.db)return!0;var c=!a.db.objectStoreNames.contains(a.storeName),d=a.version<a.db.version,e=a.version>a.db.version;if(d&&(a.version!==b&&console.warn('The database "'+a.name+"\" can't be downgraded from version "+a.db.version+" to version "+a.version+"."),a.version=a.db.version),e||c){if(c){var f=a.db.version+1;f>a.version&&(a.version=f)}return!0}return!1}function u(a){return new ja(function(b,c){var d=new FileReader;d.onerror=c,d.onloadend=function(c){var d=btoa(c.target.result||"");b({__local_forage_encoded_blob:!0,data:d,type:a.type})},d.readAsBinaryString(a)})}function v(a){var b=l(atob(a.data));return i([b],{type:a.type})}function w(a){return a&&a.__local_forage_encoded_blob}function x(a){var b=this,c=b._initReady().then(function(){var a=ia[b._dbInfo.name];if(a&&a.dbReady)return a.dbReady});return k(c,a,a),c}function y(a){function b(){return ja.resolve()}var c=this,d={db:null};if(a)for(var e in a)d[e]=a[e];ia||(ia={});var f=ia[d.name];f||(f={forages:[],db:null,dbReady:null,deferredOperations:[]},ia[d.name]=f),f.forages.push(c),c._initReady||(c._initReady=c.ready,c.ready=x);for(var g=[],h=0;h<f.forages.length;h++){var i=f.forages[h];i!==c&&g.push(i._initReady().catch(b))}var j=f.forages.slice(0);return ja.all(g).then(function(){return d.db=f.db,r(d)}).then(function(a){return d.db=a,t(d,c._defaultConfig.version)?s(d):a}).then(function(a){d.db=f.db=a,c._dbInfo=d;for(var b=0;b<j.length;b++){var e=j[b];e!==c&&(e._dbInfo.db=d.db,e._dbInfo.version=d.version)}})}function z(a,b){var c=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var d=new ja(function(b,d){c.ready().then(function(){var e=c._dbInfo,f=e.db.transaction(e.storeName,"readonly").objectStore(e.storeName),g=f.get(a);g.onsuccess=function(){var a=g.result;void 0===a&&(a=null),w(a)&&(a=v(a)),b(a)},g.onerror=function(){d(g.error)}}).catch(d)});return j(d,b),d}function A(a,b){var c=this,d=new ja(function(b,d){c.ready().then(function(){var e=c._dbInfo,f=e.db.transaction(e.storeName,"readonly").objectStore(e.storeName),g=f.openCursor(),h=1;g.onsuccess=function(){var c=g.result;if(c){var d=c.value;w(d)&&(d=v(d));var e=a(d,c.key,h++);void 0!==e?b(e):c.continue()}else b()},g.onerror=function(){d(g.error)}}).catch(d)});return j(d,b),d}function B(a,b,c){var d=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var e=new ja(function(c,e){var f;d.ready().then(function(){return f=d._dbInfo,"[object Blob]"===la.call(b)?n(f.db).then(function(a){return a?b:u(b)}):b}).then(function(b){var d=f.db.transaction(f.storeName,"readwrite"),g=d.objectStore(f.storeName),h=g.put(b,a);null===b&&(b=void 0),d.oncomplete=function(){void 0===b&&(b=null),c(b)},d.onabort=d.onerror=function(){var a=h.error?h.error:h.transaction.error;e(a)}}).catch(e)});return 
j(e,c),e}function C(a,b){var c=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var d=new ja(function(b,d){c.ready().then(function(){var e=c._dbInfo,f=e.db.transaction(e.storeName,"readwrite"),g=f.objectStore(e.storeName),h=g.delete(a);f.oncomplete=function(){b()},f.onerror=function(){d(h.error)},f.onabort=function(){var a=h.error?h.error:h.transaction.error;d(a)}}).catch(d)});return j(d,b),d}function D(a){var b=this,c=new ja(function(a,c){b.ready().then(function(){var d=b._dbInfo,e=d.db.transaction(d.storeName,"readwrite"),f=e.objectStore(d.storeName),g=f.clear();e.oncomplete=function(){a()},e.onabort=e.onerror=function(){var a=g.error?g.error:g.transaction.error;c(a)}}).catch(c)});return j(c,a),c}function E(a){var b=this,c=new ja(function(a,c){b.ready().then(function(){var d=b._dbInfo,e=d.db.transaction(d.storeName,"readonly").objectStore(d.storeName),f=e.count();f.onsuccess=function(){a(f.result)},f.onerror=function(){c(f.error)}}).catch(c)});return j(c,a),c}function F(a,b){var c=this,d=new ja(function(b,d){return a<0?void b(null):void c.ready().then(function(){var e=c._dbInfo,f=e.db.transaction(e.storeName,"readonly").objectStore(e.storeName),g=!1,h=f.openCursor();h.onsuccess=function(){var c=h.result;return c?void(0===a?b(c.key):g?b(c.key):(g=!0,c.advance(a))):void b(null)},h.onerror=function(){d(h.error)}}).catch(d)});return j(d,b),d}function G(a){var b=this,c=new ja(function(a,c){b.ready().then(function(){var d=b._dbInfo,e=d.db.transaction(d.storeName,"readonly").objectStore(d.storeName),f=e.openCursor(),g=[];f.onsuccess=function(){var b=f.result;return b?(g.push(b.key),void b.continue()):void a(g)},f.onerror=function(){c(f.error)}}).catch(c)});return j(c,a),c}function H(a){var b,c,d,e,f,g=.75*a.length,h=a.length,i=0;"="===a[a.length-1]&&(g--,"="===a[a.length-2]&&g--);var j=new ArrayBuffer(g),k=new Uint8Array(j);for(b=0;b<h;b+=4)c=na.indexOf(a[b]),d=na.indexOf(a[b+1]),e=na.indexOf(a[b+2]),f=na.indexOf(a[b+3]),k[i++]=c<<2|d>>4,k[i++]=(15&d)<<4|e>>2,k[i++]=(3&e)<<6|63&f;return j}function I(a){var b,c=new Uint8Array(a),d="";for(b=0;b<c.length;b+=3)d+=na[c[b]>>2],d+=na[(3&c[b])<<4|c[b+1]>>4],d+=na[(15&c[b+1])<<2|c[b+2]>>6],d+=na[63&c[b+2]];return c.length%3===2?d=d.substring(0,d.length-1)+"=":c.length%3===1&&(d=d.substring(0,d.length-2)+"=="),d}function J(a,b){var c="";if(a&&(c=Ea.call(a)),a&&("[object ArrayBuffer]"===c||a.buffer&&"[object ArrayBuffer]"===Ea.call(a.buffer))){var d,e=qa;a instanceof ArrayBuffer?(d=a,e+=sa):(d=a.buffer,"[object Int8Array]"===c?e+=ua:"[object Uint8Array]"===c?e+=va:"[object Uint8ClampedArray]"===c?e+=wa:"[object Int16Array]"===c?e+=xa:"[object Uint16Array]"===c?e+=za:"[object Int32Array]"===c?e+=ya:"[object Uint32Array]"===c?e+=Aa:"[object Float32Array]"===c?e+=Ba:"[object Float64Array]"===c?e+=Ca:b(new Error("Failed to get type for BinaryArray"))),b(e+I(d))}else if("[object Blob]"===c){var f=new FileReader;f.onload=function(){var c=oa+a.type+"~"+I(this.result);b(qa+ta+c)},f.readAsArrayBuffer(a)}else try{b(JSON.stringify(a))}catch(c){console.error("Couldn't convert value into a JSON string: ",a),b(null,c)}}function K(a){if(a.substring(0,ra)!==qa)return JSON.parse(a);var b,c=a.substring(Da),d=a.substring(ra,Da);if(d===ta&&pa.test(c)){var e=c.match(pa);b=e[1],c=c.substring(e[0].length)}var f=H(c);switch(d){case sa:return f;case ta:return i([f],{type:b});case ua:return new Int8Array(f);case va:return new Uint8Array(f);case wa:return new Uint8ClampedArray(f);case xa:return new Int16Array(f);case za:return new 
Uint16Array(f);case ya:return new Int32Array(f);case Aa:return new Uint32Array(f);case Ba:return new Float32Array(f);case Ca:return new Float64Array(f);default:throw new Error("Unkown type: "+d)}}function L(a){var b=this,c={db:null};if(a)for(var d in a)c[d]="string"!=typeof a[d]?a[d].toString():a[d];var e=new ja(function(a,d){try{c.db=openDatabase(c.name,String(c.version),c.description,c.size)}catch(a){return d(a)}c.db.transaction(function(e){e.executeSql("CREATE TABLE IF NOT EXISTS "+c.storeName+" (id INTEGER PRIMARY KEY, key unique, value)",[],function(){b._dbInfo=c,a()},function(a,b){d(b)})})});return c.serializer=Fa,e}function M(a,b){var c=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var d=new ja(function(b,d){c.ready().then(function(){var e=c._dbInfo;e.db.transaction(function(c){c.executeSql("SELECT * FROM "+e.storeName+" WHERE key = ? LIMIT 1",[a],function(a,c){var d=c.rows.length?c.rows.item(0).value:null;d&&(d=e.serializer.deserialize(d)),b(d)},function(a,b){d(b)})})}).catch(d)});return j(d,b),d}function N(a,b){var c=this,d=new ja(function(b,d){c.ready().then(function(){var e=c._dbInfo;e.db.transaction(function(c){c.executeSql("SELECT * FROM "+e.storeName,[],function(c,d){for(var f=d.rows,g=f.length,h=0;h<g;h++){var i=f.item(h),j=i.value;if(j&&(j=e.serializer.deserialize(j)),j=a(j,i.key,h+1),void 0!==j)return void b(j)}b()},function(a,b){d(b)})})}).catch(d)});return j(d,b),d}function O(a,b,c,d){var e=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var f=new ja(function(f,g){e.ready().then(function(){void 0===b&&(b=null);var h=b,i=e._dbInfo;i.serializer.serialize(b,function(b,j){j?g(j):i.db.transaction(function(c){c.executeSql("INSERT OR REPLACE INTO "+i.storeName+" (key, value) VALUES (?, ?)",[a,b],function(){f(h)},function(a,b){g(b)})},function(b){if(b.code===b.QUOTA_ERR){if(d>0)return void f(O.apply(e,[a,h,c,d-1]));g(b)}})})}).catch(g)});return j(f,c),f}function P(a,b,c){return O.apply(this,[a,b,c,1])}function Q(a,b){var c=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var d=new ja(function(b,d){c.ready().then(function(){var e=c._dbInfo;e.db.transaction(function(c){c.executeSql("DELETE FROM "+e.storeName+" WHERE key = ?",[a],function(){b()},function(a,b){d(b)})})}).catch(d)});return j(d,b),d}function R(a){var b=this,c=new ja(function(a,c){b.ready().then(function(){var d=b._dbInfo;d.db.transaction(function(b){b.executeSql("DELETE FROM "+d.storeName,[],function(){a()},function(a,b){c(b)})})}).catch(c)});return j(c,a),c}function S(a){var b=this,c=new ja(function(a,c){b.ready().then(function(){var d=b._dbInfo;d.db.transaction(function(b){b.executeSql("SELECT COUNT(key) as c FROM "+d.storeName,[],function(b,c){var d=c.rows.item(0).c;a(d)},function(a,b){c(b)})})}).catch(c)});return j(c,a),c}function T(a,b){var c=this,d=new ja(function(b,d){c.ready().then(function(){var e=c._dbInfo;e.db.transaction(function(c){c.executeSql("SELECT key FROM "+e.storeName+" WHERE id = ? 
LIMIT 1",[a+1],function(a,c){var d=c.rows.length?c.rows.item(0).key:null;b(d)},function(a,b){d(b)})})}).catch(d)});return j(d,b),d}function U(a){var b=this,c=new ja(function(a,c){b.ready().then(function(){var d=b._dbInfo;d.db.transaction(function(b){b.executeSql("SELECT key FROM "+d.storeName,[],function(b,c){for(var d=[],e=0;e<c.rows.length;e++)d.push(c.rows.item(e).key);a(d)},function(a,b){c(b)})})}).catch(c)});return j(c,a),c}function V(a){var b=this,c={};if(a)for(var d in a)c[d]=a[d];return c.keyPrefix=c.name+"/",c.storeName!==b._defaultConfig.storeName&&(c.keyPrefix+=c.storeName+"/"),b._dbInfo=c,c.serializer=Fa,ja.resolve()}function W(a){var b=this,c=b.ready().then(function(){for(var a=b._dbInfo.keyPrefix,c=localStorage.length-1;c>=0;c--){var d=localStorage.key(c);0===d.indexOf(a)&&localStorage.removeItem(d)}});return j(c,a),c}function X(a,b){var c=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var d=c.ready().then(function(){var b=c._dbInfo,d=localStorage.getItem(b.keyPrefix+a);return d&&(d=b.serializer.deserialize(d)),d});return j(d,b),d}function Y(a,b){var c=this,d=c.ready().then(function(){for(var b=c._dbInfo,d=b.keyPrefix,e=d.length,f=localStorage.length,g=1,h=0;h<f;h++){var i=localStorage.key(h);if(0===i.indexOf(d)){var j=localStorage.getItem(i);if(j&&(j=b.serializer.deserialize(j)),j=a(j,i.substring(e),g++),void 0!==j)return j}}});return j(d,b),d}function Z(a,b){var c=this,d=c.ready().then(function(){var b,d=c._dbInfo;try{b=localStorage.key(a)}catch(a){b=null}return b&&(b=b.substring(d.keyPrefix.length)),b});return j(d,b),d}function $(a){var b=this,c=b.ready().then(function(){for(var a=b._dbInfo,c=localStorage.length,d=[],e=0;e<c;e++)0===localStorage.key(e).indexOf(a.keyPrefix)&&d.push(localStorage.key(e).substring(a.keyPrefix.length));return d});return j(c,a),c}function _(a){var b=this,c=b.keys().then(function(a){return a.length});return j(c,a),c}function aa(a,b){var c=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var d=c.ready().then(function(){var b=c._dbInfo;localStorage.removeItem(b.keyPrefix+a)});return j(d,b),d}function ba(a,b,c){var d=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var e=d.ready().then(function(){void 0===b&&(b=null);var c=b;return new ja(function(e,f){var g=d._dbInfo;g.serializer.serialize(b,function(b,d){if(d)f(d);else try{localStorage.setItem(g.keyPrefix+a,b),e(c)}catch(a){"QuotaExceededError"!==a.name&&"NS_ERROR_DOM_QUOTA_REACHED"!==a.name||f(a),f(a)}})})});return j(e,c),e}function ca(a,b){a[b]=function(){var c=arguments;return a.ready().then(function(){return a[b].apply(a,c)})}}function da(){for(var a=1;a<arguments.length;a++){var b=arguments[a];if(b)for(var c in b)b.hasOwnProperty(c)&&(Oa(b[c])?arguments[0][c]=b[c].slice():arguments[0][c]=b[c])}return arguments[0]}function ea(a){for(var b in Ja)if(Ja.hasOwnProperty(b)&&Ja[b]===a)return!0;return!1}var fa="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(a){return typeof a}:function(a){return a&&"function"==typeof Symbol&&a.constructor===Symbol&&a!==Symbol.prototype?"symbol":typeof a},ga=e();"undefined"==typeof Promise&&a(3);var 
ha,ia,ja=Promise,ka="local-forage-detect-blob-support",la=Object.prototype.toString,ma={_driver:"asyncStorage",_initStorage:y,iterate:A,getItem:z,setItem:B,removeItem:C,clear:D,length:E,key:F,keys:G},na="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",oa="~~local_forage_type~",pa=/^~~local_forage_type~([^~]+)~/,qa="__lfsc__:",ra=qa.length,sa="arbf",ta="blob",ua="si08",va="ui08",wa="uic8",xa="si16",ya="si32",za="ur16",Aa="ui32",Ba="fl32",Ca="fl64",Da=ra+sa.length,Ea=Object.prototype.toString,Fa={serialize:J,deserialize:K,stringToBuffer:H,bufferToString:I},Ga={_driver:"webSQLStorage",_initStorage:L,iterate:N,getItem:M,setItem:P,removeItem:Q,clear:R,length:S,key:T,keys:U},Ha={_driver:"localStorageWrapper",_initStorage:V,iterate:Y,getItem:X,setItem:ba,removeItem:aa,clear:W,length:_,key:Z,keys:$},Ia={},Ja={INDEXEDDB:"asyncStorage",LOCALSTORAGE:"localStorageWrapper",WEBSQL:"webSQLStorage"},Ka=[Ja.INDEXEDDB,Ja.WEBSQL,Ja.LOCALSTORAGE],La=["clear","getItem","iterate","key","keys","length","removeItem","setItem"],Ma={description:"",driver:Ka.slice(),name:"localforage",size:4980736,storeName:"keyvaluepairs",version:1},Na={};Na[Ja.INDEXEDDB]=f(),Na[Ja.WEBSQL]=g(),Na[Ja.LOCALSTORAGE]=h();var Oa=Array.isArray||function(a){return"[object Array]"===Object.prototype.toString.call(a)},Pa=function(){function a(b){d(this,a),this.INDEXEDDB=Ja.INDEXEDDB,this.LOCALSTORAGE=Ja.LOCALSTORAGE,this.WEBSQL=Ja.WEBSQL,this._defaultConfig=da({},Ma),this._config=da({},this._defaultConfig,b),this._driverSet=null,this._initDriver=null,this._ready=!1,this._dbInfo=null,this._wrapLibraryMethodsWithReady(),this.setDriver(this._config.driver).catch(function(){})}return a.prototype.config=function(a){if("object"===("undefined"==typeof a?"undefined":fa(a))){if(this._ready)return new Error("Can't call config() after localforage has been used.");for(var b in a){if("storeName"===b&&(a[b]=a[b].replace(/\W/g,"_")),"version"===b&&"number"!=typeof a[b])return new Error("Database version must be a number.");this._config[b]=a[b]}return!("driver"in a&&a.driver)||this.setDriver(this._config.driver)}return"string"==typeof a?this._config[a]:this._config},a.prototype.defineDriver=function(a,b,c){var d=new ja(function(b,c){try{var d=a._driver,e=new Error("Custom driver not compliant; see https://mozilla.github.io/localForage/#definedriver"),f=new Error("Custom driver name already in use: "+a._driver);if(!a._driver)return void c(e);if(ea(a._driver))return void c(f);for(var g=La.concat("_initStorage"),h=0;h<g.length;h++){var i=g[h];if(!i||!a[i]||"function"!=typeof a[i])return void c(e)}var j=ja.resolve(!0);"_support"in a&&(j=a._support&&"function"==typeof a._support?a._support():ja.resolve(!!a._support)),j.then(function(c){Na[d]=c,Ia[d]=a,b()},c)}catch(a){c(a)}});return k(d,b,c),d},a.prototype.driver=function(){return this._driver||null},a.prototype.getDriver=function(a,b,c){var d=this,e=ja.resolve().then(function(){if(!ea(a)){if(Ia[a])return Ia[a];throw new Error("Driver not found.")}switch(a){case d.INDEXEDDB:return ma;case d.LOCALSTORAGE:return Ha;case d.WEBSQL:return Ga}});return k(e,b,c),e},a.prototype.getSerializer=function(a){var b=ja.resolve(Fa);return k(b,a),b},a.prototype.ready=function(a){var b=this,c=b._driverSet.then(function(){return null===b._ready&&(b._ready=b._initDriver()),b._ready});return k(c,a,a),c},a.prototype.setDriver=function(a,b,c){function d(){g._config.driver=g.driver()}function e(a){return g._extend(a),d(),g._ready=g._initStorage(g._config),g._ready}function f(a){return function(){function 
b(){for(;c<a.length;){var f=a[c];return c++,g._dbInfo=null,g._ready=null,g.getDriver(f).then(e).catch(b)}d();var h=new Error("No available storage method found.");return g._driverSet=ja.reject(h),g._driverSet}var c=0;return b()}}var g=this;Oa(a)||(a=[a]);var h=this._getSupportedDrivers(a),i=null!==this._driverSet?this._driverSet.catch(function(){return ja.resolve()}):ja.resolve();return this._driverSet=i.then(function(){var a=h[0];return g._dbInfo=null,g._ready=null,g.getDriver(a).then(function(a){g._driver=a._driver,d(),g._wrapLibraryMethodsWithReady(),g._initDriver=f(h)})}).catch(function(){d();var a=new Error("No available storage method found.");return g._driverSet=ja.reject(a),g._driverSet}),k(this._driverSet,b,c),this._driverSet},a.prototype.supports=function(a){return!!Na[a]},a.prototype._extend=function(a){da(this,a)},a.prototype._getSupportedDrivers=function(a){for(var b=[],c=0,d=a.length;c<d;c++){var e=a[c];this.supports(e)&&b.push(e)}return b},a.prototype._wrapLibraryMethodsWithReady=function(){for(var a=0;a<La.length;a++)ca(this,La[a])},a.prototype.createInstance=function(b){return new a(b)},a}(),Qa=new Pa;b.exports=Qa},{3:3}]},{},[4])(4)}); | zikongli-jingdian-taozhuang-x3 | /zikongli-jingdian-taozhuang-x3-2022.10.15.0.tar.gz/zikongli-jingdian-taozhuang-x3-2022.10.15.0/ZikongliJingdianTaozhuangX3/js/libs/localforage.min.js | localforage.min.js |
=====
zilch
=====
0.1.3 (01/13/2012)
==================
Features
--------
- Applied pull request from Marius Gedminas to add prefix option support to the
  error view webapp.
0.1.2 (08/07/2011)
==================
Bug Fixes
---------
- Cleanup session at end of request.
0.1.1 (07/25/2011)
==================
Bug Fixes
---------
- Fix bug with webob imports in client.py
0.1 (07/25/2011)
================
Features
--------
- Exception reporting via SQLAlchemy and/or ZeroMQ
- Recording Store can be pluggable
- WSGI Middleware to capture exceptions with WSGI/CGI environment data
- Web User Interface for the recorder to view collected exceptions
- Event tagging to record additional information per exception such as the
  Hostname, Application, etc.
| zilch | /zilch-0.1.3.tar.gz/zilch-0.1.3/CHANGES.rst | CHANGES.rst |
=====
zilch
=====
``zilch`` is a small library for recording and viewing exceptions from Python.
This library is inspired by (and uses several of the same functions from)
David Cramer's Sentry_, but aims to implement just the core features in a
smaller code/feature footprint.
Requirements
============
* simplejson_
* WebError_
Optional
--------
* ZeroMQ_ (For network based reporting)
* SQLAlchemy_ (For the database backend recorder)
* Pyramid_ and WebHelpers_ (For the recorder web UI)
Basic Usage
===========
Reporting an Exception
----------------------
In the application that wants to report errors, import zilch and configure
the reporter to record directly to the database::

    from zilch.store import SQLAlchemyStore
    import zilch.client

    zilch.client.store = SQLAlchemyStore('sqlite:///exceptions.db')
Then to report an exception::

    from zilch.client import capture_exception

    try:
        1 / 0  # do something that explodes
    except Exception:
        capture_exception()
The error will then be recorded in the database for later viewing.
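For a quick sanity check that recording worked, the database file can be
inspected directly. This is a minimal sketch using Python's standard
``sqlite3`` module; it only lists whatever tables the store created, since
zilch's actual schema is not assumed here::

    import sqlite3

    # Open the same file the SQLAlchemyStore above writes to.
    conn = sqlite3.connect('exceptions.db')

    # Discover the tables zilch created rather than assuming their names.
    tables = conn.execute(
        "SELECT name FROM sqlite_master WHERE type = 'table'"
    ).fetchall()
    print(tables)

    conn.close()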
Advanced Usage
==============
In larger cluster scenarios, or where latency is important, exception
reporting can be handed off to ZeroMQ_, which delivers events to a central
recorder over the network. Both the client and the recording machine must
have ZeroMQ_ installed.
To set up the client for recording::

    import zilch.client

    zilch.client.recorder_host = "tcp://localhost:5555"
Then to report an exception::

    from zilch.client import capture_exception

    try:
        1 / 0  # do something that explodes
    except Exception:
        capture_exception()
The exception will then be sent to the recorder listening at the specified
``recorder_host``.
Recording Exceptions Centrally
==============================
The recorder uses ZeroMQ_ to receive exception reports delivered over the
network. To run the recorder, on the machine that should record exceptions
run::

    >> zilch-recorder tcp://localhost:5555 sqlite:///exceptions.db
Without a ``Recorder`` running, ZeroMQ_ will queue the messages on the client
until the recorder becomes available; once its internal queue fills, sending
will begin to block. (In the future, an option will be added to configure
offloading queued messages to disk.)
The recorder will create the tables necessary on its initial launch.
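For intuition, the receive side can be pictured as a ZeroMQ socket draining
serialized events into the store. The sketch below is illustrative only and
is not zilch's actual implementation: the PULL socket type, the JSON wire
format, and the ``store.message_received()`` call are all assumptions::

    import json

    import zmq
    from zilch.store import SQLAlchemyStore

    store = SQLAlchemyStore('sqlite:///exceptions.db')

    context = zmq.Context()
    socket = context.socket(zmq.PULL)  # socket type is an assumption
    socket.bind("tcp://*:5555")

    while True:
        # Assume each message is a single JSON-serialized event.
        event = json.loads(socket.recv())
        # Hypothetical call; the real store API may differ.
        store.message_received(event)

In practice the ``zilch-recorder`` entry point wraps a loop like this and, as
noted above, creates the tables on its first launch.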
Viewing Recorded Exceptions
===========================
``zilch`` comes with a Pyramid_ web application to view the database of
recorded exceptions. Once you have installed Pyramid_ and WebHelpers_, you can
run the web interface by typing::

    >> zilch-web sqlite:///exceptions.db
Additional web configuration parameters are available to designate the
host/port that the web application should bind to (viewable by running
``zilch-web`` with the ``-h`` option).
License
=======
``zilch`` is offered under the MIT license.
Authors
=======
``zilch`` is made available by Ben Bangert.
Support
=======
``zilch`` is considered feature-complete: the project owner (Ben Bangert)
plans no additional functionality or development beyond bug fixes. Bugs can
be filed on GitHub, should be accompanied by a test case to retain current
code coverage, and should arrive in a pull request when ready to be accepted
into the zilch code-base.
For a more full-featured error collector, Sentry_ now has a stand-alone
client called Raven_ that no longer requires Django. ``zilch`` was created
before Raven_ was available, and the author now uses Raven_ rather than
``zilch`` most of the time.
.. _Raven: https://github.com/dcramer/raven
.. _Pyramid: http://docs.pylonsproject.org/docs/pyramid.html
.. _ZeroMQ: http://zeromq.org
.. _Sentry: https://github.com/dcramer/sentry
.. _simplejson: http://simplejson.github.com/simplejson/
.. _WebError: http://pypi.python.org/pypi/WebError
.. _SQLAlchemy: http://sqlalchemy.org
.. _WebHelpers: http://sluggo.scrapping.cc/python/WebHelpers/index.html
| zilch | /zilch-0.1.3.tar.gz/zilch-0.1.3/README.rst | README.rst |