import uuid as uuid_gen
from abc import abstractmethod
from typing import Dict
from .models.hf_interface import *
from .models.BlockDevice import BlockDevice
class ZBSActionData:
mount_path = None
device = None
filesystem = None
fs_type = None
is_partition = False
partition_number = None
LV = None
VG = None
lvm_path = None
chunk_size = None
def __init__(self, mount_path=None,
device=None,
filesystem=None,
fs_type=None,
is_partition=False,
partition_number=None,
LV=None,
VG=None,
lvm_path=None,
chunk_size=None,
dev_id=None,
dev_path=None,
parent=None,
btrfs_dev_id=None,
partition_id=None,
windows_old_size=None,
size=None,
_map=None):
self.mount_path = mount_path
self.filesystem = filesystem
self.fs_type = fs_type
self.device = device
self.is_partition = is_partition
self.partition_number = partition_number
self.LV = LV
self.VG = VG
self.lvm_path = lvm_path
self.chunk_size = chunk_size
self.dev_id = dev_id
self.dev_path = dev_path
self.parent = parent
self.btrfs_dev_id = btrfs_dev_id
self.partition_id = partition_id
self.windows_old_size = windows_old_size
self.size = size
self.map = _map
def serialize(self):
return self.__dict__
def set_data(self, json):
self.mount_path = json.get('mount_path')
self.filesystem = json.get('filesystem')
self.fs_type = json.get('fs_type')
self.device = json.get('device')
self.is_partition = json.get('is_partition', False)
self.partition_number = json.get('partition_number', '')
self.LV = json.get('LV', '')
self.VG = json.get('VG', '')
self.lvm_path = json.get('lvm_path', '')
self.chunk_size = json.get('chunk_size', 0)
self.dev_id = json.get('dev_id')
self.dev_path = json.get('dev_path')
self.parent = json.get('parent')
self.btrfs_dev_id = json.get('btrfs_dev_id')
self.partition_id = json.get('partition_id')
self.windows_old_size = json.get('windows_old_size')
self.size = json.get('size')
self.map = json.get('_map')
return self
class ZBSAgentReceiver:
"""
The ZBSAgentReceiver (the Receiver class in the Command pattern) contains important business logic.
It knows how to perform any kind of action sent by the ZBS Backend.
ZBSAgentReceiver is an abstract class; concrete implementations should be provided per OS.
"""
@abstractmethod
def do_nothing(self, data: ZBSActionData) -> None:
raise NotImplementedError(
"ZBSAgentReceiver 'do_nothing' is abstract, please implement a concrete per-OS receiver")
@abstractmethod
def extend_fs(self, data: ZBSActionData, action_id, account_id=None) -> None:
raise NotImplementedError(
"ZBSAgentReceiver 'extend_fs' is abstract, please implement a concrete per-OS receiver")
@abstractmethod
def add_disk(self, data: ZBSActionData, action_id, account_id=None) -> None:
raise NotImplementedError(
"ZBSAgentReceiver 'add_disk' is abstract, please implement a concrete per-OS receiver")
@abstractmethod
def balance_fs(self, data: ZBSActionData, action_id) -> None:
raise NotImplementedError(
"ZBSAgentReceiver 'balance_fs' is abstract, please implement a concrete per-OS receiver")
@abstractmethod
def remove_disk(self, data: ZBSActionData, action_id, account_id=None) -> None:
raise NotImplementedError(
"ZBSAgentReceiver 'remove_disk' is abstract, please implement a concrete per-OS receiver")
@abstractmethod
def balance_ebs_structure(self, data: ZBSActionData, action_id) -> None:
raise NotImplementedError(
"ZBSAgentReceiver 'balance_ebs_structure' is abstract, please implement a concrete per-OS receiver")
@abstractmethod
def start_migration(self, data: ZBSActionData, action_id, account_id=None) -> None:
raise NotImplementedError(
"ZBSAgentReceiver 'start_migration' is abstract, please implement a concrete per-OS receiver")
class SpecialInstructions(ISpecialInstructions):
"""
Constructor for special instructions with optional parameters:
* dev_id: identify the device for the filesystem to which the action is attached
* size: specify the capacity for a new device or the additional capacity when extending a device
* sub_actions: when an action implements multiple actions, specify a dictionary:
-- { int (action priority): dict of actions, keyed by action id, that can be run in parallel }
-- Actions keyed to a higher priority cannot start until all actions of lower priorities complete
"""
def __init__(self, dev_id: str = None, size: int = None, sub_actions: Dict[int, Dict[str, IActionHF]] = None):
self.dev_id = dev_id
self.size = size
self.sub_actions = sub_actions
def __repr__(self):
return str(self.__dict__)
class ZBSAction(IActionHF):
"""
Base command class
Delegates the business logic to the receiver
There are receivers per OS (Linux and Windows for now)
"""
TYPE_FIELD_NAME = "type"
DATA_FIELD_NAME = "data"
STATUS_FIELD_NAME = "status"
UUID_FIELD_NAME = "uuid"
SPECIAL_INSTRUCTIONS_FIELD_NAME = "_ZBSAction__special_instructions"
__uuid = None
__status: IActionHF.Status = IActionHF.Status.NEW
__special_instructions: SpecialInstructions
subclasses = {}
def __init__(self, receiver: ZBSAgentReceiver = None, data: ZBSActionData = None, uuid: str = None):
self.receiver = receiver
self.data = data
if uuid is not None:
self.__uuid = uuid
else:
self.__uuid = str(uuid_gen.uuid4())
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.subclasses[cls.__name__] = cls
def __repr__(self):
try:
special_instructions = self.get_special_instructions()
except AttributeError:
special_instructions = None
if special_instructions is not None and not isinstance(special_instructions, dict):
special_instructions = special_instructions.__dict__
repr_dict = dict(zip(['Action Type', 'Action Status', 'SpecialInstructions'],
[self.get_action_type(),
str(self.get_status().name),
special_instructions]))
return str(repr_dict)
def set_data(self, data: ZBSActionData):
self.data = data
def set_receiver(self, receiver: ZBSAgentReceiver):
self.receiver = receiver
def serialize(self):
result = dict(self.__dict__)  # copy so serialization does not mutate the instance
result[ZBSAction.TYPE_FIELD_NAME] = self.get_action_type()
result[ZBSAction.DATA_FIELD_NAME] = self.data.serialize() if self.data is not None else None
result[ZBSAction.STATUS_FIELD_NAME] = self.get_status().name
result[ZBSAction.UUID_FIELD_NAME] = self.get_action_id()
if hasattr(self, '_ZBSAction__special_instructions'):
result[ZBSAction.SPECIAL_INSTRUCTIONS_FIELD_NAME] = \
self.get_special_instructions().__dict__ if self.__special_instructions is not None else None
return result
# ActionHF interface implementation
def get_action_id(self) -> str:
return self.__uuid
def get_action_type(self) -> str:
return str(type(self).__name__)
def get_status(self) -> IActionHF.Status:
return self.__status
def set_status(self, status: IActionHF.Status):
self.__status = status
def get_special_instructions(self) -> SpecialInstructions:
return self.__special_instructions
def set_special_instructions(self, special_instructions: SpecialInstructions):
self.__special_instructions = special_instructions
@staticmethod
def deserialize_type(json):
return json[ZBSAction.TYPE_FIELD_NAME]
@staticmethod
def deserialize_data(json):
return ZBSActionData().set_data(json[ZBSAction.DATA_FIELD_NAME])
@staticmethod
def deserialize_uuid(serialized_action):
return serialized_action.get(ZBSAction.UUID_FIELD_NAME)
@staticmethod
def deserialize_status(serialized_action):
return serialized_action.get(ZBSAction.STATUS_FIELD_NAME)
@staticmethod
def deserialize_special_instructions(serialized_action):
if not isinstance(serialized_action, dict):
serialized_action = serialized_action.serialize()
special_instructions = SpecialInstructions(
dev_id=serialized_action.get(ZBSAction.SPECIAL_INSTRUCTIONS_FIELD_NAME, {}).get('dev_id'),
size=serialized_action.get(ZBSAction.SPECIAL_INSTRUCTIONS_FIELD_NAME, {}).get('size'),
sub_actions=serialized_action.get(ZBSAction.SPECIAL_INSTRUCTIONS_FIELD_NAME, {}).get('sub_actions'),
)
for key, val in serialized_action.get(ZBSAction.SPECIAL_INSTRUCTIONS_FIELD_NAME, {}).items():
if key not in ['dev_id', 'size', 'sub_actions']:
setattr(special_instructions, str(key), val)
return special_instructions
@staticmethod
def deserialize_action(serialized_action):
action_type = ZBSAction.deserialize_type(serialized_action)
action_data = ZBSAction.deserialize_data(serialized_action) if serialized_action.get(
ZBSAction.DATA_FIELD_NAME) is not None else None
action_uuid = ZBSAction.deserialize_uuid(serialized_action)
action_status = ZBSAction.deserialize_status(serialized_action)
action_to_perform = ZBSActionFactory.create_action(action_type, action_uuid)
action_to_perform.set_data(action_data)
action_to_perform.set_status(IActionHF.Status[action_status])
if ZBSAction.SPECIAL_INSTRUCTIONS_FIELD_NAME in serialized_action:
special_instructions = ZBSAction.deserialize_special_instructions(serialized_action)
action_to_perform.set_special_instructions(special_instructions)
return action_to_perform
@abstractmethod
def execute(self):
raise NotImplementedError("BaseAction is abstract, please implement a concrete action")
class DoNothingAction(ZBSAction):
"""
Do nothing action
"""
def execute(self):
print("Do nothing || Action ID : {}".format(self.get_action_id()))
class Factory:
def create(self, uuid): return DoNothingAction(uuid=uuid)
class ExtendFileSystemAction(ZBSAction):
"""
Extend File System Action.
"""
def execute(self, fs):
try:
return self.receiver.extend_fs(self.get_special_instructions(), self.get_action_id(), fs)
except AttributeError as ex:
print("Failed to execute command '{}': error is '{}'".format(self.get_action_type(), ex))
class Factory:
def create(self, uuid): return ExtendFileSystemAction(uuid=uuid)
class AddDiskAction(ZBSAction):
"""
Add Disk Action.
"""
def execute(self, fs):
try:
return self.receiver.add_disk(self.get_special_instructions(), self.get_action_id(), fs)
except AttributeError as ex:
print("Failed to execute command '{}': error is '{}'".format(self.get_action_type(), ex))
class Factory:
def create(self, uuid): return AddDiskAction(uuid=uuid)
class RemoveDiskAction(ZBSAction):
"""
Remove Disk Action.
"""
def execute(self, fs):
try:
return self.receiver.remove_disk(self.get_special_instructions(), self.get_action_id(), fs)
except AttributeError as ex:
print("Failed to execute command '{}': error is '{}'".format(self.get_action_type(), ex))
class Factory:
def create(self, uuid): return RemoveDiskAction(uuid=uuid)
class BalanceFileSystemAction(ZBSAction):
"""
Balance File System Action.
"""
def execute(self):
try:
self.receiver.balance_fs(self.data, self.get_action_id())
except AttributeError as ex:
print("Failed to execute command '{}': error is '{}'".format(self.get_action_type(), ex))
class Factory:
def create(self, uuid): return BalanceFileSystemAction(uuid=uuid)
class BalanceEBSStructureAction(ZBSAction):
"""
Balance EBS structure Action.
"""
def execute(self):
try:
self.receiver.extend_fs(self.data, self.get_action_id())
self.receiver.remove_disk(self.data, self.get_action_id())
except AttributeError as ex:
print("Failed to execute command '{}': error is '{}'".format(self.get_action_type(), ex))
class Factory:
def create(self, uuid): return BalanceEBSStructureAction(uuid=uuid)
class MigrationStartAction(ZBSAction):
"""
Migration Start Action.
The purpose of this action is to handle a BE request to start a migration action for a mount point
Returns: whether the migration started successfully, or the error on failure
"""
def execute(self, account_id):
try:
return self.receiver.start_migration(self.get_special_instructions(), self.get_action_id(), account_id)
except AttributeError as ex:
print("Failed to execute command '{}': error is '{}'".format(self.get_action_type(), ex))
class Factory:
def create(self, uuid): return MigrationStartAction(uuid=uuid)
class ZBSActionFactory:
actions = {}
@staticmethod
def create_action(action_type, uuid=None):
if action_type not in ZBSActionFactory.actions:
action_class = ZBSAction.subclasses.get(action_type)
if action_class:
ZBSActionFactory.actions[action_type] = action_class.Factory()
else:
raise ValueError(f'Could not find action class `{action_type}`')
return ZBSActionFactory.actions[action_type].create(uuid)

# --- end of src/actions.py (zesty.zbs-api) ---
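As a usage sketch for the module above (assuming it is importable as `actions`; the device id and size values are illustrative), the factory/serialization round trip looks like this:

```
from actions import (SpecialInstructions, ZBSAction, ZBSActionData,
                     ZBSActionFactory)

# Create an action by class name; the factory caches one inner Factory per type.
action = ZBSActionFactory.create_action("ExtendFileSystemAction")
action.set_data(ZBSActionData(mount_path="/mnt/data", fs_type="btrfs"))
action.set_special_instructions(SpecialInstructions(dev_id="vol-123", size=10))

# Round trip through the wire format exchanged with the ZBS Backend.
serialized = action.serialize()
restored = ZBSAction.deserialize_action(serialized)
assert restored.get_action_id() == action.get_action_id()
```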
import json
import requests
import config as cfg
"""
USAGE:
First you have to init factory with base settings
factory = RequestFactory(stage=${STAGE}, version=${VERSION}, api_key=${API_KEY})
Then create a request instance depending on the type of request you want to send
metrics_request = factory.create_request("Metrics")
Pass the data to set_data function
metrics_request.set_data(
agent_version,
overview,
plugins
)
Then send it to the BackEnd and receive the response
response = metrics_request.send()
"""
DEFAULT_BASE_URL = "https://api{}.cloudvisor.io"
ESTABLISH_CONN_TIMEOUT = 10
RECEIVE_RESPONSE_TIMEOUT = 30
class RequestFactory:
requests = {}
stage = None
version = None
api_key = None
api_base = None
def __init__(self, stage, version, api_key, api_base: str = DEFAULT_BASE_URL):
self.stage = stage
self.version = version
self.api_key = api_key
self.api_base = api_base
def create_request(self, request_type):
if request_type not in RequestFactory.requests:
request_class = Request.subclasses.get(request_type)
if request_class:
RequestFactory.requests[request_type] = request_class.Factory(self.stage, self.version, self.api_key,
self.api_base)
else:
raise ValueError(f'Could not find request class `{request_type}`')
return RequestFactory.requests[request_type].create()
class Request:
stage = None
version = None
api_key = None
prefix = None
api_base = None
api_is_private_endpoint = False
subclasses = {}
def __init__(self, stage, version, api_key, api_base: str = DEFAULT_BASE_URL):
self.stage = stage
self.version = version
self.api_key = api_key
self.prefix = ""
if self.stage == 'staging':
self.prefix = "-staging"
if api_base != DEFAULT_BASE_URL:
self.api_is_private_endpoint = True
self.api_base = api_base.format(self.prefix)
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.subclasses[cls.__name__] = cls
def send(self):
res = requests.post(
self.build_url(),
data=json.dumps(self.message, separators=(',', ':')),
headers={"Cache-Control": "no-cache", "Pragma": "no-cache", "x-api-key": self.api_key},
timeout=(ESTABLISH_CONN_TIMEOUT, RECEIVE_RESPONSE_TIMEOUT)
)
return self.Response(res)
class Metrics(Request):
message = {}
def build_url(self):
if self.api_is_private_endpoint:
return '{}{}'.format(self.api_base, "/post-metrics")
else:
return '{}{}'.format(self.api_base, cfg.post_metrics_ep)
def set_data(self, agent_version, overview, plugins, package_version=None, autoupdate_last_execution_time=None):
self.message = {
"agent": {
"version": agent_version,
"package_version": package_version,
"autoupdate_last_execution_time": autoupdate_last_execution_time
},
"overview": overview,
"plugins": plugins
}
class Response:
raw_data: dict = None
status_code = None
def __init__(self, res):
self.status_code = res.status_code
self.raw_data = res.json()
for k, v in self.raw_data.items():
setattr(self, str(k), v)
class Factory:
stage = None
version = None
api_key = None
api_base = None
def __init__(self, stage, version, api_key, api_base: str = DEFAULT_BASE_URL):
self.stage = stage
self.version = version
self.api_key = api_key
self.api_base = api_base
def create(self): return Metrics(stage=self.stage, version=self.version, api_key=self.api_key,
api_base=self.api_base)
class NotifyException(Request):
message = {}
def build_url(self):
if self.api_is_private_endpoint:
return '{}{}'.format(self.api_base, "/post-notify-exception")
else:
return '{}{}'.format(self.api_base, cfg.notify_exception_ep)
def set_data(self, account_id, instance_id, exception, msg):
self.message = {
"exception": exception,
"message": msg,
"instance_id": instance_id,
"account_id": account_id
}
class Response:
raw_data = None
status_code = None
def __init__(self, res):
self.status_code = res.status_code
self.raw_data = res.json()
for k, v in self.raw_data.items():
setattr(self, str(k), v)
class Factory:
stage = None
version = None
api_key = None
api_base = None
def __init__(self, stage, version, api_key, api_base: str = DEFAULT_BASE_URL):
self.stage = stage
self.version = version
self.api_key = api_key
self.api_base = api_base
def create(self): return NotifyException(stage=self.stage, version=self.version, api_key=self.api_key,
api_base=self.api_base)
class FsResizeCompleted(Request):
message = {}
def build_url(self):
if self.api_is_private_endpoint:
return '{}{}'.format(self.api_base, "/post-delete-resize-item")
else:
return '{}{}'.format(self.api_base, cfg.fs_resize_completed_ep)
def set_data(self, dev_path, filesystems, action_id, exit_code, resize_output, account_id):
self.message = {
"dev_path": dev_path,
"filesystems": filesystems,
"action_id": action_id,
"exit_code": exit_code,
"resize_output": resize_output,
"account_id": account_id
}
class Response:
raw_data = None
status_code = None
success = None
message = None
def __init__(self, res):
self.status_code = res.status_code
self.raw_data = res.json()
self.success = self.raw_data.get('Success')
self.message = self.raw_data.get('message')
class Factory:
stage = None
version = None
api_key = None
api_base = None
def __init__(self, stage, version, api_key, api_base: str = DEFAULT_BASE_URL):
self.stage = stage
self.version = version
self.api_key = api_key
self.api_base = api_base
def create(self): return FsResizeCompleted(stage=self.stage, version=self.version, api_key=self.api_key,
api_base=self.api_base)
class HoldingRemoveAction(Request):
message = {}
def build_url(self):
return '{}{}'.format(self.api_base, cfg.hold_remove_action_ep)
def set_data(self, dev_path, filesystems, action_id, exit_code, index, account_id):
self.message = {
"dev_path": dev_path,
"filesystems": filesystems,
"action_id": action_id,
"exit_code": exit_code,
"index": index,
"account_id": account_id
}
class Response:
raw_data = None
status_code = None
success = None
message = None
def __init__(self, res):
self.status_code = res.status_code
self.raw_data = res.json()
self.success = self.raw_data.get('Success')
self.message = self.raw_data.get('message')
class Factory:
stage = None
version = None
api_key = None
api_base = None
def __init__(self, stage, version, api_key, api_base: str = DEFAULT_BASE_URL):
self.stage = stage
self.version = version
self.api_key = api_key
self.api_base = api_base
def create(self): return HoldingRemoveAction(stage=self.stage, version=self.version, api_key=self.api_key,
api_base=self.api_base)
class FsResizeFailed(Request):
message = {}
def build_url(self):
if self.api_is_private_endpoint:
return '{}{}'.format(self.api_base, "/post-fs-resize-failed")
else:
return '{}{}'.format(self.api_base, cfg.resize_failed_ep)
def set_data(self, dev_path, filesystems, action_id, exit_code, resize_output, error, resize_steps, account_id):
self.message = {
"dev_path": dev_path,
"filesystems": filesystems,
"action_id": action_id,
"exit_code": exit_code,
"resize_output": resize_output,
"error": error,
"resize_steps": resize_steps,
"account_id": account_id
}
class Response:
raw_data = None
status_code = None
success = None
message = None
def __init__(self, res):
self.status_code = res.status_code
self.raw_data = res.json()
self.success = self.raw_data.get('Success')
self.message = self.raw_data.get('message')
class Factory:
stage = None
version = None
api_key = None
api_base = None
def __init__(self, stage, version, api_key, api_base: str = DEFAULT_BASE_URL):
self.stage = stage
self.version = version
self.api_key = api_key
self.api_base = api_base
def create(self): return FsResizeFailed(stage=self.stage, version=self.version, api_key=self.api_key,
api_base=self.api_base)
class MigrationStartActionCompleted(Request):
message = {}
def build_url(self):
if self.api_is_private_endpoint:
return '{}{}'.format(self.api_base, "/post-migration-start-action-complete")
else:
return '{}{}'.format(self.api_base, cfg.migration_start_action_completed_ep)
def set_data(self, account_id, fs_id, action_id, mount_path, volume_id, region, cloud_vendor, dev_path, exit_code,
error):
self.message = {
"account_id": account_id,
"fs_id": fs_id,
"action_id": action_id,
"mount_path": mount_path,
"volume_id": volume_id,
"region": region,
"cloud_vendor": cloud_vendor,
"dev_path": dev_path,
"exit_code": exit_code,
"error": error
}
class Response:
raw_data = None
status_code = None
success = None
message = None
def __init__(self, res):
self.status_code = res.status_code
self.raw_data = res.json()
self.success = self.raw_data.get('Success')
self.message = self.raw_data.get('message')
class Factory:
stage = None
version = None
api_key = None
api_base = None
def __init__(self, stage, version, api_key, api_base: str = DEFAULT_BASE_URL):
self.stage = stage
self.version = version
self.api_key = api_key
self.api_base = api_base
def create(self): return MigrationStartActionCompleted(stage=self.stage, version=self.version,
api_key=self.api_key,
api_base=self.api_base)

# --- end of src/protocol.py (zesty.zbs-api) ---
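Following the USAGE note at the top of the module, a minimal sketch of sending a metrics payload (the stage, version, and key values are placeholders, and `send()` performs a real HTTP POST):

```
from protocol import RequestFactory

factory = RequestFactory(stage="staging", version="1.2.3", api_key="<API_KEY>")

# Build a Metrics request, populate it, and post it to the backend.
metrics_request = factory.create_request("Metrics")
metrics_request.set_data(agent_version="1.2.3", overview={}, plugins={})

response = metrics_request.send()
print(response.status_code, response.raw_data)
```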
import json
import time
import traceback
from typing import Dict
from copy import deepcopy
from decimal import Decimal
from zesty.id_handler import create_zesty_id, create_zesty_filesystem_id
from ..actions import ZBSAction
from .BlockDevice import BlockDevice
from .Usage import Usage
GB_IN_BYTES = 1024**3
class FileSystem:
"""
This object interacts with DynamoDB representing a FileSystem.
As per the data model migration ZES-2884,
these will be backwards compatible and awkward in appearance until
the code is brought up to date.
"""
def __init__(
self,
fs_id: str,
account_id: str = None,
account_uuid: str = None,
agent_update_required: bool = None,
btrfs_version: str = None,
cloud: str = None,
cloud_vendor: str = None,
cycle_period: int = None,
delete_on_termination: bool = None,
devices: Dict[str, BlockDevice] = None,
encrypted: dict = None,
existing_actions: Dict[str, ZBSAction] = None,
expiredAt: int = None,
fs_cost: float = None,
fs_devices_to_count: int = None,
fs_size: int = None,
fs_type: str = None,
fs_usage: int = None,
has_unallocated_space: bool = None,
inodes: Dict[str, Usage] = None,
instance_id: str = None,
instance_type: str = None,
is_ephemeral: bool = None,
is_partition: bool = None,
is_zesty_disk: bool = None,
label: str = None,
last_update: int = None,
LV: str = None,
lvm_path: str = None,
mount_path: str = None,
name: str = None,
org_id: str = None,
partition_id: str = None,
partition_number: int = None,
platform: str = None,
potential_savings: float = None,
region: str = None,
resizable: bool = None,
space: Dict[str, Usage] = None,
tags: Dict[str, str] = None,
unallocated_chunk: int = None,
update_data_ts: int = 0,
VG: str = None,
wrong_fs_alert: bool = None,
zesty_disk_iops: int = None,
zesty_disk_throughput: int = None,
zesty_disk_vol_type: str = None,
max_utilization_in_72_hrs: int = None,
package_version: str = None,
autoupdate_last_execution_time: str = None,
statvfs_raw_data: Dict[str, str] = None,
pvc_id: str = None,
mount_options: list = None,
leading_device: str = None,
policies: Dict[str, dict] = None,
instance_tags: Dict[str, str] = None,
is_manageable: bool = False,  # related to migration
is_emr: bool = False
):
# Initialize empty dict not as default arg
existing_actions = {} if existing_actions is None else existing_actions
devices = {} if devices is None else devices
inodes = {} if inodes is None else inodes
space = {} if space is None else space
tags = {} if tags is None else tags
instance_tags = {} if instance_tags is None else instance_tags
self.account_id = account_id
self.account_uuid = account_uuid
self.agent_update_required = agent_update_required
self.btrfs_version = btrfs_version
if cloud is None and cloud_vendor is None:
self.cloud = 'Amazon'
self.cloud_vendor = 'Amazon'
elif cloud:
self.cloud = cloud
self.cloud_vendor = cloud
elif cloud_vendor:
self.cloud = cloud_vendor
self.cloud_vendor = cloud_vendor
self.cycle_period = cycle_period
self.devices = self.init_devices(devices)
self.delete_on_termination = delete_on_termination
self.encrypted = encrypted
self.existing_actions = existing_actions
self.expiredAt = expiredAt
self.fs_cost = fs_cost
self.fs_devices_to_count = fs_devices_to_count
try:
self.fs_id = create_zesty_filesystem_id(
cloud=self.cloud_vendor,
fs_id=fs_id
)
except Exception as e:
self.fs_id = fs_id
self.fs_size = fs_size
self.fs_type = fs_type
self.fs_usage = fs_usage
self.has_unallocated_space = has_unallocated_space
self.inodes = Usage(inodes)
self.instance_id = instance_id
self.instance_type = instance_type
self.is_ephemeral = is_ephemeral
self.is_partition = is_partition
self.is_zesty_disk = is_zesty_disk
self.label = label
if last_update is None:
self.last_update = int(time.time()) - 60
else:
self.last_update = last_update
self.LV = LV
self.lvm_path = lvm_path
self.mount_path = mount_path
self.name = name
self.org_id = org_id
self.partition_id = partition_id
self.partition_number = partition_number
self.platform = platform
self.potential_savings = potential_savings
self.region = region
self.resizable = resizable
self.space = Usage(space)
self.tags = tags
self.unallocated_chunk = unallocated_chunk
self.update_data_ts = update_data_ts
self.VG = VG
self.wrong_fs_alert = wrong_fs_alert
self.zesty_disk_iops = zesty_disk_iops
self.zesty_disk_throughput = zesty_disk_throughput
self.zesty_disk_vol_type = zesty_disk_vol_type
self.max_utilization_in_72_hrs = max_utilization_in_72_hrs
self.package_version = package_version
self.autoupdate_last_execution_time = autoupdate_last_execution_time
self.statvfs_raw_data = statvfs_raw_data
self.pvc_id = pvc_id
self.mount_options = mount_options
self.leading_device = leading_device
self.policies = policies
self.instance_tags = instance_tags
self.is_manageable = is_manageable  # related to migration
self.is_emr = is_emr
@staticmethod
def init_devices(devices: Dict[str, BlockDevice]):
if not devices:
return {}
else:
devices = deepcopy(devices)
for dev in devices:
if isinstance(devices[dev], BlockDevice):
continue
devices[dev] = BlockDevice(
**devices.get(dev, {})
)
return devices
def as_dict(self) -> dict:
return_dict = json.loads(json.dumps(self, default=self.object_dumper))
return {k: v for k, v in return_dict.items() if v is not None}
@staticmethod
def object_dumper(obj) -> dict:
try:
return obj.__dict__
except AttributeError as e:
if isinstance(obj, Decimal):
return int(obj)
print(f"Got exception in object_dumper value: {obj} | type : {type(obj)}")
print(traceback.format_exc())
return obj
def serialize(self) -> dict:
return self.as_dict()
def __repr__(self) -> str:
return f"FileSystem:{self.fs_id}" | zesty.zbs-api | /zesty.zbs-api-1.0.2023.8.29.1693309720.tar.gz/zesty.zbs-api-1.0.2023.8.29.1693309720/src/models/FileSystem.py | FileSystem.py |
import enum
from typing import Dict, Union
from sqlalchemy.orm import Session, sessionmaker, Query
from sqlalchemy.sql.elements import or_, Label
from .InstancesTags import InstancesTags
from .common_base import Base
try:
from sqlalchemy import Column, engine, case, func, cast, String, text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.dialects.postgresql import BOOLEAN, FLOAT, INTEGER, BIGINT, \
JSON, TIMESTAMP, VARCHAR
except ImportError:
raise ImportError("sqlalchemy is required by zesty.zbs-api but needs to be vendored separately. Add postgres-utils to your project's requirements that depend on zbs-api.")
class EbsVolumeConfig(enum.Enum):
none = 'None'
unattached = 'Unattached'
potentialZesty = 'Potential ZestyDisk'
class EbsVolume(Base):
# TODO: Move this model into our Alembic system
# when a modification of this model is needed.
__tablename__ = "disks"
volume_id = Column(VARCHAR, primary_key=True)
org_id = Column(VARCHAR, index=True)
account_uuid = Column(VARCHAR, index=True)
account_id = Column(VARCHAR, index=True)
region = Column(VARCHAR, index=True)
volume_type = Column(VARCHAR, index=True)
cloud = Column(VARCHAR, index=True)
availability_zone = Column(VARCHAR)
create_time = Column(TIMESTAMP)
encrypted = Column(BOOLEAN)
size = Column(INTEGER)
snapshot_id = Column(VARCHAR)
state = Column(VARCHAR)
iops = Column(INTEGER)
tags = Column(JSON)
attachments = Column(JSON)
attached_to = Column(JSON)
monthly_cost = Column(FLOAT, default=0)
is_unused_resource = Column(INTEGER, default=0)
unused_since = Column(VARCHAR)
agent_installed = Column(BOOLEAN, default=False)
_zbs_supported_os = Column(INTEGER)
potential_savings = Column(FLOAT, default=0)
image_id = Column(VARCHAR, nullable=True)
image_name = Column(VARCHAR, nullable=True)
# dict for custom_order_by class method
col_to_actual_sorting_col = {"instance_tags": "instance_tags_keys"}
def __init__(
self,
volume_aws_schema: Dict,
account_uuid: str = None):
if account_uuid:
self.account_uuid = account_uuid
else:
self.account_uuid = volume_aws_schema["account_uuid"]
self.volume_id = volume_aws_schema["volume_id"]
self.org_id = volume_aws_schema["org_id"]
self.account_id = volume_aws_schema["account_id"]
self.cloud = volume_aws_schema["cloud"]
self.region = volume_aws_schema["region"]
self.volume_type = volume_aws_schema["volume_type"]
self.availability_zone = volume_aws_schema["availability_zone"]
self.create_time = volume_aws_schema["create_time"]
self.encrypted = volume_aws_schema["encrypted"]
self.size = volume_aws_schema["size"]
self.snapshot_id = volume_aws_schema["snapshot_id"]
self.state = volume_aws_schema["state"]
self.iops = volume_aws_schema.get("iops", 0)
self.tags = volume_aws_schema.get("tags", {})
self.attachments = volume_aws_schema.get("attachments", [])
self.attached_to = volume_aws_schema.get("attached_to", [])
self.monthly_cost = volume_aws_schema.get("monthly_cost", 0)
self.is_unused_resource = volume_aws_schema.get(
"is_unused_resource", 0)
self.unused_since = volume_aws_schema.get("unused_since", None)
self.agent_installed = volume_aws_schema.get("agent_installed", False)
self.potential_savings = volume_aws_schema.get("potential_savings", 0)
self._zbs_supported_os = volume_aws_schema.get("_zbs_supported_os")
self.image_id = volume_aws_schema.get("ami_id")
self.image_name = volume_aws_schema.get("ami_name")
def __repr__(self):
return f"{self.__tablename__}:{self.volume_id}"
@classmethod
def instance_id_filter(cls, query: Query, value: str):
val = f'%{value}%'
query = query.filter(
case((or_(cls.attached_to == None, func.json_array_length(cls.attached_to) == 0), False),
else_=cast(cls.attached_to, String).ilike(val)))
return query
@classmethod
def instance_name_filter(cls, query: Query, value: str):
subq = query.session.query(InstancesTags.instance_name)
val = '%{}%'.format(value.replace("%", "\\%"))
query = query.filter((subq.scalar_subquery().where(
(func.jsonb(cls.attached_to).op('->>')(0) == InstancesTags.instance_id) & (
cls.account_id == InstancesTags.account_id))).ilike(val))
return query
@classmethod
def instance_tags_filter(cls, query: Query, value: str):
session = query.session
subq = session.query(InstancesTags.instance_tags)
python_types_to_pg = {int: BIGINT, float: FLOAT, bool: BOOLEAN}
for key_val in value:
key = key_val.get('key')
val = key_val.get('value')
if key is not None and val is not None:
if not isinstance(val, str):
query = query.filter(cast(cast(func.jsonb(subq.scalar_subquery().where(
(func.jsonb(cls.attached_to).op('->>')(0) == InstancesTags.instance_id) & (cls.account_id == InstancesTags.account_id)).op('->')(key)), String), python_types_to_pg[type(val)]) == val)
else:
val = f'%{val}%'
query = query.filter(cast(func.jsonb(subq.scalar_subquery().where(
(func.jsonb(cls.attached_to).op('->>')(0) == InstancesTags.instance_id) & (cls.account_id == InstancesTags.account_id)).op('->')(key)), String).ilike(val))
elif key is not None:
query = query.filter(func.jsonb(subq.scalar_subquery().where(
(func.jsonb(cls.attached_to).op('->>')(0) == InstancesTags.instance_id) & (cls.account_id == InstancesTags.account_id))).op('?')(key))
elif val is not None:
if isinstance(val, str):
query = query.filter(cast(subq.scalar_subquery().where(
(func.jsonb(cls.attached_to).op('->>')(0) == InstancesTags.instance_id) & (cls.account_id == InstancesTags.account_id)), String)
.regexp_replace(r'.+\: "[^"]*(' + str(val) + r')[^"]*"[,\s}].*', "\\1") == f"{val}")
else:
if isinstance(val, bool):
val = f'"{val}"'
query = query.filter(cast(subq.scalar_subquery().where(
(func.jsonb(cls.attached_to).op('->>')(0) == InstancesTags.instance_id) & (cls.account_id == InstancesTags.account_id)), String)
.regexp_replace(r'.+\: (' + str(val) + r')[,\s}].*', "\\1") == f"{val}")
return query
# Custom query
@classmethod
def custom_query(cls, session: Union[Session, sessionmaker]) -> Query:
q = session.query(cls)
subq_2 = session.query(func.json_object_keys(InstancesTags.instance_tags))
subq_3 = session.query(InstancesTags.instance_tags)
instance_name_clause = "regexp_replace(cast(array((select instances_tags.instance_name from instances_tags " \
"inner join json_array_elements(disks.attached_to) as attached_to_set " \
"on instances_tags.instance_id = replace(cast(attached_to_set.value as varchar), '\"', '') " \
"and instances_tags.account_id = disks.account_id)) as varchar), '[\\{\\}\"]', '', 'g')"
q = q.add_columns(case((or_(cls.attached_to == None, func.json_array_length(cls.attached_to) == 0), ''),
else_=cast(cls.attached_to, String).regexp_replace(r'[\[\]"]', '', 'g'))
.label("instance_id"),
Label('instance_name', text(instance_name_clause)),
func.array(subq_2.scalar_subquery().where(
(func.jsonb(cls.attached_to).op('->>')(0) == InstancesTags.instance_id) &
(cls.account_id == InstancesTags.account_id)))
.label('instance_tags_keys'),
subq_3.scalar_subquery().where(
(func.jsonb(cls.attached_to).op('->>')(0) == InstancesTags.instance_id) &
(cls.account_id == InstancesTags.account_id))
.label('instance_tags'))
return q
@classmethod
def custom_order_by(cls, sorting_column: str, sorting_order: str) -> str:
actual_sorting_column = cls.col_to_actual_sorting_col.get(sorting_column, sorting_column)
return f"{actual_sorting_column} {sorting_order}"
def get_volume_id(self):
return self.volume_id
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
@hybrid_property
def is_attached(self):
return len(self.attached_to) > 0
@is_attached.expression
def is_attached(cls):
return func.json_array_length(cls.attached_to) > 0
def create_tables(engine: engine.base.Engine) -> None:  # type: ignore
Base.metadata.create_all(engine, checkfirst=True)

# --- end of src/models/EbsVolume.py (zesty.zbs-api) ---
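A sketch of composing the custom query with its filters (placeholder DSN; assumes the vendored SQLAlchemy/postgres-utils setup mentioned in the import guard):

```
from sqlalchemy import create_engine, text
from sqlalchemy.orm import Session

from models.EbsVolume import EbsVolume, create_tables

engine = create_engine("postgresql://user:pass@localhost/zesty")  # placeholder DSN
create_tables(engine)

with Session(engine) as session:
    # Base query with the computed instance_id / instance_name / instance_tags columns.
    query = EbsVolume.custom_query(session)
    # Narrow to volumes attached to a specific instance.
    query = EbsVolume.instance_id_filter(query, "i-0abc123")
    query = query.order_by(text(EbsVolume.custom_order_by("size", "desc")))
    volumes = query.all()
```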
import time
from enum import Enum, auto
from typing import Dict, List, Optional, Union
from uuid import UUID as _UUID
from uuid import uuid4
from sqlalchemy import INT
from sqlalchemy import Enum as sa_ENUM
from sqlalchemy.dialects.postgresql import ARRAY, UUID
from sqlalchemy.sql.schema import ForeignKey
try:
from sqlalchemy import Column, String, case, cast, engine, func, or_
from sqlalchemy.dialects.postgresql import (BIGINT, BOOLEAN, FLOAT, JSON,
TIMESTAMP, VARCHAR)
from sqlalchemy.orm import Query, Session, aliased, sessionmaker
except ImportError:
raise ImportError(
"sqlalchemy is required by zesty.zbs-api but needs to be vendored separately. Add postgres-utils to your project's requirements that depend on zbs-api.")
from ..actions import ZBSAction
from .BlockDevice import BlockDevice
from .common_base import Base, BaseMixin
from .Usage import Usage
class ManagedFsMixin:
fs_id = Column(VARCHAR, primary_key=True)
account_id = Column(VARCHAR, index=True, default=None)
account_uuid = Column(VARCHAR, index=True, default=None)
agent_update_required = Column(BOOLEAN, default=None)
btrfs_version = Column(VARCHAR, default=None)
cloud = Column(VARCHAR, default=None)
cloud_vendor = Column(VARCHAR, default=None)
cycle_period = Column(BIGINT, default=None)
delete_on_termination = Column(BOOLEAN, default=None)
devices = Column(JSON, default=None)
encrypted = Column(JSON, default=None)
existing_actions = Column(JSON, default=None)
expiredAt = Column(BIGINT, default=None)
fs_cost = Column(FLOAT, default=None)
fs_devices_to_count = Column(BIGINT, default=None)
fs_size = Column(BIGINT, default=None)
fs_type = Column(VARCHAR, default=None)
fs_usage = Column(BIGINT, default=None)
has_unallocated_space = Column(BOOLEAN, default=None)
inodes = Column(JSON, default=None)
instance_id = Column(VARCHAR, default=None)
instance_type = Column(VARCHAR, default=None)
is_ephemeral = Column(BOOLEAN, default=None)
is_partition = Column(BOOLEAN, default=None)
is_zesty_disk = Column(BOOLEAN, default=None)
label = Column(VARCHAR, default=None)
last_update = Column(BIGINT, default=None)
LV = Column(VARCHAR, default=None)
lvm_path = Column(VARCHAR, default=None)
mount_path = Column(VARCHAR, default=None)
name = Column(VARCHAR, default=None)
org_id = Column(VARCHAR, index=True)
partition_id = Column(VARCHAR, default=None)
partition_number = Column(BIGINT, default=None)
platform = Column(VARCHAR, default=None)
potential_savings = Column(FLOAT, default=None)
region = Column(VARCHAR, index=True)
resizable = Column(BOOLEAN, default=None)
space = Column(JSON, default=None)
tags = Column(JSON, default=None)
unallocated_chunk = Column(BIGINT, default=None)
update_data_ts = Column(BIGINT, default=0)
VG = Column(VARCHAR, default=None)
wrong_fs_alert = Column(BOOLEAN, default=None)
zesty_disk_iops = Column(BIGINT, default=None)
zesty_disk_throughput = Column(BIGINT, default=None)
zesty_disk_vol_type = Column(VARCHAR, default=None)
max_utilization_in_72_hrs = Column(BIGINT, default=None)
package_version = Column(VARCHAR, default=None)
autoupdate_last_execution_time = Column(VARCHAR, default=None)
policies = Column(JSON, default=None)
instance_tags = Column(JSON, default=None)
migration_uuid = Column(UUID(as_uuid=True), nullable=True)
is_manageable = Column(BOOLEAN, default=False)
iops_tps_vol_type_triggered = Column(BOOLEAN, default=False)
iops_tps_vol_type_change_ts = Column(BIGINT, nullable=True, default=None)
# dict for custom_order_by class method
col_to_actual_sorting_col = {
"policies": "policies_name",
"instance_tags": "instance_tags_keys"}
def __init__(
self,
fs_id: str,
account_id: str = None,
account_uuid: str = None,
agent_update_required: bool = None,
btrfs_version: str = None,
cloud: str = None,
cloud_vendor: str = None,
cycle_period: int = None,
delete_on_termination: bool = None,
devices: Dict[str, BlockDevice] = None,
encrypted: dict = None,
existing_actions: Dict[str, ZBSAction] = None,
expiredAt: int = None,
fs_cost: float = None,
fs_devices_to_count: int = None,
fs_size: int = None,
fs_type: str = None,
fs_usage: int = None,
has_unallocated_space: bool = None,
inodes: Dict[str, Usage] = None,
instance_id: str = None,
instance_type: str = None,
is_ephemeral: bool = None,
is_partition: bool = None,
is_zesty_disk: bool = None,
label: str = None,
last_update: int = None,
LV: str = None,
lvm_path: str = None,
mount_path: str = None,
name: str = None,
org_id: str = None,
partition_id: str = None,
partition_number: int = None,
platform: str = None,
potential_savings: float = None,
region: str = None,
resizable: bool = None,
space: Dict[str, Usage] = None,
tags: Dict[str, str] = None,
unallocated_chunk: int = None,
update_data_ts: int = 0,
VG: str = None,
wrong_fs_alert: bool = None,
zesty_disk_iops: int = None,
zesty_disk_throughput: int = None,
zesty_disk_vol_type: str = None,
max_utilization_in_72_hrs: int = None,
package_version: str = None,
autoupdate_last_execution_time: str = None,
statvfs_raw_data: Dict[str, str] = None, # unused to support initialization with **dict, do not remove
policies: Dict[str, dict] = None,
instance_tags: Dict[str, str] = None,
is_emr: bool = False, # unused to support initialization with **dict, do not remove
is_manageable: bool = False,
iops_tps_vol_type_triggered: bool = False,
iops_tps_vol_type_change_ts: Optional[int] = None,
**kwargs
):
self.fs_id = fs_id
self.account_id = account_id
self.account_uuid = account_uuid
self.agent_update_required = agent_update_required
self.btrfs_version = btrfs_version
if cloud is None and cloud_vendor is None:
self.cloud = 'Amazon'
self.cloud_vendor = 'Amazon'
elif cloud:
self.cloud = cloud
self.cloud_vendor = cloud
elif cloud_vendor:
self.cloud = cloud_vendor
self.cloud_vendor = cloud_vendor
self.cycle_period = cycle_period
self.delete_on_termination = delete_on_termination
self.devices = devices
if devices:
for dev in self.devices:
if isinstance(self.devices[dev], BlockDevice):
self.devices[dev] = self.devices[dev].as_dict()
else:
self.devices[dev] = self.devices.get(dev, {})
self.encrypted = encrypted
if existing_actions:
self.existing_actions = {
action: existing_actions[action].serialize()
for action in existing_actions
}
self.expiredAt = expiredAt
self.fs_cost = fs_cost
self.fs_devices_to_count = fs_devices_to_count
self.fs_size = fs_size
self.fs_type = fs_type
self.fs_usage = fs_usage
self.has_unallocated_space = has_unallocated_space
self.inodes = inodes
self.instance_id = instance_id
self.instance_type = instance_type
self.is_ephemeral = is_ephemeral
self.is_partition = is_partition
self.is_zesty_disk = is_zesty_disk
self.label = label
if last_update:
self.last_update = last_update
else:
self.last_update = int(time.time()) - 60
self.LV = LV
self.lvm_path = lvm_path
self.mount_path = mount_path
self.name = name
self.org_id = org_id
self.partition_id = partition_id
self.partition_number = partition_number
self.platform = platform
self.potential_savings = potential_savings
self.region = region
self.resizable = resizable
self.space = space
self.tags = tags
self.unallocated_chunk = unallocated_chunk
self.update_data_ts = update_data_ts
self.VG = VG
self.wrong_fs_alert = wrong_fs_alert
self.zesty_disk_iops = zesty_disk_iops
self.zesty_disk_throughput = zesty_disk_throughput
self.zesty_disk_vol_type = zesty_disk_vol_type
self.max_utilization_in_72_hrs = max_utilization_in_72_hrs
self.package_version = package_version
self.autoupdate_last_execution_time = autoupdate_last_execution_time
self.policies = policies
self.instance_tags = instance_tags
self.is_manageable = is_manageable
self.iops_tps_vol_type_triggered = iops_tps_vol_type_triggered
self.iops_tps_vol_type_change_ts = iops_tps_vol_type_change_ts
def __repr__(self) -> str:
return f"{self.__tablename__}:{self.fs_id}"
def asdict(self) -> dict:
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def as_dict(self) -> dict:
return self.asdict()
# Custom filters
@classmethod
def policies_filter(cls, query: Query, value: str):
query = query.filter(
cast(cls.policies, String).contains(f'"name": "{value}"'))
return query
@classmethod
def instance_name_filter(cls, query: Query, value: str):
val = '%{}%'.format(value.replace("%", "\\%"))
query = query.filter(
case((cls.instance_tags == None, ''), else_=func.replace(cast(cls.instance_tags.op('->')('Name'), String), "\"", "")).ilike(val))
return query
# Custom query
@classmethod
def custom_query(cls, session: Union[Session, sessionmaker]) -> Query:
clsb = aliased(cls)
subq = session.query(func.json_object_keys(clsb.instance_tags))
q = session.query(cls)
q = q.add_columns(case((or_(cls.policies == None, cast(cls.policies, String) == 'null'), ''),
else_=cast(cls.policies, String).regexp_replace(r'.+"name":\s"([^"]+).+', "\\1"))
.label("policies_name"),
case((cls.instance_tags == None, ''),
else_=func.replace(cast(cls.instance_tags.op('->')('Name'), String), "\"", ""))
.label('instance_name'),
case((cast(cls.instance_tags, String) == 'null', []),
else_=func.array(subq.scalar_subquery().where(cls.fs_id == clsb.fs_id)))
.label('instance_tags_keys')
)
return q
@classmethod
def custom_order_by(cls, sorting_column: str, sorting_order: str) -> str:
actual_sorting_column = cls.col_to_actual_sorting_col.get(
sorting_column, sorting_column)
return f"{actual_sorting_column} {sorting_order}"
class ManagedFs(ManagedFsMixin, BaseMixin, Base):
__tablename__ = "managed_filesystems"
class MigrationStatus(Enum):
Active = auto()
Aborting = auto()
Aborted = auto()
Completed = auto()
Failed = auto()
class RunningMigrations(BaseMixin, Base):
__tablename__ = "active_migration"
fs_id = Column(VARCHAR)
migration_uuid = Column(UUID(as_uuid=True), nullable=False, primary_key=True)
finished_at = Column(TIMESTAMP, nullable=True)
account_id = Column(VARCHAR, default=None)
region = Column(VARCHAR(255))
reboot = Column(BOOLEAN, default=False)
# array of day numbers when reboot is allowed 0-6
days = Column(ARRAY(VARCHAR))
# timeframe from-to in %I:%M %p
from_ = Column(VARCHAR)
to = Column(VARCHAR)
status = Column(sa_ENUM(MigrationStatus), nullable=False, server_default=MigrationStatus.Active.name)
is_rebooting = Column(BOOLEAN, default=False) # TODO: can this be deleted?
snapshot_id = Column(VARCHAR(255))
snapshot_remove_after = Column(INT, nullable=True) # in days
snapshot_create_started_at = Column(TIMESTAMP, nullable=True)
snapshot_deleted_at = Column(TIMESTAMP, nullable=True)
ebs_id = Column(VARCHAR(255))
ebs_remove_after = Column(INT, nullable=True) # in days
ebs_detached_at = Column(TIMESTAMP, nullable=True)
ebs_deleted_at = Column(TIMESTAMP, nullable=True)
def __init__(
self,
fs_id: str,
migration_uuid: _UUID,
account_id: str = None,
region: str = None,
days: Optional[List[int]] = None,
from_: Optional[str] = None,
to: Optional[str] = None,
reboot: bool = False,
status: MigrationStatus = MigrationStatus.Active,
ebs_remove_after: int = 1,
snapshot_remove_after: int = 7):
self.migration_uuid = migration_uuid
self.fs_id = fs_id
self.account_id = account_id
self.region = region
self.days = days
self.from_ = from_
self.to = to
self.reboot = reboot
self.status = status
self.ebs_remove_after = ebs_remove_after
self.snapshot_remove_after = snapshot_remove_after
@staticmethod
def new_migration(
fs_id,
days: Optional[List[int]] = None,
from_: Optional[str] = None,
to: Optional[str] = None,
reboot: bool = False,
ebs_remove_after: int = 1,
snapshot_remove_after: int = 7) -> 'RunningMigrations':
return RunningMigrations(
fs_id,
uuid4(),
days=days,
from_=from_,
to=to,
reboot=reboot,
ebs_remove_after=ebs_remove_after,
snapshot_remove_after=snapshot_remove_after,
)
class MigrationHistory(BaseMixin, Base):
__tablename__ = "migration_history"
time_start = Column(TIMESTAMP)
time_end = Column(TIMESTAMP)
status = Column(VARCHAR)
phase = Column(VARCHAR, primary_key=True)
progress = Column(FLOAT)
completed = Column(BOOLEAN)
failed = Column(BOOLEAN)
failure_reason = Column(VARCHAR)
fs_id = Column(VARCHAR)
migration_uuid = Column(UUID(as_uuid=True), ForeignKey("active_migration.migration_uuid", ondelete="CASCADE"),
nullable=False, primary_key=True, index=True)
# should be returned from the agent in seconds
estimated = Column(INT)
name = Column(VARCHAR)
weight = Column(INT)
abortable = Column(BOOLEAN)
index = Column(INT, primary_key=True)
def __init__(
self,
status: str,
phase: str,
progress: int,
eta: int,
name: str,
weight: int,
abortable: bool,
start_time: int,
end_time: int,
migration_uuid: 'UUID',
fs_id: str,
index: int):
self.status = status
self.phase = phase
self.progress = progress
self.estimated = eta
self.name = name
self.weight = weight
self.time_start = start_time
self.time_end = end_time
self.abortable = abortable
self.index = index
self.migration_uuid = migration_uuid
self.fs_id = fs_id
class WrongActionException(Exception):
pass
class MigrationActions(BaseMixin, Base):
__tablename__ = "migration_actions"
id = Column(INT, primary_key=True, autoincrement=True)
fs_id = Column(VARCHAR)
migration_uuid = Column(UUID(as_uuid=True), ForeignKey("active_migration.migration_uuid", ondelete="CASCADE"),
nullable=False)
action = Column(VARCHAR)
value = Column(VARCHAR)
allowed_actions = ['start', 'reboot', 'reboot_now', 'abort']
def __init__(self, fs_id, migration_uuid, action, value):
self.fs_id = fs_id
self.migration_uuid = migration_uuid
self.set_action(action)
self.value = value
def set_action(self, action):
if action not in self.allowed_actions:
raise WrongActionException
self.action = action
def create_tables(engine: engine.base.Engine) -> None:
Base.metadata.create_all(engine, checkfirst=True)

# --- end of src/models/ManagedFS.py (zesty.zbs-api) ---
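A small sketch of scheduling a migration with the `new_migration` helper (field values are illustrative; persistence through an open SQLAlchemy session is assumed):

```
from models.ManagedFS import MigrationStatus, RunningMigrations

migration = RunningMigrations.new_migration(
    fs_id="fs-0123456789abcdef0",
    days=["5", "6"],   # day numbers 0-6 when a reboot is allowed
    from_="01:00 AM",
    to="04:00 AM",
    reboot=True,
)
assert migration.status is MigrationStatus.Active
# session.add(migration); session.commit()
```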
import json
import time
from datetime import datetime
from typing import Dict, Union
from .common_base import Base, BaseMixin
try:
from sqlalchemy import (Column, PrimaryKeyConstraint, String, case, cast,
engine, func, or_, select, text)
from sqlalchemy.dialects.postgresql import (BIGINT, BOOLEAN, FLOAT, JSON,
VARCHAR)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Query, Session, aliased, sessionmaker
except ImportError:
raise ImportError(
"sqlalchemy is required by zesty.zbs-api but needs to be vendored separately. Add postgres-utils to your project's requirements that depend on zbs-api.")
class InstancesTags(BaseMixin, Base):
__tablename__ = "instances_tags"
instance_id = Column(VARCHAR, primary_key=True)
account_id = Column(VARCHAR, index=True, default=None)
account_uuid = Column(VARCHAR, index=True, default=None)
instance_name = Column(VARCHAR, default=None)
instance_tags = Column(JSON, default=None)
expired_at = Column(BIGINT, default=None)
__table_args__ = (
PrimaryKeyConstraint('instance_id', name='instances_tags_pkey'),)
def __init__(
self,
instance_id: str,
account_id: str = None,
account_uuid: str = None,
instance_name: str = None,
instance_tags: dict = None,
expired_at: int = None
):
self.instance_id = instance_id
self.account_id = account_id
self.account_uuid = account_uuid
self.instance_name = instance_name
self.instance_tags = instance_tags
self.expired_at = expired_at or int(datetime.utcnow().timestamp()) + 3 * 3600
def __eq__(self, other) -> bool:
return self.__hash__() == other.__hash__()
def __hash__(self) -> int:
return hash(''.join(map(lambda c: getattr(self, c.name) or '',
filter(lambda c: c.name not in ['instance_tags', 'expired_at', 'created_at', 'updated_at'],
self.__table__.columns))) +
json.dumps(self.instance_tags))
def __repr__(self) -> str:
return f"{self.__tablename__}:{self.instance_id}"
def asdict(self) -> dict:
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def as_dict(self) -> dict:
return self.asdict()
def create_tables(engine: engine.base.Engine) -> None:
Base.metadata.create_all(engine, checkfirst=True)

# --- end of src/models/InstancesTags.py (zesty.zbs-api) ---
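A minimal sketch of upserting an instance's tag record (an open SQLAlchemy session is assumed; field values are illustrative):

```
from models.InstancesTags import InstancesTags

row = InstancesTags(
    instance_id="i-0abc123",
    account_id="123456789012",
    instance_name="web-server-1",
    instance_tags={"Name": "web-server-1", "env": "prod"},
)
# expired_at defaults to now + 3 hours; merge() upserts by primary key.
session.merge(row)
session.commit()
```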
import json
import traceback
from decimal import Decimal
from typing import Dict
from zesty.id_handler import create_zesty_id
GB_IN_BYTES = 1024**3
class BlockDevice:
def __init__(
self,
size: int,
btrfs_dev_id: str = None,
cloud_vendor: str = 'Amazon',
created: str = None,
dev_usage: int = None,
iops: int = None,
throughput: int = None,
lun: int = None,
map: str = None,
iops_stats: Dict[str, int] = None,
parent: str = None,
unlock_ts: int = 0,
volume_id: str = None,
volume_type: str = None,
device: str = None,
extendable: bool = True,
removable: bool = True
):
"""
Block Device class doc:
:param size: Size of the device in Bytes
:param btrfs_dev_id: ID of the device inside the BTRFS structure
:param cloud_vendor: Cloud vendor (AWS/Azure/GCP)
:param created: Device creation date
:param dev_usage: How much of the device is in use (in Bytes)
:param iops: Device IOPS amount
:param throughput: Device throughput
:param lun: LUN number (only for Azure)
:param map: The mount slot of the device inside the OS
:param iops_stats: Dict with IOPS statistics
:param parent: If the device is a partition, this is its parent device
:param unlock_ts: TS when the device will be ready to be extended again
:param volume_id: Device ID
:param volume_type: Type of the device in the cloud
:param device: Device mount slot from the cloud
:param extendable: Whether ZestyDisk Handsfree logic is allowed to extend the device
:param removable: Whether ZestyDisk Handsfree logic is allowed to remove the device from the filesystem
"""
# Init empty dict here instead of passing as default value
iops_stats = {} if iops_stats is None else iops_stats
self.size = size
self.cloud_vendor = cloud_vendor
try:
self.volume_id = create_zesty_id(
cloud=self.cloud_vendor,
resource_id=volume_id
)
except Exception:
self.volume_id = volume_id
self.btrfs_dev_id = btrfs_dev_id
self.created = created
self.dev_usage = dev_usage
self.iops = iops
self.throughput = throughput
self.lun = lun
self.map = map
self.iops_stats = iops_stats
if device:
self.device = device
if parent:
self.parent = parent
if not unlock_ts:
self.unlock_ts = 0
else:
self.unlock_ts = unlock_ts
self.volume_type = volume_type
self.extendable = extendable
self.removable = removable
def as_dict(self) -> dict:
return_dict = json.loads(json.dumps(self, default=self.object_dumper))
return {k: v for k, v in return_dict.items() if v is not None}
@staticmethod
def object_dumper(obj) -> dict:
try:
return obj.__dict__
except AttributeError as e:
if isinstance(obj, Decimal):
return int(obj)
print(f"Got exception in object_dumper value: {obj} | type : {type(obj)}")
print(traceback.format_exc())
return obj
def serialize(self) -> dict:
return self.as_dict()
def __repr__(self) -> str:
return str(self.as_dict())

# --- end of src/models/BlockDevice.py (zesty.zbs-api) ---
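A quick sketch of constructing a device record (values are illustrative):

```
from models.BlockDevice import BlockDevice

dev = BlockDevice(
    size=100 * (1024 ** 3),  # size in bytes
    volume_id="vol-0abc123",
    volume_type="gp3",
    iops=3000,
    throughput=125,
    device="/dev/xvdb",
)
print(dev.as_dict())  # None-valued fields are stripped
```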
from abc import ABC, abstractmethod
import enum
from typing import TYPE_CHECKING, Dict
if TYPE_CHECKING:
from ..actions import ZBSAction
class ISpecialInstructions(ABC):
pass
class IActionHF(ABC):
class Status(enum.Enum):
NEW = 1
PENDING = 2
RUNNING = 3
CANCELED = 4
READY = 5
HOLDING = 6
REVERT = 7
PAUSE = 8 # should stop
STOPPED = 9 # action stopped
@abstractmethod
def get_action_id(self) -> str:
raise NotImplementedError(
"ActionHF 'get_action_id' is abstract, please implement")
@abstractmethod
def get_action_type(self) -> str:
raise NotImplementedError(
"ActionHF 'get_action_type' is abstract, please implement")
@abstractmethod
def get_status(self) -> Status:
raise NotImplementedError(
"ActionHF 'get_status' is abstract, please implement")
@abstractmethod
def set_status(self, status: Status):
raise NotImplementedError(
"ActionHF 'set_status' is abstract, please implement")
@abstractmethod
def get_special_instructions(self) -> ISpecialInstructions:
raise NotImplementedError(
"ActionHF 'get_special_instructions' is abstract, please implement")
@abstractmethod
def set_special_instructions(self, special_instructions: ISpecialInstructions):
raise NotImplementedError(
"ActionHF 'set_special_instructions' is abstract, please implement")
class IDeviceHF(ABC):
@abstractmethod
def get_dev_id(self) -> str:
raise NotImplementedError(
"DeviceHF 'get_dev_id' is abstract, please implement")
@abstractmethod
def get_size(self) -> int:
raise NotImplementedError(
"DeviceHF 'get_size' is abstract, please implement")
@abstractmethod
def get_usage(self) -> int:
raise NotImplementedError(
"DeviceHF 'get_usage' is abstract, please implement")
@abstractmethod
def get_unlock_ts(self) -> int:
raise NotImplementedError(
"DeviceHF 'get_unlock_ts' is abstract, please implement")
class IFileSystemHF(ABC):
@abstractmethod
def get_fs_id(self) -> str:
raise NotImplementedError(
"IFileSystemHF 'get_fs_id' is abstract, please implement")
@abstractmethod
def get_devices(self) -> Dict[str, IDeviceHF]:
raise NotImplementedError(
"IFileSystemHF 'get_devices' is abstract, please implement")
@abstractmethod
def get_existing_actions(self) -> Dict[str, IActionHF]:
raise NotImplementedError(
"IFileSystemHF 'get_existing_actions' is abstract, please implement") | zesty.zbs-api | /zesty.zbs-api-1.0.2023.8.29.1693309720.tar.gz/zesty.zbs-api-1.0.2023.8.29.1693309720/src/models/hf_interface.py | hf_interface.py |
# Zet CLI
A Zettelkasten helper utility.
## Installation
1. Clone the repository
```
git clone https://github.com/mattdood/zet-cli.git zet-cli
```
1. Install the cloned repository via pip from the cloned folder
```
cd path/to/install
python3 -m pip install -e zet-cli
```
## Usage
The commands are well documented using `--help`.
```
zet --help
```
## Concepts
This note-taking tool has a few concepts and vocabulary terms that should be
understood before using the various commands it offers.
### Notes
A "zet" is a notes file that is created and stored in a "repo" (folder).
These notes are in Markdown format by default; however, users can create
templates with different formats.
Any containing assets for a note (images, gifs, etc.) are recommended
to be stored in the folder created specifically for that note. This
allows local references within the Markdown file and helps with
organization when repos contain many zets.
### Repos (Storage)
Each zet file is stored in a date-time folder hierarchy.
Example execution:
```
zet create -t "sample title" -c "sample" -tag "test, test1"
```
Folder structure created:
```
zets/
    2022/
        06/
            01/
                20220601120100/
                    sample-title-20220601120100.md
```
Users can have multiple repos, each with their own zets.
Zets are stored with categories and tags as metadata. Based on the
above sample, the file would have the following information:
```
---
path: '/2022/6/sample-title-20220601120100'
title: 'sample title'
category: 'sample'
tags: ['test', 'test1']
---
# sample title
```
### Templates
A template is provided with the default installation (named "default").
This is referenced within the settings file (`~/zets/.env/.local.json`)
when calling the `zet create` command.
The template can be customized at the path referenced in the settings
file; however, users are encouraged to modify only copies of the template.
For users that wish to provide their own templates, these can be created
then added to the settings file with a path that points to that template.
The settings section goes into greater detail regarding things like defaults
and concepts about modifying default command behavior.
Creating new templates is typically a good idea if other file formats are required,
or if there are fields in the default template that you would like to omit.
**Currently supported fields:**
```
path: 'templatePath'
title: 'templateTitle'
date: 'templateDate'
category: 'templateCategory'
tags: templateTags
```
The `templatePath` is useful for blogging; it has a less verbose structure
than the folder layouts provided by the `zet create` option.
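For illustration, a minimal custom template using the supported fields might
look like the following sketch; the field names are replaced with real values
when a zet is created, and the body below the second delimiter is free-form:
```
---
path: 'templatePath'
title: 'templateTitle'
date: 'templateDate'
category: 'templateCategory'
tags: templateTags
---

# templateTitle
```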
### Git commands
The Zet-CLI offers wrappers around common Git commands to encourage
versioning of notes utilizing Git. This helps to track changes in the notes
repositories over time, while offering simple wrappers to reference repository
locations by name rather than managing the git operations from within the
containing folder.
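For example, a typical versioning workflow against the default `zets` repo
(any repo name known to the settings works) might look like:
```
zet init -r zets
zet add -r zets
zet commit -m "initial notes" -r zets
zet push -r zets
```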
### Settings
Users have local settings generated at runtime of the CLI. This ensures that
default settings exist and that the folder structure is consistent across installations.
These settings can be modified to change default behaviors, or even copied over from
other installations on separate machines.
**Note:** A potential solution for keeping multiple installations "in sync"
is storing the settings in a private Gist (if on GitHub).
#### Defaults
The application utilizes defaults to check for things like editors, reduce the
need to specify a specific repo on every command, and determine a template to use
for creating a zet file.
**Note:** The default editor setting is [Neovim](https://neovim.io/).
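As a sketch, the `defaults` block of `~/zets/.env/.local.json` has the
following shape; the editor command value shown here is illustrative:
```
"defaults": {
    "editor": {
        "command": "nvim"
    },
    "repo": "zets",
    "template": "default"
}
```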
#### Repos
The repos known to the CLI are referenced here. Repos can exist outside of the
installation directory (`~/zets/`).
Default template names can be altered within the repo record in the settings file.
There is not a CLI option for this.
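A repo record has roughly the following shape (the folder path shown is
illustrative); point the `template` value at a different template name to
change the default for that repo:
```
"zet_repos": {
    "zets": {
        "folder": "/home/user/zets/zets",
        "template": "default"
    }
}
```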
#### Templates
Templates are used as a base form when creating a new zet. These are copied
and renamed in-place when creating a directory to hold a new zet file. To create
your own templates, utilize the same delimiter pattern (`---`), then place your
corresponding data keys into the file.
These templates do not have to live inside the installation pathway; however,
for organization it is encouraged. A good idea would be to create a `templates/`
directory inside of the environment variables folder (`.env/templates/`).
Templates are referenced by name from the settings file, if you prefer a new default
template then simply change the `defaults` section of the settings file to reference
the name of your new template.
When a template is added to the settings file it will become available in the
CLI for creating zets.
**Note:** All templates need their full path listed in settings. This should
be an absolute reference.
Example:
```
"templates": {
"default": ...,
"my_template": "~/zets/.env/templates/my-template.md"
}
```
## Running tests
To run the test suite, we need to tell the settings to use a different
installation location; otherwise it will clash with any other installations,
which could result in deleting your note repos, settings, etc.
Running the test suite with `ZET_STAGE=test` ensures the installation
pathway of the test objects is inside the project, where teardown can safely take place.
```bash
ZET_STAGE=test pytest -vv -s
```
| zet-cli | /zet-cli-0.0.1.tar.gz/zet-cli-0.0.1/README.md | README.md |
import subprocess
from .settings import Settings
settings = Settings()
def git_init_zets(zet_repo: str = None):
"""Initializes a git repo.
Params:
zet_repo (str): A zet repo name.
            Defaults to the default repo from settings.
Returns:
subprocess (Pipe): Output is the terminal
messages of the bash command.
"""
if zet_repo:
repo = settings.get_repo_path(zet_repo)
else:
# default repo
repo = settings.get_default_repo_path()
return subprocess.check_output(['git', 'init'], cwd = repo)
def git_add_zets(zet_repo: str = None):
"""Adds all files to staging in a repo.
Params:
zet_repo (str): A zet repo name.
            Defaults to the default repo from settings.
Returns:
subprocess (Pipe): Output is the terminal
messages of the bash command.
"""
if zet_repo:
repo = settings.get_repo_path(zet_repo)
else:
# default repo
repo = settings.get_default_repo_path()
return subprocess.check_output(['git', 'add', '.'], cwd = repo)
def git_commit_zets(message: str, zet_repo: str = None):
"""Performs git commit in a repo.
Params:
message (str): The commit message.
zet_repo (str): A zet repo name.
            Defaults to the default repo from settings.
Returns:
subprocess (Pipe): Output is the terminal
messages of the bash command.
"""
if zet_repo:
repo = settings.get_repo_path(zet_repo)
else:
# default repo
repo = settings.get_default_repo_path()
return subprocess.check_output(['git', 'commit', '-m', message], cwd = repo)
def git_push_zets(zet_repo: str = None):
"""Remote pushes a zet repo.
Params:
zet_repo (str): A zet repo name.
            Defaults to the default repo from settings.
Returns:
subprocess (Pipe): Output is the terminal
messages of the bash command.
"""
if zet_repo:
repo = settings.get_repo_path(zet_repo)
else:
# default repo
repo = settings.get_default_repo_path()
return subprocess.check_output(['git', 'push'], cwd = repo)
def git_pull_zets(zet_repo: str = None) -> None:
"""Pulls all changes for every repo.
Params:
folder (Dict[str, str]): A dictionary
of zet folders. Defaults to ZET_FOLDERS.
"""
if zet_repo:
repo = settings.get_repo_path(zet_repo)
else:
# default repo
repo = settings.get_default_repo_path()
subprocess.check_output(['git', 'pull'], cwd = repo) | zet-cli | /zet-cli-0.0.1.tar.gz/zet-cli-0.0.1/src/zet/git_commands.py | git_commands.py |
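# Example workflow (repo name hypothetical; when `zet_repo` is omitted,
# each function falls back to the default repo from settings):
#   git_init_zets("zets")
#   git_add_zets("zets")
#   git_commit_zets("initial notes", "zets")
#   git_push_zets("zets")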
import os
from typing import List
from .settings import Settings
settings = Settings()
class RepoDoesNotExistException(Exception):
"""Repository path does not exist."""
pass
class Repo:
"""Representation of a notes repository.
Each repo has zets organized in a datewise fashion.
This represents all known repositories from settings,
with an interface to interact with them.
Note: Repos are not deleted through this interface.
That is left up to the user.
"""
def __init__(self) -> None:
self.repos = settings.get_repos()
def add_repo(self,
zet_repo: str,
zet_path: str = None,
template: str = None) -> None:
"""Adds a new repo (folder) and appends to the env file.
Params:
zet_repo (str): A zet repo name to
append to an existing env file.
The folder is created.
zet_path (str|None): The path to a new
zet repo. This is the parent folder, defaults
to the installation location.
template (str|None): A default template to
use for the new repository.
Returns:
None
"""
# zet folder naming setup
# and creation
clean_zet_repo = zet_repo.replace(' ', '_')
zet_repo_path = os.path.join(
            os.path.expanduser(zet_path) if zet_path else settings.install_path,
clean_zet_repo
)
if not os.path.exists(zet_repo_path):
os.makedirs(zet_repo_path)
# settings repo update with new
# folder that was just created
new_repo = {
clean_zet_repo: {
"folder": zet_repo_path,
"template": template if template else settings.get_default_template()
}
}
settings.append_setting("zet_repos", new_repo)
def list_zets(self, zet_repo: str = None, full_path: bool = False) -> List[str]:
"""Lists zets.
This will be a catch-all for listing
zets based on different argument structures.
Params:
            zet_repo (str): A zet repo name.
                Defaults to searching all repos.
full_path (bool): Determines if full file paths will
be provided. Defaults to False.
Returns:
zets (List[str]): List of zets.
Raises:
RepoDoesNotExistException
"""
if zet_repo:
repos = [settings.get_repo_path(zet_repo)]
else:
# all repos
repos = [repo["folder"] for repo in settings.get_repos()]
zet_list = []
for repo in repos:
if not os.path.exists(repo):
raise RepoDoesNotExistException("Repo does not exist.")
if full_path:
for root, dirs, files in os.walk(repo):
for file in files:
full_file_path = os.path.join(root, file)
zet_list.append(full_file_path)
else:
for root, dirs, files in os.walk(repo):
for file in files:
zet_list.append(file)
return zet_list | zet-cli | /zet-cli-0.0.1.tar.gz/zet-cli-0.0.1/src/zet/repo.py | repo.py |
import json
import os
import shutil
from pathlib import Path
from typing import Dict, List
# Project install defaults
ZET_PROJECT = Path(__file__)
ZET_HOME = ZET_PROJECT.parents[2]
# Check env stage (test)
if os.environ.get("ZET_STAGE") == "test":
ZET_INSTALL_PATH = ZET_HOME / "zet/"
else:
ZET_INSTALL_PATH = Path.home() / "zet/"
class Settings:
"""Object to interact with `.local.json`.
The settings for this project are created
at runtime if they don't exist already. This ensures
that the user has a defaulted config upon installation,
this can be replaced later at the `~/zet/.env/.local.json`
path.
Settings are stored in JSON to allow flexibility,
    this means that there are some occasions where
the settings will change during execution and require
refreshing from the file.
To conserve space elsewhere and keep references DRY there
are quite a few getter methods to allow access to the
underlying configuration file.
"""
def __init__(self, install_path: Path = ZET_INSTALL_PATH) -> None:
"""Initializes all settings from a given `.local.json` file.
If the file does not exist it will be created from the
`.env/.example.json` file to initialize basic settings.
Users that already have a previous installation can copy
their existing file over the one that the installation
creates.
Params:
install_path (Path): Path to install the application
                settings. This can be changed, though really only for
testing. Defaults to `~/zets/`
Returns:
None
"""
self.zet_local_env_folder = install_path / ".env/"
self.zet_local_env_path = self.zet_local_env_folder / ".local.json"
if not install_path.exists():
example_settings = ZET_HOME / ".env/.example.json"
os.makedirs(self.zet_local_env_folder)
shutil.copyfile(example_settings, self.zet_local_env_path)
self.install_path = install_path
self.data = self.load_settings(self.zet_local_env_path)
# after initial setup the template and default repo need
# to have a path discovery, then add them to the config
if self.data["templates"]["default"] == "":
keys = ["templates", "default"]
value = ZET_HOME / "src/zet/templates/readme.md"
self.update_setting(keys, value.as_posix())
if self.data["zet_repos"]["zets"]["folder"] == "":
keys = ["zet_repos", "zets", "folder"]
value = ZET_INSTALL_PATH / "zets"
self.update_setting(keys, value.as_posix())
def refresh(self):
"""Checks for settings changes.
If an execution creates a change in the
settings, then relies on that change during
the remainder of the process it will need
to reference an up-to-date set of data.
This ensures all data is kept in-line with the
JSON file.
Example:
1. User installs for the first time
1. Executing a `zet create` immediately means there
are no template paths because the initial data load
did not have one (env is being set up).
1. Refreshing enables the user to have that change caught
during execution time. (See `Zet.create()`)
"""
self.data = self.load_settings(self.zet_local_env_path)
return self
@staticmethod
def load_settings(path: Path) -> Dict:
"""Load settings from the JSON file.
Params:
path (Path): Path to a settings file.
Returns:
data (Dict): Dictionary of all settings.
"""
with path.open("r") as file:
data = json.load(file)
return data
def get_setting(self, key: str = None) -> Dict:
"""Fetches a block of settings.
Params:
key (str): A settings key to `.local.json`.
Returns:
settings (Dict): The top-level settings
for a key. Defaults to all settings.
"""
if key:
return self.data[key]
else:
return self.data
def get_defaults(self) -> Dict:
"""Returns all default settings."""
return self.data["defaults"]
def set_item(self, settings, keys: List[str], value) -> None:
"""Recursively check settings against keys.
Recurses a list of keys to arrive at
the final key, then set the value to
something new.
"""
key = keys.pop(0)
try:
self.set_item(settings[key], keys, value)
except (IndexError, KeyError):
settings[key] = value
def update_setting(self, keys: List[str], value) -> None:
"""Updates a setting.
Changes the underlying JSON config
by traveling down a list of keys
(in order) to update the destination value.
TODO:
* This can probably be revised.
"""
# retrieve data from settings
settings_file = self.zet_local_env_path.open("r+")
data = json.load(settings_file)
# set new data values
self.set_item(data, keys, value)
        # dump data to file, truncating any leftover bytes
        # in case the new JSON is shorter than the old
        settings_file.seek(0)
        json.dump(data, settings_file, indent=4)
        settings_file.truncate()
        settings_file.close()
# If settings are still used after updating
# we need to refresh it or they won't be
# recognized
self.refresh()
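    # Example (value hypothetical):
    #   settings.update_setting(["defaults", "repo"], "work")
    # rewrites `.local.json` so that data["defaults"]["repo"] == "work".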
def append_setting(self, key: str, value) -> None:
"""Adds a new entry to a setting.
This allows for things like new repos,
and templates.
"""
# retrieve data from settings
settings_file = self.zet_local_env_path.open("r+")
config_data = json.load(settings_file)
settings_file.close()
# update value
data = config_data[key]
data.update(value)
        # dump data to file; the context manager closes it
        with self.zet_local_env_path.open("w") as settings_file:
            json.dump(config_data, settings_file, indent=4)
def get_default_repo(self) -> str:
"""Returns folder path of default repo."""
return self.data["defaults"]["repo"]
def get_default_repo_path(self) -> str:
"""Returns folder path of default repo."""
return self.data["zet_repos"][self.data["defaults"]["repo"]]["folder"]
def get_templates(self) -> Dict:
"""Returns all templates."""
return self.data["templates"]
def get_template_names(self) -> List[str]:
"""Returns all template names."""
return self.data["templates"].keys()
def get_default_template(self) -> str:
"""Returns the default template."""
return self.data["defaults"]["template"]
def get_default_template_path(self) -> str:
"""Returns the default template."""
return self.data["templates"]["default"]
def get_template_path(self, template_name: str) -> str:
"""Returns the path of a template file.
Params:
template_name (str): Name of a template file.
Returns:
path (str): The path to a template.
"""
return self.data["templates"][template_name]
def get_repo_template_path(self, repo_name: str) -> str:
"""Returns the path of a template file from a repo name.
Params:
repo_name (str): Name of a repository.
Returns:
path (str): The path to a template.
"""
return self.data["templates"][self.data["zet_repos"][repo_name]["template"]]
def get_repo_path(self, repo_name: str) -> str:
"""Returns a repo path from a repo name.
Params:
repo_name (str): Name of a repository.
Returns:
path (str): The path to a repository folder.
"""
return self.data["zet_repos"][repo_name]["folder"]
def get_repos(self) -> Dict:
"""Returns all repos.
Returns:
repos (Dict): The settings for all
repositories.
"""
return self.data["zet_repos"]
def get_repo_names(self) -> List[str]:
"""Returns all repo names.
Returns:
repos (List[str]): The settings for all
repository names.
"""
return self.data["zet_repos"].keys()
def get_editor_command(self) -> str:
"""Returns the command to open an editor."""
return self.data["defaults"]["editor"]["command"] | zet-cli | /zet-cli-0.0.1.tar.gz/zet-cli-0.0.1/src/zet/settings.py | settings.py |
import argparse
import inspect
import textwrap
from typing import Optional, Sequence
from .editor_commands import open_editor
from .git_commands import (git_add_zets, git_commit_zets, git_init_zets,
git_pull_zets, git_push_zets)
from .repo import Repo
from .settings import Settings
from .zet import Zet, bulk_import_zets
# Classes are instantiated to avoid
# doing discovery with the `__qualname__` property
# then instantiating them afterward. This is just easier.
# See:
# * `__qualname__` - https://peps.python.org/pep-3155/
# * Classes from strs - https://stackoverflow.com/questions/1176136/convert-string-to-python-class-object
# Note: None of these classes need args.
FUNCTION_MAP = {
    # Zet commands
    "create": Zet.create,
    "bulk": bulk_import_zets,
# Repo commands
"list": Repo().list_zets,
"add_repo": Repo().add_repo,
# Git commands
"add": git_add_zets,
"commit": git_commit_zets,
"init": git_init_zets,
"pull": git_pull_zets,
"push": git_push_zets,
# Editor commands
"editor": open_editor,
}
settings = Settings()
def main(argv: Optional[Sequence[str]] = None) -> int:
"""
TODO:
* list repos should have a choice of 1 or all
* templates should have a list option for all template
names with paths
* there should be a pretty printer for all options
that print things
"""
parser = argparse.ArgumentParser(
prog="zet",
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(f"""
        Zettelkasten command line tools.
This tool creates, interacts with, and helps to
organize individual notes. A "zet" is seen as an
individual notes file that is stored in a "repo" (folder).
Installation path: `~/zets/`
Default notes repo: `{settings.get_default_repo_path()}`
Environment variables: `~/zets/.env/.local.json`
"""),
)
subparsers = parser.add_subparsers(help="sub-command help", dest="command")
parser_create = subparsers.add_parser(
"create",
help="""Creates a zet file.
The file has "metadata" added into the template
based on the parameters passed to each argument.
""",
)
parser_create.add_argument(
"-t",
"--title",
action="store",
type=str,
required=True,
help="""A zet title.
Used to create a title in the file and
generate a unique filename using a timestamp
and hyphen (-) separated naming convention.
        Timestamps are in yyyyMMddHHmmss format.
Example:
`-t "some title"` becomes "some-title-20220102120051.md"
"""
)
parser_create.add_argument(
"-c",
"--category",
action="store",
type=str,
required=True,
help="A zet category."
)
parser_create.add_argument(
"-tag",
"--tags",
action="store",
type=str,
required=True,
help="""A set of zet tags. Format is `str` but will be
        parsed as a comma-separated list.
Example:
`-t 'tag, tag, tag'`
"""
)
parser_create.add_argument(
"-tem",
"--template",
action="store",
default=settings.get_default_template(),
const=settings.get_default_template(),
nargs="?",
choices=settings.get_template_names(),
help="""A zet template name.
Defaults to "%(default)s".
"""
)
parser_create.add_argument(
"-r",
"--zet_repo",
action="store",
default=settings.get_default_repo(),
const=settings.get_default_repo(),
nargs="?",
choices=settings.get_repo_names(),
help="""A zet repo folder name. Defaults to "%(default)s".
This option is available for all sub-commands.
""",
)
parser_create.set_defaults(which="create")
parser_bulk = subparsers.add_parser("bulk", help="Bulk imports zets from a folder.")
parser_bulk.add_argument(
"-f",
"--files_folder",
action="store",
type=str,
required=True,
help="A folder of files to copy."
)
parser_bulk.add_argument(
"-r",
"--zet_repo",
action="store",
default=settings.get_default_repo(),
const=settings.get_default_repo(),
nargs="?",
choices=settings.get_repo_names(),
help="""A zet repo folder name. Defaults to "%(default)s".
This option is available for all sub-commands.
""",
)
parser_bulk.set_defaults(which="bulk")
parser_add_repo = subparsers.add_parser(
"add_repo",
help="""Creates a zet repo.
Repos are folders that store zets. Separate repos
are used to organize notes at a higher level than
categories/tags.
This could be useful for separating
things like general/personal notes from work-specific
knowledge.
""",
)
parser_add_repo.add_argument(
"-r",
"--zet_repo",
action="store",
required=True,
default=settings.get_default_repo(),
help="A repo folder name."
)
parser_add_repo.add_argument(
"-tem",
"--template",
action="store",
default=settings.get_default_template(),
const=settings.get_default_template(),
nargs="?",
choices=settings.get_template_names(),
help="""Template to use in this repo.
Template to assign to the newly created repo.
Defaults to "%(default)s".
"""
)
parser_add_repo.add_argument(
"-f",
"--zet_path",
action="store",
default="~/zets/",
help="""A new zet folder path.
Defaults to the `~/zets/` installation directory.
Only use this if you need your zets to be stored separately
from where the installation directory is. Not advised for
general use, as it breaks the organization design of the tool.
"""
)
parser_add_repo.set_defaults(which="add_repo")
parser_list = subparsers.add_parser("list", help="List zets from a folder.")
parser_list.add_argument(
"-full",
"--full_path",
action="store",
default=False,
help="Full paths to zets. Defaults to false.",
)
parser_list.add_argument(
"-r",
"--zet_repo",
action="store",
default=settings.get_default_repo(),
const=settings.get_default_repo(),
nargs="?",
choices=settings.get_repo_names(),
help="""A zet repo folder name. Defaults to "%(default)s".
This option is available for all sub-commands.
""",
)
parser_list.set_defaults(which="list")
parser_git_init = subparsers.add_parser("init", help="Git init inside a repo.")
parser_git_init.add_argument(
"-r",
"--zet_repo",
action="store",
default=settings.get_default_repo(),
const=settings.get_default_repo(),
nargs="?",
choices=settings.get_repo_names(),
help="""A zet repo folder name. Defaults to "%(default)s".
This option is available for all sub-commands.
""",
)
parser_git_init.set_defaults(which="init")
parser_git_add = subparsers.add_parser(
"add",
help="Git add all untracked zets inside a repo.",
)
parser_git_add.add_argument(
"-r",
"--zet_repo",
action="store",
default=settings.get_default_repo(),
const=settings.get_default_repo(),
nargs="?",
choices=settings.get_repo_names(),
help="""A zet repo folder name. Defaults to "%(default)s".
This option is available for all sub-commands.
""",
)
parser_git_add.set_defaults(which="add")
parser_git_commit = subparsers.add_parser("commit", help="Git commit zets in a repo.")
parser_git_commit.add_argument(
"-m",
"--message",
action="store",
default="",
help="Commit message. Defaults to none."
)
parser_git_commit.add_argument(
"-r",
"--zet_repo",
action="store",
default=settings.get_default_repo(),
const=settings.get_default_repo(),
nargs="?",
choices=settings.get_repo_names(),
help="""A zet repo folder name. Defaults to "%(default)s".
This option is available for all sub-commands.
""",
)
parser_git_commit.set_defaults(which="commit")
parser_git_push = subparsers.add_parser("push", help="Git push zets in a repo.")
parser_git_push.add_argument(
"-r",
"--zet_repo",
action="store",
default=settings.get_default_repo(),
const=settings.get_default_repo(),
nargs="?",
choices=settings.get_repo_names(),
help="""A zet repo folder name. Defaults to "%(default)s".
This option is available for all sub-commands.
""",
)
parser_git_push.set_defaults(which="push")
parser_git_pull = subparsers.add_parser("pull", help="Git pull zet repo.")
parser_git_pull.add_argument(
"-r",
"--zet_repo",
action="store",
default=settings.get_default_repo(),
const=settings.get_default_repo(),
nargs="?",
choices=settings.get_repo_names(),
help="""A zet repo folder name. Defaults to "%(default)s".
This option is available for all sub-commands.
""",
)
parser_git_pull.set_defaults(which="pull")
parser_open_editor = subparsers.add_parser("editor", help="Open the editor to a repo.")
parser_open_editor.add_argument(
"-r",
"--zet_repo",
action="store",
default=settings.get_default_repo(),
const=settings.get_default_repo(),
nargs="?",
choices=settings.get_repo_names(),
help="""A zet repo folder name. Defaults to "%(default)s".
This option is available for all sub-commands.
""",
)
parser_open_editor.set_defaults(which="editor")
args = parser.parse_args(argv)
if args.command:
# Map arg to command
func = FUNCTION_MAP[args.command]
# Filter argparse specific keys from
# argument values to only ones used
# in the function call.
# This could be done with `**_` as a "kwargs"
# placeholder in the function as well.
# Inspiration: https://stackoverflow.com/a/43238973/12387496
filtered_args = {}
func_params = [param.name for param in inspect.signature(func).parameters.values()]
for key, value in vars(args).items():
if key in func_params:
filtered_args[key] = value
# Edge case handling
# for anything that has multiple function
# calls outside the function map
if args.command == "create":
zet = Zet()
zet.create(**filtered_args)
open_editor(path=zet.path)
else:
func(**filtered_args)
else:
parser.print_help()
return 0
if __name__ == "__main__":
exit(main()) | zet-cli | /zet-cli-0.0.1.tar.gz/zet-cli-0.0.1/src/zet/main.py | main.py |
import ast
import datetime
import fileinput
import os
import shutil
import time
from typing import Dict, List
from .settings import Settings
settings = Settings()
class ZetDoesNotExistException(Exception):
"""Zet does not exist."""
pass
class Zet:
"""A Zettlekasten file.
The representation of a physical zet
on-disk. If one does not exist it will
be created after a `create()` call
and passed back to the caller.
"""
def __init__(self, path: str = None) -> None:
self.path = path
@property
def metadata(self) -> Dict:
"""Get file metadata.
Generates a dictionary of the
metadata available on each of the
zets, this assumes that a path
is available on generation.
Does not support multi-line metadata.
        This requires a consistent delimiter
be used to enclose a chunk of metadata
and that each be in a key-value form.
Example file:
            * The delimiters below are `+++`
* Lists are allowed
* All key-values have colon and space
`: ` between them
-------------------------------
|+++ |
|something: 'some-value-here' |
|list: ['some','values',] |
|+++ |
| |
| |
| |
| |
-------------------------------
Returns:
metadata (Dict): A dictionary of the available
metadata in the file.
Raises:
ZetDoesNotExistException
"""
if self.path is not None and os.path.exists(self.path):
metadata = {}
            # read the file line by line until we
            # hit our second delimiter; this assumes
            # the file uses a consistent delimiter
            with open(self.path, "r") as file:
                delimiter = file.readline()
                for line in file.readlines():
                    if line.startswith(delimiter):
break
else:
# split the line to a named key
# and a value (value contains newline "\n")
name, value = line.partition(": ")[::2]
# check if the value is a list or not
# example representation:
# path: 'some/path/to/file.md'
if "[" not in value:
metadata[name.strip()] = value.rstrip().split("\'")[1]
# value is a list
# example representation:
# tags: ['some', 'tag', 'here',]
else:
value_list = ast.literal_eval(value.rstrip())
metadata[name.strip()] = value_list
return metadata
else:
raise ZetDoesNotExistException("Zet does not exist")
def create(self,
title: str,
category: str,
tags: str,
zet_repo: str = None,
template: str = None) -> None:
"""Creates a new zet.
Takes in the zet folder and returns
a path to the new zet. This will
be time sensitive.
        Params:
            title (str): Title of the zet; a slugified
                version is also used in the filename.
            category (str): A zet category.
            tags (str): Comma-separated zet tags.
            zet_repo (str): A zet repo name.
            template (str): Template name
                for the file.

        Returns:
            None. The full path to the newly created
                zet is stored in `self.path`.
"""
today = datetime.datetime.now()
today_year = str(today.year)
today_month = str(today.month)
today_str = str(today.strftime("%Y%m%d%H%M%S"))
if zet_repo:
repo = settings.get_repo_path(zet_repo)
else:
zet_repo = settings.get_default_repo()
repo = settings.get_default_repo_path()
full_path = os.path.join(
repo, today_year, today_month, today_str
)
clean_title = title.lower().replace(' ', '-')
full_title = str(clean_title) + "-" + today_str + ".md"
filename = os.path.join(full_path, full_title)
tags_list = tags.split(', ')
zet_template_path = "/" + os.path.join(today_year, today_month, clean_title + "-" + today_str)
metadata = [
["templatePath", zet_template_path],
["templateDate", today_str],
["templateTitle", str(title)],
["templateCleanTitle", str(clean_title)],
["templateCategory", str(category)],
["templateTags", str(tags_list)]
]
if not os.path.exists(full_path):
os.makedirs(full_path)
if template is None:
# if the settings haven't been made then the
# data will not be refreshed
settings.refresh()
template = settings.get_default_template_path()
else:
template = settings.get_template_path(template)
new_file = shutil.copyfile(template, filename)
        for line in fileinput.input(new_file, inplace=True):
            for item in metadata:
                line = line.replace(item[0], item[1])
            # with inplace=True, stdout is redirected into the file,
            # so every substituted line must be printed back out
            print(line, end="")
        fileinput.close()
self.path = filename
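    # Example (values hypothetical; this mirrors `zet create` in the CLI):
    #   zet = Zet()
    #   zet.create(title="sample title", category="sample", tags="test, test1")
    #   zet.path  # full path of the newly created markdown file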
def bulk_import_zets(files_folder: str,
zet_repo: str = None) -> List:
"""Bulk create zets from a folder.
Takes in the folder of existing files
to import to a zet repo.
Params:
files_folder (str): A folder with
pre-existing files ready to import.
zet_repo (str): A zet repo name.
Returns:
zet_list (List): A list of dict objects
            for each of the file names, original paths,
            zet file paths, and newly created folder paths.
"""
zet_list = []
if zet_repo:
repo = settings.get_repo_path(zet_repo)
else:
repo = settings.get_default_repo_path()
for root, dirs, files in os.walk(files_folder):
for file in files:
today = datetime.datetime.now()
today_year = str(today.year)
today_month = str(today.month)
today_str = str(today.strftime("%Y%m%d%H%M%S"))
full_path = os.path.join(
repo, today_year, today_month, today_str
)
clean_title = file.lower().replace(' ', '-')
full_title = str(clean_title) + "-" + today_str + ".md"
filename = os.path.join(full_path, full_title)
existing_file_path = os.path.join(root, file)
if not os.path.exists(full_path):
os.makedirs(full_path)
            shutil.copyfile(existing_file_path, filename)
            zet_list.append({
                "file": file,
                "original_path": existing_file_path,
                "zet_path": filename,
                "folder": full_path,
            })
            # timestamps have one-second resolution, so pause to
            # guarantee each imported file gets a unique folder
            time.sleep(1)
    return zet_list
=================================================================
Zeta -- computing zeta functions of groups, algebras, and modules
=================================================================
::
ZZZZZZZZZZZZZZZZZZZ tttt
Z:::::::::::::::::Z ttt:::t
Z:::::::::::::::::Z t:::::t
Z:::ZZZZZZZZ:::::Z t:::::t
ZZZZZ Z:::::Z eeeeeeeeeeee ttttttt:::::ttttttt aaaaaaaaaaaaa
Z:::::Z ee::::::::::::ee t:::::::::::::::::t a::::::::::::a
Z:::::Z e::::::eeeee:::::et:::::::::::::::::t aaaaaaaaa:::::a
Z:::::Z e::::::e e:::::tttttt:::::::tttttt a::::a
Z:::::Z e:::::::eeeee::::::e t:::::t aaaaaaa:::::a
Z:::::Z e:::::::::::::::::e t:::::t aa::::::::::::a
Z:::::Z e::::::eeeeeeeeeee t:::::t a::::aaaa::::::a
ZZZ:::::Z ZZZZe:::::::e t:::::t ttttta::::a a:::::a
Z::::::ZZZZZZZZ:::e::::::::e t::::::tttt:::::a::::a a:::::a
Z:::::::::::::::::Ze::::::::eeeeeeee tt::::::::::::::a:::::aaaa::::::a
Z:::::::::::::::::Z ee:::::::::::::e tt:::::::::::tta::::::::::aa:::a
ZZZZZZZZZZZZZZZZZZZ eeeeeeeeeeeeee ttttttttttt aaaaaaaaaa aaaa
Introduction
------------
Zeta provides methods for computing local and topological zeta functions
arising from the enumeration of subalgebras, ideals, submodules, and
representations of suitable algebraic structures as well as some other types of
zeta functions.
This package is an *experimental fork* of Zeta, turning it into a
pip-installable SageMath package. You can check this `temporary link
<http://u.math.biu.ac.il/~bauerto/zetalib/html/index.html>`_ for the full
documentation.
Please also check the `original homepage of Zeta
<http://www.maths.nuigalway.ie/~rossmann/Zeta/>`_ by `Tobias Rossmann
<http://www.maths.nuigalway.ie/~rossmann/>`_.
Installation
------------
Dependencies
^^^^^^^^^^^^
We assume SageMath version 8.3, or higher, is used.
The `wheel <https://pypi.org/project/wheel/>`__ packaging standard is needed at
installation time. It can be installed by running::
$ sage -pip install wheel
Zeta will try to invoke the programs ``count`` (a part of `LattE integrale
<https://www.math.ucdavis.edu/~latte/software.php>`__) and ``normaliz`` (a part
of `Normaliz <https://www.normaliz.uni-osnabrueck.de>`__). They can be
installed by running::
$ sage -i latte_int
$ sage -i normaliz
See the full documentation for how to use other versions of these programs.
Install from PyPI
^^^^^^^^^^^^^^^^^
The easiest way to obtain Zeta is to run::
$ sage -pip install zetalib
Local install from source
^^^^^^^^^^^^^^^^^^^^^^^^^
Download the source from the git repository::
$ git clone https://gitlab.com/mathzeta2/zetalib.git
For convenience this package contains a `Makefile <Makefile>`_ with some often
used commands. To build the C extensions, install and test you should change to
the root directory and run::
$ make
Alternatively, you can do it in separate steps::
$ make build
$ make test
$ sage -pip install --upgrade --no-index -v . # or `make install`
To uninstall you can run::
$ sage -pip uninstall zetalib # or `make uninstall`
If you want to use another version of SageMath you have installed, you can
modify the ``SAGE`` variable when calling ``make``::
$ make SAGE=/path/to/sage build
Usage
-----
Once the package is installed, you can use it in Sage with::
sage: import zetalib
sage: M = zetalib.Algebra(rank=3, operators=[ [[1,1,-1], [0,1,1], [0,0,1]] ])
sage: zetalib.topological_zeta_function(M)
1/((3*s - 2)*(2*s - 1)*s)
See the documentation for further details.
Packaging
---------
All packaging setup is internally done through `setup.py <setup.py>`_. To
create a "source package" run::
$ sage setup.py sdist
To create a binary wheel package run::
$ sage setup.py bdist_wheel
Or use the shorthand::
$ make build_wheel
Documentation
-------------
The source files of the documentation are located in the `docs/source
<docs/source>`_ directory, and are written in Sage's `Sphinx
<http://www.sphinx-doc.org>`_ format.
Generate the HTML documentation by running::
$ cd docs
$ sage -sh -c "make html"
Or using the shorthand::
$ make doc
Then open ``docs/build/html/index.html`` in your browser.
Acknowledgements
----------------
* The `Sage Sample Package <https://github.com/sagemath/sage_sample>`_ was used
for the initial package structure.
License
-------
See the `LICENSE <LICENSE>`_ file. This fork of Zeta is released under
GPL-3.0-or-later, like the original version, as quoted in the original
documentation:
Copyright 2014, 2015, 2016, 2017 Tobias Rossmann.
Zeta is free software: you can redistribute it and/or modify it under the
terms of the `GNU General Public License
<http://www.gnu.org/copyleft/gpl.html>`_ as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
Zeta is distributed in the hope that it will be useful, but without
any warranty; without even the implied warranty of merchantability or
fitness for a particular purpose. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with Zeta. If not, see http://www.gnu.org/licenses.
| zetalib | /zetalib-0.4.5.tar.gz/zetalib-0.4.5/README.rst | README.rst |
.. nodoctest
TODO
====
#. Create a proper extension module for `crunch.c <zetalib/crunch.c>`_.
#. Improve the installation instructions. Possibly split into install guide and
usage guide.
#. Make Zeta into an experimental SageMath package.
#. Add doctests to all modules, and add them to the documentation.
#. Update ``CHANGES`` to use ReST.
#. Use Sage LaurentPolynomialRing?
#. Use the Sage LattE interface, or consider an update to it by adding
maple.cpp as sage.cpp. See :trac:`18232`, :trac:`18211`, :trac:`22067`,
:trac:`22099`, :trac:`22109`, :trac:`22066`, :trac:`22111`.
#. Add topzetas.txt to the static documentation.
#. Add GitLab Continuous integration.
#. Add a Jupyter demo notebook with Binder support (https://opendreamkit.org/2018/07/23/live-online-slides-with-sagemath-jupyter-rise-binder/).
| zetalib | /zetalib-0.4.5.tar.gz/zetalib-0.4.5/docs/source/todo.rst | todo.rst |
=================================================================
Zeta -- computing zeta functions of groups, algebras, and modules
=================================================================
Introduction
============
Zeta provides methods for computing local and topological zeta functions
arising from the enumeration of subalgebras, ideals, submodules, and
representations of suitable algebraic structures as well as some other
types of zeta functions. For theoretical background and descriptions of
the methods used, see :ref:`references`.
This package is an *experimental fork* of Zeta, turning it into a
pip-installable SageMath package.
Zeta is distributed as a `Python <http://www.python.org>`_-package for the
computer algebra system `SageMath <http://sagemath.org/>`_. In addition to
`Singular <https://www.singular.uni-kl.de>`_ and other software included with
Sage, Zeta also relies on `LattE integrale
<https://www.math.ucdavis.edu/~latte/software.php>`_ and `Normaliz
<https://www.normaliz.uni-osnabrueck.de>`_.
This work is supported by the `Alexander von
Humboldt-Foundation <https://www.humboldt-foundation.de>`_. From
2013–2016, the development of Zeta was supported by the
`DFG <http://www.dfg.de>`_ Priority Programme "`Algorithmic and
Experimental Methods in Algebra, Geometry and Number
Theory <https://spp.computeralgebra.de>`_".
Please also check the `original homepage of Zeta
<http://www.maths.nuigalway.ie/~rossmann/Zeta/>`_ by `Tobias Rossmann
<http://www.maths.nuigalway.ie/~rossmann/>`_.
Installation
============
Dependencies
------------
We assume SageMath version 8.3, or higher, is used.
The `wheel <https://pypi.org/project/wheel/>`__ packaging standard is needed at
installation time. It can be installed by running::
$ sage -pip install wheel
Zeta will try to invoke the programs ``count`` (a part of `LattE integrale
<https://www.math.ucdavis.edu/~latte/software.php>`__) and ``normaliz`` (a part
of `Normaliz <https://www.normaliz.uni-osnabrueck.de>`__). They can be
installed by running::
$ sage -i latte_int
$ sage -i normaliz
If you want to use your own versions of these programs, you can set the
variables ``zetalib.common.count`` and ``zetalib.common.normaliz`` to the
desired paths, respectively.
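
For example, after importing the package, the paths (illustrative here) can be
set as follows::

    sage: import zetalib
    sage: zetalib.common.count = "/usr/local/bin/count"
    sage: zetalib.common.normaliz = "/usr/local/bin/normaliz"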
Older versions of Zeta required a patched version of ``count``. To that end,
the file ``latte-int-1.7.3/code/latte/genFunction/maple.cpp`` in the sources of
LattE integrale 1.7.3 should be replaced by the file ``maple.cpp`` included
with Zeta. In order to compile the patched version of LattE integrale 1.7.3
from scratch, you may want to use `this modified version
<http://www.maths.nuigalway.ie/~rossmann/Zeta/latte-integrale-1.7.3-for-Zeta.tar>`__
(26M) of the `LattE integrale 1.7.3 bundle
<https://www.math.ucdavis.edu/~latte/software/packages/latte_current/latte-integrale-1.7.3.tar.gz>`__.
Compiled versions of ``normaliz``, ``count`` and ``scdd_gmp`` were included in
the ``bin`` directory for ``linux-x86_64`` until version 0.4.0.
Install from PyPI
-----------------
The easiest way to obtain Zeta is to run::
$ sage -pip install zetalib
Local install from source
-------------------------
Download the source from the git repository::
$ git clone https://gitlab.com/mathzeta2/zetalib.git
For convenience this package contains a ``Makefile`` with some often
used commands. To build the C extensions, install and test you should change to
the root directory and run::
$ make
Alternatively, you can do it in separate steps::
$ make build
$ make test
$ sage -pip install --upgrade --no-index -v . # or `make install`
To uninstall you can run::
$ sage -pip uninstall zetalib # or `make uninstall`
If you want to use another version of SageMath you have installed, you can
modify the ``SAGE`` variable when calling ``make``::
$ make SAGE=/path/to/sage build
Build documentation
-------------------
The source files of the documentation are located in the ``docs/source``
directory, and are written in Sage's `Sphinx <http://www.sphinx-doc.org>`_
format.
Generate the HTML documentation by running::
$ cd docs
$ sage -sh -c "make html"
Or using the shorthand::
$ make doc
Then open ``docs/build/html/index.html`` in your browser.
Packaging
=========
All packaging setup is internally done through ``setup.py``. To create a
"source package" run::
$ sage setup.py sdist
To create a binary wheel package run::
$ sage setup.py bdist_wheel
Or use the shorthand::
$ make build_wheel
Basic usage
===========
.. _creating-algebra:
Creating algebras
-----------------
By an **algebra**, we mean a free `\mathbf{Z}`-module of finite rank
endowed with a biadditive multiplication; we do not require this
multiplication to be associative or Lie. Given a `\mathbf Z`-basis
`x_1,\dotsc,x_d` of an algebra `L`, define `\alpha_{ije}\in
\mathbf Z` by
.. MATH::
x_i x_j = \sum_{e=1}^d \alpha_{ije} x_e.
The numbers `\alpha_{ije}` are the **structure constants** of `L` with
respect to the chosen basis `(x_1,\dotsc,x_d)`. The principal method
for specifying an algebra in Zeta is to provide structure constants as a
nested list
.. MATH::
\begin{matrix}
[[ (\alpha_{111},\dotsc,\alpha_{11d}), &
\dotsc & (\alpha_{1d1},\dotsc,\alpha_{1dd}) ]\phantom], \\
\vdots & & \vdots \\
\phantom[[ (\alpha_{d11},\dotsc,\alpha_{d1d}), & \dotsc &
(\alpha_{dd1},\dotsc,\alpha_{ddd}) ]] \\
\end{matrix}
as the first argument of ``zetalib.Algebra``. (We note that the table of
structure constants of an instance of ``zetalib.Algebra`` is stored in the
``table`` attribute.)
.. _computing-topological-zeta-functions:
Computing topological zeta functions
------------------------------------
Given an algebra obtained via ``zetalib.Algebra``, the function
``zetalib.topological_zeta_function`` can be used to attempt to compute an
associated topological zeta function. Specifically,
``zetalib.topological_zeta_function(L, 'subalgebras')`` will attempt to
compute the topological subalgebra zeta function of `L` as a rational
function in `s`, while ``zetalib.topological_zeta_function(L, 'ideals')``
will do the same for ideals. If `L` is a nilpotent Lie algebra, then
``zetalib.topological_zeta_function(L, 'reps')`` will attempt to compute
the topological representation zeta function of the unipotent algebraic
group over `\mathbf Q` corresponding to `L\otimes_{\mathbf Z}
\mathbf Q`.
In general, such computations are not guaranteed to succeed. If the method for
computing topological zeta functions from [Ro2015a_, Ro2015b_] (for subalgebras
and ideals) or [Ro2016]_ (for representations) fails,
``zetalib.topological_zeta_function`` will raise an exception of type
``zetalib.ReductionError``. Disregarding bugs in Zeta, Sage, or elsewhere,
whenever ``zetalib.topological_zeta_function`` does finish successfully,
its output is supposed to be correct.
.. _example-subalgebras-and-ideals:
Example (subalgebras and ideals)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To illustrate the computation of topological subobject zeta functions,
consider the commutative algebra `L = \mathbf Z[X]/X^3`. As a `\mathbf
Z`-basis of `L`, we choose `(1,x,x^2)`, where `x` is the image of `X` in
`L`. The associated nested list of structure constants is
.. MATH::
\begin{matrix}
[[(1, 0, 0), & (0, 1, 0), & (0, 0, 1)]\phantom],\\
\phantom[ [(0, 1, 0), & (0, 0, 1), & (0, 0, 0)]\phantom],\\
\phantom[[(0, 0, 1), & (0, 0, 0), & (0, 0, 0)]].
\end{matrix}
The following documents a complete Sage session leading to the
computation of the topological subalgebra and ideal zeta functions of
`L`.
::
sage: import zetalib
sage: L = zetalib.Algebra([[(1, 0, 0), (0, 1, 0), (0, 0, 1)], [(0, 1, 0), (0, 0,1), (0, 0, 0)], [(0, 0, 1), (0, 0, 0), (0, 0, 0)]])
sage: zetalib.topological_zeta_function(L, 'subalgebras')
2*(15*s - 8)/((5*s - 4)*(3*s - 2)^2*s)
sage: zetalib.topological_zeta_function(L, 'ideals')
1/((3*s - 2)*(2*s - 1)*s)
Example (representations)
^^^^^^^^^^^^^^^^^^^^^^^^^
We illustrate the computation of topological representation zeta
functions of unipotent algebraic groups (over `\mathbf Q`) using the
familiar example of the Heisenberg group `\mathbf H`. The first step is
to construct a `\mathbf Z`-form of its Lie algebra. We choose the
natural `\mathbf Z`-form `L = \mathbf Z x_1 \oplus \mathbf Z x_2
\oplus \mathbf Z x_3` with `[x_1,x_2] = x_3`, `[x_2,x_1] =
-x_3` and `[x_i,x_j] = 0` in the remaining cases. The list of
structure constants of `L` with respect to the basis `(x_1,x_2,x_3)`
is
.. MATH::
\begin{matrix}
[[(0, 0, \phantom-0), & (0, 0, 1), & (0, 0, 0)]\phantom],\\
\phantom[ [(0, 0, -1), & (0, 0, 0), & (0, 0,0)]\phantom],\\
\phantom[[(0, 0, \phantom-0), & (0, 0, 0), & (0, 0, 0)]].
\end{matrix}
The following documents a complete Sage session leading to the
computation of the topological representation zeta function of `\mathbf
H`.
::
sage: import zetalib
sage: L = zetalib.Algebra([[(0, 0, 0), (0, 0, 1), (0, 0, 0)], [(0, 0,-1), (0, 0, 0), (0, 0, 0)], [(0, 0, 0), (0, 0, 0), (0, 0, 0)]])
sage: zetalib.topological_zeta_function(L, 'reps')
s/(s - 1)
.. _computing-local-zeta-functions:
Computing local zeta functions
------------------------------
Uniform zeta functions
^^^^^^^^^^^^^^^^^^^^^^
Using most of the same arguments as ``zetalib.topological_zeta_function`` from
:ref:`computing-topological-zeta-functions`, the function
``zetalib.local_zeta_function`` can be used to attempt to compute *generic*
local subalgebra, ideal, or representation zeta functions -- that is to say,
computed zeta functions will be valid for all but finitely many primes `p` and
arbitrary finite extensions of `\mathbf Q_p` as in [Ro2015a_, §5.2] and
[Ro2016_, §2.2]. If the method from [Ro2018a]_ is unable to compute a specific
zeta function, an exception of type ``zetalib.ReductionError`` will be raised.
By default, ``zetalib.local_zeta_function`` will attempt to construct a
single rational function, `W(q,t)` say, in `(q,t)` such that for almost
all primes `p` and all `q = p^f` (`f \ge 1`), the local zeta function
in question obtained after base extension from `\mathbf Q_p` to a
degree `f` extension is given by `W(q,q^{-s})`. Crucially, such a
rational function `W(q,t)` need not exist and even if it does, Zeta may
be unable to compute it.
Example (uniform local zeta functions)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Let `L` be the Heisenberg Lie algebra as above. The following computes
the associated generic local subalgebra, ideal, and representation zeta
functions.
::
sage: import zetalib
sage: L = zetalib.Algebra([[(0, 0, 0), (0, 0, 1), (0, 0, 0)], [(0, 0,-1), (0, 0, 0), (0, 0, 0)], [(0, 0, 0), (0, 0, 0), (0, 0, 0)]])
sage: zetalib.local_zeta_function(L, 'subalgebras')
-(q^2*t^2 + q*t + 1)/((q^3*t^2 - 1)*(q*t + 1)*(q*t - 1)*(t - 1))
sage: zetalib.local_zeta_function(L, 'ideals')
-1/((q^2*t^3 - 1)*(q*t - 1)*(t - 1))
sage: zetalib.local_zeta_function(L, 'reps')
(t - 1)/(q*t - 1)
That is, for almost all primes `p` and all finite extensions `K/\mathbf
Q_p`, the subalgebra and ideal zeta functions of `L \otimes \mathfrak
O_K` are exactly the first two rational functions in `q` and `t =
q^{-s}`; here, `\mathfrak O_K` denotes the valuation ring of `K` and
`q` the residue field size. These results are due to :doi:`Grunewald, Segal,
and Smith <10.1007/BF01393692>` and are in fact valid
for arbitrary `p`; the restriction to `K = \mathbf Q_p` in their work
is not essential. Similarly, the above computation using Zeta shows that
if `H \leqslant \mathrm{GL}_3` is the Heisenberg group scheme, then
for almost all primes `p` and all finite extensions `K/\mathbf Q_p`,
the representation zeta function of `H(\mathfrak O_K)` is
`(q^{-s}-1)/(q^{1-s}-1)`, as proved (for all `p`) by :doi:`Stasinski and
Voll <10.1353/ajm.2014.0010>`.
.. _non-uniform-zeta-functions-the-symbolic-mode:
Non-uniform zeta functions: the symbolic mode
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Assuming the method from [Ro2018a]_ applies, Zeta supports
limited computations of non-uniform generic local zeta functionsβthat
is, instances where no rational function `W(q,t)` as above exists. For
that purpose, ``symbolic=True`` needs to be passed to
``zetalib.local_zeta_function``. If successful, the output will then be
given by a rational function in `q`, `t`, and finitely many variables of
the form ``sc_i``, each corresponding to the number of rational points
over the residue field of `K` of (the reduction modulo `p` of) the
subvariety ``zetalib.common.symbolic_count_varieties[i]`` of some algebraic
torus.
Example (non-uniform local zeta functions)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Let `L` be the Lie algebra with `\mathbf Z`-basis `(x_1,\dotsc,x_6)` and
non-trivial commutators `[x_1,x_2] = x_3`, `[x_1,x_3] = x_5`, `[x_1,x_4] =
3x_6`, `[x_2,x_3] = x_6`, and `[x_2,x_4] = x_5`; this algebra is called
`L_{6,24}(3)` in :doi:`deΒ Graaf's classification
<10.1016/j.jalgebra.2006.08.006>`. We may compute the generic local
representation zeta functions associated with `L` as follows.
::
sage: import zetalib
sage: table = [zero_matrix(6,6) for _ in range(6)]
sage: table[0][1,2] = 1; table[1][0,2] = -1
sage: table[0][2,4] = 1; table[2][0,4] = -1
sage: table[0][3,5] = 3; table[3][0,5] = -3
sage: table[1][2,5] = 1; table[2][1,5] = -1
sage: table[1][3,4] = 1; table[3][1,4] = -1
sage: L = zetalib.Algebra(table)
sage: zetalib.local_zeta_function(L, 'reps', symbolic=True)
-(q*sc_0*t - q*t^2 - sc_0*t + 1)*(t - 1)/((q^3*t^2 - 1)*(q*t - 1))
sage: zetalib.common.symbolic_count_varieties[0]
Subvariety of 1-dimensional torus defined by [x^2 - 3]
We thus see how the generic local representation zeta functions
associated with `L` depend on whether `3` is a square in the residue
field of `K`. Calling ``zetalib.local_zeta_function(L, 'reps')`` without
``symbolic=True`` will result in an error. As computations with
``symbolic=True`` are generally substantially more computationally
demanding, they should only be attempted as a last resort.
Computing Igusa-type zeta functions
-----------------------------------
Zeta also provides rudimentary support for the computation of local and
topological zeta functions associated with polynomials and polynomial mappings
under the non-degeneracy assumptions from [Ro2015a]_. Given `f_1,\dotsc,f_r
\in \mathbf Q[X_1,\dotsc,X_n]`, Zeta can be used to attempt to compute the
generic local zeta functions (in the sense discussed above) defined by
.. MATH::
\int_{\mathfrak O_K^n} \lVert f_1(x),\dotsc, f_r(x) \rVert^s_K \mathrm d\mu_K(x)
or the associated topological zeta function; here, `\mu_K` denotes the Haar
measure and `\lVert \cdotp \rVert_K` the maximum norm, both normalised as
usual.
For a single polynomial, the method used by Zeta is very closely related to
combinatorial formulae of :doi:`Denef and Loeser <10.2307/2152708>` and
:doi:`Denef and Hoornaert <10.1006/jnth.2000.2606>`. In order to attempt to
compute topological or generic local zeta functions associated with a
polynomial (or a polynomial mapping), simply pass a multivariate polynomial (or
a list of these) to ``zetalib.topological_zeta_function`` or
``zetalib.local_zeta_function``, respectively.
Example (Igusa-type zeta functions)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following computes the local and topological zeta functions associated with
`f` and `(f,g)`, where `f = X^3 - XYZ` and `g = X^2 - Y^2`.
::
sage: import zetalib
sage: R.<x,y,z> = QQ[]
sage: f = x^3 -x*y*z
sage: g = x^2 - y^2
sage: zetalib.local_zeta_function(f)
(q^4 + q^2*t^2 - q^3 - 2*q^2*t - q*t^2 + q^2 + t^2)*(q - 1)/((q^2 + q*t + t^2)*(q - t)^3)
sage: zetalib.topological_zeta_function(f)
1/3*(s^2 + 2*s + 3)/(s + 1)^3
sage: zetalib.local_zeta_function([f,g])
(q^2 + 2*q + t)*(q - 1)^2/((q^2 - t)*(q + t)*(q - t))
sage: zetalib.topological_zeta_function([f,g])
2/((s + 2)*(s + 1))
Non-uniform examples can be handled as in
:ref:`non-uniform-zeta-functions-the-symbolic-mode`.
Modules and algebras with operators
-----------------------------------
In [Ro2015a_, Ro2015b_], (topological) ideal zeta
functions were treated as special cases of submodule zeta functions. In
Zeta, we regard modules as special cases of algebras with operators.
Namely, each algebra `L` in Zeta is endowed with a possibly empty set
`\Omega` of operators, i.e. `\Omega` consists of additive
endomorphisms of `L`. The topological and local subalgebra and ideal
zeta functions of `L` are always understood to be those arising from the
enumeration of `\Omega`-invariant subalgebras or ideals, respectively.
Thus, if the multiplication of `L` is trivial, then the
`\Omega`-invariant subalgebras (and ideals) of `L` are precisely the
submodules of `L` under the action of the enveloping associative unital
ring of `\Omega` within `\mathrm{End}(L)`.
In practice, `\Omega` is given by a finite list of matrices (or nested
lists of integers representing those matrices) corresponding to the
defining basis of `L`. This list is then supplied to ``zetalib.Algebra``
using the keyword parameter ``operators``. For algebras with zero
multiplication, instead of entering structure constants, you can provide
a keyword argument ``rank`` to ``zetalib.Algebra`` which initialises all
structure constants to zero.
Example (operators)
^^^^^^^^^^^^^^^^^^^
We illustrate the computation of the topological submodule zeta function
arising from the enumeration of sublattices within `\mathbf Z^3`
invariant under the matrix
.. MATH::
\begin{bmatrix}
1 & 1 & -1 \\
0 & 1 & 1 \\
0 & 0 & 1
\end{bmatrix}
::
sage: import zetalib
sage: M = zetalib.Algebra(rank=3, operators=[ [[1,1,-1],[0,1,1],[0,0,1]] ])
sage: zetalib.topological_zeta_function(M)
1/((3*s - 2)*(2*s - 1)*s)
In the database included with Zeta, for examples of algebras with
trivial multiplication but non-empty lists of operators, we did not
include ideal zeta functions; they coincide with the corresponding
subalgebra and submodule zeta functions.
.. _average-sizes-of-kernels:
Average sizes of kernels
------------------------
Subject to the same restrictions as above, Zeta supports the computation of the
(local) "ask zeta functions" defined and studied in [Ro2018b]_.
Let `\mathfrak{O}` be a compact discrete valuation ring with maximal
ideal `\mathfrak{P}`. Let `M \subset \mathrm{M}_{d\times
e}(\mathfrak{O})` be a submodule. Let `M_n \subset
\mathrm{M}_{d\times e}(\mathfrak{O}/\mathfrak{P}^n)` denote the
image of `M` under the natural map `\mathrm{M}_{d\times
e}(\mathfrak{O}) \to \mathrm{M}_{d\times
e}(\mathfrak{O}/\mathfrak{P}^n)`. The **ask zeta function** of `M` is
.. MATH::
\mathsf{Z}_M(t) = \sum_{n=0}^\infty \mathrm{ask}(M_n) t^n,
where `\mathrm{ask}(M_n)` denotes the average size of the kernels
of the elements of `M_n` acting by right-multiplication on
`(\mathfrak{O}/\mathfrak{P}^n)^d`.
Zeta can be used to attempt to compute generic local ask zeta functions in the
following global setting. Let `M \subset \mathrm{M}_{d\times e}(\mathbf{Z})` be
a submodule of rank `\ell`. Let `A` be an integral `d \times e` matrix of
linear forms in `\ell` variables such that `M` is precisely the module of
specialisations of `A`. Then ``zetalib.local_zeta_function(A, 'ask')``
attempts to compute `\mathsf{Z}_{M \otimes \mathfrak{O}_K}(t)` for almost all
primes `p` and all finite extensions `K/\mathbf{Q}_p` in the same sense as in
:ref:`computing-local-zeta-functions`. The optional keyword parameter ``mode``
determines whether Zeta attempts to compute ask zeta functions using the
functions `\mathrm{K}_M` (``mode='K'``) or `\mathrm{O}_M` (``mode='O'``) from
[Ro2018b_, §4], respectively; the default is ``mode='O'``.
Example (ask zeta function)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
We compute the generic local ask zeta functions associated with
`\mathrm{M}_{2\times 3}(\mathbf{Z})`.
::
sage: import zetalib
sage: R.<a,b,c,d,e,f> = QQ[]
sage: A = matrix([[a,b,c],[d,e,f]])
sage: zetalib.local_zeta_function(A, 'ask')
-(q^3 - t)/((q - t)*q^2*(t - 1))
Conjugacy class zeta functions
------------------------------
Let `L` be a nilpotent Lie algebra constructed as in
:ref:`creating-algebra`. Then ``zetalib.local_zeta_function(L, 'cc')``
attempts to compute the generic local conjugacy class zeta functions
associated with the unipotent algebraic group corresponding to `L
\otimes \mathbf{Q}`; see [Ro2018b_, Β§7.5]. The optional keyword
parameter ``mode`` has the same interpretation as in
:ref:`average-sizes-of-kernels`.
Example
^^^^^^^
We compute the generic local conjugacy class zeta functions of the
Heisenberg group.
::
sage: import zetalib
sage: L = zetalib.Algebra([[(0, 0, 0), (0, 0, 1), (0, 0, 0)], [(0, 0,-1), (0, 0, 0), (0, 0, 0)], [(0, 0, 0), (0, 0, 0), (0, 0, 0)]])
sage: zetalib.local_zeta_function(L, 'cc')
-(t - 1)/((q^2*t - 1)*(q*t - 1))
.. _the-built-in-database-of-examples:
The built-in database of examples
=================================
Accessing the database
----------------------
Zeta includes a "database" of algebras. When topological or local zeta
functions associated with an algebra in the database have been
successfully computed using Zeta, these are stored as well.
Each algebra stored in Zeta can be referred to using its unique
identification number or one of finitely many names; identification
numbers may change between versions of Zeta. Access to these algebras is
provided using the function ``zetalib.lookup``.
If ``zetalib.lookup`` is called with precisely one argument ``entry``, then
``entry`` should be either an identification number or a name of an
algebra, `L` say, in the database. In this case, ``zetalib.lookup`` will
return `L`. Optional further arguments to ``zetalib.lookup`` can be used to
access other information about `L`:
- If the second argument is ``'subalgebras'``, ``'ideals'``, or
``'reps'`` and the third argument is ``'local'`` or
``'topological'``, then ``zetalib.lookup`` will return the local or
topological subalgebra, ideal, or representation zeta function of
`L`, respectively, if it is known, and ``None`` otherwise.
- If the second argument is ``'id'``, then ``zetalib.lookup`` returns the
identification number of `L`.
- If the second argument is ``'names'``, then ``zetalib.lookup`` returns a
list of the stored names of `L`.
When called without arguments, ``zetalib.lookup`` returns a list of pairs
``(i,names)``, where ``i`` ranges over the identification numbers of all
algebras in the database and ``names`` is a possibly empty list of names
associated with the ``i``\ th algebra.
Example
^^^^^^^
The algebra `L = \mathbf Z[X]/X^3` from :ref:`example-subalgebras-and-ideals`
is known to Zeta under the name ``'ZZ[X]/X^3'``; it can be retrieved via ``L =
zetalib.lookup('ZZ[X]/X^3')``. We may recover the pre-computed topological zeta
functions of `L` as follows:
::
sage: import zetalib
sage: zetalib.lookup('ZZ[X]/X^3', 'subalgebras', 'topological')
2*(15*s - 8)/((5*s - 4)*(3*s - 2)^2*s)
sage: zetalib.lookup('ZZ[X]/X^3', 'ideals', 'topological')
1/((3*s - 2)*(2*s - 1)*s)
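
The other lookup modes described above follow the same pattern (outputs
omitted here, since identification numbers may change between versions of
Zeta)::

    sage: zetalib.lookup('ZZ[X]/X^3', 'id')     # the identification number of L
    sage: zetalib.lookup('ZZ[X]/X^3', 'names')  # the stored names of L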
Algebras and their names
------------------------
Apart from self-explanatory names such as ``'sl(2,ZZ)'`` and
``'gl(2,ZZ)'``, Zeta also includes algebras `L_{d,i}`,
`L_{d,i}(\varepsilon)`, `L^i`, `L^i_a`, `M^i`, and `M^i_a` taken
from de Graaf's tables of
:doi:`nilpotent <10.1016/j.jalgebra.2006.08.006>` and
`soluble <http://projecteuclid.org/euclid.em/1120145567>`__ Lie
algebras; their corresponding names in Zeta are of the form
``'L(d,i)'``, ``'L(d,i;eps)'``, ``'L^i'``, ``'L^i(a)'``, ``'M^i'``, and
``'M^i(a)'``. For the infinite families among these algebras, we only
included selected specialisations of the parameters. Recall
[Ro2015a_, Prop. 5.19(ii)] that the topological subalgebra and
ideal zeta functions of an algebra `L` (over `\mathbf Z`) only depend
on the `\mathbf C`-isomorphism type of `L\otimes_{\mathbf Z}\mathbf
C`; a similar statement holds for topological representation zeta
functions by [Ro2016_, Prop. 4.3].
Similar to `Woodward's tables <http://www.lack-of.org.uk/zfarchive/>`__, we use
the notation ``'g(...)'`` to refer to `\mathbf Z`-forms of algebras from
:doi:`Seeley <10.2307/2154390>`'s list of 7-dimensional nilpotent Lie algebras
over `\mathbf C`; for example ``'g(147A)'`` is a `\mathbf Z`-form of the
algebra `1,4,7_A` in Seeley's list.
The algebras ``'N_i^(8,d)'`` are taken from the lists of :doi:`Ren and Zhu
<10.1080/00927872.2010.483342>`, and :doi:`Yan and Deng
<10.1007/s10587-013-0057-6>`.
The algebras called ``'C(d,i)'`` and ``'C(d,i;eps)'`` in Zeta are
"commutative versions" of the nilpotent Lie rings ``'L(d,i)'`` and
``'L(d,i;eps)'`` respectively: they were obtained by inverting the signs
of all entries underneath the diagonal in the matrices of structure
constants.
An algebra called ``'name[eps]'`` in Zeta is obtained by tensoring
``'name'`` with the dual numbers as in [Ro2016_, §6].
Listing algebras, topological zeta functions, and their properties
------------------------------------------------------------------
The function ``zetalib.examples.printall`` generates a text-based list of
- algebras known to Zeta,
- structural information about each algebra,
- known associated topological zeta functions,
- numerical invariants of these zeta functions (degree, complex roots,
...)
and writes these to an optional file-like object (which defaults to
``stdout``). The output of this function is also available for
`download <http://www.math.uni-bielefeld.de/~rossmann/Zeta/topzetas.txt>`__.
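
For instance (output elided; here we assume the file-like object is passed
as the sole argument)::

    sage: import zetalib
    sage: zetalib.examples.printall()            # writes to stdout
    sage: with open('topzetas.txt', 'w') as f:
    ....:     zetalib.examples.printall(f)       # or to a given file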
By the **essential value** of a rational function `Z\in \mathbf Q(s)`
at a point `w\in \mathbf C`, we mean the value of `Z/(s-w)^m` at `s =
w`, where `m` is the order of `Z` at `w`; similarly, for `w = \infty`.
The output of ``zetalib.examples.printall`` (and hence the content of the
file linked to above) contains the essential values of topological zeta
functions at `0` and `\infty`; these are related to Conjectures IV–V
from [Ro2015a_, Ro2015b_].
Advanced usage
==============
More on the creation of algebras
--------------------------------
As an integral version of terminology used by
:doi:`Evseev <10.1515/CRELLE.2009.065>`, we say that a
`\mathbf Z`-basis `(x_1,\dotsc,x_d)` of an algebra `L` is **simple**
if each product `x_ix_j` is of the form `\varepsilon_{ij}
x_{a_{ij}}` for `\varepsilon_{ij} \in \{-1,0,1\}`. In this case,
the structure constants of `L` with respect to `(x_1,\dotsc,x_d)` are
determined by the matrix `A = [\varepsilon_{ij}
a_{ij}]_{i,j=1,\dotsc,d}`. Zeta supports the creation of algebras
from such a matrix `A` by passing ``simple_basis=True`` and
``matrix=``\ `A` as arguments to ``zetalib.Algebra``.
For example, the Heisenberg Lie ring with `\mathbf Z`-basis `(x_1,x_2,x_3)` and
non-trivial products `[x_1,x_2] = x_3` and `[x_2,x_1] = -x_3` from above can be
defined in Zeta via ``zetalib.Algebra(simple_basis=True, matrix=[[0,3,0],
[-3,0,0], [0,0,0] ])``.
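
In code::

    sage: import zetalib
    sage: H = zetalib.Algebra(simple_basis=True, matrix=[[0,3,0], [-3,0,0], [0,0,0]])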
TODO: Add documentation of the ``bilinear`` argument.
Additive gradings: blocks
-------------------------
Zeta supports the computation of graded subalgebra and ideal zeta
functions as in [Ro2018a]_. These zeta functions enumerate
homogeneous subobjects with respect to a given additive decomposition of
the underlying module. Such decompositions are specified using the
keyword argument ``blocks`` of ``zetalib.Algebra``. To that end, ``blocks``
should be assigned a list `(\beta_1,\dotsc,\beta_r)` of positive
integers summing up to the rank of the algebra `L` in question. If
`(x_1,\dotsc,x_d)` is the defining basis of `L`, then the associated
additive decomposition is `L = L_1 \oplus \dotsb \oplus L_r` for
`L_j = \bigoplus_{i=\sigma_{j-1}+1}^{\sigma_j} \mathbf Z x_i`
and `\sigma_i = \sum_{e=1}^i \beta_e`.
Example (graded zeta functions)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Let `L` be the Heisenberg Lie algebra with `\mathbf Z`-basis
`(x_1,x_2,x_3)` and `[x_1,x_2] = x_3` as above. Then `L = L_1
\oplus L_2` with `L_1 = \mathbf Z x_1 \oplus \mathbf Z x_2` and
`L_2 = \mathbf Z x_3` is the associated graded Lie algebra and the
following computes the generic graded local zeta functions arising from
the enumeration of its homogeneous subalgebras.
::
sage: import zetalib
sage: L = zetalib.Algebra([[(0, 0, 0), (0, 0, 1), (0, 0, 0)], [(0, 0,-1), (0, 0, 0), (0, 0, 0)], [(0, 0, 0), (0, 0, 0), (0, 0, 0)]], blocks=[2,1])
sage: zetalib.local_zeta_function(L, 'subalgebras')
-(q*t^3 - 1)/((q*t^2 - 1)*(q*t - 1)*(t + 1)*(t - 1)^2)
Changing bases
--------------
(The following only applies to the computation of subalgebra and ideal
zeta functions and not to representation or Igusa-type zeta functions.)
Computations using Zeta are usually very sensitive to the choice of the
basis used to define the structure constants of the algebra under
consideration. If a particular zeta function cannot be directly computed
using Zeta, it might be useful to consider different bases. Given an
algebra ``L`` of rank `d` and an invertible `d\times d` matrix ``A``
over `\mathbf Z`, the algebra obtained from `L` by taking the rows of
``A`` as a basis (relative to the original one) can be constructed via
``L.change_basis(A)``. In the presence of a non-trivial grading, the
latter is required to be respected by ``A``.
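
For example, if ``L`` is an algebra of rank 3 created as above, a change of
basis might read as follows (the matrix below is an arbitrary invertible
example)::

    sage: A = matrix(ZZ, [[1,0,0], [1,1,0], [0,0,1]])
    sage: M = L.change_basis(A)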
Unless ``zetalib.local_zeta_function`` or
``zetalib.topological_zeta_function`` is called with the keyword argument
``optimise_basis=False``, Zeta will attempt to find a basis of the
algebra, `L` say, in question such that the associated toric datum (see
[Ro2015b]_) is "small". Currently, Zeta simply loops over
permutations of the defining basis of `L`.
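
For instance, basis optimisation can be disabled as follows (output
omitted)::

    sage: zetalib.topological_zeta_function(L, optimise_basis=False)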
Verbosity
---------
If ``zetalib.local_zeta_function`` or ``zetalib.topological_zeta_function`` is
called with the keyword argument ``verbose=True``, then detailed
information on the various stages of computations will be displayed.
Apart from illustrating the key steps explained in
[Ro2015a_, Ro2015b_, Ro2016_, Ro2018a_],
this can often be helpful when it comes to estimating the feasibility of
the intended computation.
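
For instance (the diagnostic output itself is elided here)::

    sage: zetalib.local_zeta_function(L, 'subalgebras', verbose=True)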
Computational resources
-----------------------
An upper bound on the number of CPUs used by
``zetalib.local_zeta_function`` and ``zetalib.topological_zeta_function`` can
be enforced by providing a numerical value for the keyword parameter
``ncpus``.
During computations of zeta functions, Zeta uses various temporary
files. Be warned that for some computations carried out by the author,
the combined size of these files exceeded 50 GB.
Zeta can be equally demanding when it comes to system memory, in
particular when computing local zeta functions. If computations run out
of memory, you can try reducing the number of CPUs used as indicated
above or try setting the keyword parameter ``profile`` to
``zetalib.Profile.SAVE_MEMORY``. Setting ``profile=zetalib.Profile.SPEED``
will result in slightly better performance at the cost of increased
memory use.
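
For instance, the following caps a computation at four CPUs and selects the
memory-saving profile (illustrative values)::

    sage: zetalib.local_zeta_function(L, 'subalgebras', ncpus=4,
    ....:     profile=zetalib.Profile.SAVE_MEMORY)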
Reduction strategies
--------------------
(The following only applies to the computation of subalgebra and ideal zeta
functions.) The reduction step explained in [Ro2015b]_ depends on a strategy
for choosing "reduction candidates". A particular strategy can be chosen using
the keyword parameter ``strategy`` of ``zetalib.local_zeta_function`` or
``zetalib.topological_zeta_function``. In particular, setting
``strategy=zetalib.Strategy.NONE`` disables reduction completely while
``strategy=zetalib.Strategy.NORMAL`` yields the strategy used in the paper.
Passing ``strategy=zetalib.Strategy.PREEMPTIVE`` will result in a more
aggressive reduction strategy which tries to anticipate and remove causes of
singularities in advance. While often slower than
``zetalib.Strategy.NORMAL``, this strategy is needed to reproduce some of the
computations recorded in the database
(:ref:`the-built-in-database-of-examples`).
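
For instance::

    sage: zetalib.local_zeta_function(L, 'ideals',
    ....:     strategy=zetalib.Strategy.PREEMPTIVE)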
Acknowledgements
================
* The `Sage Sample Package <https://github.com/sagemath/sage_sample>`_ was used
as the initial package structure.
.. _references:
References
==========
.. [Ro2015a] T. Rossmann, *Computing topological zeta functions of groups,
algebras, and modules, I*, Proc. Lond. Math. Soc. (3) 110 (2015), no. 5,
1099--1134. :doi:`10.1112/plms/pdv012`, `preprint
<http://www.maths.nuigalway.ie/~rossmann/files/topzeta.pdf>`__.
.. [Ro2015b] T. Rossmann, *Computing topological zeta functions of groups,
algebras, and modules, II*, J. Algebra 444 (2015), 567--605.
:doi:`10.1016/j.jalgebra.2015.07.039`, `preprint
<http://www.maths.nuigalway.ie/~rossmann/files/topzeta2.pdf>`__.
.. [Ro2016] T. Rossmann, *Topological representation zeta functions of
unipotent groups*, J. Algebra 448 (2016), 210--237.
:doi:`10.1016/j.jalgebra.2015.09.050`, `preprint
<http://www.maths.nuigalway.ie/~rossmann/files/unipotent.pdf>`__.
.. [Ro2018a] T. Rossmann, *Computing local zeta functions of groups, algebras,
and modules*. `preprint
<http://www.maths.nuigalway.ie/~rossmann/files/padzeta.pdf>`__.
.. [Ro2018b] T. Rossmann, *The average size of the kernel of a matrix and
orbits of linear groups*. `preprint
<http://www.maths.nuigalway.ie/~rossmann/files/ask.pdf>`__.
License
=======
See the ``LICENSE`` file. This fork of Zeta is released under
GPL-3.0-or-later, like the original version, as quoted in the original
documentation:
Copyright 2014, 2015, 2016, 2017 Tobias Rossmann.
Zeta is free software: you can redistribute it and/or modify it under the
terms of the `GNU General Public License
<http://www.gnu.org/copyleft/gpl.html>`_ as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
Zeta is distributed in the hope that it will be useful, but without
any warranty; without even the implied warranty of merchantability or
fitness for a particular purpose. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with Zeta. If not, see http://www.gnu.org/licenses.
.. This built documentation is licensed under a `Creative Commons Attribution-Share Alike 4.0 License <https://creativecommons.org/licenses/by-sa/4.0/>`_.
Individual modules documentation
================================
.. toctree::
:maxdepth: 1
algebra
tmplist
.. toctree::
:maxdepth: 1
:hidden:
todo
There is also a :doc:`todo` list. Contributions are welcome!
Indices and Tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| zetalib | /zetalib-0.4.5.tar.gz/zetalib-0.4.5/docs/source/index.rst | index.rst |
Zeta library
============
**Zeta library** is a framework that makes it much easier to create, collect, and pack CSS, SCSS, and JS files. Documentation_ is available (still under development).
.. image:: https://secure.travis-ci.org/klen/zeta-library.png?branch=develop
:target: http://travis-ci.org/klen/zeta-library
:alt: Build Status
.. contents::
Features
========
- Collect **JS** files;
- Collect **CSS** and **SCSS** files in any order;
- Compress output files;
- Parse custom files in supported formats;
- Watch files or folders and automatically repack static files;
- Ships with popular JS and CSS frameworks (which you can extend);
- And more...
* **CSS import support**::
@import url(path or http);
* **JS require support**::
require("path or http");
* **SCSS compilation and import support** See SCSS_ for more information about the language::
@import url(path or http);
// or SCSS-style imports are also supported
@import 'compass/css3'
* **Blueprint css framework** Ex. ::
@import url(zeta://blueprint.css);
* **Compass scss framework** Ex. ::
@import url(zeta://compass.scss);
// or
@import 'compass/reset'
* **Boilerplate framework support** Ex. ::
@import url(zeta://boilerplate.css);
* **Zeta css, js framework** Ex: ::
@import url(zeta://zeta.css);
require("zeta://zeta.js");
Installation
============
**Zeta library** should be installed using pip or setuptools: ::
pip install zetalibrary
easy_install zetalibrary
Usage
=====
$ zeta ::
$ zeta help
usage: zeta [-h] [-v] {pack,watch,shell,libs} ...
positional arguments:
{pack,watch,shell,libs}
pack Parse file or dir, import css, js code and save with
prefix
watch Watch directory for changes and auto pack sources
shell A helper command to be used for shell integration
libs Show zeta libs
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
$ zeta pack --help
usage: zeta pack [-h] [-p PREFIX] [-f FORMAT] [-c] [-d DIRECTORY] [-o OUTPUT]
[-s SETUP_FILE]
source
positional arguments:
source
optional arguments:
-h, --help show this help message and exit
-p PREFIX, --prefix PREFIX
Save packed files with prefix. Default is '_'
-f FORMAT, --format FORMAT
Force format (css, js, ...). By default format parse
from file extension
-c, --compress Compress packed sources
-d DIRECTORY, --directory DIRECTORY
Add custom directory for search with prefix: 'zeta://'
By default $ZETA_LIBDIR
-o OUTPUT, --output OUTPUT
Set output directory path
-s SETUP_FILE, --setup-file SETUP_FILE
Configuration ini file, with 'Zeta' section
Changes
=======
Make sure you've read the following document if you are upgrading from previous versions of zetalibrary:
http://packages.python.org/zetalibrary/changes.html
Examples
==========
#. Parse all static files in directory ``/tmp/static`` with the default prefix::
$> ls -la /tmp/static
drwxr-xr-x 4 www-data www-data 4096 2011-02-16 15:09 main
-rw-r--r-- 1 www-data www-data 335 2011-02-16 15:09 main.css
-rw-r--r-- 1 www-data www-data 343 2011-02-16 15:09 main.js
-rw-r--r-- 1 www-data www-data 0 2011-02-16 15:09 print.css
$> zeta /tmp/static
...
$> ls -la /tmp/static
drwxr-xr-x 4 www-data www-data 4096 2011-02-16 15:09 main
-rw-r--r-- 1 www-data www-data 335 2011-02-16 15:09 main.css
-rw-r--r-- 1 www-data www-data 335 2011-02-16 15:09 _main.css
-rw-r--r-- 1 www-data www-data 343 2011-02-16 15:09 main.js
-rw-r--r-- 1 www-data www-data 343 2011-02-16 15:09 _main.js
-rw-r--r-- 1 www-data www-data 0 2011-02-16 15:09 print.css
-rw-r--r-- 1 www-data www-data 0 2011-02-16 15:09 _print.css
#. Parse ``/static/main.js`` and minify it ::
$ zeta -c /static/main.js
#. Watch directory ``/static/`` ::
$ zeta watch /static
Options
==========
Under construction.
Bug tracker
===========
If you have any suggestions, bug reports or
annoyances please report them to the issue tracker
at https://github.com/klen/zeta-library/issues
Contributing
============
Development of zeta-library happens at github: https://github.com/klen/zeta-library
* klen_ (Kirill Klenov)
License
=======
Licensed under a `GNU lesser general public license`_.
Copyright
=========
Copyright (c) 2011 Kirill Klenov ([email protected])
Compass_:
(c) 2009 Christopher M. Eppstein
http://compass-style.org/
SCSS_:
(c) 2006-2009 Hampton Catlin and Nathan Weizenbaum
http://sass-lang.com/
jQuery_:
(c) 2009-2010 jQuery Project
http://jquery.org/
Note
====
**Your feedback is welcome!**
.. _Documentation: http://packages.python.org/zetalibrary/
.. _zeta-library: http://github.com/klen/zeta-library.git
.. _GNU lesser general public license: http://www.gnu.org/copyleft/lesser.html
.. _SCSS: http://sass-lang.com
.. _compass: http://compass-style.org/
.. _jQuery: http://jquery.com
.. _klen: https://klen.github.com
| zetalibrary | /zetalibrary-0.5.93.tar.gz/zetalibrary-0.5.93/README.rst | README.rst |
import requests
import sys
import json
class Zetalytics(object):
"""
You can preconfigure all services globally with a ``config`` dict.
Example::
zl = Zetalytics(token='AABBCCDDEEFFGG')
"""
def __init__(self, **kwargs):
self.requester = requests.session()
self.config = {
'base_url': 'https://zonecruncher.com/api/v1/'
}
self.config.update(kwargs)
        token = self.config.get('token')
        # TODO: Figure out the server response for a wrong API token
        if not token or len(token) != 32:
            raise ValueError("Incorrect API token provided")
        self.set_token(token)
self.__set_params(self.config)
def set_token(self, token):
if token:
self.requester.params['token'] = token
    def __set_params(self, config):
        # Note: recent versions of requests removed Session.config, and
        # Session has no timeout attribute; these assignments may have no
        # effect there and are kept for backwards compatibility.
        if config.get('verbose'):
            self.requester.config = {'verbose': config['verbose']}
        if config.get('timeout'):
            self.requester.timeout = config['timeout']
    def check_integrity(self, arg_dict, **params):
        # Ensure every required parameter is present, either per call or as a
        # session-level default (e.g. the API token set in __init__).
        for name, spec in arg_dict.items():
            if spec['required'] and name not in params and name not in self.requester.params:
                raise ValueError("Missing required parameter " + name)
        # Validate each supplied parameter against its declared constraints.
        for k, v in params.items():
            if k not in arg_dict:
                raise ValueError("Unknown parameter " + k)
            constraints = arg_dict[k]['constraints']
            if not constraints:
                continue
            for _k, _v in constraints.items():
                if _k == 'length' and len(v) != _v:
                    raise ValueError("Parameter " + k + " does not have a length of " + str(_v))
                if _k == 'range' and not _v[0] <= int(v) <= _v[1]:
                    raise ValueError(
                        "Parameter " + k + " is not in allowed range of " + str(_v[0]) + "-" + str(_v[1]))
                if _k == 'multi' and v not in _v:
                    raise ValueError("Value " + str(v) + " for parameter " + k + " is not an allowed value")
        return True
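    # Illustration (hypothetical spec): given
    #   spec = {'q': {'type': 'String', 'constraints': None, 'required': True}}
    # check_integrity(spec, q='example.com') returns True, whereas an unknown
    # keyword such as check_integrity(spec, bogus='x') raises ValueError.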
    def dateToEpoch(self, date):
        # Not yet implemented: intended to convert a date to epoch seconds.
        pass
def cname2qname(self, **params):
"""
Params;
q: Domain name
toBaseDomain: Boolean, if true convert to base domain
size: Result size
start: Epoch time to start search from
end: Epoch time to end search at
tsfield: Shows first seen, last seen, or both 'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]
"""
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
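    # Example call (hypothetical token and domain; the JSON response comes
    # from the live Zetalytics API):
    #   zl = Zetalytics(token='0123456789abcdef0123456789abcdef')
    #   zl.cname2qname(q='example.com', size=10, tsfield='all')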
def domain2aaaa(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def domain2cname(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def domain2d8s(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'live': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def domain2ip(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def domain2malwaredns(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def domain2malwarehttp(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def domain2mx(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def domain2ns(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def domain2nsglue(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def domain2ptr(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def domain2txt(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def domain2whois(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def email_address(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def email_domain(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def email_user(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def firstseen(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'cctld': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["indexTS", "date"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def hash2malwaredns(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def hash2malwarehttp(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def hostname(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def ip(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def ip2malwaredns(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def ip2malwarehttp(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def ip2nsglue(self, **params):
params.update(token=self.config.get('token'))
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def mx2domain(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def ns2domain(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'size': {'type': 'Integer', 'constraints': {'range': [0, 100000]}, 'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text)
def subdomains(self, **params):
endpoint = sys._getframe().f_code.co_name
args_allowed = {'q': {'type': 'String', 'constraints': None, 'required': True},
'token': {'type': 'String', 'constraints': {'length': 32}, 'required': True},
'toBaseDomain': {'type': 'Boolean', 'constraints': None, 'required': False},
'v': {'type': 'Boolean', 'constraints': None, 'required': False},
'vv': {'type': 'Boolean', 'constraints': None, 'required': False},
'vvv': {'type': 'Boolean', 'constraints': None, 'required': False},
'sort': {'type': 'String',
'constraints': {'multi': ["first", "last", "first:desc", "last:asc"]},
'required': False},
't': {'type': 'String',
'constraints': {
'multi': ["a", "aaaa", "cname", "mx", "name", "ns", "ptr", "soa_email", "soa_server",
"txt"]},
'required': False},
'start': {'type': 'Epoch', 'constraints': None, 'required': False},
'end': {'type': 'Epoch', 'constraints': None, 'required': False},
'tsfield': {'type': 'String',
'constraints': {'multi': ["first_seen", "first_ts", "last_seen", "last_ts", "all"]},
'required': False}
}
if (self.check_integrity(args_allowed, **params)):
return json.loads(self.requester.get(self.config['base_url'] + endpoint, params=params).text) | zetalytics-api | /zetalytics_api-1.0.1-py3-none-any.whl/zetalytics/zetalytics.py | zetalytics.py |
class Context:
"""The context object is responsible for managing the connection with the Zetane engine and holding information about the content of the engine. Because of that, objects are constructed via the context object which ensures they will have the correct socket connection.
New in v1.7.2: The context can be used as a python context manager, which is the recommended approach when possible.
Returns:
Context: An object wrapping the Zetane Engine.
"""
def __init__(self, host="127.0.0.1", port=4004, socket="", remote=False, append=False, update_on_exit=True):
pass
def update(self):
""" Update all context objects """
return self
def plain_launch(self):
"""Launches the Zetane window"""
return self
def launch(self):
"""Launches the Zetane window and connects to the socket"""
return self
def running(self):
"""Returns whether or not the Zetane window is running.
Returns:
Bool: Whether the Zetane process is running or not
"""
return self
def address(self):
""" returns address:port. """
return self
def connect(self, refresh=False, retry_connection=True, retries=10):
""" Attempts connection with the socket. """
return self
def disconnect(self):
""" Disconnect from the socket. """
return self
def close(self):
""" Close the Zetane window and disconnect from the socket. """
return self
def debug(self):
""" Puts the engine into debug mode, stopping at the point this method is called. """
return self
def image(self, data=None, filepath=None):
"""The image object holds a reference to an image in the Zetane universe, which is either a 2D representation of numpy data in the format Height x Width x Depth or a string to a filepath.
Args:
data (numpy, optional): A numpy array of the format Height x Width x Depth that will be interpreted as a 2D image. Also takes an array of numpy arrays for multiple images.
filepath (str, optional): A string to an image filepath, or an array of strings for multiple images.
Returns:
Image: An object wrapping an image in the Zetane engine.
"""
return self
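    # Usage sketch (assumes a running Zetane engine; 'img.png' is a
    # placeholder path):
    #   zctx = Context()
    #   img = zctx.image(filepath='img.png')
    #   zctx.render()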
def numpy(self, data=None, filepath=None):
"""The numpy object holds a reference to a tensor object in the zetane engine, which can be visualized in a variety of ways or used to power other graphics principles.
Args:
data (numpy, optional): A numpy array of any N dimensions.
Returns:
Numpy: A zetane object for a numpy array.
"""
return self
def mesh(self, filepath=None):
"""The mesh object holds a reference to a mesh in the Zetane universe.
Args:
filepath (str, optional): A string to a mesh filepath.
Returns:
Mesh: A zetane object for a mesh.
"""
return self
def pointcloud(self, filepath=None):
"""The pointcloud object holds a reference to a pointcloud in the Zetane universe. Supports file formats las, laz, xml, obj, gltf, glb, and ply.
Args:
filepath (str): Set the filepath of the pointcloud object.
Returns:
PointCloud: Returns a zetane PointCloud object.
"""
return self
def vector(self, data=None):
""" Create a new Zetane Vector object
.. seealso:: Module :mod: `vector`
:param data: numpy data or file path to a numpy file (.npy, .npz).
:type data: str, numpy array.
:return: Zetane Vector object.
"""
return self
def model(self):
"""The model class creates a reference to a machine learning model in the Zetane engine and renders the model architecture. Additionally, the model has data about the model internals and inputs. There are UI elements that allow the model intermediate information to be expanded and explored in order to examine weight and convolutional feature maps.
Returns:
Model: A zetane model object
"""
return self
def text(self, text=""):
"""The text object holds a reference to renderable text in the Zetane universe. Text has a number of methods that adjust how the text is rendered.
Args:
text (str): the text to be displayed in the engine
Returns:
Text: A zetane text object
"""
return self
def table(self, filepath=None):
"""The table object holds a reference to a table in the Zetane universe. It represents a dataframe in a visual, inspectable way.
Args:
filepath (str): Path to a tabular data file (.csv)
Returns:
Table: Returns a zetane Table object.
"""
return self
def form(self, type="", get_child_data=False):
""" Create a new form object in Zetane.
.. seealso:: Module :mod: `form`
:param type: Form type to search for in Universe
:param get_child_data: If true, will retrieve all the data in the child forms as well
:return: Zetane Form object.
"""
return self
def chart(self, title='Chart', domain=[0.0, 1.0], range=[0.0, 1.0], visual_type='Line', metrics=[]):
"""Create a new Chart object in Zetane that holds metrics. Charts can be one of a variety of different types.
Args:
title (str): the title for the Chart.
domain (float list): pair of floats containing domain info.
range (float list): pair of floats containing range info.
visual_type (str): The method of representation.
metrics (list of Metrics): metrics contained by the Chart object.
Returns:
Chart: a Chart object.
"""
return self
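    # Usage sketch (illustrative values; assumes ``zctx`` is a Context):
    #   chart = zctx.chart(title='Training loss', domain=[0.0, 100.0],
    #                      range=[0.0, 1.0], visual_type='Line')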
def clear_universe(self):
""" Clear the Zetane universe.
:return: None
"""
return self
def clear(self):
""" Clear the Zetane universe and invalidate all context objects """
return self
def panel(self, name, width=0.10, height=0.10, screen_x=0.0, screen_y=0.0, navigation='3d', depth_priority=1, is_dynamic=True, has_border=True, light=1.0):
"""
Create a new panel object in Zetane
Args:
name (str): title for the panel
width (float): value representing the screen ratio for the panel to occupy
height (float): value representing the screen ratio for the panel to occupy
screen_x (float): value between 0.0 and 1.0 for panel location on screen
screen_y (float): value between 0.0 and 1.0 for panel location on screen
navigation (str): navigation mode either 'static', '2d', or '3d'.
depth_priority (int): depth relative to sibling panels (higher values bring the panel forwards)
Returns:
Panel: a Panel object
"""
return self
def snapshot(self, filename=""):
return self
def render(self):
"""Render all objects created in this Zetane context.
"""
return self
def save(self, filename):
""" Save the current context as a .ztn file
:param filename: path of the .ztn file to be saved
:return: Save object.
"""
return self
def load(self, filename, receiving_panel=None, verbose=True, parse_xml_intelligently=False, parse_api_tags=True, resolve_absolute_path=True):
""" Load the current context as a .ztn file.
Retrieved Load object will have handles to every object that was present in the loaded Zetane universe.
Handles can be accessed through the .objects() method which returns a dictionary keyed to the unique object names
Alternatively, users can call .get_objects_sorted_by_name() to get all objects in a list sorted by unique names
example: `zimg, zmodel, zmodel = zcontext.load(filename).get_objects_sorted_by_name()`
example: `zimg = zcontext.load(filename).objects()['unique_img_name']`
.. note::
* Due to the new loading/saving functionalities it is expected that default-constructed Zobj derived objects have no active logic
* Ie: their __init__ methods should not actually trigger an update that sends data to Zetane, otherwise we will not be able to retrieve values without side-effects
:param filename: path of the .ztn file to be loaded
:param verbose: if True, will print out message when loaded object's name is already taken
:param parse_xml_intelligently: if True, will parse the .ztn file as an XML. Else simple regex used to parse.
:param receiving_panel: panel object to contain the loaded .ztn contents
:param parse_api_tags: if False, will skip parsing object handles.
:param resolve_absolute_path: when True, will resolve the filename's absolute path before sending it to the engine.
:return: Load object.
"""
return self
def is_name_taken(self, name):
""" returns True if an object in the context has the name."""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/context.py | context.py |
class Metric:
"""This class is used to initialize panels, dials and pie chart metrics"""
def __init__(self):
pass
def metric_initialization(self):
"""This function is used to initialize the metrics variables
Returns
ztxt_overfitting(text): used to print whether model is overfitting, underfitting or working properly
zchart_accuracy(chart dial): used to represent accuracy in dial
Accuracy(z metric): used to show label and numeric value of accuracy
zchart_val_accuracy(chart dial): used to represent validation accuracy in dial
Val_Accuracy(z metric): used to show label and numeric value of validation accuracy
z_train_accuracy(image): used to show the plot of the accuracy and validation accuracy on the matplotlib plot
z_train_loss(image): used to show the plot of the loss and validation loss on the matplotlib plot
zchart_Loss(chart dial): used to represent loss in dial
Loss(z metric):used to show label and numeric value of loss
zchart_val_Loss(chart dial): used to represent validation loss in dial
Val_Loss(z metric):used to show label and numeric value of validation loss
z_precision(image):used to show the plot of the precision on the matplotlib plot
zchart_precision(chart dial): used to represent precision in dial
Precision(z metric):used to show label and numeric value of precision
z_recall(image): used to show the plot of the recall on the matplotlib plot
zchart_recall(chart dial): used to represent recall in dial
Recall(z metric): used to show label and numeric value of Recall
zchart_Pie(pie chart): used to represent true positive, false positive, true negative and false negative in dial
True_pos(z metric): used to show label and numeric value of true positive
False_pos(z metric): used to show label and numeric value of false positive
True_neg(z metric): used to show label and numeric value of true negative
False_neg(z metric): used to show label and numeric value of false negative
z_conf_matrix(image): used to represent the confusion matrix using matplotlib plot
"""
return self
class Dashboard:
"""This class is used to create the Dashboard for both keras and pytorch classification models
Output -> [0,1,2,3,4,5,6,7,8,9,10] categorical variables
Args:
model : defined or saved model used for training and inference
"""
def __init__(self, model, zcontext, zmodel):
pass
def ztxt_initialization(self):
"""This function is used to initialize the metrics variables
Returns
ztxt_1(z_text): used to print the name and probability of the top prediction of the image
ztxt_2(z_text): used to print the name and probability of the second best prediction of the image
ztxt_3(z_text): used to print the name and probability of the third best prediction of the image
ztxt_4(z_text): used to print the name and probability of the fourth best prediction of the image
ztxt_5(z_text): used to print the name and probability of the fifth best prediction of the image
ztxt_target(z_text): used to print the name of the target class
ztxt_prediction(z_text): used to show prediction heading
ztxt_output(z_text): used to show target heading
time_ztxt(z_text): used to show the time taken for running one inference image
zimg(z_image): used to show the images on the panel
"""
return self
def image_map(self, classes, image_table, N=1):
"""Take the most probable labels (output of postprocess).
Args:
classes (list): names of the classes used for training the model
image_table (dictionary): dictionary to map the id to the names of the classes
N (int): top N labels that fit the picture
Returns:
images (list):top N labels that fit the picture.
"""
return self
def plot_confusion_matrix(self, cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.PuOr_r):
"""Plots and save the confusion matrix as jpg
Args:
cm (numpy): numpy confusion matrix which will be plotted using this function
classes (list): names of the classes used for training the model
normalize (Boolean): Used to normalize the values of the confusion matrix
title (string): used to provide the title to the image
cmap (color map): used to provide the color map for the confusion matrix
"""
return self
def softmax(self, x):
"""Compute softmax values (probabilities from 0 to 1) for each possible label.
Args:
x (numpy): numpy (in this case score) for which softmax will be calculated
Returns:
e_x / e_x.sum(axis=0): softmax for the x values
"""
return self
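    # For reference, a standard numerically stable implementation would be
    # (assuming numpy is imported as np):
    #   e_x = np.exp(x - np.max(x))
    #   return e_x / e_x.sum(axis=0)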
def postprocess(self, scores):
"""This function takes the scores generated by the network.
Args:
scores(numpy): scores generated by the network
Returns:
classes(numpy): the classes ids for the number of classes
probability (float): probability of the classes predicted by the model
"""
return self
def metric_plots(self, title, metric_1 = [0], metric_2 = [0]):
"""This function plots the metrics and save them as png in the local directory
Args:
title(string): Title of the plot
metric_1(list): numeric values of the metric in the list format to plot it on the graph
metric_2(list): numeric values of the second metric in the list format to plot it on the graph
"""
return self
def keras_training(self, data, data_id, classes, model_type = 'classification', model_name='keras_model', validation_split =0.2, batch_size = 8, epochs = 1, verbose = 1):
"""
        This function is used to train the model. In the future this template will support all
        model types; currently it handles only classification models.
Args:
data (numpy): training data under which model will be trained
data_id (numpy): class index or class id of the image under which model will be trained
classes (list): names of the classes used for training the model
model_type(string): Type of the model used
model_name (string): Name under which the Keras model will be converted to ONNX and saved
validation_split (float): used to split the training data in validation and train set
batch_size (int): specifies the number of images that will send together for training
epochs (int): specifies the number of iterations that will be used for training the model
verbose (int): specifies the verbosity of training output
Returns:
model: Returns the trained model which can be used in inference
"""
return self
def keras_inference(self, test_data, test_data_id, image_table, model_name='keras_model', verbose = 1):
"""
        This function is used to test the model. In the future this template will support all
        model types; currently it handles only classification models.
Args:
test_data (numpy): test data under which model will be trained
test_data_id (numpy): class index or class id of the image under which model will be tested
image_table (dictionary): dictionary to map the id to the names of the classes
model_name (string): Name under which the Keras model will be converted to ONNX and saved
verbose (int): specifies the verbosity of inference output
Returns:
model: Returns the inferred model
"""
return self
def pytorch_train(self, device, train_loader, optimizer, epochs, loss_criteria, test_loader, classes, model_name='pytorch_model', model_type='classification', opset_version=12):
"""
        This function is used to train the model. In the future this template will support all
        model types; currently it handles only classification models.
Args:
device: type of device used (cpu or cuda) for model
train_loader: training data loader under which model will be trained
epochs (int): specifies the number of iterations that will be used for training the model
optimizer : specifies the type of optimizer used such as Adam, SGD or RMSProp
loss_criteria : used to define the loss for the model
test_loader: testing data loader under which model will be tested
classes (list): names of the classes used for training the model
dummy_input (tuple): dummy_input to convert the model to onnx
model_name (string): Name under which the pytorch model will be converted to onnx and will saved
model_type(string): Type of the model used
opset_version (int): ONNX opset version (default: 12)
Returns:
model: Returns the trained model which can used in inference
"""
return self
def pytorch_test_loss(self, device, test_loader, loss_criteria):
"""
This function is used to generate the test loss for the model
Args:
device: type of device used (cpu or cuda) for model
test_loader: test data loader on which the model will be tested
loss_criteria : used to define the loss for the model
Returns:
avg_loss: Returns the average loss
test_acc: Returns the test accuracy
"""
return self
def pytorch_inference(self, device, test_loader, loss_criteria, image_table, model_name='pytorch_model', opset_version=12):
"""
This function is used to test the model. All model types will be supported by this template in the future; currently it handles only
classification models.
Args:
device: type of device (cpu or cuda) used for the model
test_loader: testing data loader on which the model will be tested
loss_criteria: used to define the loss for the model
image_table (dictionary): dictionary to map the id to the names of the classes
model_name (string): name under which the pytorch model will be converted to ONNX and saved
opset_version (int): ONNX opset version (default: 12)
Returns:
model: Returns the inferred model
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/dashboard.py | dashboard.py |
class Metric:
def __init__(self):
pass
class Inference_Dashboard:
def __init__(self, model, zcontext, zmodel):
pass
def image_map(self, classes, image_table, N=1):
"""Take the most probable labels (output of postprocess).
Args:
classes (list): names of the classes used for training the model
image_table (dictionary): dictionary to map the id to the names of the classes
N (int): top N labels that fit the picture
Returns:
images (list): the top N labels that fit the picture.
"""
return self
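# A commented sketch of the top-N lookup this stub documents, assuming
# `scores` holds the softmaxed class probabilities (names illustrative,
# not part of the shipped API):
#
#   import numpy as np
#   top_n = np.argsort(scores)[::-1][:N]           # ids of the N most probable classes
#   labels = [image_table[int(i)] for i in top_n]  # map class ids to class names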
def softmax(self, x):
"""Compute softmax values (probabilities from 0 to 1) for each possible label.
Args:
x (numpy): numpy (in this case score) for which softmax will be calculated
Returns:
e_x / e_x.sum(axis=0): softmax for the x values
"""
return self
def postprocess(self, scores):
"""This function takes the scores generated by the network.
Args:
scores(numpy): scores generated by the network
Returns:
classes(numpy): the classes ids for the number of classes
probability (float): probability of the classes predicted by the model
"""
return self
def keras_training(self, data, data_id, classes, model_type='classification', model_name='keras_model', validation_split =0.2, batch_size = 8, epochs = 1, verbose = 1):
"""
This function is used to train the model. All model types will be supported by this template in the future; currently it handles only
classification models.
Args:
data (numpy): training data on which the model will be trained
data_id (numpy): class index or class id of the image on which the model will be trained
classes (list): names of the classes used for training the model
model_type (string): type of the model used
model_name (string): name under which the keras model will be converted to ONNX and saved
validation_split (float): used to split the training data into validation and train sets
batch_size (int): specifies the number of images that will be sent together for training
epochs (int): specifies the number of iterations that will be used for training the model
verbose (int): specifies the verbosity of the training output
Returns:
model: Returns the trained model, which can be used for inference
"""
return self
def keras_inference(self, test_data, test_data_id, image_table, model_name='keras_model', verbose = 1, debug_data_index=0):
"""
This function is used to test the model. All model types will be supported by this template in the future; currently it handles only
classification models.
Args:
test_data (numpy): data on which the model will be tested
test_data_id (numpy): class index or class id of the image on which the model will be tested
image_table (dictionary): dictionary to map the id to the names of the classes
model_name (string): name under which the keras model will be converted to ONNX and saved
verbose (int): specifies the verbosity of the inference output
debug_data_index (int): index of a single 'data' input to debug in Zetane (per layer feature maps, tensor viz)
Returns:
model: Returns the inferred model
"""
return self
def pytorch_train(self, device, train_loader, optimizer, epochs, loss_criteria, test_loader, classes, model_name='pytorch_model', model_type = 'classification', opset_version=12):
"""
This function is used to train the model. All model types will be supported by this template in the future; currently it handles only
classification models.
Args:
device: type of device (cpu or cuda) used for the model
train_loader: training data loader on which the model will be trained
epochs (int): specifies the number of iterations that will be used for training the model
optimizer: specifies the type of optimizer used, such as Adam, SGD or RMSProp
loss_criteria: used to define the loss for the model
test_loader: testing data loader on which the model will be tested
classes (list): names of the classes used for training the model
model_name (string): name under which the pytorch model will be converted to ONNX and saved
model_type (string): type of the model used
opset_version (int): ONNX opset version (default: 12)
Returns:
model: Returns the trained model, which can be used for inference
"""
return self
def pytorch_test_loss(self, device, test_loader, loss_criteria):
"""
This function is used to generate the test loss for the model
Args:
device: type of device used (cpu or cuda) for model
test_loader: test data loader on which the model will be tested
loss_criteria : used to define the loss for the model
Returns:
avg_loss: Returns the average loss
test_acc: Returns the test accuracy
"""
return self
def pytorch_inference(self, device, test_loader, loss_criteria, image_table, model_name='pytorch_model', debug_data_index=0, opset_version=12):
"""
This function is used to test the model. All model types will be supported by this template in the future; currently it handles only
classification models.
Args:
device: type of device (cpu or cuda) used for the model
test_loader: testing data loader on which the model will be tested
loss_criteria: used to define the loss for the model
image_table (dictionary): dictionary to map the id to the names of the classes
model_name (string): name under which the pytorch model will be converted to ONNX and saved
debug_data_index (int): index of a single 'data' input to debug in Zetane (per layer feature maps, tensor viz)
opset_version (int): ONNX opset version (default: 12)
Returns:
model: Returns the inferred model
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/inference_dashboard.py | inference_dashboard.py |
class XAIDashboard:
"""
The XAIDashboard class provides the base of the XAI template.
It requires a PyTorch or Keras model and a zcontext object, and visualizes the provided XAI algorithms, the original image and the predicted classes within panels. It also allows for visualizing certain XAI algorithms that work on a per-layer basis (e.g. Grad-CAM) on the model itself, under the associated Conv nodes.
Attributes:
model (torch.nn.Module or tf.Keras.nn.Model): Model to be used for XAI algorithms as well as visualization
zcontext (zetane.context.Context): The context object all visual elements will be sent to
zmodel (zetane.render.model.Model): The Zetane Model (converted from the Keras/PyTorch model) to be visualized
algorithms (list(str)): The list of XAI algorithms to be visualized
scale_factor (int): The scaling factor to scale images appropriately in the Zetane panels.
xai_panel (zetane.render.panel.Panel): The main panel object that houses all other panels
org_img_panel (zetane.render.panel.Panel): Panel for visualizing the original image
topk_panel (zetane.render.panel.Panel): Panel for visualizing the top k predictions of the model as well as the target prediction if available
explain_panel (zetane.render.panel.Panel): Panel that visualizes all global XAI algorithms
radio_panel (zetane.render.panel.Panel): Panel to visualize the radio buttons for toggling per-layer XAI algorithms
"""
def __init__(self, model, zcontext):
"""
Args:
model (torch.nn.Module or tf.Keras.nn.Model): Model to be used for XAI algorithms and visualization
zcontext (zetane.context.Context): The context object all visual elements will be sent to
"""
pass
def set_model(self, model):
"""
Updates the model used for XAI algorithms and visualization.
Args:
model (torch.nn.Module or tf.Keras.nn.Model): Model to be used for XAI algorithms as well as visualization
Returns:
None
"""
return self
def set_algorithms(self, algorithms):
"""
Updates the list of XAI algorithms to be visualized.
Args:
algorithms (list(str)): The list of XAI algorithms to be visualized
Returns:
None
"""
return self
def normalize(self, x):
"""
Applies 0-1 normalization.
Args:
x (ndarray): The numpy array to be normalized
Returns:
ndarray: The normalized array
"""
return self
def softmax(self, x):
"""Compute softmax values for each sets of scores in x."""
return self
def explain_torch(self, img_data, target_class=None, label_class=None, class_dict=None, algorithms=None, mean=None, std=None, opset_version=12):
"""
Runs the explainability template on a PyTorch classification model. Given an image path or data, computes the desired XAI algorithms and the top k predicted classes, and displays them along with the model and the original image.
Args:
img_data (str, ndarray or torch.Tensor): The input image in filepath or Numpy/torch array form
target_class (int): The output class for which the gradients will be calculated when generating the XAI images (default: None)
label_class (int): If available, the ground truth class label (default: None)
class_dict (dict): The class dictionary for the class names
algorithms (list(str)): The list of XAI algorithms to be visualized
mean (list(float)): The mean values for each channel if any in normalization is applied to the original image (default: None)
std (list(float)): The standard deviation values for each channel if any in normalization is applied to the original image (default: None)
opset_version (int): ONNX opset version (default: 12)
Returns:
None
"""
return self
def explain_keras(self, img_data, target_class=None, label_class=None, class_dict=None, algorithms=None, loss_fn=None, postprocess_fn=None):
"""
Runs the explainability template on a Keras classification model. Given an image path or data, computes the desired XAI algorithms and the top k predicted classes, and displays them along with the model and the original image.
Args:
img_data (str or ndarray): The input image in filepath or Numpy array form
target_class (int): The output class for which the gradients will be calculated when generating the XAI images (default: None)
label_class (int): If available, the ground truth class label (default: None)
class_dict (dict): The class dictionary for the class names
algorithms (list(str)): The list of XAI algorithms to be visualized
loss_fn (function): Custom loss function for the provided model if needed. If set to None, this defaults to categorical cross-entropy, which is the standard for most multiclass classification tasks (default: None)
postprocess_fn (function): Custom postprocessing function to extract class probabilities from model outputs if needed. If set to None, this defaults to indexing into the 1D outputs array, assuming softmaxed outputs (default: None)
Returns:
None
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/XAI_dashboard.py | XAI_dashboard.py |
def get_binary():
""" Return path to the Zetane Binaries. """
return None
def get_project_root():
"""Return project root folder."""
return None
def confusion_matrix(label, pred, nc=None):
"""Generates a confusion matrix for given predictions and target labels.
:param label: Array of target classes
:type label: np.ndarray
:param pred: Array of predicted classes,
:type pred: np.ndarray
:param nc: Number of classes, inferred automatically if not specified
:type nc: int
:return: A confusion matrix as a [nc, nc] NumPy array.
:rtype: np.ndarray
"""
return None
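# A minimal sketch of the computation the stub above documents; the helper
# name is illustrative, and the row = target / column = prediction convention
# is an assumption, not confirmed by the source.
import numpy as np
def _confusion_matrix_sketch(label, pred, nc=None):
    nc = nc if nc is not None else int(max(label.max(), pred.max())) + 1
    cm = np.zeros((nc, nc), dtype=np.int64)
    for t, p in zip(label, pred):
        cm[int(t), int(p)] += 1  # count one (target, prediction) pair per sample
    return cm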
def precision_score(label, pred, nc=None):
"""Calculates precision for given predictions and target labels.
Precision is defined as sum(true_positives)/sum(all_pred_positives).
:param label: Array of target classes
:type label: np.ndarray
:param pred: Array of predicted classes,
:type pred: np.ndarray
:param nc: Number of classes, inferred automatically if not specified
:type nc: int
:return: Per-class precision scores as a NumPy array of length nc.
:rtype: np.ndarray
"""
return None
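# Following the formula above (sum(true_positives)/sum(all_pred_positives)),
# per-class precision can be read off a confusion matrix whose columns are
# predictions; a hedged sketch under that assumption (not the shipped code):
import numpy as np
def _precision_sketch(cm):
    tp = np.diag(cm)                          # true positives sit on the diagonal
    pred_pos = np.maximum(cm.sum(axis=0), 1)  # all predicted positives per class, guarding /0
    return tp / pred_pos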
def recall_score(label, pred, nc = None):
"""Calculates recall for given predictions and target labels.
Recall is defined as sum(true_positives)/sum(all_label_positives).
:param label: Array of target classes
:type label: np.ndarray
:param pred: Array of predicted classes,
:type pred: np.ndarray
:param nc: Number of classes, inferred automatically if not specified
:type nc: int
:return: Per-class recall scores as a NumPy array of length nc.
:rtype: np.ndarray
"""
return None
def plot_confusion_matrix(conf_mat, classes, normalize=False, title='Confusion matrix', cmap=None):
"""Plots a confusion matrix.
:param conf_mat: Confusion matrix as an ndarray object
:type conf_mat: np.ndarray
:param classes: List of class labels
:type classes: list<str>
:param normalize: Whether to normalize matrix values, defaults to False
:type normalize: bool, optional
:param title: Title of the confusion matrix, defaults to 'Confusion matrix'
:type title: str, optional
:param cmap: Matplotlib color mapping, defaults to plt.cm.Blues
:type cmap: plt.cm.cmap, optional
:return: None, plots a Matplotlib graph
:rtype: None
"""
return None
def f1_score(label, pred, nc=None):
"""Calculates the F1 score, the harmonic mean of precision and recall, for given predictions and target labels.
:param label: Array of target classes
:type label: np.ndarray
:param pred: Array of predicted classes
:type pred: np.ndarray
:param nc: Number of classes, inferred automatically if not specified
:type nc: int
"""
return None
def grid_placement(list_of_zobjs, max_number_of_columns = 3, flip_y_order = False, padding = 1.0, origin = (0.0, 0.0, 0.0)):
"""Positions a list of Zetane objects in a 2D grid layout.
:param list_of_zobjs: Zetane objects to position
:param max_number_of_columns: maximum number of columns per row, defaults to 3
:param flip_y_order: reverses the vertical ordering of the rows when True
:param padding: spacing between grid cells
:param origin: XYZ origin of the grid
"""
return None
def remap(in_values, in_range=None, out_range=[0.0, 1.0], clamp=False):
""" Read values of a given numpy array, returning a numerically remapped copy
:param in_values: input numpy array
:type label: np.ndarray
:param in_range: if not provided, assumes range is in_value [min,max]
:type label: tuple
:param out_range: target range of values
:type label: tuple
:param clamp: in_values outside of in_range are clamped
:type label: bool
:return: remapped copy of in_values
:rtype: np.ndarray
"""
return None
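# A sketch of the linear remapping described above (illustrative; the helper
# name is not part of the shipped API):
import numpy as np
def _remap_sketch(in_values, in_range=None, out_range=(0.0, 1.0), clamp=False):
    lo, hi = in_range if in_range is not None else (in_values.min(), in_values.max())
    t = (in_values - lo) / (hi - lo)  # normalize over the source range
    if clamp:
        t = np.clip(t, 0.0, 1.0)      # values outside in_range are clamped
    return out_range[0] + t * (out_range[1] - out_range[0])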
def wait_until(predicate_fn, timeout):
"""
Poll the predicate until it becomes True. Non-busy wait sleeps the
thread between polls.
Args:
predicate_fn (function): a function that returns a Boolean.
timeout (float): period in seconds to wait for the predicate to be met.
Returns:
True when the predicate is met or False on timeout.
"""
return None | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/utils.py | utils.py |
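# A sketch of the non-busy polling loop described above; the poll interval
# is an illustrative choice, not taken from the source.
import time
def _wait_until_sketch(predicate_fn, timeout, poll_interval=0.1):
    deadline = time.monotonic() + timeout
    while True:
        if predicate_fn():                # the predicate runs at least once
            return True
        if time.monotonic() >= deadline:
            return False
        time.sleep(poll_interval)         # sleep between polls instead of spinning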
def chained(method):
"""Method decorator to allow chaining and trigger auto_updates. """
return None
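# A hedged sketch of a chaining decorator of this shape (illustrative; the
# shipped decorator also triggers auto_update, which is elided here):
import functools
def _chained_sketch(method):
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        method(self, *args, **kwargs)
        return self  # returning self is what makes obj.position().scale() chains work
    return wrapper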
class Zobj:
def __init__(self, nsocket=None, basetype='object'):
"""
Base class for Zetane API objects.
Args:
nsocket (Socket): Socket object.
basetype (str): identifier of the object type.
"""
pass
def get_type(cls):
"""An object's type is defined by its class name
:meta private:
"""
return None
def set_random_unique_name(self):
return self
def set_name(self, context, name):
""" Set the object's unique name, pass in context to ensure name is unique. """
return self
def get_name(self):
""" Get the object's unique name. """
return self
def update(self, debug=False):
"""Updates the object in Zetane with new data. Mechanically, this dispatches a data message to the Zetane engine. The engine holds an id for this python object. It will not create a new object, but will instead intelligently mutate the existing object as efficiently as possible.
Args:
debug (bool): A boolean that sets the debugging state of a particular object. This acts as a breakpoint in a python script, as the socket will wait indefinitely for an OK response from the Zetane Engine. The Engine has UI elements to allow for continuing with the script or stopping the debug state.
Return:
Zobj: Returns an instance of this class or the inheriting class that can be chained for additional method calls.
"""
return self
def debug(self):
return self
def delete(self):
""" Sets the object to be deleted on the Zetane side. """
return self
def position(self, x=0, y=0, z=0):
"""
Sets the position of the object to be sent to Zetane.
Args:
x (float): position of the object along the X-axis
y (float): position of the object along the Y-axis
z (float): position of the object along the Z-axis
Returns:
Returns this object so that method calls can be chained.
"""
return self
def scale(self, x=1, y=1, z=1):
"""
Sets the scale of the object to be sent to Zetane.
Args:
x (float): scaling value of the object in the X-axis
y (float): scaling value of the object in the Y-axis
z (float): scaling value of the object in the Z-axis
Returns:
Returns this object so that method calls can be chained.
"""
return self
def rotation(self, x=0, y=0, z=0):
"""
Sets the euler angles (radians) of the rotation of the object to be sent to Zetane.
Args:
x (float): rotation in radians around the X-axis of the object
y (float): rotation in radians around the Y-axis of the object
z (float): rotation in radians around the Z-axis of the object
Returns:
Returns this object so that method calls can be chained.
"""
return self
def quaternion(self, x=0, y=0, z=0, w=1):
""" Sets the quaternion parameters of the rotation of the object to be sent to Zetane.
Args:
x (float): X component of the rotation quaternion
y (float): Y component of the rotation quaternion
z (float): Z component of the rotation quaternion
w (float): W (scalar) component of the rotation quaternion
Returns:
Returns this object so that method calls can be chained.
"""
return self
def send_to(self, panel_zobj):
"""
Sends the zobj to the engine to be displayed in
the specified panel.
Args:
panel_zobj (zobj): The panel to contain the specified zobject.
"""
return self
def add_zobj(self, zobj):
"""
Adds zobj object to the content of the panel
Args:
zobj (Zobj): a zobj object to display in the panel.
Returns:
Zobj: Returns this object so calls can be chained.
"""
return self
def auto_update(self, auto=True):
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/render/zobj.py | zobj.py |
class PointCloud:
"""The pointcloud object holds a reference to a pointcloud in the Zetane universe. Supports file formats las, laz, xml, obj, gltf, glb, and ply.
Args:
filepath (str): Set the filepath of the pointcloud object.
Returns:
PointCloud: Returns a zetane PointCloud object.
"""
def __init__(self, nsocket, filepath=None):
pass
def obj(self, filepath=""):
"""
Set the source pointcloud filepath.
Args:
filepath (str): Path to a point cloud file. Relative or absolute. Supports las, laz, xml, obj, gltf, ply.
Returns:
PointCloud: Returns this pointcloud object so calls can be chained.
"""
return self
def elevation_texture(self, filepath=""):
"""Set the color of each points based on the y-axis of the texture
and the y position of a point.
Args:
filepath (str): Path to a vertical texture file.
Returns:
PointCloud: Returns this pointcloud object so calls can be chained.
"""
return self
def particle_texture(self, filepath=""):
"""Replace each points by an image.
Args:
filepath (str): Path to an image file.
Returns:
PointCloud: Returns this pointcloud object so calls can be chained.
"""
return self
def point_is_circle(self, is_circle=False):
"""
Render each point as a circle (True) or a square (False).
Args:
is_circle (bool): render points as circles.
Returns:
PointCloud: Returns this pointcloud object so calls can be chained.
"""
return self
def point_is_deep(self, is_deep=False):
"""
Enable/disable shading of each point.
Args:
is_deep: Boolean to enable/disable shading.
Returns:
PointCloud: Returns this pointcloud object so calls can be chained.
"""
return self
def point_size(self, size=0.1):
"""
Set the size of each point. Must be > 0.
Args:
size: Float representing the size of each point.
Returns:
PointCloud: Returns this pointcloud object so calls can be chained.
"""
return self
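# A hedged usage sketch chaining the setters above (assumes `nsocket` is an
# open socket to the engine and "scan.las" exists; illustrative only):
#
#   cloud = PointCloud(nsocket, filepath="scan.las")
#   cloud.point_is_circle(True).point_size(0.25).update()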
def clone(self):
"""
Clones this PointCloud in Zetane returns the cloned PointCloud object
Returns:
PointCloud: cloned from this PointCloud.
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/render/pointcloud.py | pointcloud.py |
class Form:
""" The Form objects allows us to retrieve dynamically from the Zetane 'universe'. The Zetane universe is composed of a scene tree that is queryable in several different ways.
Args:
nsocket (Socket): Socket to communicate with the Zetane Engine
type (string): The type of the object we will search for in the Zetane Engine. See the above example for how this works with the Zetane universe.
get_child_data (bool): Sets a property determining whether to query objects owned by the searched-for object in the scene tree.
Example:
If our tree is composed like the following:
>>>
<universe>
<vertex />
</universe>
We can query for the vertex object using `Form(type='vertex')`
"""
def __init__(self, nsocket, type="", get_child_data=False):
pass
def update(self):
return self
def has_received(self):
return self
def get_received_data(self):
"""
Gets the zobject returned by the query
"""
return self
def get_values(self, timeout=10):
"""
Gets values associated with the queried object. Blocks the thread
until the values are retreived or the query times out.
The query is guaranteed to happen at least once, regardless of
timeout value.
Args:
timeout (float): period in seconds to wait for the retrieval to be complete.
Returns:
queried object values; or None on timeout.
"""
return self
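# A hedged usage sketch of the query API above (assumes `nsocket` is an
# open socket to the engine; illustrative only):
#
#   vertex_form = Form(nsocket, type="vertex", get_child_data=True)
#   values = vertex_form.get_values(timeout=5)  # blocks until data or timeout
#   if values is not None:
#       children = vertex_form.get_subforms()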
def get_words(self, timeout=10):
"""
Gets 'words', which are strings associated with the queried object
"""
return self
def get_subforms(self):
"""
Gets any objects that are owned by the queried objects. These are child nodes of the current object.
"""
return self
def get_parent(self):
"""
Gets the parent of the queried node in the scene tree.
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/render/form.py | form.py |
class Mesh:
"""The mesh object holds a reference to a mesh in the Zetane universe.
Args:
nsocket (Socket): Socket to communicate with the Zetane Engine.
filepath (str, optional): A string to a mesh filepath.
Returns:
Mesh: A zetane object for a mesh.
"""
def __init__(self, nsocket, filepath=None):
pass
def obj(self, filepath=""):
"""
Set the source mesh OBJ filepath (.obj).
Args:
filepath (str): Path to an OBJ mesh file (.obj). Relative or absolute.
Returns:
Mesh: self
"""
return self
def highlight(self, r=0, g=0, b=0, a=0):
"""
Set the highlight color overlaid on the mesh's base color.
Args:
r (float): Red channel value [0,1].
g (float): Green channel value [0,1].
b (float): Blue channel value [0,1].
a (float): Alpha channel value [0,1] strength of highlight.
Returns:
Mesh: self
"""
return self
def transparency(self, amount=0.0):
"""
Set mesh transparency from opaque (0.0) to fully transparent (1.0).
Args:
amount (float): transparency [0.0 = opaque, 1.0 = transparent].
Returns:
Mesh: self
"""
return self
def backface_culling(self, enable=True):
"""
Enable/disable backface culling.
Args:
enable (bool): enable/disable backface culling.
Returns:
Mesh: self
"""
return self
def wireframe(self, show=True):
"""
Set wireframe visibility status.
Args:
show (bool): show/hide wireframe.
Returns:
Mesh: self
"""
return self
def clone(self):
"""
Clones this mesh in Zetane returns the cloned Mesh object
Returns:
Mesh: A new mesh object, cloned from this mesh.
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/render/mesh.py | mesh.py |
class Chart:
"""
Chart is a container to hold Metric objects. This object is meant to be sent to zetane
to be rendered in a particular manner.
Args:
title (str): the title for the Chart.
metrics (list of Metrics): metrics contained by the Chart object.
visual_type (str): the method of representation ('Line', 'Bar', 'Pie', 'Surface', 'Dial').
data_domain (float list): pair of floats containing domain info.
data_range (float list): pair of floats containing range info.
Returns:
Chart: a Chart object.
"""
def __init__(self, nsocket, title='', data_domain=[0.0, 1.0], data_range=[0.0, 1.0], visual_type='Line', metrics = []):
pass
def update(self):
return self
def include_metrics(self, metrics, append=True):
"""
Includes the specified list of Metrics in the Chart to be rendered in Zetane.
Args:
metrics (list of Metrics): metrics to add to the Chart object.
append (bool): decides whether these Metrics are appended or replace the current.
Returns:
Chart: Returns this object so calls can be chained.
"""
return self
def set_title(self, chart_title):
"""
Sets the title of the Chart, which will denote it in the Zetane universe.
Args:
chart_title (str): the title for the Chart.
Returns:
Chart: Returns this object so calls can be chained.
"""
return self
def set_type(self, type):
"""
Sets how the chart is going to represent its data. Available methods
include: 'Line', 'Bar', 'Pie', 'Surface', 'Dial'.
Args:
type (str): The method of representation.
Returns:
Chart: Returns this object so calls can be chained.
"""
return self
def set_domain(self, min, max):
"""
Sets the minimum and maximum of the domain.
Args:
min (float): domain minimum.
max (float): domain maximum.
Returns:
Chart: Returns this object so calls can be chained.
"""
return self
def set_range(self, min, max):
"""
Sets the minimum and maximum of the range.
Args:
min (float): range minimum.
max (float): range maximum.
Returns:
Chart: Returns this object so calls can be chained.
"""
return self
def set_attribute(self, attributes = [], clear_attributes = False):
"""
Adds attribute tags to the chart which may trigger certain features
within the Zetane Universe.
Attributes for 'Surface' charts:
wireframe - shows the connections between points
Args:
attributes (str list): tags to add to the chart.
Returns:
Chart: Returns this object so calls can be chained.
"""
return self
def metric(self, x=None, y=None, z=None, label='', attributes=[]):
"""
Creates a new metric object.
Args:
x (float list): x axis data.
y (float list): y axis data.
z (float list): z axis data.
label (str): name of metric.
Returns:
new_metric (Metric): a new metric object.
"""
return self
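# A hedged usage sketch combining Chart and Metric (assumes `nsocket` is an
# open engine socket and `losses` is a list of 100 floats; illustrative only):
#
#   chart = Chart(nsocket, title="loss", data_domain=[0.0, 100.0], data_range=[0.0, 1.0])
#   m = chart.metric(x=list(range(100)), y=losses, label="train")
#   chart.include_metrics([m]).update()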
def set_window(self, window = 50, sparse=False):
return self
def clone(self):
"""Clones this chart in Zetane returns the cloned Chart object
Returns:
Chart: a clone of this Chart.
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/render/chart.py | chart.py |
class Panel:
"""
A panel object sets up an area on the screen dedicated to
either a 2D or 3D navigable space, available for the user
to add content to.
Args:
name (str): title for the panel
width (float): value representing the screen ratio for the panel to occupy
height (float): value representing the screen ratio for the panel to occupy
screen_x (float): value between 0.0 and 1.0 for panel location on screen
screen_y (float): value between 0.0 and 1.0 for panel location on screen
navigation (str): navigation mode; either 'static', '2d', or '3d'.
depth_priority (int): layer number (greater == higher priority)
is_dynamic (bool): ability to re-size and re-position the panel.
has_border (bool): add a border to the panel.
light (float): content brightness.
Returns:
Panel: a Panel object
"""
def __init__(self, nsocket, name, width, height, screen_x, screen_y, navigation, depth_priority, is_dynamic=True, has_border=True, light=1.0):
pass
def set_panel_name(self, name):
"""
Sets the name of the panel form
Args:
name (str): name of the panel form.
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def set_width(self, width):
"""
Sets the width of this panel in Zetane Engine.
Args:
width (float): panel width as a fraction of parent panel width.
At 1.0, this panel will have the same width as its parent panel.
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def set_height(self, height):
"""
Sets the height of this panel in Zetane Engine.
Args:
height (float): panel height as a fraction of parent panel height.
At 1.0, this panel will have the same height as its parent panel.
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def set_screen_X(self, screen_x=0.0):
"""
The bottom left corner is the panel's origin point. This function sets the x position of the origin as a fraction of the parent panel's width.
Args:
screen_x (float): panel location on screen; as a fraction of parent panel width. Negative values and values greater than 1 are allowed, placing the origin outside the boundaries of the parent panel. Default 0.0.
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def set_screen_Y(self, screen_y=0.0):
"""
The bottom left corner is the panel's origin point. This function sets the y position of the origin as a fraction of the parent panel's height.
Args:
screen_y (float): panel location on screen; as a fraction of parent panel height. Negative values and values greater than 1 are allowed, placing the origin outside the boundaries of the parent panel. Default 0.0.
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
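# A hedged layout sketch using the fractional coordinates above: two
# side-by-side panels, each taking half the parent's width (illustrative;
# assumes `nsocket` is an open engine socket):
#
#   left = Panel(nsocket, "left", 0.5, 1.0, 0.0, 0.0, "2d", 0)
#   right = Panel(nsocket, "right", 0.5, 1.0, 0.5, 0.0, "3d", 0)
#   left.update(); right.update()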
def set_navigation_mode(self, mode: str = None):
"""
Sets the navigation mode of the panel.
Args:
mode (str): navigation mode. One of:
'static': no navigation; useful for HUDs and overlays
'2d': mouse navigation in XY is enabled
'3d': mouse navigation in XYZ is enabled
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def set_depth_order(self, priority: int = 0):
"""
Sets the depth order of this panel in relation to sibling panels.
Sibling panels share the same parent panel. The default depth is 0.
Higher values bring the panel forward. Siblings that share the same
depth value will be stacked in the same order they were created.
Args:
priority(int): layer number (greater == brings panel forward).
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def dynamic(self, is_dynamic=True):
"""
Decides whether the panel can be dynamically re-sized and re-positioned in
the zetane engine.
TODO: this function will be deprecated soon; with its functionality moving to
set_panel_controls.
Args:
is_dynamic (bool): ability to re-size and re-position the panel.
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def movable(self, movable: bool = False):
"""
Enable layout controls to move/re-position this panel.
Args:
movable (bool): determines whether the panel can be moved
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def maximizable(self, maximizable: bool = False):
"""
Enable layout controls to maximize/minimize this panel.
Args:
maximizable (bool): determines whether the panel can be maximized
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def set_background_color(self, rgb: tuple = None):
""" Sets a background color.
Args:
rgb (tuple): RGB color for the panel background.
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def set_background_gradient(self, top_left: tuple = None, top_right: tuple = None, bottom_left: tuple = None, bottom_right: tuple = None):
""" Sets a 4-corner color gradient for the panel background. Corners with unspecified colors will use a default built-in color dependending on the
current theme.
Args:
top_left (tuple): RGB color for the top left corner.
top_right (tuple): RGB color for the top right corner.
bottom_left (tuple): RGB color for the bottom left corner.
bottom_right (tuple): RGB color for the bottom right corner.
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def set_background_image(self, image_path: str = None):
""" Sets a background image, stretched to fit the panel.
Args:
image_path (str): file path to an image (jpg, png, bmp, etc.).
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def set_background_alpha(self, alpha: float = 1.0):
""" Sets the background's opacity.
Args:
alpha (float): value in range [0.0, 1.0], where 0.0 is transparent,
and 1.0 is opaque. Values outside this range will be clamped.
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def remove_background(self):
""" Removes the panel's background. The depth buffer will not be cleared
and the panel will not contribute any UUID to the screen. Improves performance.
Useful for null panels used purely for organization, as middle nodes in the
layout hierarchy without interactivity features like camera/navigation.
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def border(self, pixels: int = 1):
"""
Adds an inner outline to the panel.
Args:
pixels (int): border thickness, in pixels.
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def set_border_color(self, rgb=None):
""" Sets a border color.
Args:
rgb (tuple): RGB color for the panel border. To enable the border, call 'border(pixels)' with a value > 0.
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def set_border_alpha(self, alpha=1.0):
""" Sets the border's opacity.
Args:
alpha (float): value in range [0.0, 1.0], where 0.0 is transparent,
and 1.0 is opaque. Values outside this range will be clamped.
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def set_camera(self, position: tuple = None, aim: tuple = None):
""" Sets up the panel's camera.
Args:
position (tuple): XYZ position of the camera.
aim (tuple): XYZ position of the camera aim.
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def content_brightness(self, intensity=1.0):
"""
Change the default brightness of the content in the panel.
Does not affect the background nor any sibling or child panels.
Args:
intensity (float): brightness of panel content. Default is 1.0.
Returns:
Panel: Returns this object so calls can be chained.
"""
return self
def update(self, debug=False):
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/render/panel.py | panel.py |
class ChartDEPRECATED:
def __init__(self, nsocket, points=None, data_domain=1.0, data_range=1.0):
pass
def points(self, points=None, append=False):
"""
Sets a 1D numpy array of coords to be sent to the Zetane engine
to be plotted as a graph at the next .update() call.
:param points: a vector of points
:type points: 1-dimensional list
"""
return self
def color(self, r=0.0, g=0.0, b=0.0):
"""
Sets the color of the visual data on the graph
:param r: value (between 0 and 1) of the red color component
:type r: Float
:param g: value (between 0 and 1) of the green color component
:type g: Float
:param b: value (between 0 and 1) of the blue color component
:type b: Float
"""
return self
def compress(self, compress=True):
"""
Setting this causes the rendered points in zetane to remain within a
predetermined space along the X-axis
:param compress_points: compress the rendered point spacing
:type compress_points: Boolean
"""
return self
def set_labels_with_indices(self, labels_with_indices_dict=None):
"""
Takes a dictionary whose keys are labels to display next to the data in
the bar graph, and whose values are the index locations at which those
labels appear
:param labels_with_indices_dict: labels as keys and index locations as values
:type labels_with_indices_dict: dict
"""
return self
def dimensions(self, data_domain, data_range):
return self
def smooth(self, smooth=True):
"""
Sets the graph to have its points interpolated with a curved spline
:param smooth: Boolean to activate spline interpolation
:type smooth: Bool
"""
return self
def as_bar_graph(self, enable_as_bar_graph=True):
"""
Renders the points in the graph as bars
:param enable_as_bar_graph: set rendering of bar graph
:type enable_as_bar_graph: Bool
"""
return self
def color_floor(self, enable_color_floor=True):
"""Colors the entire bar according to the points value"""
return self
def add_border(self, enable_border=True):
return self
def filled(self, enable_filled=True):
"""Fills the space underneath the plotted line with a color gradient"""
return self
def heightmap(self, width, length, heightmap=True):
"""
Sets the plotted vector to be rendered as a heightmap.
The shape of the plane can be set through 'width' and 'length'.
If left empty, the square root of the vector point quantity
is used for both dimensions of the rendered plane.
:param width: width of the plane
:type width: Int
:param length: depth of the plane
:type length: Int
"""
return self
def wireframe(self, wireframe=True):
"""
Sets the Chart object to be plotted as a wireframe
Only has an effect if .heightmap() is called on the object
"""
return self
def clone(self):
"""
Clones this chart in Zetane returns the cloned Chart object
.. todo:: move to zobj when other subclasses are also cloneable
.. todo:: custom zobj.deep_copy function to fine-tune deepcopy() behaviour
:return: a new Chart, cloned from this Chart.
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/render/chartDEPRECATED.py | chartDEPRECATED.py |
class Text:
"""The text object holds a reference to renderable text in the Zetane universe. Text has a number of methods that adjust how the text is rendered.
Args:
text (str): the text to be displayed in the engine
Returns:
Text: A zetane text object
"""
def __init__(self, nsocket, text=None):
pass
def color(self, color_l=(0,0,0)):
"""
Set the base color of the text.
Args:
color_l (tuple): RGB color as three floats, each in [0,1].
Returns:
self: .
"""
return self
def highlight(self, highlight_l=(0,0,0,0)):
"""
Set the highlight color of the text overlayed over the base color
Args:
highlight_l (tuple): RGBA color as four floats, each in [0,1]; a higher alpha implies a more opaque overlay.
Returns:
self: .
"""
return self
def gradient(self, color_list=((0, 0, 0), (1, 1, 1))):
"""
Define a gradient of colors to be applied over the text.
Args:
color_list (tuple list): list of colors to interpolate the gradient from, from left to right.
Should be 3-tuples of color values in [0,1] range.
Returns:
self: .
"""
return self
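# A hedged usage sketch chaining the setters above (assumes `nsocket` is an
# open engine socket; illustrative only):
#
#   label = Text(nsocket, "epoch 1")
#   label.color((1, 1, 1)).gradient(((1, 0, 0), (0, 0, 1))).update()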
def font(self, font):
"""
Set the text font.
Args:
font (str): name of the selected font.
.. note::
Some supported fonts include::
- slab, slab-bold
- roboto-mono, roboto-mono-bold
- fira-mono, fira-mono-bold
- office-code-pro, office-code-pro-bold
Returns:
self: .
"""
return self
def font_size(self, font_size):
"""
Set the font Size.
Args:
font_size (float): Size of the font.
"""
return self
def scale(self):
return self
def prefix(self, prefix):
"""
Set a prefix string to be prepended to the text.
Args:
prefix (str): prefix to prepend to the text.
"""
return self
def postfix(self, postfix):
"""
Set a postfix string to be appended to the text.
Args:
postfix (str): postfix to append to the text.
"""
return self
def precision(self, precision):
"""
Set the precision of numerical values. """
return self
def chars_per_line(self, num=20):
"""
Set the max number of characters per line.
Args:
num (int): Limit of characters per line.
"""
return self
def fixed(self, fixed=True):
"""
Set if a fixed precision is to be used.
Args:
fixed (bool): setting of fixed precision.
"""
return self
def billboard(self, billboard=True):
"""
Set if the characters of the text should always face the camera.
Args:
billboard (bool): setting of billboard.
"""
return self
def align(self, alignment: str = ''):
"""Set the text alignment, with respect to the Text's position.
An empty string ('') indicates unaligned text which does not guarantee
precise placement with respect to this Text's position.
Args:
alignment (str): one of '', 'left', 'center', or 'right'. Defaults to ''.
Returns:
Text: Returns this text object so calls can be chained.
Raises:
ValueError: if 'alignment' is not one of '', 'left', 'center', or 'right'
"""
return self
def valign(self, alignment: str = ''):
"""Set the vertical text alignment, with respect to the Text's position.
An empty string ('') indicates unaligned text which does not guarantee
precise placement with respect to this Text's position.
Args:
alignment (str): one of '', 'top', 'middle', or 'bottom'. Defaults to ''.
Returns:
Text: Returns this text object so calls can be chained.
Raises:
ValueError: if 'alignment' is not one of '', 'top', 'middle', or 'bottom'
"""
return self
def text(self, text):
"""
Set the text to be rendered in Zetane
Args:
text (str): The text to be rendered in Zetane. It can be a string or a number.
Returns:
Text: Returns this text object so calls can be chained.
"""
return self
def clone(self):
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/render/text.py | text.py |
class Model:
"""The model class creates a reference to a machine learning model in the Zetane engine and renders the model architecture. Additionally, the model has data about the model internals and inputs. There are UI elements that allow the model intermediate information to be expanded and explored in order to examine weight and convolutional feature maps.
Returns:
Model: A zetane model object
"""
from enum import Enum
class Verbosity(Enum):
"""An enumeration of the debug levels to set for debugging model issues
:meta private:
"""
DEFAULT = 'default'
TRACE = 'trace'
DEBUG = 'debug'
INFO = 'info'
WARNING = 'warning'
ERROR = 'error'
FATAL = 'fatal'
def __init__(self, nsocket, visualize_inputs=False):
pass
def update(self, inputs=None):
"""Send data to Zetane to update the model.
Args:
inputs (str, numpy.array): file path to the inputs (.npy or .npz files), or numpy array of raw data.
Returns:
self: .
"""
return self
def onnx(self, model, run_model_check=False):
"""Update model data with an ONNX model
Args:
model (str): File path to a `.onnx` file.
run_model_check(bool): Runs the onnx model validity checker, which will throw errors for models that are improperly structured.
Returns:
Model: Returns self so that calls can be chained.
"""
return self
def tensorflow(self, sess, inputs, outputs, names=('saved_model.ckpt', 'tensorflow_model.onnx'), run_model_check=False):
"""Update model data with a tensorflow model
* Temporarily saves Tensorflow model to a checkpoint file and freezes the graph during call.
* Requires installation of tensorflow.
Args:
sess (str): Running tensorflow session.
inputs (list): List of input names in tf graph, formatted as node_name:port_id.
outputs (list): List of output names in tf graph, formatted as node_name:port_id.
names (tuple): Tuple of size 2 with filenames for checkpoint and onnx files.
run_model_check(bool): Runs the onnx model validity checker, which will throw errors for models that are improperly structured.
Returns:
Model: Returns self so that calls can be chained.
"""
return self
def keras(self, model, input_signature, opset=13, output_path='keras_model.onnx', run_model_check=False):
"""Update model data with a keras model
Args:
model (tf.keras.Model): Keras model to be rendered.
input_signature (tf.TensorSpec, np.array): a tf.TensorSpec or a numpy array defining the shape/dtype of the input
opset (int): ONNX opset version used for the conversion (default: 13).
output_path (str): Name for the keras model save file. Define it uniquely if rendering multiple models.
run_model_check(bool): Runs the onnx model validity checker, which will throw errors for models that are improperly structured.
Returns:
Model: Returns self so that calls can be chained.
"""
return self
def torch(self, model, inputs, name='torch_model.onnx', run_model_check=False, opset_version=12, input_names=None, output_names=None):
"""Update model data with a pytorch model
Args:
model (torch.nn.Module): Pytorch model to be rendered.
inputs (torch.Tensor): Pytorch tensor to serve as input to the network. This can be the actual model input which will be displayed in Zetane, or a dummy tensor with the proper shape.
name (str): Name for the pytorch model. Define it uniquely if rendering multiple models.
run_model_check(bool): Runs the onnx model validity checker, which will throw errors for models that are improperly structured.
opset_version (int): ONNX opset version used for the export (default: 12).
Returns:
Model: Returns self so that calls can be chained.
"""
return self
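# A hedged usage sketch of the PyTorch path above (assumes torch is
# installed and `nsocket` is an open engine socket; illustrative only):
#
#   import torch
#   net = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())
#   dummy = torch.randn(1, 3, 224, 224)  # shape-only input for the ONNX export
#   zmodel = Model(nsocket)
#   zmodel.torch(net, dummy, name="demo.onnx").update()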
def model(self, model=None, run_model_check=False, expose_onnx_outputs=False, overwrite_onnx=False, overwrite_conv_names=False):
"""
Set the absolute file path to the .onnx object.
Args:
model (str): relative path to .onnx object
Returns:
Model: Returns self so that calls can be chained.
"""
return self
def prep_model_for_onnx_runtime(self, overwrite=False, run_model_validity_checker=False, verbose=False, overwrite_conv_names=False):
return self
def inputs(self, inputs=None):
"""Set the numpy file to be loaded into the input of the ONNX model.
Args:
inputs (str, numpy.array): path to the .npy/.npz file or a numpy array of the raw data.
Returns:
Model: Returns self so that calls can be chained.
"""
return self
def execute_model(self, execute_model=True):
"""Sets whether the model will run an inference pass in the Zetane engine.
Args:
execute_model (bool): Sets whether the model will run an inference pass.
Returns:
Model: Returns self so that calls can be chained.
"""
return self
def visualize_inputs(self, visualize=True):
"""
Set up the model to either visualize the inputs or not.
Args:
visualize (bool): if true, the model's inputs will be visualized when they are loaded in.
Returns:
Model: Returns self so that calls can be chained.
"""
return self
def run_model_checker(self, run_checker=True):
"""
If true, will run ONNX Model validity checker every time we expose nodes for ONNX Runtime.
Args:
run_checker (bool): If true, will run onnx model checker when exposing nodes for ONNX Runtime.
Returns:
Model: Returns self so that calls can be chained.
"""
return self
def disable_gpu(self, disable=True):
"""
Tell model to avoid using GPU for inference and prefer CPU passes. (Only valid if using deprecated runtime).
Args:
disable (bool): If true, GPU will not be used for inference passes.
Returns:
Model: Returns self so that calls can be chained.
"""
return self
def use_hierarchical_layout(self, use_hierarchical=True):
"""
Model will be loaded using a hierarchical layout (aka dot/sugiyama/layered layout). Otherwise, will use a basic layout.
Args:
use_hierarchical (bool): If true, ONNX Model will be rendered using hierarchical graph layout
Returns:
Model: Returns self so that calls can be chained.
"""
return self
def use_deprecated_runtime(self, use_deprecated=False):
"""
Tell model to use deprecated runtime (Libtorch) instead of ONNX Runtime for inference passes.
Args:
use_deprecated (bool): If true, model will use deprecated runtime (Libtorch) for inference passes.
Returns:
Model: Returns self so that calls can be chained.
"""
return self
def set_verbosity(self, verbosity=Verbosity.DEFAULT):
"""
Set the verbosity of the logs in-engine when executing the ONNX model.
Args:
verbosity (Verbosity): The verbosity of the logs printed by the engine during inference.
Returns:
Model: Returns self so that calls can be chained.
"""
return self
def set_output_toggle(self, state=True):
"""
If True, will retrieve and store output data for each ONNX node, else will not.
Args:
state (bool): If true, all nodes will be toggled on to store their output data by default.
Returns:
Model: Returns self so that calls can be chained.
"""
return self
def xai_previews(self, name_preview_pairs, xai_type, clearfields=False):
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/render/model.py | model.py |
class Chart3D:
"""The Chart3D object holds a reference to a 3D chart for displaying or animating 3D points. The object takes values either in a more traditional graphics style (a list of points (x,y,z)) or in the matplotlib style where x, y, and z vectors are all sent separately and their consistency is enforced by the developer.
Args:
points (list, optional): A list of (x, y, z) points, e.g. ((x0, y0, z0), (x1, y1, z1)).
x (list, optional): A list of x float values.
y (list, optional): A list of y float values.
z (list, optional): A list of z float values.
Returns:
Chart3D: Returns a Chart3D object.
"""
def __init__(self, nsocket, points=[], x=[], y=[], z=[]):
pass
def set_3Dpoints(self, points=None, append=False):
"""Chart 3D coordinates on a surface. Sends a 3D numpy array of coordinates to the Zetane engine to be plotted as a mesh surface.
Args:
points (3D numpy array): a 3 dimensional vector of x,y,z coordinates.
ex: [ [ [x0, y0, z0], [x1, y1, z1] ], [ [x2, y2, z2], [x3, y3, z3] ] ].
append (bool): Whether to append points to the existing chart in Zetane or clear the data and start over again.
Returns:
Chart3D: Returns this object so that methods can be chained.
"""
return self
def set_points(self, x, y, z, append=False):
"""Add points to the x, y, and z vectors of the chart
Args:
x (float list): x coordinates to append
y (float list): y coordinates to append
z (float list): z coordinates to append
append (bool): Whether to append points to the existing chart in Zetane or clear the data and start over again.
Returns:
Chart3D: Returns this object so that methods can be chained.
"""
return self
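# A hedged sketch feeding a sampled height field to the API above
# (assumes NumPy and an open engine socket `nsocket`; illustrative only):
#
#   import numpy as np
#   xs, zs = np.meshgrid(np.linspace(-1, 1, 32), np.linspace(-1, 1, 32))
#   ys = np.sin(3 * xs) * np.cos(3 * zs)  # sample heights
#   surf = Chart3D(nsocket)
#   surf.set_points(xs.ravel().tolist(), ys.ravel().tolist(), zs.ravel().tolist())
#   surf.as_surface(True).update()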
def as_surface(self, surface=False):
"""Set to change whether to render individual points or to render a surface between points.
Args:
surface (bool): toggles rendering a surface connecting the 3d points.
"""
return self
def wireframe(self, enable_wireframe=True):
"""Render the surface as a wireframe
Args:
enable_wireframe (bool): render as wireframe
Returns:
Chart3D: Returns this object so that methods can be chained.
"""
return self
def clone(self):
"""
Clones this 3D chart in Zetane returns the cloned Chart3D object
Returns:
Chart3D: a clone of this Chart3D.
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/render/chart3D.py | chart3D.py |
class Metric:
"""
Metric is an object which contains 3 vectors of data to be represented in a visual way by adding them to Chart objects.
Args:
x (float list): A vector of X coordinate values.
y (float list): A vector of Y coordinate values.
z (float list): A vector of Z coordinate values.
label (str): a word to describe the metric.
Returns:
Metric: a Metric object.
"""
def __init__(self, x=None, y=None, z=None, label='', attributes=[]):
pass
def init_values(self):
"""
Initializes the x, y, z vectors to an empty state.
Args:
(none)
Returns:
Metric: Returns this object so calls can be chained.
"""
return self
def set_values(self, x=None, y=None, z=None):
"""
Sets the vectors in the metric of specified parameters.
Args:
x (float list): A vector of X coordinate values.
y (float list): A vector of Y coordinate values.
z (float list): A vector of Z coordinate values.
Returns:
Metric: Returns this object so calls can be chained.
"""
return self
def set_3Dpoints(self, points):
"""
Takes a list of xyz coordinates to populate the metric's x, y and z vectors.
Args:
points (numpy): a 3D numpy array containing x,y,z point coordinates.
ex: [ [ [x0, y0, z0], [x1, y1, z1] ], [ [x2, y2, z2], [x3, y3, z3] ] ].
Returns:
Metric: Returns this object so calls can be chained.
"""
return self
def append_values(self, x=None, y=None, z=None):
"""
Appends values to the specified vector(s) in the parameter list.
Args:
x (float list): A vector of X coordinate values to append.
y (float list): A vector of Y coordinate values to append.
z (float list): A vector of Z coordinate values to append.
Returns:
Metric: Returns this object so calls can be chained.
"""
return self
def set_label(self, label):
"""
Sets the label which will denote the metric in zetane.
Args:
label (str): a word to describe the metric.
Returns:
Metric: Returns this object so calls can be chained.
"""
return self
def set_attribute(self, attributes=[], clear_attributes=False):
"""
Adds attribute tags to the metric which may trigger certain features
within the Zetane Universe.
Args:
attributes (str list): tags to add to the metric.
Returns:
Metric: Returns this object so calls can be chained.
Current possible attributes:
line charts:
points - render as points
linked - connects coordinates when the 'points' attribute is in use
smooth - Renders edges in the chart as splines.
filled - fills the space underneath a chart with color.
"""
return self
def set_color(self, r=None, g=None, b=None):
"""
Set a custom color to be used for the metric displayed in Zetane.
(default color will be random if none is chosen)
Args:
r (float): intensity of red in the color.
g (float): intensity of green in the color.
b (float): intensity of blue in the color.
Returns:
Metric: Returns this object so calls can be chained.
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/render/metric.py | metric.py |
def vanilla_backprop_darknet(net, prep_img, out_class, class_dict, out_dir=None, map_type='default', grad_times_image=True, smooth_grad=False, n=50, sigma=4):
"""
Performs vanilla backpropagation, optionally applies SmoothGrad and Grad x Image, and saves the gradients as an image.
Args:
net (torch.nn.Module): the model used
prep_img (torch.Tensor): input image to the network as tensor
out_class (int): output class
class_dict (dict): dictionary of classes mapping integers to class strings
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
map_type (str): color map of the outputs, 'default' or 'grayscale' (default: 'default')
grad_times_image (bool): whether to perform Grad x Image, which multiplies the gradients with the input image to generate black-and-white visualization images (default: True)
smooth_grad (bool): whether to perform SmoothGrad (default: False)
n (int): amount of images used to smooth gradient, only used if smooth_grad=True (default: 50)
sigma (int): Sigma multiplier when calculating std of noise, only used if smooth_grad=True (default: 4)
Returns:
ndarray: backprop image as 3-channel ('default') or 1-channel ('grayscale'), HxWx3 (channels last) format with float values between 0-1
"""
return None
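# A minimal sketch of the SmoothGrad averaging controlled by `n` and `sigma`
# above (illustrative; `grad_fn` stands in for a single vanilla backprop pass,
# and the exact noise scaling is an assumption, not taken from the source):
import torch
def _smooth_grad_sketch(grad_fn, prep_img, n=50, sigma=4):
    std = sigma / 100.0 * (prep_img.max() - prep_img.min())  # assumed scaling convention
    total = torch.zeros_like(prep_img)
    for _ in range(n):
        noisy = prep_img + torch.randn_like(prep_img) * std  # perturb the input
        total += grad_fn(noisy)                              # gradients of the noisy copy
    return total / n  # averaging suppresses high-frequency gradient noise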
def guided_backprop_darknet(net, prep_img, out_class, class_dict, out_dir=None, map_type='default', smooth_grad=False, n=50, sigma=4):
"""
Performs guided backpropagation, optionally applies SmoothGrad, and saves the gradients as an image.
Args:
net (torch.nn.Module): the model used
prep_img (torch.Tensor): input image to the network as tensor
out_class (int): output class
class_dict (dict): dictionary of classes mapping integers to class strings
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
map_type (str): color map of the outputs, 'default' or 'grayscale' (default: 'default')
smooth_grad (bool): whether to perform SmoothGrad (default: False)
n (int): amount of images used to smooth gradient, only used if smooth_grad=True (default: 50)
sigma (int): Sigma multiplier when calculating std of noise, only used if smooth_grad=True (default: 4)
Returns:
        ndarray: backprop image as 3-channel ('default') or 1-channel ('grayscale'), HxWxC (channels last) format with float values between 0-1
"""
return None
def gradcam_darknet(net, prep_img, out_class, class_dict=None, img_org=None, out_dir=None, layer_list=None, map_type='heatmap'):
"""
Creates Grad-CAM images for a given class for the given list of convolutional layers.
Args:
net (torch.nn.Module): the model used
out_class (int): output class
class_dict (dict): dictionary of classes mapping integers to class strings
prep_img (torch.Tensor): input image to the network as tensor
img_org (PIL.Image): the original image to overlay on, required for map_type='heatmap_on_image' (default: None)
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
layer_list (list(str)): the list of convolutional layers, None automatically infers all Conv layers. (default: None)
map_type (str): type of map to be generated. One of 'heatmap', 'heatmap_on_image' or 'grayscale'. 'heatmap_on_image' is not advised for small images (default: 'heatmap')
Returns:
dict(str, ndarray): a dict of (layer name, Grad-CAM ndarray) pairs, with ndarrays in HxWx3 (channels last) format with float values between 0-1
"""
return None
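
# A minimal usage sketch (hedged): gradcam_darknet returns a dict keyed by layer
# name, so per-layer maps can be saved or inspected individually. `net`,
# `prep_img`, and `class_dict` are assumed to be prepared as documented above.
import matplotlib.pyplot as plt

cams = gradcam_darknet(net, prep_img, out_class=0, class_dict=class_dict)
for layer_name, cam in cams.items():
    plt.imsave(f"gradcam_{layer_name}.png", cam)  # cam is HxWx3, floats in 0-1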
def guided_gradcam_darknet(net, out_class, class_dict, prep_img, out_dir=None, layer_list=None, map_type='default'):
"""
Creates Guided Grad-CAM images for a given class for the given list of convolutional layers.
Args:
net (torch.nn.Module): the model used
out_class (int): output class
class_dict (dict): dictionary of classes mapping integers to class strings
prep_img (torch.Tensor): input image to the network as tensor
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
layer_list (list(str)): the list of convolutional layers, None automatically infers all Conv layers. (default: None)
        map_type (str): type of map to be generated. One of 'heatmap', 'heatmap_on_image' or 'grayscale'. 'heatmap_on_image' is not advised for small images (default: 'default')
Returns:
dict(str, ndarray): a dict of (layer name, Guided Grad-CAM ndarray) pairs, with ndarray in HxWx3 (channels last) format with float values between 0-1
"""
return None
def scorecam_darknet(net, out_class, class_dict, prep_img, img_org=None, out_dir=None, layer_list=None, map_type='heatmap'):
"""
Creates Score-CAM images for a given class for the given list of convolutional layers.
Args:
net (torch.nn.Module): the model used
out_class (int): output class
class_dict (dict): dictionary of classes mapping integers to class strings
prep_img (torch.Tensor): input image to the network as tensor
img_org (PIL.Image): the original image to overlay on, required for map_type='heatmap_on_image' (default: None)
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
layer_list (list(str)): the list of convolutional layers, None automatically infers all Conv layers. (default: None)
map_type (str): type of map to be generated. One of 'heatmap', 'heatmap_on_image' or 'grayscale'. 'heatmap_on_image' is not advised for small images (default: 'heatmap')
Returns:
dict(str, ndarray): a dict of (layer name, Score-CAM ndarray) pairs, with ndarrays in HxWx3 (channels last) format with float values between 0-1
"""
return None
def integrated_gradients_darknet(net, out_class, class_dict, prep_img, out_dir=None, steps=100):
"""
    Generates Integrated Gradients visualizations from model gradients and saves them as images.
Args:
net (torch.nn.Module): the model used
out_class (int): output class
class_dict (dict): dictionary of classes mapping integers to class strings
prep_img (torch.Tensor): input image to the network as tensor
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
steps (int): the number of steps IG should be applied (default: 100)
Returns:
ndarray: the integrated gradients as ndarray, HxWx3 (channels last) format with float values between 0-1
"""
return None
def image_generation_darknet(net, target_class, image_size, out_dir=None, regularize=True):
"""
    Optimizes a given network to produce images resembling a given class.
Args:
net (torch.nn.Module): the model used
target_class (int): the class for which the images will be generated
image_size (tuple(int)): size of the input image
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
regularize (bool): whether to regularize the images. regularization improves the quality of generated images significantly (default: True)
Returns:
ndarray: the generated image as ndarray, HxWx3 (channels last) format with float values between 0-1
"""
return None
def layer_visualization_darknet(net, cnn_layer, filter_pos, image_size, out_dir=None):
"""
Visualizes the filters for a given convolutional layer. This is particularly useful to interpret the learned features associated with specific filters and layers.
Args:
net (torch.nn.Module): the model used
cnn_layer (str): the layer to visualize
filter_pos (int): the filter in the selected layer to be visualized
image_size (tuple(int)): size of the input image
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
Returns:
ndarray: the generated layer visualization as ndarray, HxWx3 (channels last) format with float values between 0-1
"""
return None
def layer_activations_darknet(net, prep_img, out_class, cnn_layer, filter_pos, out_dir=None):
"""
Visualizes activations for a specific input on a specific layer and filter. The method is quite similar to guided backpropagation but instead of guiding the signal from the last layer and a specific target, it guides the signal from a specific layer and filter.
Args:
net (torch.nn.Module): the model used
prep_img (torch.Tensor): input image to the network as tensor
out_class (int): output class
cnn_layer (str): the layer to visualize
filter_pos (int): the filter in the selected layer to be visualized
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
Returns:
ndarray: the generated layer activation as ndarray, HxWx3 (channels last) format with float values between 0-1
"""
return None
def deep_dream_darknet(net, cnn_layer, filter_pos, im_path, image_size, out_dir=None):
"""
Performs Deep Dream on a given input image for selected filter and layer.
Args:
net (torch.nn.Module): the model used
cnn_layer (str): the layer to visualize
filter_pos (int): the filter in the selected layer to be visualized
im_path (str): path to the image to be dreamt on
image_size (tuple(int)): size of the input image
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
Returns:
ndarray: the dreamed image as ndarray, HxWx3 (channels last) format with float values between 0-1
"""
return None
def inverted_representation_darknet(net, prep_img, inv_mean, inv_std, out_dir=None, layer_list=None):
return None
def lime_image_darknet(model, img, out_class, out_dir=None, min_weight=0):
return None | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/explain/torch/explainability_methods_darknet.py | explainability_methods_darknet.py |
def vanilla_backprop(net, prep_img, out_class, class_name=None, out_dir=None, map_type='default', grad_times_image=True, smooth_grad=False, n=50, sigma=4):
"""
Performs vanilla backpropagation, optionally applies SmoothGrad and Grad x Image, and saves the gradients as an image.
Args:
net (torch.nn.Module): the model used
prep_img (torch.Tensor): input image to the network as tensor
out_class (int): output class
class_name (str): name of output class if any, otherwise defaults to str(out_class)
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
map_type (str): color map of the outputs, 'default' or 'grayscale' (default: 'default')
        grad_times_image (bool): whether to perform Grad x Image, which multiplies the gradients by the input image to generate B&W visualization images (default: True)
smooth_grad (bool): whether to perform SmoothGrad (default: False)
        n (int): number of noisy samples used to smooth the gradient, only used if smooth_grad=True (default: 50)
sigma (int): Sigma multiplier when calculating std of noise, only used if smooth_grad=True (default: 4)
Returns:
        ndarray: backprop image as 3-channel ('default') or 1-channel ('grayscale'), HxWxC (channels last) format with float values between 0-1
"""
return None
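
# A minimal usage sketch (hedged): with smooth_grad=True the gradients are
# averaged over n noisy copies of the input, and grad_times_image multiplies
# the result by the input. `net` and `prep_img` are assumed prepared as above.
grads = vanilla_backprop(net, prep_img, out_class=0, class_name="cat",
                         map_type="grayscale", grad_times_image=True,
                         smooth_grad=True, n=50, sigma=4)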
def guided_backprop(net, prep_img, out_class, class_name, out_dir=None, map_type='default', smooth_grad=False, n=50, sigma=4):
"""
Performs guided backpropagation, optionally applies SmoothGrad and Grad x Image, and saves the gradients as an image.
Args:
net (torch.nn.Module): the model used
prep_img (torch.Tensor): input image to the network as tensor
out_class (int): output class
class_name (str): name of output class if any, otherwise defaults to str(out_class)
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
map_type (str): color map of the outputs, 'default' or 'grayscale' (default: 'default')
smooth_grad (bool): whether to perform SmoothGrad (default: False)
        n (int): number of noisy samples used to smooth the gradient, only used if smooth_grad=True (default: 50)
sigma (int): Sigma multiplier when calculating std of noise, only used if smooth_grad=True (default: 4)
Returns:
        ndarray: backprop image as 3-channel ('default') or 1-channel ('grayscale'), HxWxC (channels last) format with float values between 0-1
"""
return None
def gradcam(net, out_class, class_name, prep_img, img_org=None, out_dir=None, layer_list=None, map_type='heatmap'):
"""
Creates Grad-CAM images for a given class for the given list of convolutional layers.
Args:
net (torch.nn.Module): the model used
out_class (int): output class
class_name (str): name of output class if any, otherwise defaults to str(out_class)
prep_img (torch.Tensor): input image to the network as tensor
img_org (PIL.Image): the original image to overlay on, required for map_type='heatmap_on_image' (default: None)
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
layer_list (list(str)): the list of convolutional layers, None automatically infers all Conv layers. (default: None)
map_type (str): type of map to be generated. One of 'heatmap', 'heatmap_on_image' or 'grayscale'. 'heatmap_on_image' is not advised for small images (default: 'heatmap')
Returns:
dict(str, ndarray): a dict of (layer name, Grad-CAM ndarray) pairs, with ndarrays in HxWx3 (channels last) format with float values between 0-1
"""
return None
def guided_gradcam(net, out_class, class_name, prep_img, out_dir=None, layer_list=None, map_type='default'):
"""
Creates Guided Grad-CAM images for a given class for the given list of convolutional layers.
Args:
net (torch.nn.Module): the model used
out_class (int): output class
class_name (str): name of output class if any, otherwise defaults to str(out_class)
prep_img (torch.Tensor): input image to the network as tensor
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
layer_list (list(str)): the list of convolutional layers, None automatically infers all Conv layers. (default: None)
        map_type (str): type of map to be generated. One of 'heatmap', 'heatmap_on_image' or 'grayscale'. 'heatmap_on_image' is not advised for small images (default: 'default')
Returns:
dict(str, ndarray): a dict of (layer name, Guided Grad-CAM ndarray) pairs, with ndarray in HxWx3 (channels last) format with float values between 0-1
"""
return None
def scorecam(net, out_class, class_name, prep_img, img_org=None, out_dir=None, layer_list=None, map_type='heatmap'):
"""
Creates Score-CAM images for a given class for the given list of convolutional layers.
Args:
net (torch.nn.Module): the model used
out_class (int): output class
class_name (str): name of output class if any, otherwise defaults to str(out_class)
prep_img (torch.Tensor): input image to the network as tensor
img_org (PIL.Image): the original image to overlay on, required for map_type='heatmap_on_image' (default: None)
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
layer_list (list(str)): the list of convolutional layers, None automatically infers all Conv layers. (default: None)
map_type (str): type of map to be generated. One of 'heatmap', 'heatmap_on_image' or 'grayscale'. 'heatmap_on_image' is not advised for small images (default: 'heatmap')
Returns:
dict(str, ndarray): a dict of (layer name, Score-CAM ndarray) pairs, with ndarrays in HxWx3 (channels last) format with float values between 0-1
"""
return None
def integrated_gradients(net, out_class, class_name, prep_img, out_dir=None, steps=100):
"""
    Generates Integrated Gradients visualizations from model gradients and saves them as images.
Args:
net (torch.nn.Module): the model used
out_class (int): output class
class_name (str): name of output class if any, otherwise defaults to str(out_class)
prep_img (torch.Tensor): input image to the network as tensor
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
steps (int): the number of steps IG should be applied (default: 100)
Returns:
ndarray: the integrated gradients as ndarray, HxWx3 (channels last) format with float values between 0-1
"""
return None
def image_generation(net, target_class, image_size, out_dir=None, regularize=True):
"""
Optimizes a given network to produce images resembling a given class.
Args:
net (torch.nn.Module): the model used
target_class (int): the class for which the images will be generated
image_size (tuple(int)): size of the input image
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
regularize (bool): whether to regularize the images. regularization improves the quality of generated images significantly (default: True)
Returns:
ndarray: the generated image as ndarray, HxWx3 (channels last) format with float values between 0-1
"""
return None
def layer_visualization(net, cnn_layer, filter_pos, image_size, out_dir=None):
"""
Visualizes the filters for a given convolutional layer. This is particularly useful to interpret the learned features associated with specific filters and layers.
Args:
net (torch.nn.Module): the model used
cnn_layer (str): the layer to visualize
filter_pos (int): the filter in the selected layer to be visualized
image_size (tuple(int)): size of the input image
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
Returns:
ndarray: the generated layer visualization as ndarray, HxWx3 (channels last) format with float values between 0-1
"""
return None
def layer_activations(net, prep_img, out_class, cnn_layer, filter_pos, out_dir=None):
"""
Visualizes activations for a specific input on a specific layer and filter. The method is quite similar to guided backpropagation but instead of guiding the signal from the last layer and a specific target, it guides the signal from a specific layer and filter.
Args:
net (torch.nn.Module): the model used
prep_img (torch.Tensor): input image to the network as tensor
out_class (int): output class
cnn_layer (str): the layer to visualize
filter_pos (int): the filter in the selected layer to be visualized
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
Returns:
ndarray: the generated layer activation as ndarray, HxWx3 (channels last) format with float values between 0-1
"""
return None
def deep_dream(net, cnn_layer, filter_pos, im_path, image_size, out_dir=None):
"""
Performs Deep Dream on a given input image for selected filter and layer.
Args:
net (torch.nn.Module): the model used
cnn_layer (str): the layer to visualize
filter_pos (int): the filter in the selected layer to be visualized
im_path (str): path to the image to be dreamt on
image_size (tuple(int)): size of the input image
out_dir (str): output directory. If set to None, does not save the output as an image (default: None)
Returns:
ndarray: the dreamed image as ndarray, HxWx3 (channels last) format with float values between 0-1
"""
return None
def inverted_representation(net, prep_img, inv_mean, inv_std, out_dir=None, layer_list=None):
return None
def lime_image(model, img, out_class, out_dir=None, min_weight=0):
return None | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/explain/torch/explainability_methods.py | explainability_methods.py |
def get_layers(model):
"""
Extracts the layers of a PyTorch neural network as a list.
Args:
model (torch.nn.Module): the model with the layers to be extracted
Returns:
list(torch.nn.Module): list of the layers of the neural network
"""
return None
def get_darknet_layers(model):
"""
    Extracts the layers of a Darknet-based PyTorch neural network as a list.
Args:
model (torch.nn.Module): the model with the layers to be extracted
Returns:
list(torch.nn.Module): list of the layers of the neural network
"""
return None
def convert_to_grayscale(im_as_arr):
"""
Converts 3d image to grayscale.
Args:
im_as_arr (np.ndarray): RGB image with shape (D,W,H)
Returns:
        grayscale_im (np.ndarray): grayscale image with shape (1,W,H)
"""
return None
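
# A minimal sketch (hedged) of one common approach to this conversion, not
# necessarily this library's exact implementation: sum absolute channel values,
# then clip at the 99th percentile to suppress outliers before rescaling to 0-1.
import numpy as np

def convert_to_grayscale_sketch(im_as_arr):
    grayscale_im = np.sum(np.abs(im_as_arr), axis=0)   # (D,W,H) -> (W,H)
    im_max = np.percentile(grayscale_im, 99)
    im_min = np.min(grayscale_im)
    grayscale_im = np.clip((grayscale_im - im_min) / (im_max - im_min), 0, 1)
    return np.expand_dims(grayscale_im, axis=0)        # -> (1,W,H)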
def save_gradient_images(gradient, path_to_file):
"""
Exports the original gradient image.
Args:
gradient (np.ndarray): Numpy array of the gradient with shape (3, 224, 224)
        path_to_file (str): File name to be exported
Returns:
np.ndarray: the transposed array in WxHxC form
"""
return None
def save_class_activation_images(org_img, activation_map, file_path, map_type='heatmap'):
"""
Generates CAM heatmaps, either saves and returns them directly or overlays them on the original image first.
Args:
org_img (PIL.Image): Original image
activation_map (np.ndarray): activation map (grayscale) 0-255
        file_path (str): File name of the exported image
        map_type (str): type of map to be generated. One of 'heatmap', 'heatmap_on_image' or 'grayscale' (default: 'heatmap')
Returns:
np.ndarray: the heatmap or overlaid array, HxWx3 (channels last) format with float values between 0-1
"""
return None
def apply_colormap_on_image(org_im, activation, colormap_name):
"""
Applies the colored activation heatmap on the original image.
Args:
        org_im (PIL.Image): Original image
        activation (np.ndarray): Activation map (grayscale) 0-255
colormap_name (str): Name of the colormap, standard matplotlib map names are used
Returns:
np.ndarray: no_trans_heatmap as the heatmap with no transparency
np.ndarray: heatmap_on_image as the heatmap overlaid on the image
"""
return None
def format_np_output(np_arr):
"""
    Streamlines the saving procedure by converting all outputs to the same 3xWxH format using successive if clauses.
    Args:
        np_arr (np.ndarray): Matrix of shape 1xWxH or WxH or 3xWxH
    Returns:
        np.ndarray: NumPy array with shape 3xWxH
"""
return None
def save_image(im, path):
"""
Saves a numpy array or PIL image as an image.
Args:
        im (np.ndarray or PIL.Image): Matrix of shape DxWxH, or a PIL image
path (str): Path to the image
Returns:
None
"""
return None
def preprocess_image(pil_im, mean=None, std=None, size=(224, 224), resize_im=True):
"""
Processes image to produce inputs for PyTorch CNNs.
Args:
pil_im (PIL.Image, ndarray or torch.Tensor): PIL Image or numpy/torch array to process
mean (list): mean values between 0 and 1 for each channel (default: None)
std (list): standard deviation values between 0 and 1 for each channel (default: None)
size (tuple(int, int)): desired size of the output image, must be compatible with the neural network (default: (224, 224))
resize_im (bool): to resize or not (default: True)
Returns:
im_as_var (torch variable): Variable that contains processed float tensor
"""
return None
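
# A minimal usage sketch (hedged): preprocess a PIL image with the standard
# ImageNet statistics, which most torchvision classifiers expect.
# "input.jpg" is a hypothetical input path.
from PIL import Image

pil_im = Image.open("input.jpg")
prep_img = preprocess_image(pil_im,
                            mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225],
                            size=(224, 224), resize_im=True)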
def recreate_image(im_as_var, reverse_mean=None, reverse_std=None):
"""
Recreates original images from a torch variable, through a sort of reverse preprocessing process.
Args:
im_as_var (torch variable): Image to recreate
reverse_mean (list(float)): inverted mean if any mean normalization has been performed on prep_img. None defaults to ImageNet metrics (default: None)
e.g. if means for three channels were [0.485, 0.456, 0.406], inv_mean=[-0.485, -0.456, -0.406]
reverse_std (list(float)): inverted standard deviation if any std normalization has been performed on prep_img. None defaults to ImageNet metrics (default: None)
e.g. if stds for three channels were [0.229, 0.224, 0.225], inv_std=[1/0.229, 1/0.224, 1/0.225]
Returns:
recreated_im (numpy arr): Recreated image in array
"""
return None
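
# A minimal usage sketch (hedged): build the inverted statistics from the
# mean/std used during preprocessing, exactly as the docstring describes.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
inv_mean = [-m for m in mean]
inv_std = [1.0 / s for s in std]
recreated_im = recreate_image(prep_img, reverse_mean=inv_mean, reverse_std=inv_std)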
class CamExtractor:
"""
Extracts class activation mapping (CAM) features from the model.
    Args:
        model (torch.nn.Module): the model used
    Attributes:
        model (torch.nn.Module): the model used
        gradients (torch.Tensor): the gradients at the target layer
"""
def __init__(self, model):
pass
def save_gradient(self, grad):
"""
Saves the current gradients to a class attribute.
Args:
grad (torch.Tensor): the gradients at the target layer
"""
return self
def forward_pass_on_convolutions(self, x):
"""
Does a forward pass on convolutions, hooks the function at given layer. Applies only to torchvision models which have the 'features' submodules/blocks.
Args:
x (torch.Tensor): inputs to the neural network
Returns:
torch.Tensor: x as the output of the last convolutional layer
"""
return self
def forward_pass_on_classifier(self, x):
"""
Does a full forward pass on the model. Applies only to torchvision models which have 'classifier' submodules/blocks.
Args:
x (torch.Tensor): inputs to the neural network
Returns:
torch.Tensor: x as the output of the last layer
"""
return self
def forward_pass(self, x):
"""
        Does a full forward pass on the model. Treats self.model as a torchvision model that employs 'features' and 'classifier' submodules by default, and falls back to a standard sequential model if not.
Args:
x (torch.Tensor): inputs to the neural network
Returns:
torch.Tensor: x as the output of the last layer
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/explain/torch/utils.py | utils.py |
class InvertedRepresentation:
"""
An algorithm that aims to generate the original image using the learned features of a given layer. For more info, see: 'A. Mahendran, A. Vedaldi. Understanding Deep Image Representations by Inverting Them, https://arxiv.org/abs/1412.0035'.
Args:
model (torch.nn.Module): the model used
out_dir (str): output directory
Attributes:
model (torch.nn.Module): the model used
out_dir (str): output directory
"""
def __init__(self, model, out_dir):
pass
def alpha_norm(self, input_matrix, alpha):
"""
Converts the input matrix to vector, and then calculates its alpha norm.
Args:
input_matrix (torch.Tensor): the image that is being optimized
alpha (float): alpha coefficient for exponential component
Returns:
float: sum of the alpha exponential of the flattened input matrix
"""
return self
def total_variation_norm(self, input_matrix, beta):
"""
Total variation norm is the second norm in the paper, represented as R_V(x).
Args:
input_matrix (torch.Tensor): the image that is being optimized
beta (float): beta coefficient for exponential component
Returns:
float: sum of the variation of the input matrix
"""
return self
def euclidian_loss(self, org_matrix, target_matrix):
"""
        Euclidean loss is the main loss function in the paper: ||f_i(x) - f_i(x_0)||_2^2 / ||f_i(x_0)||_2^2
Args:
org_matrix (torch.Tensor): the original output of the target layer
target_matrix (torch.Tensor): the output of the target layer that is being optimized
Returns:
torch.Tensor: the normalized euclidean distance between the two matrices
"""
return self
def get_output_from_specific_layer(self, x, layer_id):
"""
Saves the output after a forward pass until nth layer. This operation could be done with a forward hook too, but this method is deemed more straightforward.
Args:
x (torch.Tensor): the input to the neural network
layer_id (str): the index/name of the layer to target
Returns:
torch.Tensor: the output of the layer with the specified layer_id
"""
return self
def generate_inverted_image_specific_layer(self, input_image, inv_mean, inv_std, target_layer):
"""
Generates an inverted representation of the input image using the learned features of a specific network layer.
Args:
input_image (torch.Tensor): input image to the network as tensor
inv_mean (list(float)): inverted mean if any mean normalization has been performed on prep_img.
e.g. if means for three channels were [0.485, 0.456, 0.406], inv_mean=[-0.485, -0.456, -0.406]
inv_std (list(float)): inverted standard deviation if any std normalization has been performed on prep_img.
e.g. if stds for three channels were [0.229, 0.224, 0.225], inv_std=[1/0.229, 1/0.224, 1/0.225]
target_layer (str): the index/name of the layer to target
Returns:
np.ndarray: inverted representation output image as a NumPy array, HxWx3 (channels last) format with float values between 0-1
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/explain/torch/algorithms/inverted_representation.py | inverted_representation.py |
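
# A minimal sketch (hedged) of the two regularizers described above, written
# with PyTorch ops; the library's exact implementation may differ.
import torch

def alpha_norm_sketch(x, alpha):
    # Flatten, then sum |x_i|^alpha over all elements.
    return torch.abs(x.view(-1)).pow(alpha).sum()

def total_variation_norm_sketch(x, beta):
    # R_V(x): penalize horizontal and vertical differences between adjacent
    # pixels, raised to the power beta/2. x is (C, H, W).
    dh = x[:, 1:, :-1] - x[:, :-1, :-1]
    dw = x[:, :-1, 1:] - x[:, :-1, :-1]
    return (dh.pow(2) + dw.pow(2)).pow(beta / 2.0).sum()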
def preprocess_and_blur_image(pil_im, mean=None, std=None, resize_im=True, size=(224, 224), blur_rad=None):
"""
Processes image with optional Gaussian blur for CNNs.
Args:
pil_im (PIL.Image): PIL Image or ndarray to process
mean (list(float)): mean values between 0 and 1 for each channel (default: None)
std (list(float)): standard deviation values between 0 and 1 for each channel (default: None)
resize_im (bool): to resize or not (default: True)
size (tuple(int, int)): the size to resize. Used only if resize_im=True (default: (224, 224))
blur_rad (int): pixel radius for Gaussian blurring (default: None)
    Returns:
torch.autograd.Variable: Variable that contains processed float tensor
"""
return None
class RegularizedClassSpecificImageGeneration:
"""
Produces an image that maximizes a certain class with gradient ascent. Uses Gaussian blur, weight decay, and clipping.
Args:
model (torch.nn.Module): the model used
target_class (int): the class for which the images will be generated
size (tuple(int)): size of the input image
out_dir (str): output directory
mean (list): mean values between 0 and 1 for each channel (default: None)
std (list): standard deviation values between 0 and 1 for each channel (default: None)
Attributes:
model (torch.nn.Module): the model used
target_class (int): the class for which the images will be generated
size (tuple(int)): size of the input image
out_dir (str): output directory
mean (list(float)): mean values between 0 and 1 for each channel
std (list(float)): standard deviation values between 0 and 1 for each channel
created_image (PIL image): the final image generated by the network created_image, WxHx3 (channels last) format with int values between 0-255
"""
def __init__(self, model, target_class, size, out_dir, mean=None, std=None):
pass
def generate(self, iterations=150, blur_freq=4, blur_rad=1, wd=0.0001, clipping_value=0.1, initial_learning_rate=6):
"""
Generates class specific image with enhancements to improve image quality. See https://arxiv.org/abs/1506.06579 for details on each argument's effect on output quality.
Play around with combinations of arguments. Besides the defaults, this combination has produced good images:
blur_freq=6, blur_rad=0.8, wd = 0.05
Args:
iterations (int): Total iterations for gradient ascent (default: 150)
            blur_freq (int): Frequency of Gaussian blur effect, in iterations (default: 4)
            blur_rad (float): Radius for gaussian blur, passed to PIL.ImageFilter.GaussianBlur() (default: 1)
            wd (float): Weight decay value for Stochastic Gradient Ascent (default: 0.0001)
clipping_value (None or float): Value for gradient clipping (default: 0.1)
initial_learning_rate (float): Initial learning rate of optimizer (default: 6)
Returns:
np.ndarray: Final maximally activated class image, HxWx3 (channels last) format with float values between 0-1
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/explain/torch/algorithms/generate_regularized_class_specific_samples.py | generate_regularized_class_specific_samples.py |
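
# A minimal usage sketch (hedged), using the alternative parameter combination
# suggested in the docstring; `model` is assumed to be a trained classifier and
# target_class=130 is an arbitrary example class index.
gen = RegularizedClassSpecificImageGeneration(model, target_class=130,
                                              size=(224, 224), out_dir="generated")
image = gen.generate(iterations=150, blur_freq=6, blur_rad=0.8, wd=0.05)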
def preprocess_and_blur_image(pil_im, mean=None, std=None, resize_im=True, size=(224, 224), blur_rad=None):
"""
Processes image with optional Gaussian blur for CNNs.
Args:
pil_im (PIL.Image): PIL Image or ndarray to process
mean (list(float)): mean values between 0 and 1 for each channel (default: None)
std (list(float)): standard deviation values between 0 and 1 for each channel (default: None)
resize_im (bool): to resize or not (default: True)
size (tuple(int, int)): the size to resize. Used only if resize_im=True (default: (224, 224))
blur_rad (int): pixel radius for Gaussian blurring (default: None)
    Returns:
torch.autograd.Variable: Variable that contains processed float tensor
"""
return None
class RegularizedClassSpecificImageGenerationDarknet:
"""
Produces an image that maximizes a certain class with gradient ascent. Uses Gaussian blur, weight decay, and clipping.
Args:
model (torch.nn.Module): the model used
target_class (int): the class for which the images will be generated
size (tuple(int)): size of the input image
out_dir (str): output directory
mean (list): mean values between 0 and 1 for each channel (default: None)
std (list): standard deviation values between 0 and 1 for each channel (default: None)
Attributes:
model (torch.nn.Module): the model used
target_class (int): the class for which the images will be generated
size (tuple(int)): size of the input image
out_dir (str): output directory
mean (list(float)): mean values between 0 and 1 for each channel
std (list(float)): standard deviation values between 0 and 1 for each channel
created_image (PIL image): the final image generated by the network created_image, WxHx3 (channels last) format with int values between 0-255
"""
def __init__(self, model, target_class, size, out_dir, mean=None, std=None):
pass
def generate(self, iterations=150, blur_freq=4, blur_rad=1, wd=0.0001, clipping_value=0.1, initial_learning_rate=6):
"""
Generates class specific image with enhancements to improve image quality. See https://arxiv.org/abs/1506.06579 for details on each argument's effect on output quality.
Play around with combinations of arguments. Besides the defaults, this combination has produced good images:
blur_freq=6, blur_rad=0.8, wd = 0.05
Args:
iterations (int): Total iterations for gradient ascent (default: 150)
            blur_freq (int): Frequency of Gaussian blur effect, in iterations (default: 4)
            blur_rad (float): Radius for gaussian blur, passed to PIL.ImageFilter.GaussianBlur() (default: 1)
            wd (float): Weight decay value for Stochastic Gradient Ascent (default: 0.0001)
clipping_value (None or float): Value for gradient clipping (default: 0.1)
initial_learning_rate (float): Initial learning rate of optimizer (default: 6)
Returns:
np.ndarray: Final maximally activated class image, HxWx3 (channels last) format with float values between 0-1
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/explain/torch/algorithms/darknet/generate_regularized_class_specific_samples_darknet.py | generate_regularized_class_specific_samples_darknet.py |
class InvertedRepresentationDarknet:
"""
An algorithm that aims to generate the original image using the learned features of a given layer. For more info, see: 'A. Mahendran, A. Vedaldi. Understanding Deep Image Representations by Inverting Them, https://arxiv.org/abs/1412.0035'.
Args:
model (torch.nn.Module): the model used
out_dir (str): output directory
Attributes:
model (torch.nn.Module): the model used
out_dir (str): output directory
"""
def __init__(self, model, out_dir):
pass
def alpha_norm(self, input_matrix, alpha):
"""
Converts the input matrix to vector, and then calculates its alpha norm.
Args:
input_matrix (torch.Tensor): the image that is being optimized
alpha (float): alpha coefficient for exponential component
Returns:
float: sum of the alpha exponential of the flattened input matrix
"""
return self
def total_variation_norm(self, input_matrix, beta):
"""
Total variation norm is the second norm in the paper, represented as R_V(x).
Args:
input_matrix (torch.Tensor): the image that is being optimized
beta (float): beta coefficient for exponential component
Returns:
float: sum of the variation of the input matrix
"""
return self
def euclidian_loss(self, org_matrix, target_matrix):
"""
        Euclidean loss is the main loss function in the paper: ||f_i(x) - f_i(x_0)||_2^2 / ||f_i(x_0)||_2^2
Args:
org_matrix (torch.Tensor): the original output of the target layer
target_matrix (torch.Tensor): the output of the target layer that is being optimized
Returns:
torch.Tensor: the normalized euclidean distance between the two matrices
"""
return self
def get_output_from_specific_layer(self, x, layer_id):
"""
Saves the output after a forward pass until nth layer. This operation could be done with a forward hook too, but this method is deemed more straightforward.
Args:
x (torch.Tensor): the input to the neural network
layer_id (str): the index/name of the layer to target
Returns:
torch.Tensor: the output of the layer with the specified layer_id
"""
return self
def generate_inverted_image_specific_layer(self, input_image, inv_mean, inv_std, target_layer):
"""
Generates an inverted representation of the input image using the learned features of a specific network layer.
Args:
input_image (torch.Tensor): input image to the network as tensor
inv_mean (list(float)): inverted mean if any mean normalization has been performed on prep_img.
e.g. if means for three channels were [0.485, 0.456, 0.406], inv_mean=[-0.485, -0.456, -0.406]
inv_std (list(float)): inverted standard deviation if any std normalization has been performed on prep_img.
e.g. if stds for three channels were [0.229, 0.224, 0.225], inv_std=[1/0.229, 1/0.224, 1/0.225]
target_layer (str): the index/name of the layer to target
Returns:
np.ndarray: inverted representation output image as a NumPy array, HxWx3 (channels last) format with float values between 0-1
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/explain/torch/algorithms/darknet/inverted_representation_darknet.py | inverted_representation_darknet.py |
class CamExtractorDarknet:
"""
Extracts class activation mapping (CAM) features from the model.
Args:
model (torch.nn.Module): the model used
target_layer (str): the layer to visualize
Attributes:
model (torch.nn.Module): the model used
target_layer (str): the layer to visualize
gradients (torch.Tensor): the gradients at the target layer
"""
def __init__(self, model, target_layer):
pass
def forward_pass_on_convolutions(self, x):
return self
def forward_pass_on_classifier(self, x):
"""
Does a full forward pass on the model. Applies only to torchvision models which have 'classifier' submodules/blocks.
Args:
x (torch.Tensor): inputs to the neural network
Returns:
torch.Tensor: conv_output as the output of the target layer
torch.Tensor: x as the output of the last layer
"""
return self
def forward_pass(self, x):
"""
        Does a full forward pass on the model. Treats self.model as a torchvision model that employs 'features' and 'classifier' submodules by default, and falls back to a standard sequential model if not.
Args:
x (torch.Tensor): inputs to the neural network
Returns:
torch.Tensor: conv_output as the output of the target layer
torch.Tensor: x as the output of the last layer
"""
return self
class ScoreCamDarknet:
"""
Produces class activation maps using the Score-CAM algorithm. For more info, see: 'H. Wang, Z. Wang, M. Du, F. Yang, Z. Zhang, S. Ding, P. Mardziel, X. Hu. Score-CAM: Score-Weighted Visual Explanations for Convolutional Neural Networks https://arxiv.org/abs/1910.01279'.
Args:
model (torch.nn.Module): the model used
target_layer (str): the layer to visualize
Attributes:
model (torch.nn.Module): the model used
target_layer (str): the layer to visualize
extractor (CamExtractor): Extractor for CAM features.
"""
def __init__(self, model, target_layer):
pass
def generate_cam(self, input_image, target_class=None):
"""
Applies the Score-CAM algorithm using the CamExtractor.
Args:
input_image (torch.Tensor): input image as a PyTorch tensor
            target_class (int): the index of the class for which Score-CAM images will be produced, defaults to the argmax of the model output if set to None (default: None)
Returns:
np.ndarray: the class activation map as an ndarray
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/explain/torch/algorithms/darknet/scorecam_darknet.py | scorecam_darknet.py |
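
# A minimal sketch (hedged) of the core Score-CAM weighting step described in
# the paper: each upsampled, 0-1 normalized activation map masks the input, and
# the target-class score on the masked input becomes that map's weight.
# CPU tensors are assumed for simplicity.
import torch
import torch.nn.functional as F

def scorecam_weights_sketch(model, input_image, conv_maps, target_class):
    # conv_maps: (N_filters, Hl, Wl) activations from the target layer.
    cam = torch.zeros(input_image.shape[2:])
    for fmap in conv_maps:
        m = F.interpolate(fmap[None, None], size=input_image.shape[2:],
                          mode="bilinear", align_corners=False)[0, 0]
        if m.max() != m.min():
            m = (m - m.min()) / (m.max() - m.min())
        score = F.softmax(model(input_image * m), dim=1)[0, target_class]
        cam += score.item() * m
    return torch.relu(cam)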
def show_image(image, result_dir, grayscale=False, ax=None, title=''):
"""
    Displays the numpy array as an image and saves it in the directory specified by the user.
    Args:
        image: numpy array of the image
        result_dir: the resulting directory where the image will be saved
        grayscale: Boolean to specify the image as grayscale
        ax: axis of the figure used when displaying the image inside the editor
        title (str): title of the output, also used as the file name when saving the image
"""
return None
def load_image(file_path):
"""
    Loads the image at the path provided in file_path.
Args:
file_path: path of the image
"""
return None
def keras_vanilla_backprop(model, img, result_dir, out_class, loss=None):
"""
Display the vanilla backprop of the image according to the model
Args:
model: neural network (model) for explainability
img(numpy array): numpy array of the image
result_dir: the resulting directory where the image will be saved
out_class (int): output class
loss (function): Custom loss function for the provided model if needed. If set to None, this defaults to categorical cross-entropy, which is the standard for most multiclass classification tasks (default: None)
Returns:
mask(numpy array) : returns the output as numpy array
"""
return None
def keras_guided_backprop(model, img, result_dir, out_class, loss=None):
"""
Display the guided backprop gradients of the image according to the model
Args:
model: neural network (model) for explainability
img(numpy array): numpy array of the image
result_dir: the resulting directory where the image will be saved
out_class (int): output class
loss (function): Custom loss function for the provided model if needed. If set to None, this defaults to categorical cross-entropy, which is the standard for most multiclass classification tasks (default: None)
Returns:
mask(numpy array) : returns the output as numpy array
"""
return None
def keras_integrated_grad(model, img, result_dir, out_class, loss=None):
"""
Display the integrated gradients of the image according to the model
Args:
model: neural network (model) for explainability
        img (ndarray): numpy array of the image with proper shape that matches the model
result_dir: the resulting directory where the image will be saved
out_class (int): output class
loss (function): Custom loss function for the provided model if needed. If set to None, this defaults to categorical cross-entropy, which is the standard for most multiclass classification tasks (default: None)
Returns:
mask(numpy array) : returns the output as numpy array
"""
return None
def keras_smoothgrad(model, img, result_dir, out_class, num_samples=5, noise=1.0, loss=None):
"""
    Display the SmoothGrad gradients of the image according to the model
    Args:
        model: neural network (model) for explainability
        img (ndarray): numpy array of the image with proper shape that matches the model
result_dir: the resulting directory where the image will be saved
out_class (int): output class
num_samples (int): Number of noisy samples to generate for each input image
noise (float): Standard deviation for noise normal distribution
loss (function): Custom loss function for the provided model if needed. If set to None, this defaults to categorical cross-entropy, which is the standard for most multiclass classification tasks (default: None)
Returns:
mask(numpy array) : returns the output as numpy array
"""
return None
def keras_gradximage(model, img, result_dir, out_class, use_guided_grads=False, loss=None):
"""
    Display the gradient times input attribution of the image according to the model
    Args:
        model: neural network (model) for explainability
        img (numpy array): numpy array of the image with proper shape that matches the model
result_dir: the resulting directory where the image will be saved
out_class (int): output class
use_guided_grads (boolean): Whether to use guided grads or raw gradients
loss (function): Custom loss function for the provided model if needed. If set to None, this defaults to categorical cross-entropy, which is the standard for most multiclass classification tasks (default: None)
Returns:
mask(numpy array) : returns the output as numpy array
"""
return None
def keras_gradcam(model, img, result_dir, out_class, loss=None):
"""
    Display the Grad-CAM heatmap of the image according to the model
    Args:
        model: neural network (model) for explainability
        img (numpy array): numpy array of the image with proper shape that matches the model
result_dir: the resulting directory where the image will be saved
out_class (int): output class
loss (function): Custom loss function for the provided model if needed. If set to None, this defaults to categorical cross-entropy, which is the standard for most multiclass classification tasks (default: None)
Returns:
mask(numpy array) : returns the output as numpy array
"""
return None
def keras_guided_gradcam(model, img, result_dir, out_class, loss=None):
"""
    Display the Guided Grad-CAM of the image according to the model
    Args:
        model: neural network (model) for explainability
        img (numpy array): numpy array of the image with proper shape that matches the model
result_dir: the resulting directory where the image will be saved
out_class (int): output class
loss (function): Custom loss function for the provided model if needed. If set to None, this defaults to categorical cross-entropy, which is the standard for most multiclass classification tasks (default: None)
Returns:
mask(numpy array) : returns the output as numpy array
"""
return None
def keras_occlusion_sensitivity(model, img, result_dir, out_class, patch_size=16, postprocess_fn=None):
"""
    Display the occlusion sensitivity map of the image according to the model
    Args:
        model: neural network (model) for explainability
        img (numpy array): numpy array of the image with proper shape that matches the model
result_dir: the resulting directory where the image will be saved
out_class (int): output class
patch_size (int): size of the square occlusion patches
        postprocess_fn (function): Custom postprocessing function to extract class probabilities from model outputs if needed. If set to None, this defaults to indexing into the 1D outputs array, assuming softmaxed outputs (default: None)
Returns:
mask(numpy array) : returns the output as numpy array
"""
return None
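
# A minimal sketch (hedged) of the occlusion idea behind
# keras_occlusion_sensitivity: slide a grey patch over the image and record the
# target-class probability under each patch; `model` is assumed to output
# softmaxed probabilities.
import numpy as np

def occlusion_map_sketch(model, img, out_class, patch_size=16):
    h, w = img.shape[0], img.shape[1]
    heatmap = np.zeros((h // patch_size, w // patch_size))
    for i in range(0, h - patch_size + 1, patch_size):
        for j in range(0, w - patch_size + 1, patch_size):
            occluded = img.copy()
            occluded[i:i + patch_size, j:j + patch_size, :] = 0.5  # grey patch
            probs = model.predict(occluded[np.newaxis])[0]
            heatmap[i // patch_size, j // patch_size] = probs[out_class]
    return heatmap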
def keras_visual_back_prop(model, x, result_dir, smoothing=False):
"""
Display the visual back propagation of the image according to the model
Args:
model: neural network (model) for explainability
        x (numpy array): numpy array of the image with proper shape that matches the model
result_dir: the resulting directory where the image will be saved
smoothing: whether to apply smoothing
Returns:
mask(numpy array) : returns the output as numpy array
"""
return None
def keras_gradcam_gb(model, img_path, layer_name, result_dir, cls=-1, save=True):
"""
Display the smooth visual back propagation of the image according to the model
Args:
model: neural network (model) for explainability
img_path: path of the image to calculate gradcam, guided back prop and guided gradcam for a given model
layer_name: name of the layer for which gradcam, guided back prop and guided gradcam is calculated
result_dir: the resulting directory where the image will be saved
        cls: class number to localize (-1 for most probable class)
save: saving the image in the result directory folder
Returns:
gradcam(numpy array) : returns the gradcam output as numpy array
gb(numpy array) : returns the guided backprop output as numpy array
guided_gradcam(numpy array) : returns the guided_gradcam output as numpy array
"""
return None
def keras_lime(model, img_path, result_dir, visualize=False):
"""
    Display the LIME explanation of the image according to the model
Args:
model: neural network (model) for explainability
img_path: path of the image to calculate gradcam, guided back prop and guided gradcam for a given model
result_dir: the resulting directory where the image will be saved
visualize: to visualize the results using matplotlib
Returns:
temp(numpy array): numpy array of the image
mask(numpy array) : returns the output as numpy array
"""
return None | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/explain/keras/explainability_methods.py | explainability_methods.py |
class Keras_gradcam:
"""keras gradcam provides the gradcam prediction of the image.
Grad-CAM uses the gradients of any target concept (say logits for 'dog' or even a caption),
flowing into the final convolutional layer to produce a coarse localization map
highlighting the important regions in the image for predicting the concept.
"""
def __init__(self, model=None):
pass
def load_image(self, path, preprocess=True):
"""Load and preprocess image.
Args:
path (String): Provides the path of the image
            preprocess (Boolean): whether to preprocess the image before returning it.
Returns:
x(numpy array): numpy array of the image
"""
return self
def deprocess_image(self, x):
"""Deprocess the image.
Args:
            x (numpy array): the array to deprocess
        Returns:
            x (numpy array, uint8): deprocessed uint8 array of the image
"""
return self
def normalize(self, x):
"""Utility function to normalize a tensor by its L2 norm
Args:
            x (numpy array): the array to normalize by its L2 norm
"""
return self
def build_guided_model(self):
"""Function returning modified model.
Changes gradient function for all ReLu activations
according to Guided Backpropagation.
"""
return self
def guided_backprop(self, input_model, images, layer_name):
"""Guided Backpropagation method for visualizing input saliency.
Args:
input_model: Provides the input_model for calculating guided_backprop
images(list): list of images.
layer_name: Name of the layer for which guided_backprop needs to be calculated.
Returns:
grads_val(numpy array): returns the gradient value.
"""
return self
def grad_cam(self, input_model, image, layer_name, cls, H, W):
"""GradCAM method for visualizing input saliency.
Args:
input_model: Provides the input_model for calculating grad cam
            image (numpy array): the input image.
            layer_name: Name of the layer for which grad_cam needs to be calculated.
            H (Height): Height of the image
            W (Width): Width of the image
            cls: class number to localize (-1 for most probable class)
Returns:
cam(numpy array): returns the grad_cam value.
"""
return self
    def grad_cam_batch(self, input_model, images, classes, layer_name, H=224, W=224):
"""GradCAM method for visualizing input saliency.
Same as grad_cam but processes multiple images in one run.
Args:
input_model: Provides the input_model for calculating grad cam
images(list): list of images.
layer_name: Name of the layer for which grad_cam needs to be calculated.
H(Height): Height of the image
W(Width): Width of the image
classes: classes for which grad_cam is calculated
Returns:
new_cams(numpy array): returns the grad_cam value.
"""
return self
def compute_saliency(self, model, guided_model, img_path, result_dir, layer_name, cls=-1, save=False):
"""Compute saliency using all three approaches.
Args:
            model: Provides the input model for calculating grad cam
            guided_model: the modified model returned by build_guided_model()
            img_path (str): path of the input image.
            layer_name: Name of the layer for which grad_cam needs to be calculated.
            cls: class number to localize (-1 for most probable class).
            result_dir: Provides the resulting directory to save the Gradcam image
            save (Boolean): To save the results
Returns:
gradcam(numpy array): returns the grad_cam value.
gb(numpy array): returns the guided_backprop value.
guided_gradcam(numpy array): returns the guided_gradcam value.
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/explain/keras/algorithms/keras_gradcam.py | keras_gradcam.py |
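
# A minimal usage sketch (hedged): build the guided model once, then compute all
# three saliency maps for an image. `model` is assumed to be a Keras CNN and
# "block5_conv3" is a hypothetical layer name (e.g. from VGG16).
kg = Keras_gradcam(model)
guided = kg.build_guided_model()
gradcam, gb, guided_gradcam = kg.compute_saliency(
    model, guided, img_path="input.jpg", result_dir="results",
    layer_name="block5_conv3", cls=-1, save=True)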
class GradCAM:
"""
Perform Grad CAM algorithm for a given input
Paper: [Grad-CAM: Visual Explanations from Deep Networks
via Gradient-based Localization](https://arxiv.org/abs/1610.02391)
"""
    def explain(self, validation_data, model, class_index, layer_name=None, use_guided_grads=True, loss=None, colormap=cv2.COLORMAP_VIRIDIS, image_weight=0.7):
"""
Compute GradCAM for a specific class index.
Args:
validation_data (Tuple[np.ndarray, Optional[np.ndarray]]): Validation data
to perform the method on. Tuple containing (x, y).
model (tf.keras.Model): tf.keras model to inspect
class_index (int): Index of targeted class
layer_name (str): Targeted layer for GradCAM. If no layer is provided, it is
                automatically inferred from the model architecture.
loss (function): Custom loss function for the provided model if needed. If set to None, this defaults to categorical cross-entropy, which is the standard for most multiclass classification tasks (default: None)
colormap (int): OpenCV Colormap to use for heatmap visualization
image_weight (float): An optional `float` value in range [0,1] indicating the weight of
the input image to be overlaying the calculated attribution maps. Defaults to `0.7`.
use_guided_grads (boolean): Whether to use guided grads or raw gradients
Returns:
numpy.ndarray: Grid of all the GradCAM
"""
return self
def infer_grad_cam_target_layer(model):
"""
Search for the last convolutional layer to perform Grad CAM, as stated
in the original paper.
Args:
model (tf.keras.Model): tf.keras model to inspect
Returns:
str: Name of the target layer
"""
return None
def get_gradients_and_filters(model, images, layer_name, class_index, use_guided_grads, loss_fn=None):
"""
Generate guided gradients and convolutional outputs with an inference.
Args:
model (tf.keras.Model): tf.keras model to inspect
images (numpy.ndarray): 4D-Tensor with shape (batch_size, H, W, 3)
layer_name (str): Targeted layer for GradCAM
class_index (int): Index of targeted class
use_guided_grads (boolean): Whether to use guided grads or raw gradients
loss_fn (function): Custom loss function for the provided model if needed. If set to None, this defaults to categorical cross-entropy, which is the standard for most multiclass classification tasks (default: None)
Returns:
Tuple[tf.Tensor, tf.Tensor]: (Target layer outputs, Guided gradients)
"""
return None
def generate_ponderated_output(outputs, grads):
"""
Apply Grad CAM algorithm scheme.
Inputs are the convolutional outputs (shape WxHxN) and gradients (shape WxHxN).
From there:
- we compute the spatial average of the gradients
- we build a ponderated sum of the convolutional outputs based on those averaged weights
Args:
            outputs (tf.Tensor): Target layer outputs, with shape (batch_size, Hl, Wl, Nf),
where Hl and Wl are the target layer output height and width, and Nf the
number of filters.
grads (tf.Tensor): Guided gradients with shape (batch_size, Hl, Wl, Nf)
Returns:
List[tf.Tensor]: List of ponderated output of shape (batch_size, Hl, Wl, 1)
"""
return None
def ponderate_output(output, grad):
"""
Perform the ponderation of filters output with respect to average of gradients values.
Args:
output (tf.Tensor): Target layer outputs, with shape (Hl, Wl, Nf),
where Hl and Wl are the target layer output height and width, and Nf the
number of filters.
            grad (tf.Tensor): Guided gradients with shape (Hl, Wl, Nf)
Returns:
tf.Tensor: Ponderated output of shape (Hl, Wl, 1)
"""
return None
def save(self, grid, output_dir, output_name):
"""
Save the output to a specific dir.
Args:
grid (numpy.ndarray): Grid of all the heatmaps
output_dir (str): Output directory path
output_name (str): Output name
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/explain/keras/algorithms/tf_explain/grad_cam.py | grad_cam.py |
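
# A minimal sketch (hedged) of the ponderation scheme described in
# generate_ponderated_output: spatially average the gradients per filter, then
# form a weighted sum of the filter outputs.
import tensorflow as tf

def ponderate_output_sketch(output, grad):
    # output, grad: (Hl, Wl, Nf) target-layer activations and gradients.
    weights = tf.reduce_mean(grad, axis=(0, 1))                 # (Nf,)
    cam = tf.reduce_sum(tf.multiply(weights, output), axis=-1)  # (Hl, Wl)
    return cam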
class SmoothGrad:
"""
Perform SmoothGrad algorithm for a given input
Paper: [SmoothGrad: removing noise by adding noise](https://arxiv.org/abs/1706.03825)
"""
def explain(self, validation_data, model, class_index, num_samples=5, noise=1.0, loss=None):
"""
Compute SmoothGrad for a specific class index
Args:
validation_data (Tuple[np.ndarray, Optional[np.ndarray]]): Validation data
to perform the method on. Tuple containing (x, y).
model (tf.keras.Model): tf.keras model to inspect
class_index (int): Index of targeted class
num_samples (int): Number of noisy samples to generate for each input image
noise (float): Standard deviation for noise normal distribution
loss (function): Custom loss function for the provided model if needed. If set to None, this defaults to categorical cross-entropy, which is the standard for most multiclass classification tasks (default: None)
Returns:
np.ndarray: Grid of all the smoothed gradients
"""
return self
def generate_noisy_images(images, num_samples, noise):
"""
Generate num_samples noisy images with std noise for each image.
Args:
images (numpy.ndarray): 4D-Tensor with shape (batch_size, H, W, 3)
num_samples (int): Number of noisy samples to generate for each input image
noise (float): Standard deviation for noise normal distribution
Returns:
np.ndarray: 4D-Tensor of noisy images with shape (batch_size*num_samples, H, W, 3)
"""
return None
def get_averaged_gradients(noisy_images, model, class_index, num_samples, loss_fn):
"""
Compute average of gradients for target class.
Args:
noisy_images (tf.Tensor): 4D-Tensor of noisy images with shape
(batch_size*num_samples, H, W, 3)
model (tf.keras.Model): tf.keras model to inspect
class_index (int): Index of targeted class
            num_samples (int): Number of noisy samples to generate for each input image
            loss_fn (function): loss function used when computing the gradients
Returns:
tf.Tensor: 4D-Tensor with smoothed gradients, with shape (batch_size, H, W, 1)
"""
return None
def save(self, grid, output_dir, output_name):
"""
Save the output to a specific dir.
Args:
            grid (numpy.ndarray): Grid of all the smoothed gradients
output_dir (str): Output directory path
output_name (str): Output name
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/explain/keras/algorithms/tf_explain/smoothgrad.py | smoothgrad.py |
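
# A minimal sketch (hedged) of the SmoothGrad recipe above: repeat each input
# num_samples times and add Gaussian noise; the gradients of the noisy copies
# are then averaged per original image.
import numpy as np

def generate_noisy_images_sketch(images, num_samples, noise):
    repeated = np.repeat(images, num_samples, axis=0)  # (N*num_samples, H, W, 3)
    return repeated + np.random.normal(0.0, noise, repeated.shape).astype(repeated.dtype)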
class IntegratedGradients:
"""
Perform Integrated Gradients algorithm for a given input
Paper: [Axiomatic Attribution for Deep Networks](https://arxiv.org/pdf/1703.01365.pdf)
"""
def explain(self, validation_data, model, class_index, n_steps=10, loss=None):
"""
Compute Integrated Gradients for a specific class index
Args:
validation_data (Tuple[np.ndarray, Optional[np.ndarray]]): Validation data
to perform the method on. Tuple containing (x, y).
model (tf.keras.Model): tf.keras model to inspect
class_index (int): Index of targeted class
n_steps (int): Number of steps in the path
loss (function): Custom loss function for the provided model if needed. If set to None, this defaults to categorical cross-entropy, which is the standard for most multiclass classification tasks (default: None)
Returns:
np.ndarray: Grid of all the integrated gradients
"""
return self
def get_integrated_gradients(interpolated_images, model, class_index, n_steps, loss_fn):
"""
Perform backpropagation to compute integrated gradients.
Args:
interpolated_images (numpy.ndarray): 4D-Tensor of shape (N * n_steps, H, W, 3)
model (tf.keras.Model): tf.keras model to inspect
class_index (int): Index of targeted class
n_steps (int): Number of steps in the path
Returns:
tf.Tensor: 4D-Tensor of shape (N, H, W, 3) with integrated gradients
"""
return None
def generate_interpolations(images, n_steps):
"""
Generate interpolation paths for batch of images.
Args:
images (numpy.ndarray): 4D-Tensor of images with shape (N, H, W, 3)
n_steps (int): Number of steps in the path
Returns:
numpy.ndarray: Interpolation paths for each image with shape (N * n_steps, H, W, 3)
"""
return None
def generate_linear_path(baseline, target, n_steps):
"""
Generate the interpolation path between the baseline image and the target image.
Args:
baseline (numpy.ndarray): Reference image
target (numpy.ndarray): Target image
n_steps (int): Number of steps in the path
Returns:
List(np.ndarray): List of images for each step
"""
return None
def save(self, grid, output_dir, output_name):
"""
Save the output to a specific dir.
Args:
            grid (numpy.ndarray): Grid of all the integrated gradients
output_dir (str): Output directory path
output_name (str): Output name
"""
return self | zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane/explain/keras/algorithms/tf_explain/integrated_gradients.py | integrated_gradients.py |
"End User License Agreement
These terms govern your access and use of our 3D modelling and simulation platform (the "Platform") and its associated and related services (together, the "Services"). These terms apply to all users of our Services, and are effective as soon as you agree to these terms by clicking "I agree", or as soon as you use our Services. Please read them carefully. If you do not agree with these terms, please refrain from using our Services.
This End User License Agreement ("EULA") is between you and Zetane Systems Inc, with a registered address at 1732 Blueberry Forest, Saint-Lazare, QC J7T 2K1, Canada ("Zetane"; "we"; "us").
If you have any questions on this EULA, you can reach us at [email protected].
In this EULA, the verb "use" implies accessing, installing, downloading, copying or otherwise benefiting from using our Services or from the object of the licences set forth in this EULA.
1. DEFINITIONS
* "Confidential Information" means any and all information of Zetane which has or will come into your possession concerning our business, properties, affairs or finances, including proprietary information and trade secrets. Confidential Information is indicated as confidential, or it is clear at the time of disclosure that the information ought to be handled as confidential information.
* "Commercial Licence" means a licence for a group of End Users acquired by Customer, who is responsible for allocating and managing the licences to End User. Commercial Licence may be used for commercial purposes, or for other purposes such as educational purposes.
* "Individual Licence" means a licence to use the Services for personal and non-commercial purposes.
* "Intellectual Property" means (a) any and all proprietary rights provided under patent law, copyright law (registered and unregistered copyrights and unpublished work of authorship), trademark law, design patents or industrial design law, semiconductor chip law, or any other statutory provision or common law principle applicable to the protection of intangible proprietary information or rights, including trade secret law, which may provide a right in either idea, formula, algorithm, concept, invention, or know-how generally, or the expression or use of such ideas, formulae, algorithms, concepts, inventors or know-how, and any and all applications, registrations, licences, sub-licences, continuation, reissues, extensions, franchises, agreements or any other evidence of a right in any of the foregoing.
* "Open Source Software" means any software licensed under Open Source Licence terms.
* "Open Source Licence Terms" means the licensing and/or distribution models commonly known as "open source software" or "free software" or any other licensing and/or distribution models pursuant to which software is made generally available to the public in source code form under terms that permit modification and redistribution of such software. By way of example only and without limitation, Open Source Licence Terms include any versions of the following agreements, licences or distribution models: (1) the GNU General Public Licence (GPL); (2) Lesser/Library GPL (LGPL); (3) the Common Development and Distribution Licence (CDDL); (4) the Artistic Licence (including without limitation PERL); (5) the Netscape Public Licence; (6) the Sun Community Source Licence (SCSL) or the Sun Industry Standards License (SISL); (7) the Apache Licence; (8) the Common Public Licence; (9) the Affero GPL (AGPL), (10) the Berkeley Software Distribution (BSD), and (11) the MIT Licence (MIT).
* "Professional Licence" means a licence to use the Service for a sole professional and may be used commercial purposes, or other purposes such as educative.
2. RELATIONSHIP BETWEEN YOU, CUSTOMER AND US
We provide services to customers who have entered into an agreement with us, each a "Customer". We make the following licences available to Customers:
* Individual Licenses
* Professional Licenses
* Commercial Licenses
Customers with Commercial Licenses ("Commercial Customers") can assign licenses to third parties, each an "End User". You acknowledge and agree that Commercial Customers may (1) terminate, suspend or allocate access to End Users without our intervention and (2) control some of the functionalities of the Services for End Users. You agree that it is solely the Commercial Customer's responsibility to (a) inform you of its policies and practices; (b) obtain any rights, permissions or consents required for us to perform the Services and (c) resolve any dispute with you regarding your use and access of the Services.
Each Customer is a third-party beneficiary to this EULA and can enforce this EULA as required to protect their liability against us.
Customers that have licenses for End Users may terminate, suspend or allocate such licenses without consulting with us. Such Customers may also control some functionalities associated with your access through administrative accounts.
3. REGISTRATION
Our Services are not intended for anyone under the age of 16 years old. If you are under the age of 16 years old, you may use our Services by providing us with a written notice of parental consent.
When using our Services, you will be required to create an online identifier and a password to access your account with us (the "Registration Data"). You agree that you are responsible for maintaining the confidentiality of your Registration Data, and in particular, of your password. If you become aware of any unauthorized access or use of your Registration Data, you agree to notify us without undue delay at [email protected].
4. ACCEPTABLE USE
You agree to use the Services only for lawful purposes and to follow the following rules when using the Services. You represent and warrant that you will not use the Services in any manner:
a) that is prohibited by law or regulation or our policies made available to you;
b) that will disrupt third parties' use or enjoyment of the Services, including if this use results in automated, constant and repeated requests for data other than as permitted and has a negative effect on our systems or network, including abnormal usage that overloads servers or causes portions of our network to be blocked (e.g. denial-of-service and distributed-denial-of-service attacks);
c) to create, transmit, distribute or store material that violates Intellectual Property, privacy, publicity or other personal rights of individuals, export control, or that can otherwise be threatening, abusive, hateful or constitutes or encourages conduct that would be considered a fraud, a criminal offence or likely to give rise to civil liability;
d) that results in the sharing of identifiers and passwords with other individuals or entities, including through any time-sharing service, service bureau, network or by any other means;
e) that involves using any robot, spider, scraper, deep link or other similar automated data gathering or extraction tools, programs, algorithms, or methodology to access, acquire, copy or monitor the Services or any portion of the Services;
f) that involves decompiling, disassembling, or otherwise reverse engineering or attempting to reconstruct or discover any source code or ideas or algorithms of any of the Services underlying technology by any means whatsoever;
g) that involves penetrating our security, including, without limitation:
a. by posting or transmitting any file which contains viruses, worms, Trojan horses or any other contaminating or destructive features;
b. by interfering with the proper working of the Services;
c. by attempting to hack any security requirements or processes in the use of the Services;
d. by attempting to access any part of the Services (or any of their related systems, networks, servers or other equipment) which Customer is not authorized to access;
e. by attempting to disrupt in any manner the operation of the Services, its servers or network;
f. by disobeying any requirements, procedures, policies or regulations of your network connected to the Services;
g. by manipulating identifiers to disguise the origin of any content transmitted or uploaded on to the Services, or the source of any content;
h. by modifying or altering the Services in any unauthorized manner.
(Collectively, the "Abuses").
You agree and acknowledge that an indirect or attempted violation of this Section shall be considered an Abuse. If we discover that you have committed an Abuse, are planning to commit an Abuse, or otherwise encourage or facilitate the commission of an Abuse, we may suspend part of your access to the Services without any prior notice.
5. GRANT OF LICENSES
Subject to your respect of this EULA, we grant you, for the Term, a non-exclusive, non-sublicensable, non-transferable, limited and revocable right and license to access and use our Services (for greater precision, including to install and download the Platform on your device). All rights not expressly granted hereunder are reserved by us. You may not sell, transfer, sublicense, publish, disclose or otherwise make available the Services to any third party without our prior written consent.
6. INTELLECTUAL PROPERTY
All rights not granted are reserved by us. We are the sole and exclusive owner of our pre-existing Intellectual Property. We do not grant you any rights, implied or otherwise, to our Intellectual Property. You may make suggestions, enhancement requests, recommendations or other feedbacks ("Feedbacks"). We will be the sole owner of such Feedback as long as they relate to the Services and you hereby assign to us, without limitation of any kind, all rights, titles and interests therein.
7. OPEN SOURCE SOFTWARE
The Services may contain Open Source Software. The terms and conditions governing your use of such Open Source Software shall be in accordance with the Open Source Licence Terms associated with such Open Source Software.
8. THIRD PARTY BENEFICIARY
The Software may contain licensed software, data and material from third parties ("Third Party Material"). Licensors of Third-Party Material included in the Software are third-party beneficiaries of these Terms of Service and the EULA.
Each Customer is also a third-party beneficiary of this EULA.
9. DISCLAIMERS
You are solely responsible for the content, accuracy, completeness and lawfulness of the data that you upload, download, install, or otherwise process through our Services. We take no responsibility for the content, accuracy, completeness and lawfulness of such data. The Services are provided on an "as is" basis. To the maximum extent permitted by law, we make no other representations, conditions, warranties or guarantees, express or implied, regarding the accuracy, reliability or completeness of the Services. We expressly disclaim any and all implied warranties of merchantability, fitness for a particular purpose, title and non-infringement. We do not warrant that the Services will be uninterrupted or error free. We do not make any warranty as to the results that may be obtained from the use of the Services. You hereby waive your rights in any of the foregoing warranties, representations or conditions, whether express or implied.
We do not operate the networks of, or have any control over the operations of, the wireless or other communications service providers through which you may access the Services. Accordingly (i) we disclaim all responsibility and liability for or relating to your use of any such providers to access the Services and (ii) we cannot guarantee the privacy or security of wireless data transmissions. You should consult your Internet service provider about their privacy and security practices.
No oral advices or written or electronically delivered information given by us, our affiliates, our officers, directors, employees, agents, providers, merchants, sponsors, licensors or the like shall create any warranty.
10. CONFIDENTIAL INFORMATION
You acknowledge that you may be exposed to or acquire information in connection with the Services that qualifies as Confidential Information. For the purpose of these Terms of Service, Confidential Information shall not include:
* Information which is generally known or available publicly, including information which becomes generally known through no fault or breach of the Receiving Party;
* Information which was known by the Receiving Party prior to receiving the information from the Disclosing Party;
* Information which is independently developed by the Receiving Party without the direct or indirect use of Confidential Information;
* Information which is lawfully obtained from a third party without violation of a confidentiality obligation of the Disclosing Party.
You agree to keep our Confidential Information confidential, subject to applicable laws, and to apply no less than reasonable care to prevent unauthorized access, use or disclosure of our Confidential Information. You shall not use our Confidential Information to develop competitive services or allow, directly or indirectly, third parties to develop such competitive services.
11. INDEMNITY
You agree to defend us and hold us harmless (including our affiliates, subsidiaries, licensors, suppliers, service providers, contractors, and agents, as well as their respective employees, directors, officers, contractors and agents) against any and all claims, penalties, fines, costs, expenses (including reasonable attorney's fees), actions, damages, losses or liabilities directly or indirectly arising out of, related to, in connection with or resulting from:
* A breach of this EULA;
* A breach of applicable laws;
* Your gross negligence, fraudulent behavior and willful acts.
12. LIMITATION OF LIABILITY
To the maximum extent permitted by law, neither party will be liable to the other party for any incidental, consequential, or exemplary damages, in connection with the performance of the Services, even if the party is aware of the possibility of occurrence of such damages.
To the maximum extent permitted by law, our total liability to you for any damages arising out of or in connection with this EULA, whether arising by statute, contract, tort or otherwise, will not exceed one hundred Canadian dollars (CAD $100).
13. EXPORT CONTROL
You may not export, re-export, sell, transfer, divert or otherwise dispose of our Services in any destination which is restricted or prohibited by Canada, the country in which you are located or other applicable jurisdictions. You agree not to use the Services to disturb international peace or for other purposes which are restricted by international treaties and applicable export control laws.
14. GOVERNING LAWS AND JURISDICTIONS
These Terms of Service and all transactions hereunder shall be governed by and construed in accordance with the laws of Ontario, Canada, without giving effect to any choice or conflict of law provision or rule (whether in Ontario or any other jurisdiction) that would cause the application of laws of any jurisdiction other than those of Ontario, Canada.
Any legal suit, action or proceeding, arising out of these Terms of Service or any transactions hereunder shall be instituted exclusively in Toronto, Ontario, Canada, and each party irrevocably submits to the exclusive jurisdiction of such courts in any such suit, action or proceeding.
15. MISCELLANEOUS
If any provision of this EULA is in violation of any governmental statute or regulation, or is illegal for any reason, said provision shall be self-deleting without affecting the validity of the remaining provisions.
The waiver of a breach of any provision of this EULA will not operate or be interpreted as a waiver for any other or subsequent breach.
You may not use our name or any logo, trademark, service mark, business name, trade name, domain name or social media account name or handle owned or licensed by us or generally speaking, our brand features, whether registered or not, and including any good will associated therewith, in any manner without our prior written consent.
We shall not be liable for delays in or failure of performance hereunder due to causes beyond our reasonable control, including, but not limited to, acts of God or public enemy, acts of government in either its sovereign or contractual capacity, flood, earthquake or other natural disaster, strike or other labor disputes, acts of war, acts of civil disobedience, denial-of-service and distributed-denial-of-service attacks, ransomware and other cyber-attacks that are not caused or facilitated by our gross negligence.
We may assign this EULA, in whole or in part, at any time with or without notice to you. You may not assign this EULA, or part of it, to any other person without our prior written approval. Any attempt by you to do so is void. You may not transfer to anyone else, either temporarily or permanently, any rights to use the Services or any part of the Services.
| zetane-engine | /zetane_engine-1.7.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl/zetane_engine-1.7.4.dist-info/LICENSE.md | LICENSE.md |
"End User License Agreement
These terms govern your access and use of our 3D modelling and simulation platform (the "Platform") and its associated services (together, the "Services") and related services (together, the "Services"). These terms apply to all users of our Services, and are effective as soon as you agree to these terms by clicking "I agree", or as soon as you use our Services. Please read them carefully. If you do not agree with these terms, please refrain from using our Services.
This End User License Agreement ("EULA") is between you and Zetane Systems Inc, with a registered address at 1732 Blueberry Forest, Saint-Lazare, QC J7T 2K1β, Canada ("Zetane"; "we"; "us").
If you have any questions on this EULA, you can reach us at [email protected].
In this EULA, the verb "use" implies accessing, installing, downloading, copying or otherwise benefiting from using our Services or from the object of the licences set forth in this EULA.
1. DEFINITIONS
* "Confidential Information" means any and all information of Zetane which has or will come into your possession concerning our business, properties, affairs or finances, including proprietary information and trade secrets. Confidential Information is indicated as confidential, or it is clear at the time of disclosure that the information ought to be handled as confidential information.
* "Commercial Licence" means a licence for a group of End Users acquired by Customer, who is responsible for allocating and managing the licences to End User. Commercial Licence may be used for commercial purposes, or for other purposes such as educational purposes.
* "Individual Licence" means a licence to use the Services for personal and non-commercial purposes.
* "Intellectual Property" means (a) any and all proprietary rights provided under patent law, copyright law (registered and unregistered copyrights and unpublished work of authorship), trademark law, design patents or industrial design law, semiconductor chip law, or any other statutory provision or common law principle applicable to the protection of intangible proprietary information or rights, including trade secret law, which may provide a right in either idea, formula, algorithm, concept, invention, or know-how generally, or the expression or use of such ideas, formulae, algorithms, concepts, inventors or know-how, and any and all applications, registrations, licences, sub-licences, continuation, reissues, extensions, franchises, agreements or any other evidence of a right in any of the foregoing.
* "Open Source Software" means any software licensed under Open Source Licence terms.
* "Open Source Licence Terms" means the licensing and/or distribution models commonly known as "open source software" or "free software" or any other licensing and/or distribution models pursuant to which software is made generally available to the public in source code form under terms that permit modification and redistribution of such software. By way of example only and without limitation, Open Source Licence Terms include any versions of the following agreements, licences or distribution models: (1) the GNU General Public Licence (GPL); (2) Lesser/Library GPL (LGPL); (3) the Common Development and Distribution Licence (CDDL); (4) the Artistic Licence (including without limitation PERL); (5) the Netscape Public Licence; (6) the Sun Community Source Licence (SCSL) or the Sun Industry Standards License (SISL); (7) the Apache Licence; (8) the Common Public Licence; (9) the Affero GPL (AGPL), (10) the Berkeley Software Distribution (BSD), and (11) the MIT Licence (MIT).
* "Professional Licence" means a licence to use the Service for a sole professional and may be used commercial purposes, or other purposes such as educative.
2. RELATIONSHIP BETWEEN YOU, CUSTOMER AND US
We provide services to customer who have entered into an agreement with us, each a "Customer". We make the following licences available to Customers:
* Individual Licenses
* Professional Licenses
* Commercial Licenses
Customer with Commercial Licenses ("Commercial Customer") can assign licenses to third parties, each a "End User". You acknowledge and agree that Commercial Customers may (1) terminate, suspend or allocate access to End Users without our intervention and (2) control some of the functionalities of the Services for End Users. You agree that it is solely Commercial Customerβs responsibility to (a) inform you of its policies and practices; (b) obtain any rights, permissions or consents required for us to perform the Services and (c) resolve any dispute with you regarding your use and access of the Services.
Each Customer is a third-party beneficiary to this EULA and can enforce this EULA as required to protect their liability against us.
Customer that have for End Users may terminate, suspend or allocate such licenses without consulting with us. Such Customer may also control some functionalities associated with your access through administrative accounts.
3. REGISTRATION
Our Services are not intended for anyone under the age of 16 years old. If you are under the age of 16 years old, you may use our Services by providing us with a written notice of parental consent.
When using our Services, you will be required to create an online identifier and a password to access your account with us (the "Registration Data"). You agree that you are responsible for maintaining the confidentiality of your Registration Data, and in particular, of your password. If you become aware of any unauthorized access or use of your Registration Data, you agree to notify us without undue delay at [email protected].
4. ACCEPTABLE USE
You agree to use the Services only for lawful purposes and follow the following rules when using the Services. You represent and warrant that you will not use of the Services in any manner:
a) that is prohibited by law or regulation or our policies made available to you;
b) that will disrupt third partiesβ use or enjoyment of the Services, including if this use results in automated, constant and repeated requests for data other than as permitted and has a negative effect on our systems or network, including abnormal usage that overloads servers or causes portions of our network to be blocked (e.g. denial-of-services and distributed-denial-of-services attacks);
c) to create, transmit, distribute or store material that violates Intellectual Property, privacy, publicity or other personal rights of individuals, export control, or that can otherwise be threatening, abusive, hateful or constitutes or encourages conduct that would be considered a fraud, a criminal offence or likely to give rise to civil liability;
d) that results in (i) the sharing of identifiers and passwords with other individuals or entities, including through any time-sharing service, service bureau, network or by any other means;
e) that involves using any robot, spider, scraper, deep link or other similar automated data gathering or extraction tools, programs, algorithms, or methodology to access, acquire, copy or monitor the Services or any portion of the Services;
f) that involves decompiling, disassembling, or otherwise reverse engineering or attempting to reconstruct or discover any source code or ideas or algorithms of any of the Services underlying technology by any means whatsoever;
g) that involves penetrating our security, including, without limitation:
a. by posting or transmitting any file which contains viruses, worms, Trojan horses or any other contaminating or destructive features;
b. by interfering with the proper working of the Services;
c. by attempting to hack any security requirements or processes in the use of the Services;
d. by attempting to access any part of the Services (or any of their related systems, networks, servers or other equipment) which Customer is not authorized to access;
e. by attempting to disrupt in any manner the operation of the Services, its servers or network;
f. by disobeying any requirements, procedures, policies or regulations of your network connected to the Services;
g. by manipulating identifiers to disguise the origin of any content transmitted or uploaded on to the Services, or the source of any content;
h. by modifying or altering the Services in any unauthorized manner.
(Collectively, the "Abuses").
You agree and acknowledge that an indirect or attempted violation of this Section shall be considered an Abuse. If we discover that you have committed an Abuse or is planning to commit an Abuse or otherwise encourage or facilitate the commission of an Abuse, we may suspend part your access to the Services without any prior notice.
5. GRANT OF LICENSES
Subject to your respect of this EULA, we grant you, for the Term, a non-exclusive, non-sublicensable, non-transferable, limited and revocable right and license to access and use our Services (for greater precision, including to install and download the Platform on your device). All rights not expressly granted hereunder are reserved by us. You may not sell, transfer, sublicense, public, disclose or otherwise make available the Services to any third party without our prior written consent.
6. INTELLECTUAL PROPERTY
All rights not granted are reserved by us. We are the sole and exclusive owner of our pre-existing Intellectual Property. We do not grant you any rights, implied or otherwise, to our Intellectual Property. You may make suggestions, enhancement requests, recommendations or other feedbacks ("Feedbacks"). We will be the sole owner of such Feedback as long as they relate to the Services and you hereby assign to us, without limitation of any kind, all rights, titles and interests therein.
7. OPEN SOURCE SOFTWARE
The Services may contain Open Source Software. The terms and conditions governing your use of such Open Source Software shall be in accordance with the Open Source Licence Terms associated with such Open Source Software.
8. THIRD PARTY BENEFICIARY
The Software may contain licensed software, data and material from third party ("Third Party Material"). Licensors of Third-Party Material included in the Software are third party beneficiary of these Terms of Service and the EULA.
Each Customer is also a third-party beneficiary of this EULA.
9. DISCLAIMERS
You are solely responsible for the content, accuracy, completeness and lawfulness of the data that you upload, download, install, or otherwise process through our Services. We take no responsibility for the content, accuracy, completeness and lawfulness of such data. The Services are provided on an "as is" basis. To the maximum extent permitted by law, we make no other representations, conditions, warranties or guarantees, express or implied, regarding the accuracy, reliability or completeness of the Services. We expressly disclaim any and all implied warranties of merchantability, fitness for a particular purpose, title and non-infringement. We do not warrant that the Services will be uninterrupted or error free. We do not make any warranty as to the results that may be obtained from the use of the Services. You hereby waive your rights in any of the foregoing warranties, representations or conditions, whether express or implied.
We do not operate the networks of, or have any control over the operations of, the wireless or other communications service providers through which you may access the Services. Accordingly (i) we disclaim all responsibility and liability for or relating to your use of any such providers to access the Services and (ii) we cannot guarantee the privacy or security of wireless data transmissions. You should consult your Internet service provider about their privacy and security practices.
No oral advices or written or electronically delivered information given by us, our affiliates, our officers, directors, employees, agents, providers, merchants, sponsors, licensors or the like shall create any warranty.
10. CONFIDENTIAL INFORMATION
You acknowledge that you may be exposed to or acquire information in connection with the Services that classifies as Confidential Information. For the purpose of these Terms of Services, Confidential Information shall not include:
* Information which is generally known or available publicly, including information which becomes generally known through no fault or breach of the Receiving Party;
* Information which was known by the Receiving Party prior to receive the information from the Disclosing Party;
* Information which is independently developed by the Receiving Party without the direct or indirect use of Confidential Information;
* Information which is lawfully obtained from a third party without violation of a confidentiality obligation of the Disclosing Party.
You agree to keep our Confidential Information as confidential, subject to applicable laws, and apply no less than reasonable care to prevent unauthorized access, use or disclose of our Confidential Information. You shall not use our Confidential Information to develop competitive services or allow, directly or indirectly, third parties to develop such competitive services.
11. INDEMNITY
You agree to defend us and hold us harmless (including our affiliates, subsidiaries, licensors, suppliers, service providers, contractors, and agents, as well as their respective employees, directors, officers, contractors and agents) against any and all claims, penalties, fine, cost, expenses (including reasonable attorneyβs fees), actions, damages, losses or liabilities directly or indirectly arising out of, related to, in connection with or resulting from:
* A breach of this EULA;
* A breach of applicable laws;
* Your gross negligence, fraudulent behavior and willful acts.
12. LIMITATION OF LIABILITY
To the maximum extent permitted by law, neither party will be liable to the other party for any incidental, consequential, or exemplary damages, in connection with the performance of the Services, even if the party is aware of the possibility of occurrence of such damages.
To the maximum extent permitted by law, our total liability to you for any damages arising out or in connection with this EULA, whether arising by statute, contract tort or otherwise, will not exceed one hundred Canadian dollars (100$).
13. EXPORT CONTROL
You may not export, re-export, sell, transfer, divert or otherwise dispose of our Services in any destination which is restricted or prohibited by Canada, the country in which you are located or other applicable jurisdictions. You agree not to use the Services to disturb international peace or for other purposes which are restricted by international treaties and applicable export control laws.
14. GOVERNING LAWS AND JURISDICTIONS
These Terms of Service and all transactions hereunder shall be governed by and construed in accordance with the laws of Ontario, Canada, without giving effect to any choice or conflict of law provision or rule (whether in Ontario or any other jurisdiction) that would cause the application of laws of any jurisdiction other than those of Ontario, Canada.
Any legal suit, action or proceeding, arising out of these Terms of Service or any transactions hereunder shall be instituted exclusively in Toronto, Ontario, Canada, and each party irrevocably submits to the exclusive jurisdiction of such courts in any such suit, action or proceeding.
15. MISCELLEANOUS
If any provision of this EULA is in violation of any governmental statute or regulation, or is illegal for any reason, said provision shall be self-deleting without affecting the validity of the remaining provisions.
The waiver of a breach of any provision of this EULA will not operate or be interpreted as a waiver for any other or subsequent breach.
You may not use our name or any logo, trademark, service mark, business name, trade name, domain name or social media account name or handle owned or licensed by us or generally speaking, our brand features, whether registered or not, and including any good will associated therewith, in any manner without our prior written consent.
We shall not be liable for delays in or failure of performance hereunder due to causes beyond its reasonable control, including, but not limited to, acts of God or public enemy, acts of government in either its sovereign or contractual capacity, flood, earthquake or other natural disaster, strike or other labor disputes, acts of war, acts of civil disobedience, denial-of-services and distributed-denial-of-services, ransomware and other cyber-attacks that are not caused or facilitated by our gross negligence.
We may assign this EULA, in whole or in part, at any time with or without notice to you. You may not assign this EULA, or part of it, to any other person without our prior written approval. Any attempt by you to do so is void. You may not transfer to anyone else, either temporarily or permanently, any rights to use the Services or any part of the Services.
| zetane | /zetane-1.7.4-cp39-cp39-macosx_10_14_x86_64.whl/zetane-1.7.4.dist-info/LICENSE.md | LICENSE.md |
# ZetaPush SDK #
This module is an SDK to connect to the ZetaPush platform with Python (Python 3 only).
## Installation
pip3 install zetapush_python
## Usage
### Imports
First, we need to import 2 objects to use the ZetaPush SDK:
- Client
- Service
The `Client` is used to handle the connection with the ZetaPush backend and the `Service` is used to call services from the client, in particular the service that calls macroscripts.
To import them we write:
from zetapush_python import Client
from zetapush_python import Service
### Connection to ZetaPush backend
First, we need to create the `Client` to handle the connection:
zpClient = Client(businessId="Rj7PY_1I", apiUrl="http://demo-1.zpush.io/zbo/pub/business/")
The *businessId* is the identifier of the sandbox in the ZetaPush backend. For ZetaPush, a sandbox includes the whole application backend. Then, we have the *apiUrl*; it is optional in production. During development we send you the apiUrl if necessary.
Now, we need to launch the connection with our credentials. For this example we use these credentials:
- login: "user"
- password: "password"
zpClient.connect(login="user", password="password")
If we want to connect to the ZetaPush platform with a weak connection, we must **not** send the *login* and *password* parameters. By default, the SDK uses the authentication service named *weak_0*.
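For instance, a weak (anonymous) connection is opened simply by calling `connect` without credentials:

    zpClient.connect()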
#### Set the authentication service
If we need to set the authentication service name, we can set the *authenticationService* parameter. In this case we also need to set the *authenticationType* parameter to define the type of authentication we use ('simple' or 'weak').
For example we can write:
zpClient.connect(authenticationService="simple_1", authenticationType="simple")
#### Callback connection
It is useful to run a function when the connection is established.
For this, we implement the *onConnectionSuccess()* callback:
def onConnectionSuccess():
print("OnConnectionSuccess")
zpClient.onConnectionSuccess = onConnectionSuccess
### Call a macroscript
In this step, we call a macroscript. For this, we need to configure a service that manages macroscript calls.
serviceMacro = Service("macro_0", zpClient)
Here *macro_0* is the *deployment ID* of our macroscript service. By default we use *macro_0*.
*zpClient* is the Client that we previously created.
In our example, we want to call a macroscript named **test** that takes 2 parameters, *num1* and *num2*. The macroscript returns the sum of *num1* and *num2* in an object named *result*.
To call the macroscript we use:
serviceMacro.send('test', { 'num1': 3, 'num2': 5})
To listen for the result, we define a function and register it as the handler:
def handleTest(params):
print("result => ", params['result']
serviceMacro.on('test', handleTest)
### Stop the communication with ZetaPush
To disconnect a user from the ZetaPush platform, we can use:
zpClient.disconnect()
Then, it is necessary to properly close the communication with the ZetaPush backend at the end of the program. For this we use:
zpClient.stopZPConnection()
### Send JSON in macro
We can also send JSON to a macroscript.
In our example we have a macroscript named *testJson(name, data)* that has 2 parameters:
- name : String
- data : Map (or JSON)
We can call this macroscript with this syntax:
serviceMacro.send('testJson', { 'name': 'sensor1', 'data': { 'value': 15, 'unit': 'ppm' }} )
## Complete example
Here is a complete example that opens a connection to the ZetaPush platform, calls a macroscript named *test*, prints its result and closes the communication after a few seconds:
from zetapush_python import Client
from zetapush_python import Service
import time
# Create the Client to handle the connection with ZetaPush
zpClient = Client(businessId="Rj7PY_1I", apiUrl="http://demo-1.zpush.io/zbo/pub/business/")
# We create the macro service
serviceMacro = Service("macro_0", zpClient)
# Define a function that will be called when the connection is established
def onConnectionSuccessful():
print("ZetaPush::ConnectionSuccess")
        # We call the macroscript 'test'
serviceMacro.send('test', { 'num1': 3, 'num2': 5})
# We define a function called when the macroscript "test" return us a result
def handleTest(params):
print("result => ", params['result'])
    # Assign the function that will be called when the connection is established
zpClient.onConnectionSuccess = onConnectionSuccessful
    # We register a function to handle the result of the 'test' macroscript
serviceMacro.on('test', handleTest)
# Launch the connection with our credentials
    zpClient.connect(login="user", password="password")
# Pause the program during 2 secondes
time.sleep(2)
# Properly close the communication with ZetaPush
zpClient.stopZPConnection()
| zetapush_python | /zetapush_python-0.1.5.tar.gz/zetapush_python-0.1.5/README.md | README.md |
# zetapy
Repository containing ZETA functions and dependencies. For an example of how to use the code, check example.py.
Note on updates and maintenance: the original code is written and maintained in MATLAB by Jorrit Montijn (https://github.com/JorritMontijn/ZETA). This Python repository is maintained by Guido Meijer (original Python port by Alexander Heimel)
The article describing ZETA has been published in eLife: https://elifesciences.org/articles/71969
This repository contains three main functions; a short usage sketch follows the list:
1) getZeta: Calculates the Zenith of Event-based Time-locked Anomalies (ZETA) for spike times of a single neuron. Outputs a p-value.
2) getMultiScaleDeriv: Calculates instantaneous firing rates for trace-based data, such as spike-time/z-score combinations that underlie ZETA.
3) getIFR: Wrapper function for getMultiScaleDeriv.m when the input data are spike times and event times. Use this as you would a PSTH function.
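A minimal usage sketch based on the descriptions above (the import path, call signature and return value are assumptions, not taken from this README; see example.py for the actual API):

```python
import numpy as np
from zetapy import getZeta  # assumed import path

# hypothetical data: spike times (s) of one neuron, and stimulus onset times (s)
vecSpikeTimes = np.sort(np.random.rand(1000) * 100.0)
arrEventTimes = np.arange(0.5, 99.5, 1.0)

# ZETA-test: p-value for whether the neuron responds to the events
dblZetaP = getZeta(vecSpikeTimes, arrEventTimes)
print("ZETA-test p-value:", dblZetaP)
```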
## Rationale for ZETA
Neurophysiological studies depend on a reliable quantification of whether and when a neuron responds to stimulation, be it sensory, optogenetic or otherwise. However, current statistical analysis methods to determine a neuron's responsiveness require arbitrary parameter choices, such as a binning size. This choice can change the results of the analysis, which invites bad statistical practice and reduces the replicability of analyses. Moreover, many methods, such as bin-wise t-tests, only detect classically mean-rate modulated cells. Especially with the advent of techniques that yield increasingly large numbers of cells, such as Neuropixels recordings, it is important to use tests for cell inclusion that require no manual curation. Here, we present the parameter-free ZETA-test, which outperforms common approaches in the sense that it includes more cells at a similar false-positive rate.
Finally, ZETA's timescale-, parameter- and binning-free nature allowed us to implement a ZETA-derived algorithm (using multi-scale derivatives) to calculate peak onset and offset latencies in neuronal spike trains with theoretically unlimited temporal resolution.
Please send any questions or comments to j.montijn at nin.knaw.nl.

| zetapy | /zetapy-2.7.2.tar.gz/zetapy-2.7.2/README.md | README.md |
[](https://discord.gg/qUtxnK2NMf)
# Zeta - Seamlessly Create Zetascale Transformers
[](https://zeta.readthedocs.io)
<p>
<a href="https://github.com/kyegomez/zeta/blob/main/LICENSE"><img alt="MIT License" src="https://img.shields.io/badge/license-MIT-blue.svg" /></a>
<a href="https://pypi.org/project/zetascale"><img alt="MIT License" src="https://badge.fury.io/py/zetascale.svg" /></a>
</p>
Create Ultra-Powerful Multi-Modality Models Seamlessly and Efficiently in as few lines of code as possible.
## Installation
To install:
```
pip install zetascale
```
To get hands-on and develop it locally:
```
git clone https://github.com/kyegomez/zeta.git
cd zeta
pip install -e .
```
## Initiating Your Journey
Creating a model empowered with the aforementioned breakthrough research features is a breeze. Here's how to quickly materialize the renowned Flash Attention:
```python
import torch
from zeta import FlashAttention
q = torch.randn(2, 4, 6, 8)
k = torch.randn(2, 4, 10, 8)
v = torch.randn(2, 4, 10, 8)
attention = FlashAttention(causal=False, dropout=0.1, flash=True)
output = attention(q, k, v)
print(output.shape)
```
# Documentation
[Click here for the documentation, it's at zeta.apac.ai](https://zeta.apac.ai)
## Acknowledgments
Zeta is a masterpiece inspired by LucidRains's repositories and elements of [FairSeq](https://github.com/facebookresearch/fairseq) and [UniLM](https://github.com/kyegomez/unilm).
## Contributing
We're dependent on you for contributions; it's only Kye maintaining this repository, and it's very difficult. With that said, any contribution is infinitely appreciated, not just by me but by Zeta's users, who depend on this repository to build the world's best AI models.
* Head over to the project board to look at open features to implement or bugs to tackle
| zetascale | /zetascale-0.4.4.tar.gz/zetascale-0.4.4/README.md | README.md |
import copy
from pathlib import Path
import torch
import torch.nn.functional as F
from beartype import beartype
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from torch import nn
def log(t, eps=1e-10):
return torch.log(t.clamp(min=eps))
def exists(val):
return val is not None
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
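# Sampling note: gumbel_sample uses the Gumbel-max trick; taking the argmax of
# (logits / temperature) + Gumbel noise draws a sample from the categorical
# distribution softmax(logits / temperature).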
def masked_mean(seq, mask=None, dim=1, keepdim=False):
if not exists(mask):
return seq.mean(dim=dim)
if seq.ndim == 3:
mask = rearrange(mask, 'b n -> b n 1')
masked_seq = seq.masked_fill(~mask, 0.)
numer = masked_seq.sum(dim=dim, keepdim=keepdim)
denom = mask.sum(dim=dim, keepdim=keepdim)
masked_mean = numer / denom.clamp(min=1e-3)
masked_mean = masked_mean.masked_fill(denom == 0, 0.)
return masked_mean
@beartype
class RewardModel(nn.Module):
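    """Reward model head over a deep-copied base transformer.

    The copied ``model`` (optionally extended with LoRA parameters under
    ``reward_lora_scope``) embeds the sequence. Learned prompt/response
    embeddings mark which tokens belong to the prompt versus the response,
    the token embeddings are mean-pooled under ``mask``, and a linear head
    predicts either a scalar reward or logits over ``num_binned_output``
    bins; when ``labels`` are given, the matching loss is returned instead.
    """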
def __init__(
self,
model,
dropout = 0.1,
num_binned_output = 0.,
use_lora=True,
lora_r=8,
reward_lora_scope="reward",
):
super().__init__()
self.model = copy.deepcopy(model)
self.model.set_dropout(dropout)
self.reward_lora_scope = reward_lora_scope if use_lora else None
if exists(self.reward_lora_scope):
self.model.add_finetune_params(reward_lora_scope, lora_r=lora_r)
dim = model.dim
self.binned_output = num_binned_output > 1
self.prompt_embed = nn.Parameter(torch.zeros(1, 1, dim))
self.response_embed = nn.Parameter(torch.zeros(1, 1, dim))
if self.binned_output:
self.to_pred = nn.Linear(dim, num_binned_output)
else:
self.to_pred = nn.Sequential(
nn.Linear(dim, 1, bias=False),
Rearrange('... 1 -> ...')
)
def load(self, path):
path = Path(path)
assert path.exists()
self.load_state_dict(torch.load(path))
def finetune_parameters(self):
return [
*self.to_pred.parameters(),
*(self.model.finetune_parameters(self.reward_lora_scope) \
if exists(self.reward_lora_scope) else self.model.parameters())
]
def forward(
self,
x,
mask=None,
prompt_mask=None,
prompt_lengths=None,
labels=None,
sample=None,
sample_temperature=1.,
disable_lora=False
):
assert not(exists(prompt_mask) and exists(prompt_lengths))
if exists(prompt_lengths):
batch, seq_len = x.shape
arange = torch.arange(seq_len, device=x.device)
prompt_mask = repeat(arange, 'n -> b n', b=batch) < rearrange(prompt_lengths, 'b -> b 1')
        # the model needs to know which tokens are prompt and which are response
extra_embed=None
if exists(prompt_mask):
extra_embed = torch.where(
rearrange(prompt_mask, 'b n -> b n 1'),
self.prompt_embed,
self.response_embed
)
embeds = self.model(
x,
extra_embed=extra_embed,
return_only_embedding=True,
disable_lora=disable_lora,
finetune_scope=self.reward_lora_scope
)
pooled = masked_mean(embeds, mask, dim=1)
pred = self.to_pred(pooled)
if sample and self.binned_output:
assert not exists(labels)
pred = gumbel_sample(pred, temperature = sample_temperature, dim=-1)
if not exists(labels):
return pred
if not self.binned_output:
return F.mse_loss(pred, labels)
return F.cross_entropy(pred, labels) | zetascale | /zetascale-0.4.4.tar.gz/zetascale-0.4.4/zeta/nn/modules/reward_model.py | reward_model.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
from .xmoe.global_groups import get_moe_group
class set_torch_seed(object):
def __init__(self, seed):
assert isinstance(seed, int)
self.rng_state = self.get_rng_state()
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
def get_rng_state(self):
state = {"torch_rng_state": torch.get_rng_state()}
if torch.cuda.is_available():
state["cuda_rng_state"] = torch.cuda.get_rng_state()
return state
def set_rng_state(self, state):
torch.set_rng_state(state["torch_rng_state"])
if torch.cuda.is_available():
torch.cuda.set_rng_state(state["cuda_rng_state"])
def __enter__(self):
return self
def __exit__(self, *exc):
self.set_rng_state(self.rng_state)
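# Illustrative usage of set_torch_seed (a sketch, not part of the original
# file): seed expert initialization deterministically for this rank, then
# restore the previous RNG state on exit.
#
#     with set_torch_seed(start_seed + ddp_rank):
#         expert = FeedForwardNetwork(512, 2048, "gelu", 0.1, 0.0, 1e-5)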
def make_experts(args, embed_dim, expert_ffn_dim):
world_size = (
1
if not torch.distributed.is_initialized()
else torch.distributed.get_world_size()
)
expert_list = []
ddp_rank = args.ddp_rank
start_seed = torch.randint(1000000, (1,)).item()
    # at least as many experts as GPUs
if args.moe_expert_count >= world_size:
assert (
args.moe_expert_count % world_size == 0
), f"{args.moe_expert_count}, {world_size}"
local_moe_expert_count = args.moe_expert_count // world_size
for i in range(local_moe_expert_count):
with set_torch_seed(start_seed + ddp_rank * local_moe_expert_count + i):
expert_list.append(
FeedForwardNetwork(
embed_dim,
expert_ffn_dim,
args.activation_fn,
args.dropout,
args.activation_dropout,
args.layernorm_eps,
args.subln,
)
)
else:
assert (
world_size % args.moe_expert_count == 0
), f"{world_size}, {args.moe_expert_count}"
moe_idx, _ = get_moe_group(args.moe_expert_count)
with set_torch_seed(start_seed + moe_idx):
expert_list.append(
FeedForwardNetwork(
embed_dim,
expert_ffn_dim,
args.activation_fn,
args.dropout,
args.activation_dropout,
args.layernorm_eps,
args.subln,
)
)
experts = nn.ModuleList(expert_list)
return experts
def get_activation_fn(activation):
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
else:
raise NotImplementedError
class FeedForwardNetwork(nn.Module):
def __init__(
self,
embed_dim,
ffn_dim,
activation_fn,
dropout,
activation_dropout,
layernorm_eps,
subln=False,
):
super().__init__()
self.embed_dim = embed_dim
self.activation_fn = get_activation_fn(activation=str(activation_fn))
self.activation_dropout_module = torch.nn.Dropout(activation_dropout)
self.dropout_module = torch.nn.Dropout(dropout)
self.fc1 = nn.Linear(self.embed_dim, ffn_dim)
self.fc2 = nn.Linear(ffn_dim, self.embed_dim)
self.ffn_layernorm = LayerNorm(ffn_dim, eps=layernorm_eps) if subln else None
def reset_parameters(self):
self.fc1.reset_parameters()
self.fc2.reset_parameters()
if self.ffn_layernorm is not None:
self.ffn_layernorm.reset_parameters()
def forward(self, x):
x_shape = x.shape
x = x.reshape(-1, x.size(-1))
x = self.fc1(x)
x = self.activation_fn(x.float()).type_as(x)
x = self.activation_dropout_module(x)
if self.ffn_layernorm is not None:
x = self.ffn_layernorm(x)
x = self.fc2(x)
x = x.view(x_shape)
x = self.dropout_module(x)
return x | zetascale | /zetascale-0.4.4.tar.gz/zetascale-0.4.4/zeta/nn/modules/feedforward_network.py | feedforward_network.py |
import torch.distributed as dist
def _find_my_group_index(grouped_ranks):
my_rank = dist.get_rank()
for i, group in enumerate(grouped_ranks):
if my_rank in group:
return i
raise RuntimeError
def get_moe_group(moe_expert_count=None):
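    """Return ``(group_index, process_group)`` for this rank's MoE group.

    When there are at least as many experts as ranks, every rank forms its
    own group; otherwise each group collects the ranks that host the same
    expert (spaced ``moe_expert_count`` apart). Groups are created once and
    cached on the function object.
    """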
if dist.is_initialized():
if not hasattr(get_moe_group, "_moe_groups"):
world_size = dist.get_world_size()
if world_size <= moe_expert_count:
assert moe_expert_count % world_size == 0
moe_groups = [[i] for i in range(world_size)]
else:
assert world_size % moe_expert_count == 0
ranks_per_group = world_size // moe_expert_count
moe_groups = [
[i + j * moe_expert_count for j in range(ranks_per_group)]
for i in range(moe_expert_count)
]
get_moe_group._moe_expert_count = moe_expert_count
get_moe_group._moe_group_idx = moe_groups
get_moe_group._moe_groups = [dist.new_group(g) for g in moe_groups]
my_group_idx = _find_my_group_index(get_moe_group._moe_group_idx)
return my_group_idx, get_moe_group._moe_groups[my_group_idx]
def get_all2all_group(moe_expert_count):
if dist.is_initialized():
if not hasattr(get_all2all_group, "_all2all_groups"):
world_size = dist.get_world_size()
# more experts than world size
if world_size <= moe_expert_count:
assert moe_expert_count % world_size == 0
all2all_groups = [[i for i in range(world_size)]]
# larger world than num experts
else:
assert world_size % moe_expert_count == 0
ranks_per_group = world_size // moe_expert_count
all2all_groups = [
[i * moe_expert_count + j for j in range(moe_expert_count)]
for i in range(ranks_per_group)
]
get_all2all_group._all2all_group_idx = all2all_groups
get_all2all_group._all2all_groups = [
dist.new_group(g) for g in all2all_groups
]
my_group_idx = _find_my_group_index(get_all2all_group._all2all_group_idx)
return get_all2all_group._all2all_groups[my_group_idx] | zetascale | /zetascale-0.4.4.tar.gz/zetascale-0.4.4/zeta/nn/modules/xmoe/global_groups.py | global_groups.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf
# Code is inspired by Top2GatingOnLogits from lingvo:
# https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477
# NOTE: This is a mirror of the code in
# https://github.com/facebookresearch/fairscale/tree/master/fairscale/nn/moe
import math
from typing import Callable, Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
from .moe_layer import fused_cumsum_sub_one, has_tutel
# use a fixed temperature to compute balance loss
TEMPERATURE_FOR_L_UAX = 0.07
# maximum capacity of 1 expert as a fraction of number of tokens in the batch
# Note: setting this to 1.0 causes inference to significantly slow down
EVAL_CAPACITY_TOKEN_FRACTION = 0.25
# logging
SAMPLE_FRACTION = 0.2
def top1gating(
logits: torch.Tensor,
input_mask: Optional[torch.Tensor] = None,
use_fp32=False,
capacity_factor=1.0,
eval_mode=False,
moe_eval_capacity_token_fraction=EVAL_CAPACITY_TOKEN_FRACTION,
use_xmoe=False,
gate_obj=None,
) -> Tuple[Tensor, Tensor, Tensor, Dict]:
"""Implements Top2Gating on logits."""
metadata = {}
if use_fp32:
orig_dtype = logits.dtype
logits = logits.float()
gates = F.softmax(logits, dim=1)
metadata["entropy_gating"] = entropy(probs=gates).mean().detach()
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
if moe_eval_capacity_token_fraction > 0.0 and eval_mode:
capacity = math.ceil(moe_eval_capacity_token_fraction * num_tokens)
else:
# capacity = capacity_factor * S/E
capacity = int(capacity_factor * math.ceil(num_tokens / num_experts))
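    # worked example (non-tutel path): num_tokens=512, num_experts=8 and
    # capacity_factor=1.0 give capacity=64, so each expert accepts at most 64
    # tokens; overflow tokens are dropped by the torch.lt(locations1, capacity)
    # mask further down.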
    # Create a mask for each token's top-1 expert
indices1_s = torch.argmax(gates, dim=1)
mask1 = one_hot(indices1_s, num_classes=num_experts, unsqueeze_indices=True)
if input_mask is not None and input_mask.any():
nonpadding = ~input_mask
mask1 = mask1 * nonpadding.unsqueeze(-1).to(mask1.dtype)
# for logging (percent of tokens routed to each expert)
expert1_hist = (
100
* torch.histc(
(indices1_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts
)
/ num_tokens
)
metadata["unused_expert1_count"] = (expert1_hist == 0).sum()
expert1_hist = (
torch.sort(expert1_hist, dim=0, descending=True).values
+ torch.finfo(torch.float32).tiny
)
sample_count = max(math.ceil(num_experts * SAMPLE_FRACTION), 1)
metadata["expert1_balance_top"] = expert1_hist[:sample_count].sum()
metadata["expert1_balance_bottom"] = expert1_hist[-sample_count:].sum()
gates1_s = (gates * mask1).sum(dim=1)
# Compute locations in capacity buffer
locations1 = fused_cumsum_sub_one(mask1)
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.to(gates.dtype), dim=0)
l_aux = torch.mean(me * ce)
l_aux = l_aux * num_experts * num_experts
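    # me is the mean router probability per expert and ce the fraction of
    # tokens actually dispatched to each expert; minimizing their dot product
    # encourages balanced routing (the GShard-style load-balancing loss).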
if has_tutel:
locations1_s = torch.sum(locations1 * mask1, dim=1)
return (
l_aux,
metadata,
capacity,
num_experts,
[
indices1_s,
],
[
locations1_s,
],
[
gates1_s,
],
)
# Remove locations outside capacity from mask
mask1 = mask1 * torch.lt(locations1, capacity)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
# Calculate combine_weights and dispatch_mask
gates1 = gates1_s.unsqueeze(-1) * mask1.to(gates1_s.dtype) # einsum("s,se->se")
# locations1_sc = num_tokens * capacity
locations1_sc = one_hot(locations1_s, num_classes=capacity, unsqueeze_indices=True)
combine1_sec = torch.bmm(
# einsum("se,sc->sec")
gates1.unsqueeze(-1),
locations1_sc.to(gates1.dtype).unsqueeze(1),
)
dispatch_mask = combine1_sec.bool()
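    # combine1_sec has shape (num_tokens, num_experts, capacity): per-token
    # weights for scattering inputs into expert buffers and recombining expert
    # outputs; dispatch_mask is its boolean routing counterpart.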
if use_fp32:
return l_aux, combine1_sec.to(orig_dtype), dispatch_mask, metadata
else:
return l_aux, combine1_sec, dispatch_mask, metadata
class Top1Gate(torch.nn.Module):
"""Gate module which implements Top2Gating as described in Gshard_.
::
        gate = Top1Gate(model_dim, num_experts)
l_aux, combine_weights, dispatch_mask = gate(input)
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
model_dim (int):
size of model embedding dimension
        num_experts (int):
number of experts in model
"""
wg: torch.nn.Linear
def __init__(
self,
model_dim: int,
num_experts: int,
use_fp32=False,
input_noise_type=None,
capacity_factor=1.0,
moe_eval_capacity_token_fraction=EVAL_CAPACITY_TOKEN_FRACTION,
use_xmoe=False,
) -> None:
        # TODO: merge this into top2gate.py
super().__init__()
if not use_xmoe:
self.wg = torch.nn.Linear(model_dim, num_experts, bias=False)
else:
self.wg_reduction = torch.nn.Linear(model_dim, 16, bias=False)
wg = torch.empty(num_experts, 16)
torch.nn.init.orthogonal_(wg, gain=0.32)
self.register_parameter("wg", torch.nn.Parameter(wg))
self.use_xmoe = use_xmoe
self.use_fp32 = use_fp32
self.input_noise_type = input_noise_type
self.capacity_factor = capacity_factor
self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
def forward(self, input, mask=None): # type: ignore
if self.use_xmoe:
input = self.wg_reduction(input)
with torch.no_grad():
wg_norm = self.wg.norm(p=2.0, dim=1, keepdim=True)
self.wg.mul_(1.5 / wg_norm)
logits = self._cosine(input, self.wg)
logits = self._make_finite(logits)
else:
logits = self.wg(input)
return top1gating(
logits,
mask,
use_fp32=self.use_fp32,
capacity_factor=self.capacity_factor,
eval_mode=not self.training,
moe_eval_capacity_token_fraction=self.moe_eval_capacity_token_fraction,
use_xmoe=self.use_xmoe,
gate_obj=self,
)
def _make_finite(self, scores):
ok = scores.isfinite()
if not ok.all():
# NaNs here can break the assignment algorithm
scores[~ok] = scores[ok].min()
return scores
def _get_gating_temperature(self, eps=1e-4):
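        # NOTE: assumes a learnable `gating_t` temperature parameter has been
        # registered on the module elsewhere; this helper fails without it.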
if self.gating_t.data.item() < eps:
return eps
return self.gating_t
def _cosine(self, mat1, mat2, eps=1e-4):
assert mat1.dim() == 2
assert mat2.dim() == 2
# mat1 = F.normalize(mat1, p=2.0, dim=1, eps=eps)
mat2 = F.normalize(mat2.float(), p=2.0, dim=1, eps=eps)
return mat1.float().matmul(mat2.transpose(0, 1)).type_as(mat1)
gumbel_map: Dict[torch.device, Callable] = {}
def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor:
gumbel = gumbel_map.get(device)
if gumbel is None:
one = torch.tensor(1.0, device=device)
zero = torch.tensor(0.0, device=device)
gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore
gumbel_map[device] = gumbel
return gumbel(shape)
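# e.g. gumbel_rsample((4, 8), torch.device("cpu")) draws reparameterized
# Gumbel(0, 1) noise of shape (4, 8), caching the sampler per device.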
def one_hot(indices: torch.Tensor, num_classes: int, unsqueeze_indices=False) -> Tensor:
if unsqueeze_indices:
indices = indices.unsqueeze(-1)
    assert indices.shape[-1] == 1, "last dimension of indices must have size 1"
output = torch.zeros(
indices.shape[:-1] + (num_classes,), device=indices.device, dtype=indices.dtype
)
output.scatter_(len(output.shape) - 1, indices, 1)
return output
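# e.g. one_hot(torch.tensor([2, 0]), num_classes=3, unsqueeze_indices=True)
#      -> tensor([[0, 0, 1], [1, 0, 0]])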
def entropy(probs):
logits = torch.distributions.utils.probs_to_logits(probs)
p_log_p = probs * logits
return -p_log_p.sum(-1)
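# e.g. entropy(torch.tensor([0.5, 0.5])) -> ln(2) ≈ 0.6931 (uniform gates
# maximize gating entropy)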
def top2gating(
logits: torch.Tensor,
input_mask: Optional[torch.Tensor] = None,
use_fp32=False,
second_expert_policy="sampling",
normalize_gate_prob_before_dropping=False,
eval_mode=False,
moe_eval_capacity_token_fraction=0.25,
batch_prioritized_routing=False,
) -> Tuple[Tensor, Tensor, Tensor, Dict]:
"""Implements Top2Gating on logits."""
metadata = {}
if use_fp32:
orig_dtype = logits.dtype
logits = logits.float()
gates = F.softmax(logits, dim=1)
metadata["entropy_gating"] = entropy(probs=gates).mean().detach()
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
if moe_eval_capacity_token_fraction > 0.0 and eval_mode:
capacity = math.ceil(moe_eval_capacity_token_fraction * num_tokens)
else:
# capacity = 2S/E
capacity = 2 * math.ceil(num_tokens / num_experts)
    # Create a mask for the top-1 expert per token
indices1_s = torch.argmax(gates, dim=1, keepdim=True)
mask1 = one_hot(indices1_s, num_experts)
if second_expert_policy == "sampling":
        # Create a mask for the 2nd expert per token using the Gumbel-max trick
# https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
else:
logits_w_noise = logits
    # Exclude the top-1 expert by masking its logit to -inf
logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float("-inf"))
indices2_s = torch.argmax(logits_except1, dim=1, keepdim=True)
mask2 = one_hot(indices2_s, num_experts)
gates1_s = (gates * mask1).sum(dim=1)
gates2_s = (gates * mask2).sum(dim=1)
if normalize_gate_prob_before_dropping:
# Normalize gate probabilities
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s = gates1_s / denom_s
gates2_s = gates2_s / denom_s
if second_expert_policy == "random":
sampled = (2 * gates2_s) > torch.rand_like(gates2_s)
mask2 = mask2 * sampled.repeat(num_experts, 1).transpose(1, 0)
# Compute locations in capacity buffer
if input_mask is not None and input_mask.any():
nonpadding = ~input_mask
mask1 = mask1 * nonpadding.unsqueeze(-1).to(mask1.dtype)
mask2 = mask2 * nonpadding.unsqueeze(-1).to(mask1.dtype)
if batch_prioritized_routing:
importance_scores = -1 * gates.max(dim=1)[0]
sorted_mask1 = mask1[importance_scores.argsort(dim=0)]
sorted_cumsum1 = fused_cumsum_sub_one(sorted_mask1) * sorted_mask1
importance_sorted_locations1 = sorted_cumsum1[
importance_scores.argsort(dim=0).argsort(dim=0)
]
sorted_mask2 = mask2[importance_scores.argsort(dim=0)]
sorted_cumsum2 = fused_cumsum_sub_one(sorted_mask2) * sorted_mask2
importance_sorted_locations2 = sorted_cumsum2[
importance_scores.argsort(dim=0).argsort(dim=0)
]
importance_sorted_locations2 += torch.sum(mask1, dim=0, keepdim=True)
locations1, locations2 = (
importance_sorted_locations1,
importance_sorted_locations2,
)
else:
locations1 = fused_cumsum_sub_one(mask1)
locations2 = fused_cumsum_sub_one(mask2)
# Update 2nd's location by accounting for locations of 1st
locations2 += torch.sum(mask1, dim=0, keepdim=True)
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.to(gates.dtype), dim=0)
l_aux = torch.mean(me * ce)
l_aux = l_aux * num_experts * num_experts
# for logging purposes
metadata["overflow_expert1"] = (
100 * torch.sum(mask1 * torch.ge(locations1, capacity)) / torch.sum(mask1)
)
metadata["overflow_expert2"] = (
100 * torch.sum(mask2 * torch.ge(locations2, capacity)) / torch.sum(mask2)
)
# Remove locations outside capacity from mask
mask1_, mask2_ = mask1, mask2
mask1 = mask1 * torch.lt(locations1, capacity)
mask2 = mask2 * torch.lt(locations2, capacity)
# for logging (percent of tokens routed to each expert)
expert1_hist = (
100
* torch.histc(
(indices1_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts
)
/ num_tokens
)
metadata["unused_expert1_count"] = (expert1_hist == 0).sum()
expert1_hist = (
torch.sort(expert1_hist, dim=0, descending=True).values
+ torch.finfo(torch.float32).tiny
)
expert2_hist = (
100
* torch.histc(
(indices2_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts
)
/ num_tokens
)
metadata["unused_expert2_count"] = (expert2_hist == 0).sum()
expert2_hist = (
torch.sort(expert2_hist, dim=0, descending=True).values
+ torch.finfo(torch.float32).tiny
)
sample_count = max(math.ceil(num_experts * SAMPLE_FRACTION), 1)
metadata["expert1_balance_top"] = expert1_hist[:sample_count].sum()
metadata["expert1_balance_bottom"] = expert1_hist[-sample_count:].sum()
metadata["expert2_balance_top"] = expert2_hist[:sample_count].sum()
metadata["expert2_balance_bottom"] = expert2_hist[-sample_count:].sum()
if not normalize_gate_prob_before_dropping:
# Normalize gate probabilities
gates1_s = (gates * mask1).sum(dim=1)
gates2_s = (gates * mask2).sum(dim=1)
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s /= denom_s
gates2_s /= denom_s
if has_tutel:
locations1_s = torch.sum(locations1 * mask1_, dim=1)
locations2_s = torch.sum(locations2 * mask2_, dim=1)
return (
l_aux,
metadata,
capacity,
num_experts,
[indices1_s, indices2_s],
[locations1_s, locations2_s],
[gates1_s, gates2_s],
)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
locations2_s = torch.sum(locations2 * mask2, dim=1)
# Calculate combine_weights and dispatch_mask
gates1 = gates1_s.unsqueeze(-1) * mask1.to(gates1_s.dtype) # einsum("s,se->se")
gates2 = gates2_s.unsqueeze(-1) * mask2.to(gates2_s.dtype) # einsum("s,se->se")
locations1_sc = one_hot(locations1_s, num_classes=capacity, unsqueeze_indices=True)
locations2_sc = one_hot(locations2_s, num_classes=capacity, unsqueeze_indices=True)
combine1_sec = torch.bmm(
# einsum("se,sc->sec")
gates1.unsqueeze(-1),
locations1_sc.to(gates1.dtype).unsqueeze(1),
)
combine2_sec = torch.bmm(
# einsum("se,sc->sec")
gates2.unsqueeze(-1),
locations2_sc.to(gates2.dtype).unsqueeze(1),
)
combine_weights = combine1_sec + combine2_sec
dispatch_mask = combine_weights.bool()
if use_fp32:
return l_aux, combine_weights.to(orig_dtype), dispatch_mask, metadata
else:
return l_aux, combine_weights, dispatch_mask, metadata
class Top2Gate(torch.nn.Module):
"""Gate module which implements Top2Gating as described in Gshard_.
::
gate = Top2Gate(model_dim, num_experts)
        l_aux, combine_weights, dispatch_mask, metadata = gate(input)
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
model_dim (int):
size of model embedding dimension
        num_experts (int):
number of experts in model
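
    Example (illustrative sketch; assumes tutel is not installed, so the
    gate returns a dense 4-tuple)::

        gate = Top2Gate(model_dim=16, num_experts=4)
        tokens = torch.randn(8, 16)  # (num_tokens, model_dim)
        l_aux, combine_weights, dispatch_mask, metadata = gate(tokens)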
"""
wg: torch.nn.Linear
def __init__(
self,
model_dim: int,
num_experts: int,
use_fp32=False,
second_expert_policy="sampling",
normalize_gate_prob_before_dropping=False,
moe_eval_capacity_token_fraction=0.25,
batch_prioritized_routing=False,
use_xmoe=False,
) -> None:
super().__init__()
if not use_xmoe:
self.wg = torch.nn.Linear(model_dim, num_experts, bias=False)
else:
self.wg_reduction = torch.nn.Linear(model_dim, 16, bias=False)
wg = torch.empty(num_experts, 16)
torch.nn.init.orthogonal_(wg, gain=0.32)
self.register_parameter("wg", torch.nn.Parameter(wg))
self.use_fp32 = use_fp32
self.second_expert_policy = second_expert_policy
self.normalize_gate_prob_before_dropping = normalize_gate_prob_before_dropping
self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
self.batch_prioritized_routing = batch_prioritized_routing
self.use_xmoe = use_xmoe
def forward(self, input, mask=None): # type: ignore
if self.use_xmoe:
input = self.wg_reduction(input)
with torch.no_grad():
wg_norm = self.wg.norm(p=2.0, dim=1, keepdim=True)
self.wg.mul_(1.5 / wg_norm)
logits = self._cosine(input, self.wg)
logits = self._make_finite(logits)
else:
logits = self.wg(input)
return top2gating(
logits,
mask,
use_fp32=self.use_fp32,
second_expert_policy=self.second_expert_policy,
normalize_gate_prob_before_dropping=self.normalize_gate_prob_before_dropping,
eval_mode=not self.training,
moe_eval_capacity_token_fraction=self.moe_eval_capacity_token_fraction,
batch_prioritized_routing=self.batch_prioritized_routing,
)
def _cosine(self, mat1, mat2, eps=1e-4):
assert mat1.dim() == 2
assert mat2.dim() == 2
# mat1 = F.normalize(mat1, p=2.0, dim=1, eps=eps)
mat2 = F.normalize(mat2.float(), p=2.0, dim=1, eps=eps)
return mat1.float().matmul(mat2.transpose(0, 1)).type_as(mat1)
def _make_finite(self, scores):
ok = scores.isfinite()
if not ok.all():
# NaNs here can break the assignment algorithm
scores[~ok] = scores[ok].min()
        return scores

# --- end of zeta/nn/modules/xmoe/routing.py ---
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# NOTE: This is a mirror of the code in
# https://github.com/facebookresearch/fairscale/tree/master/fairscale/nn/moe
import logging
import time
from typing import Any, Tuple, cast
import torch
import torch.distributed as dist
from torch import Tensor
from torch.nn import Module, ModuleList
from .global_groups import get_all2all_group, get_moe_group
try:
from fairseq.modules.moe import MOELayer
has_fairseq = True
Base = MOELayer
except ModuleNotFoundError:
Base = Module
has_fairseq = False
try:
# To enable Tutel MoE optimizations:
# python3 -m pip install --user --upgrade git+https://github.com/Agora/[email protected]
from tutel import moe as tutel_moe
has_tutel, fused_cumsum_sub_one = True, tutel_moe.fast_cumsum_sub_one
except ModuleNotFoundError:
has_tutel, fused_cumsum_sub_one = False, lambda mask: torch.cumsum(mask, dim=0) - 1
logger = logging.getLogger(__name__)
# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity
# See https://arxiv.org/pdf/2006.16668.pdf for details.
# Based on https://github.com/pytorch/pytorch/pull/40762
class _AllToAll(torch.autograd.Function):
@staticmethod
def forward(ctx: Any, group: dist.ProcessGroup, input: Tensor) -> Tensor: # type: ignore
ctx.group = group
input = input.contiguous()
output = torch.empty_like(input)
if torch.distributed.is_initialized():
dist.all_to_all_single(output, input, group=group)
else:
assert group is None
output = input
return output
@staticmethod
def backward(ctx: Any, *grad_output: Tensor) -> Tuple[None, Tensor]:
return (None, _AllToAll.apply(ctx.group, *grad_output))
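# Usage sketch (illustrative): outside an initialized torch.distributed run,
# the autograd function falls back to an identity copy:
#
#   y = _AllToAll.apply(None, torch.randn(4, 8))  # returns the input unchanged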
class MOELayer(Base):
"""MOELayer module which implements MixtureOfExperts as described in Gshard_.
::
gate = Top2Gate(model_dim, num_experts)
        moe = MOELayer(gate, expert, args)
        output, l_aux = moe(input)
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
gate (torch.nn.Module):
gate network
expert (torch.nn.Module):
expert network
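
    Note:
        Constructing this layer queries distributed process groups, so the
        example above assumes ``torch.distributed`` has been initialized
        (e.g. via ``dist.init_process_group``) and that ``args`` carries the
        fields used below (``moe_expert_count``, ``batch_size``, ...).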
"""
def __init__(self, gate, experts, args):
if has_fairseq:
super(Base, self).__init__()
else:
super().__init__()
self.gate = gate
        if isinstance(experts, ModuleList):
self.experts = cast(ModuleList, experts)
else:
self.experts = ModuleList([experts])
_, self.expert_group = get_moe_group(args.moe_expert_count)
self.all2all_group = get_all2all_group(args.moe_expert_count)
self.world_size = dist.get_world_size(group=self.expert_group)
self.all2all_size = dist.get_world_size(group=self.all2all_group)
for p in experts.parameters():
p.expert = True # type: ignore
self.num_local_experts = len(self.experts)
self.args = args
self.in_generation = False
self.a2a_cuda_event_intervals = []
self.a2a_cpu_time_ms = 0.0
def forward(self, *input: Tensor, input_padding_mask=None, **kwargs: Any) -> Tensor:
assert len(input) == 1, "only single input Tensor supported"
input = input[0]
assert (
len(input.shape) == 3
), "input Tensor must have dimensions: (s)equence, (t)oken, (m)odel"
if input_padding_mask is not None:
assert (
len(input_padding_mask.shape) == 2
), "input Tensor must have dimensions: (s)equence, (t)oken"
assert input_padding_mask.shape[0] == input.shape[0]
assert input_padding_mask.shape[1] == input.shape[1]
# assert input.shape[0] % len(self.experts) == 0, "num tokens must be order of number of local experts"
# Implement Algorithm 2 from GShard paper.
d_model = input.shape[2]
# Pad to expected batch size
input_shape = list(input.shape)
expected_bsz = (
getattr(self.args, "batch_size", 0)
if self.training
else getattr(self.args, "batch_size_valid", 0)
)
# This indicates that --batch-size or --max-sentences is not specified
if expected_bsz is None:
expected_bsz = 0
# Note: Padding is not necessary at generation time at present
# because all DDP workers process the same batch. Also, batch size at generation time
# can be different from that present in the checkpoint state
if (
not self.in_generation
and expected_bsz != 0
and input_shape[0] != expected_bsz
):
logger.warning(
f"padding batch with unexpected size {input_shape[0]} (expected: {expected_bsz})"
)
assert input_shape[0] < expected_bsz, f"{input_shape[0]} < {expected_bsz}"
padded_input = torch.zeros(
(expected_bsz, input_shape[1], input_shape[2]),
dtype=input.dtype,
layout=input.layout,
device=input.device,
)
padded_input[: input_shape[0], :, :] = input
input = padded_input
padded_input_padding_mask = torch.ones(
(
expected_bsz,
input_shape[1],
),
dtype=torch.bool,
device=input.device,
)
if input_padding_mask is not None:
padded_input_padding_mask[: input_shape[0], :] = input_padding_mask
else:
padded_input_padding_mask[: input_shape[0], :] = False
input_padding_mask = padded_input_padding_mask
# Reshape into S tokens by dropping sequence dimension.
reshaped_input = input.reshape(-1, d_model)
reshaped_input_shape = reshaped_input.shape
reshaped_input_padding_mask = (
input_padding_mask.reshape(-1) if input_padding_mask is not None else None
)
# Doing padding here when --max-tokens is specified and not --batch-size or --max-sentences
# Pro of --max-tokens: more flexible for MT variable sequence lengths
# Con of --max-tokens: extra all-reduce needed to figure out optimal padding without running OOM
if expected_bsz == 0:
expected_dim = reshaped_input_shape[0] * torch.ones(
(1,), dtype=torch.long, device=input.device
)
dist.all_reduce(expected_dim, group=dist.group.WORLD, op=dist.ReduceOp.MAX)
expected_dim = int(expected_dim.item())
padded_input = torch.zeros(
(expected_dim, reshaped_input_shape[1]),
dtype=input.dtype,
layout=input.layout,
device=input.device,
)
padded_input[: reshaped_input_shape[0], :] = reshaped_input
reshaped_input = padded_input
padded_input_padding_mask = torch.ones(
(expected_dim,), dtype=torch.bool, device=padded_input.device
)
if reshaped_input_padding_mask is not None:
padded_input_padding_mask[
: reshaped_input_shape[0]
] = reshaped_input_padding_mask
else:
padded_input_padding_mask[: reshaped_input_shape[0]] = False
reshaped_input_padding_mask = padded_input_padding_mask
if has_tutel:
l_aux, self.metadata, C, E, indices_, locations_, gates_ = self.gate(
reshaped_input, reshaped_input_padding_mask
)
S, M = reshaped_input.size(0), reshaped_input.size(1)
if not hasattr(self, "_tutel_dispatcher"):
self._tutel_dispatcher = tutel_moe.fast_dispatcher(
E, C, M, dispatch_dtype=reshaped_input.dtype
)
self._tutel_dispatcher.update(indices_, locations_, gates_, capacity=C)
dispatched_input = self._tutel_dispatcher.encode(reshaped_input)
else:
l_aux, combine_weights, dispatch_mask, self.metadata = self.gate(
reshaped_input, reshaped_input_padding_mask
)
dispatch_mask = dispatch_mask.to(input.dtype).permute(
1, 2, 0
) # S,E,C -> E,C,S
E, C, S = dispatch_mask.size()
M = reshaped_input.size(1)
assert reshaped_input.size() == (S, M)
# einsum("sec,sm->ecm")
dispatched_input = torch.mm(
dispatch_mask.view(E * C, S), reshaped_input
) # -> (E*C),M
if self.all2all_size > 1:
dispatched_input = self.all_to_all_wrapper(dispatched_input)
# Re-shape after all-to-all: ecm -> gecm
dispatched_input = dispatched_input.reshape(
self.all2all_size, self.num_local_experts, -1, d_model
)
chunks = dispatched_input.chunk(self.num_local_experts, dim=1)
expert_outputs = []
for chunk, expert in zip(chunks, self.experts):
expert_outputs += [expert(chunk)]
expert_output = torch.cat(expert_outputs, dim=1)
if self.all2all_size > 1:
expert_output = self.all_to_all_wrapper(expert_output)
# Re-shape back: gecm -> ecm
expert_output = expert_output.reshape(
self.all2all_size * self.num_local_experts, -1, d_model
)
if has_tutel:
combined_output = self._tutel_dispatcher.decode(
expert_output.view(E * C, M)
)
else:
# einsum("sec,ecm->sm")
combined_output = combine_weights.view(S, E * C).mm(
expert_output.view(E * C, M)
)
# Remove padding here when --max-tokens is specified and not --batch-size or --max-sentences
combined_output = combined_output[: reshaped_input_shape[0], :]
combined_output = combined_output.reshape(input.shape)
combined_output = combined_output[: input_shape[0], :, :]
self.record_all_to_all_stats()
return combined_output, l_aux
def prepare_for_inference_(self):
self.in_generation = True
def all_to_all_wrapper(self, input: Tensor):
dummy_a2a = getattr(self.args, "dummy_a2a", False)
if dummy_a2a:
input = input.contiguous()
output = input.detach().clone()
return input
# always record times, since it is not a lot of overhead
# if we do not log it we simply clear it off in record_all_to_all_stats
cuda_start = torch.cuda.Event(enable_timing=True)
cuda_end = torch.cuda.Event(enable_timing=True)
cpu_start = time.time() * 1000
cuda_start.record()
output = _AllToAll.apply(self.all2all_group, input)
cuda_end.record()
cpu_end = time.time() * 1000
self.a2a_cpu_time_ms += cpu_end - cpu_start
self.a2a_cuda_event_intervals.append((cuda_start, cuda_end))
return output
def record_all_to_all_stats(self):
# controlled via an argument as we want to minimize any impact from torch.cuda.synchronize()
record_a2a_perf_stats = getattr(self.args, "record_a2a_perf_stats", False)
if record_a2a_perf_stats:
torch.cuda.synchronize()
self.metadata["all_to_all_cpu_time_ms"] = self.a2a_cpu_time_ms
a2a_cuda_time_ms = 0.0
for ev_start, ev_end in self.a2a_cuda_event_intervals:
a2a_cuda_time_ms += ev_start.elapsed_time(ev_end)
self.metadata["all_to_all_cuda_time_ms"] = a2a_cuda_time_ms
# reset stats
self.a2a_cpu_time_ms = 0.0
        self.a2a_cuda_event_intervals = []

# --- end of zeta/nn/modules/xmoe/moe_layer.py ---
from typing import Optional, Sequence, Tuple, Union
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import Tensor, nn
from zeta.nn.attention.flash_attention import FlashAttention
from zeta.nn.biases.relative_position_bias import RelativePositionBias
from zeta.nn.embeddings.xpos_relative_position import XPOS
from zeta.nn.attention.base import BaseAttention
device = "cuda:0"
dtype = torch.float16
class ParallelWrapper:
"""
A simple wrapper to enable easy usage of data parallelism.
Arguments:
model: The neural network model to be parallelized.
device (optional): The device to which the model should be moved. Default: "cuda".
use_data_parallel (optional): A boolean flag to indicate whether to use data parallelism or not. Default: True.
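
    Example (illustrative sketch; assumes a CUDA device is available)::

        net = ParallelWrapper(nn.Linear(8, 8))
        out = net(torch.randn(2, 8, device="cuda"))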
"""
def __init__(
self,
model,
device="cuda",
use_data_parallel=True
):
self.model = model.to(device)
self.use_data_parallel = use_data_parallel
self.device = device
        # wrap in DataParallel only when more than one GPU is available
        if self.use_data_parallel and torch.cuda.device_count() > 1:
            print(f"Using {torch.cuda.device_count()} GPUs")
            self.model = nn.DataParallel(self.model)
def forward(self, *args, **kwargs):
return self.model(*args, **kwargs)
def to(self, device):
self.device = device
self.model= self.model.to(device)
return self
def __getattr__(self, name):
        # redirect attribute access to the internal model to allow direct
        # access to its methods and properties
return getattr(self.model, name)
# TODO: add alibi, qk layer norm, one write head, multiway
class DilatedAttention(BaseAttention):
"""
Dilated Attention Module.
Arguments:
d_model: The dimension of the attention layers.
num_heads: The number of attention heads.
dilation_rate: The dilation rate for dilated attention.
segment_size: The segment size for dilated attention.
dropout (optional): The dropout probability. Default: 0.0
        casual (optional): If set to True, the attention mechanism is causal. Default: False
use_xpos (optional): If set to True, xpos is used for positional encoding. Default: False
use_rel_pos_bias (optional): If set to True, relative position bias is used in the attention mechanism. Default: False
Usage:
The `DilatedAttention` class can be used as a module for neural networks and is especially suited for transformer architectures.
Example:
attention = DilatedAttention(d_model=512, num_heads=8, dilation_rate=2, segment_size=64, use_xpos=True, use_rel_pos_bias=True)
output = attention(input_tensor)
This will return the output tensor after applying dilated attention. The `use_xpos` and `use_rel_pos_bias` parameters allow for switching on positional encoding and relative positional bias respectively.
"""
def __init__(self,
d_model: int = None,
num_heads: int = None,
dilation_rate: int = None,
segment_size: int = None,
dropout: int = 0.0,
casual: bool = False,
use_xpos: bool = False,
use_rel_pos_bias: bool = False):
super(DilatedAttention, self).__init__()
self.d_model = d_model
self.num_heads = num_heads
self.dilation_rate = dilation_rate
self.segment_size = segment_size
self.dropout = nn.Dropout(dropout)
self.casual = casual
self.use_xpos = use_xpos
self.use_rel_pos_bias = use_rel_pos_bias
self.attention = FlashAttention(causal=self.casual, dropout=dropout).to(device)
if use_xpos:
self.xpos = XPOS(head_dim=d_model//num_heads)
if use_rel_pos_bias:
self.relative_bias = RelativePositionBias(num_buckets=32, max_distance=128, n_heads=num_heads)
#head offsets
self.head_offsets = nn.Parameter(torch.randn(num_heads, d_model))
def get_mask(self, i, j):
return torch.ones((i, j), device=device, dtype=torch.bool).triu(j - i + 2)
    def forward(self, x):
        batch_size, seq_len, _ = x.shape
        # pad the sequence length up to a multiple of the segment size
        padding_len = -seq_len % self.segment_size
        x = F.pad(x, (0, 0, 0, padding_len))
        seq_len = seq_len + padding_len
        if self.use_xpos:
            x = self.xpos(x)
        # Split into segments and sparsify by the dilation rate
        x = x.view(batch_size, -1, self.segment_size, self.d_model)
        x = x[:, :, :: self.dilation_rate, :]
        # Perform attention
        attn_output = self.attention(x, x, x)
        # if relative position bias is enabled, add it to the attention output
        if self.use_rel_pos_bias:
            attn_output += self.relative_bias(
                batch_size, attn_output.size(1), attn_output.size(1)
            )
        # if causal, build a mask and apply it to the output
        if self.casual:
            mask = self.get_mask(attn_output.size(1), attn_output.size(1))
            attn_output = attn_output.masked_fill(mask, float('-inf'))
        # apply dropout
        attn_output = self.dropout(attn_output)
        # Scatter and concatenate back to (batch, seq, d_model)
        attn_output = attn_output.reshape(batch_size, -1, self.d_model)
        return attn_output
class MultiheadDilatedAttention(nn.Module):
def __init__(
self,
embed_dim: int,
num_heads: int,
dilation_rates: Sequence[int],
segment_lengths: Sequence[int],
dropout: float = 0.0,
bias: bool = True,
layer_norm: bool = True,
layer_norm_eps: float = 1e-5,
gamma_init: float = 1.0,
device: Optional[Union[torch.device, str]] = None,
dtype: Optional[torch.dtype] = None,
):
super().__init__()
self.num_heads = num_heads
self.layer_norm = layer_norm
self.gamma_init = gamma_init
if not embed_dim % self.num_heads == 0:
raise ValueError(
f"embed_dim ({embed_dim}) must be divisible by "
f"num_heads ({num_heads})"
)
num_dilations = len(dilation_rates)
num_segments = len(segment_lengths)
if num_dilations != num_segments:
raise ValueError(
f"len(dilation_rates) ({num_dilations}) must be equal to "
f"len(segment_lengths) ({num_segments})"
)
head_dim = embed_dim // num_heads
if not head_dim % 8 == 0:
raise ValueError(
f"head_dim (embed_dim / num_heads = {head_dim}) must be divisible by 8"
)
if not head_dim <= 128:
raise ValueError(
f"head_dim (embed_dim / num_heads = {head_dim}) must be <= 128"
)
self.q_proj = nn.Linear(
embed_dim, embed_dim, bias=bias, device=device, dtype=dtype
)
self.k_proj = nn.Linear(
embed_dim, embed_dim, bias=bias, device=device, dtype=dtype
)
self.v_proj = nn.Linear(
embed_dim, embed_dim, bias=bias, device=device, dtype=dtype
)
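        # NOTE: the DilatedAttention class defined above takes
        # (d_model, num_heads, dilation_rate, segment_size) and a single input
        # tensor; the call below assumes a multi-rate variant that accepts
        # per-branch segment lengths / dilation rates and (q, k, v) inputs.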
self.attention = DilatedAttention(
segment_lengths=segment_lengths,
dilation_rates=dilation_rates,
dropout=dropout,
# op=op,
)
self.norm: Optional[nn.LayerNorm] = None
if layer_norm:
self.norm = nn.LayerNorm(
embed_dim, eps=layer_norm_eps, device=device, dtype=dtype
)
self.out_proj = nn.Linear(
embed_dim, embed_dim, bias=bias, device=device, dtype=dtype
)
self._reset_parameters()
def _reset_parameters(self):
nn.init.xavier_normal_(self.q_proj.weight)
if self.q_proj.bias is not None:
nn.init.constant_(self.q_proj.bias, 0)
nn.init.xavier_normal_(self.k_proj.weight)
if self.k_proj.bias is not None:
nn.init.constant_(self.k_proj.bias, 0)
# NOTE: We follow the initialization strategy from MAGNETO. See:
# https://arxiv.org/pdf/2210.06423.pdf, Fig. 2
# Gain (self.gamma_init) should be provided as a keyword argument when
# initializing the larger Transformer model, since it requires knowledge
# of the number of encoder/decoder layers in the model.
nn.init.xavier_normal_(self.v_proj.weight, gain=self.gamma_init)
if self.v_proj.bias is not None:
nn.init.constant_(self.v_proj.bias, 0)
nn.init.xavier_normal_(self.out_proj.weight, gain=self.gamma_init)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0)
def forward(
self, query: Tensor, key: Tensor, value: Tensor, is_causal: bool = False
) -> Tuple[Tensor, None]:
# Notation:
# b - batch size
# n - sequence length
# h - number of heads
# d - embedding dimension
#
# Input shape: (b, n, d)
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
# Unfold 'd' dimension into 'h' separate attention heads.
q = rearrange(q, "b n (h d) -> b n h d", h=self.num_heads)
k = rearrange(k, "b n (h d) -> b n h d", h=self.num_heads)
v = rearrange(v, "b n (h d) -> b n h d", h=self.num_heads)
# Apply attention, then fold 'h' attention heads back into 'd'.
x = self.attention(q, k, v, is_causal=is_causal)
x = rearrange(x, "b n h d -> b n (h d)")
if self.layer_norm:
assert self.norm is not None
x = self.norm(x)
# Linear projection on attention outputs.
x = self.out_proj(x)
        return x, None

# --- end of zeta/nn/attention/dilated_attention.py ---
import math
import torch
import torch.nn.functional as F
from torch import nn
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
from zeta.nn.attention.base import BaseAttention
from zeta.nn.embeddings.multiway_network import MultiwayWrapper
from zeta.nn.embeddings.xpos_relative_position import XPOS
class MultiheadAttention(BaseAttention):
def __init__(
self,
args,
        embed_dim: int = None,
        num_heads: int = None,
        dropout: float = 0.0,
        self_attention: bool = False,
        encoder_decoder_attention: bool = False,
        subln: bool = False,
):
super().__init__()
self.args = args
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.scaling = self.head_dim**-0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert self.self_attention ^ self.encoder_decoder_attention
self.k_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.v_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.q_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.out_proj = MultiwayWrapper(
args, nn.Linear(embed_dim, embed_dim, bias=True)
)
self.inner_attn_ln = (
MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))
if subln and self.self_attention
else None
)
self.dropout_module = torch.nn.Dropout(dropout)
self.xpos = (
XPOS(self.head_dim, args.xpos_scale_base)
if args.xpos_rel_pos and self.self_attention
else None
)
def reset_parameters(self):
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.out_proj.weight)
nn.init.constant_(self.out_proj.bias, 0.0)
def forward(
self,
query,
key,
value,
incremental_state=None,
key_padding_mask=None,
attn_mask=None,
rel_pos=None,
is_first_step=False,
):
bsz, tgt_len, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
key_bsz, src_len, _ = key.size()
assert key_bsz == bsz, f"{query.size(), key.size()}"
assert value is not None
        assert (bsz, src_len) == value.shape[:2]
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
q = q.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
k = k.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
v = v.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
q = q.reshape(bsz * self.num_heads, tgt_len, self.head_dim)
k = k.reshape(bsz * self.num_heads, src_len, self.head_dim)
v = v.reshape(bsz * self.num_heads, src_len, self.head_dim)
if incremental_state is not None:
if "prev_key" in incremental_state:
prev_key = incremental_state["prev_key"].view(
bsz * self.num_heads, -1, self.head_dim
)
prev_value = incremental_state["prev_value"].view(
bsz * self.num_heads, -1, self.head_dim
)
k = torch.cat([prev_key, k], dim=1)
v = torch.cat([prev_value, v], dim=1)
incremental_state["prev_key"] = k.view(
bsz, self.num_heads, -1, self.head_dim
)
incremental_state["prev_value"] = v.view(
bsz, self.num_heads, -1, self.head_dim
)
src_len = k.size(1)
if self.xpos is not None:
if incremental_state is not None and not is_first_step:
offset = src_len - 1
else:
offset = 0
k = self.xpos(k, offset=0, downscale=True)
q = self.xpos(q, offset=offset, downscale=False)
attn_weights = torch.bmm(q, k.transpose(1, 2))
if attn_mask is not None:
attn_weights = torch.nan_to_num(attn_weights)
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if rel_pos is not None:
rel_pos = rel_pos.view(attn_weights.size())
attn_weights = attn_weights + rel_pos
attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).type_as(
attn_weights
)
attn_probs = self.dropout_module(attn_weights)
attn = torch.bmm(attn_probs, v)
attn = attn.transpose(0, 1).reshape(tgt_len, bsz, embed_dim).transpose(0, 1)
if self.inner_attn_ln is not None:
attn = self.inner_attn_ln(attn)
attn = self.out_proj(attn)
attn_weights = attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
        return attn, attn_weights

# --- end of zeta/nn/attention/multihead_attention.py ---
import math
import torch
from einops import rearrange
from torch import einsum, nn
from torch.autograd.function import Function
from torch.cuda.amp import GradScaler, autocast
from torch.nn import DataParallel
from zeta.nn.attention.base import BaseAttention
# constants
EPSILON = 1e-10
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# flash attention forwards and backwards
# flash attention v1 - https://arxiv.org/abs/2205.14135
# flash attention v2 - https://tridao.me/publications/flash2/flash2.pdf
class FlashAttentionFunction(Function):
@staticmethod
@torch.no_grad()
def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):
""" Algorithm 1 in the v2 paper """
device = q.device
max_neg_value = -torch.finfo(q.dtype).max
qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
o = torch.zeros_like(q)
all_row_sums = torch.zeros((*q.shape[:-1], 1), device = device)
all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, device = device)
scale = (q.shape[-1] ** -0.5)
num_row_tiles = math.ceil(q.shape[-2] / q_bucket_size)
num_col_tiles = math.ceil(k.shape[-2] / k_bucket_size)
if exists(mask) and mask.ndim == 2:
mask = rearrange(mask, 'b n -> b 1 1 n')
if not exists(mask):
col_masks = (None,) * num_col_tiles
mask = (col_masks,) * num_row_tiles
else:
mask = ((mask,) * num_row_tiles) if mask.shape[-2] == 1 else mask.split(q_bucket_size, dim = -2)
mask = tuple(((row_mask,) * num_col_tiles) if row_mask.shape[-1] == 1 else row_mask.split(k_bucket_size, dim = -1) for row_mask in mask)
row_splits = zip(
q.split(q_bucket_size, dim = -2),
o.split(q_bucket_size, dim = -2),
mask,
all_row_sums.split(q_bucket_size, dim = -2),
all_row_maxes.split(q_bucket_size, dim = -2),
)
for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):
q_start_index = ind * q_bucket_size - qk_len_diff
col_splits = zip(
k.split(k_bucket_size, dim = -2),
v.split(k_bucket_size, dim = -2),
row_mask
)
for k_ind, (kc, vc, col_mask) in enumerate(col_splits):
k_start_index = k_ind * k_bucket_size
attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale
if exists(col_mask):
attn_weights.masked_fill_(~col_mask, max_neg_value)
if causal and q_start_index < (k_start_index + k_bucket_size - 1):
causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1)
attn_weights.masked_fill_(causal_mask, max_neg_value)
block_row_maxes = attn_weights.amax(dim = -1, keepdims = True)
new_row_maxes = torch.maximum(block_row_maxes, row_maxes)
exp_weights = torch.exp(attn_weights - new_row_maxes)
if exists(col_mask):
exp_weights.masked_fill_(~col_mask, 0.)
block_row_sums = exp_weights.sum(dim = -1, keepdims = True).clamp(min = EPSILON)
exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)
exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)
new_row_sums = exp_row_max_diff * row_sums + block_row_sums
oc.mul_(exp_row_max_diff).add_(exp_values)
row_maxes.copy_(new_row_maxes)
row_sums.copy_(new_row_sums)
oc.div_(row_sums)
lse = all_row_sums.log() + all_row_maxes
ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)
ctx.save_for_backward(q, k, v, o, lse)
return o
@staticmethod
@torch.no_grad()
def backward(ctx, do):
""" Algorithm 2 in the v2 paper """
causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args
q, k, v, o, lse = ctx.saved_tensors
device = q.device
max_neg_value = -torch.finfo(q.dtype).max
qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
dq = torch.zeros_like(q)
dk = torch.zeros_like(k)
dv = torch.zeros_like(v)
row_splits = zip(
q.split(q_bucket_size, dim = -2),
o.split(q_bucket_size, dim = -2),
do.split(q_bucket_size, dim = -2),
mask,
lse.split(q_bucket_size, dim = -2),
dq.split(q_bucket_size, dim = -2)
)
for ind, (qc, oc, doc, row_mask, lsec, dqc) in enumerate(row_splits):
q_start_index = ind * q_bucket_size - qk_len_diff
col_splits = zip(
k.split(k_bucket_size, dim = -2),
v.split(k_bucket_size, dim = -2),
dk.split(k_bucket_size, dim = -2),
dv.split(k_bucket_size, dim = -2),
row_mask
)
for k_ind, (kc, vc, dkc, dvc, col_mask) in enumerate(col_splits):
k_start_index = k_ind * k_bucket_size
attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale
if causal and q_start_index < (k_start_index + k_bucket_size - 1):
causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1)
attn_weights.masked_fill_(causal_mask, max_neg_value)
p = torch.exp(attn_weights - lsec)
if exists(col_mask):
p.masked_fill_(~col_mask, 0.)
dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)
dp = einsum('... i d, ... j d -> ... i j', doc, vc)
D = (doc * oc).sum(dim = -1, keepdims = True)
ds = p * scale * (dp - D)
dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)
dk_chunk = einsum('... i j, ... i d -> ... j d', ds, qc)
dqc.add_(dq_chunk)
dkc.add_(dk_chunk)
dvc.add_(dv_chunk)
return dq, dk, dv, None, None, None, None
# main class
# just flash attention in plain pytorch
# it will be way slower than implementing it in CUDA
# for tinkering and educational purposes
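# Usage sketch (illustrative; shapes are (batch, heads, seq, dim_head)):
#
#   q = k = v = torch.randn(1, 8, 1024, 64)
#   out = FlashAttentionFunction.apply(q, k, v, None, True, 512, 1024)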
class FlashAttentionTwo(BaseAttention):
def __init__(
self,
*,
dim: int = None,
heads: int = 8,
dim_head: int = 64,
causal: bool = False,
q_bucket_size: int = 512,
k_bucket_size: int = 1024,
parallel: bool = False,
mixed_precision: bool = False
):
super().__init__()
self.heads = heads
self.causal = causal
self.parallel = parallel
self.mixed_precision = mixed_precision
inner_dim = heads * dim_head
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
# memory efficient attention related parameters
# can be overriden on forward
self.q_bucket_size = q_bucket_size
self.k_bucket_size = k_bucket_size
if self.parallel:
self.model = DataParallel(self)
if self.mixed_precision:
self.scaler = GradScaler()
def forward(
self,
x,
context = None,
mask = None,
q_bucket_size = None,
k_bucket_size = None,
):
q_bucket_size = default(q_bucket_size, self.q_bucket_size)
k_bucket_size = default(k_bucket_size, self.k_bucket_size)
h = self.heads
context = default(context, x)
q = self.to_q(x)
k, v = self.to_kv(context).chunk(2, dim=-1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
if self.parallel:
# Split the input data into chunks and move each chunk to the correct GPU
num_gpus = torch.cuda.device_count()
x_chunks = x.split(x.size(0) // num_gpus)
x_chunks = [chunk.to(f'cuda:{i}') for i, chunk in enumerate(x_chunks)]
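            # NOTE: reassigning `q` to the raw input chunks below bypasses the
            # query/key/value projections above; this parallel path appears
            # incomplete as written.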
q = x_chunks
if self.mixed_precision:
# Use autocast to allow operations to run in lower precision
with autocast():
out = FlashAttentionFunction.apply(q, k, v, mask, self.causal, q_bucket_size, k_bucket_size)
else:
out = FlashAttentionFunction.apply(q, k, v, mask, self.causal, q_bucket_size, k_bucket_size)
out = rearrange(out, 'b h n d -> b n (h d)')
        return self.to_out(out)

# --- end of zeta/nn/attention/flash_attention2.py ---
from collections import namedtuple
from dataclasses import dataclass
from functools import wraps
import torch
import torch.nn.functional as F
from einops import rearrange
from packaging import version
from torch import Tensor, einsum, nn
from zeta.nn.attention.base import BaseAttention
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
@dataclass
class Intermediates:
"""
Dataclass to store intermediate tensors during attention computation.
Args:
qk_similarities (torch.Tensor): Tensor storing the similarities between query and key.
pre_softmax_attn (torch.Tensor): Tensor storing the attention weights before softmax.
post_softmax_attn (torch.Tensor): Tensor storing the attention weights after softmax.
Methods:
to_tuple(): Convert the Intermediates object to a tuple.
"""
qk_similarities: Tensor = None
pre_softmax_attn: Tensor = None
post_softmax_attn: Tensor = None
def to_tuple(self):
"""
Convert the Intermediates object to a tuple.
Returns:
tuple: Tuple representation of the Intermediates object.
"""
return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn)
class FlashAttention(BaseAttention):
def __init__(
self,
causal: bool = False,
dropout: float = 0.,
flash: bool = True
):
"""
FlashAttention module that performs attention computation.
Args:
causal (bool): Whether to apply causal masking (default: False).
dropout (float): Dropout probability (default: 0.).
flash (bool): Whether to use flash attention (default: True).
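
        Example (illustrative sketch; uses the non-flash einsum path so it
        runs on CPU)::

            attn = FlashAttention(causal=True, flash=False)
            q = k = v = torch.randn(2, 8, 128, 64)
            out = attn(q, k, v)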
"""
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def get_mask(self, i, j, device):
"""
Generate a mask for attention computation.
Args:
i (int): Length of the query sequence.
j (int): Length of the key sequence.
device (torch.device): Device to place the mask tensor.
Returns:
torch.Tensor: Mask tensor of shape (i, j).
"""
return torch.ones((i, j), device=device, dtype=torch.bool).triu(j - i + 1)
def flash_attn(
self,
q, k, v,
mask = None,
attn_bias = None
):
"""
Perform flash attention computation.
Args:
q (torch.Tensor): Query tensor of shape (batch, heads, q_len, dim).
k (torch.Tensor): Key tensor of shape (batch, heads, k_len, dim).
v (torch.Tensor): Value tensor of shape (batch, heads, v_len, dim).
mask (torch.Tensor): Mask tensor of shape (batch, heads, q_len, k_len) (default: None).
attn_bias (torch.Tensor): Attention bias tensor of shape (batch, heads, q_len, k_len) (default: None).
Returns:
torch.Tensor: Output tensor of shape (batch, heads, q_len, dim).
"""
batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
causal = self.causal
if exists(mask):
assert mask.ndim == 4
mask = mask.expand(batch, heads, q_len, k_len)
# manually handle causal mask, if another mask was given
if causal:
                causal_mask = self.get_mask(q_len, k_len, device)
mask = mask & ~causal_mask
causal = False
# handle alibi positional bias
# convert from bool to float
if exists(attn_bias):
attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1)
# if mask given, the mask would already contain the causal mask from above logic
# otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
mask_value = -torch.finfo(q.dtype).max
if exists(mask):
attn_bias = attn_bias.masked_fill(~mask, mask_value // 2)
elif causal:
                causal_mask = self.get_mask(q_len, k_len, device)
attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
causal = False
# scaled_dot_product_attention handles attn_mask either as bool or additive bias
# make it an additive bias here
mask = attn_bias
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out
def forward(self, q, k, v, mask = None, attn_bias = None):
"""
Perform attention computation.
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
Args:
q (torch.Tensor): Query tensor of shape (batch, heads, q_len, dim).
k (torch.Tensor): Key tensor of shape (batch, heads, k_len, dim).
v (torch.Tensor): Value tensor of shape (batch, heads, v_len, dim).
mask (torch.Tensor): Mask tensor of shape (batch, heads, q_len, k_len) (default: None).
attn_bias (torch.Tensor): Attention bias tensor of shape (batch, heads, q_len, k_len) (default: None).
Returns:
torch.Tensor: Output tensor of shape (batch, heads, q_len, dim).
"""
q_len, k_len, device = q.shape[-2], k.shape[-2], q.device
scale = q.shape[-1] ** -0.5
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
if self.flash:
return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias)
# similarity
sim = einsum(f"b h i d, {kv_einsum_eq} -> b h i j", q, k) * scale
# attention bias
if exists(attn_bias):
sim = sim + attn_bias
# causal mask
if self.causal:
causal_mask = self.get_mask(q_len, k_len, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum(f"b h i j, {kv_einsum_eq} -> b h i d", attn, v)
        return out

# --- end of zeta/nn/attention/flash_attention.py ---
from collections import namedtuple
from dataclasses import dataclass
from functools import partial, wraps
from typing import Optional
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from packaging import version
from torch import Tensor, einsum, nn
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
@dataclass
class Intermediates:
qk_similarities: Optional[Tensor] = None
pre_softmax_attn: Optional[Tensor] = None
post_softmax_attn: Optional[Tensor] = None
def to_tuple(self):
return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn)
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def compact(arr):
return [*filter(exists, arr)]
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# functions for creating causal mask
# need a special one for onnx cpu (no support for .triu)
def create_causal_mask(i, j, device):
return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
def onnx_create_causal_mask(i, j, device):
r = torch.arange(i, device = device)
causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j')
causal_mask = F.pad(causal_mask, (j - i, 0), value = False)
return causal_mask
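# e.g. create_causal_mask(3, 3, torch.device("cpu")) ->
#   tensor([[False,  True,  True],
#           [False, False,  True],
#           [False, False, False]])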
# main class
class Attend(nn.Module):
def __init__(
self,
*,
dropout = 0.,
causal = False,
heads = None,
talking_heads = False,
sparse_topk = None,
scale = None,
qk_norm = False,
flash = False,
add_zero_kv = False,
onnxable = False
):
super().__init__()
self.scale = scale
self.qk_norm = qk_norm
self.causal = causal
self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask
self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
# talking heads
assert not (flash and talking_heads), 'talking heads not compatible with flash attention'
self.talking_heads = talking_heads
if talking_heads:
self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
# sparse topk
assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention'
self.sparse_topk = sparse_topk
# add a key / value token composed of zeros
# in case this helps controlling outliers, proposed by https://www.evanmiller.org/attention-is-off-by-one.html
self.add_zero_kv = add_zero_kv
# flash attention
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def flash_attn(
self,
q, k, v,
mask = None,
attn_bias = None
):
batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
if self.qk_norm:
default_scale = q.shape[-1] ** -0.5
q = q * (default_scale / self.scale)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
causal = self.causal
if exists(mask):
assert mask.ndim == 4
mask = mask.expand(batch, heads, q_len, k_len)
# manually handle causal mask, if another mask was given
if causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
mask = mask & ~causal_mask
causal = False
# handle alibi positional bias
# convert from bool to float
if exists(attn_bias):
attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1)
# if mask given, the mask would already contain the causal mask from above logic
# otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
mask_value = -torch.finfo(q.dtype).max
if exists(mask):
attn_bias = attn_bias.masked_fill(~mask, mask_value // 2)
elif causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
causal = False
# scaled_dot_product_attention handles attn_mask either as bool or additive bias
# make it an additive bias here
mask = attn_bias
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out, Intermediates()
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, heads, kv_heads, device = q.shape[-2], q.shape[1], k.shape[1], q.device
scale = default(self.scale, q.shape[-1] ** -0.5)
# handle grouped multi-query attention
if kv_heads == 1:
k, v = map(lambda t: rearrange(t, 'b 1 n d -> b n d'), (k, v))
elif kv_heads < heads:
k, v = map(lambda t: repeat(t, 'b kvh n d -> b (r kvh) n d', r = heads // kv_heads), (k, v))
# handle zero kv, as means for allowing network to attend to nothing
if self.add_zero_kv:
k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v))
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (1, 0), value = 0.)
if self.flash:
assert not exists(prev_attn), 'residual attention not compatible with flash attention'
return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias)
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale
if exists(prev_attn):
dots = dots + prev_attn
qk_similarities = dots.clone()
if self.talking_heads:
dots = self.pre_softmax_talking_heads(dots)
if exists(attn_bias):
dots = dots + attn_bias
i, j, dtype = *dots.shape[-2:], dots.dtype
mask_value = -torch.finfo(dots.dtype).max
if exists(self.sparse_topk) and self.sparse_topk < j:
top_values, _ = dots.topk(self.sparse_topk, dim = -1)
sparse_topk_mask = dots < top_values[..., -1:]
mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask
if exists(mask):
dots = dots.masked_fill(~mask, mask_value)
if self.causal:
causal_mask = self.create_causal_mask(i, j, device = device)
dots = dots.masked_fill(causal_mask, mask_value)
pre_softmax_attn = dots.clone()
attn = self.attn_fn(dots, dim = -1)
attn = attn.type(dtype)
post_softmax_attn = attn.clone()
attn = self.attn_dropout(attn)
if self.talking_heads:
attn = self.post_softmax_talking_heads(attn)
out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
intermediates = Intermediates(
qk_similarities = qk_similarities,
pre_softmax_attn = pre_softmax_attn,
post_softmax_attn = post_softmax_attn
)
return out, intermediates
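# Usage sketch for Attend (illustrative; flash disabled so the einsum path
# runs on CPU):
#
#   attend = Attend(causal=True)
#   q = k = v = torch.randn(2, 8, 64, 32)
#   out, intermediates = attend(q, k, v)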
# cascading heads logic
def to_single_heads(t, dim = 1):
heads = t.unbind(dim = dim)
return tuple(head.unsqueeze(dim) for head in heads)
class CascadingHeads(nn.Module):
def __init__(self, attend: Attend):
super().__init__()
self.attend = attend
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same'
# split inputs into per-head inputs
heads = q.shape[1]
queries = to_single_heads(q)
keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads)
values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads)
mask = (mask,) * heads
attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads)
prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads)
        # now loop through each head, with the output of the previous head summed into the next head's query
# thus cascading
all_outs = []
all_intermediates = []
prev_head_out = None
for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn):
if exists(prev_head_out):
h_q = h_q + prev_head_out
out, intermediates = self.attend(
h_q, h_k, h_v,
mask = h_mask,
attn_bias = h_attn_bias,
prev_attn = h_prev_attn
)
prev_head_out = out
all_outs.append(out)
all_intermediates.append(intermediates)
# cat all output heads
all_outs = torch.cat(all_outs, dim = 1)
# cat all intermediates, if they exist
qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates))
qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn))
aggregated_intermediates = Intermediates(
qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None,
pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None,
post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None
)
        return all_outs, aggregated_intermediates

# ---- file: zeta/nn/attention/attend.py (zetascale-0.4.4) ----
import math
import warnings
from typing import Dict, Optional, Type
import torch
import torch.nn as nn
from einops import rearrange
from packaging import version
from zeta.nn.attention.base import BaseAttention
def _cast_if_autocast_enabled(tensor):
if torch.is_autocast_enabled():
if tensor.device.type == 'cuda':
dtype = torch.get_autocast_gpu_dtype()
elif tensor.device.type == 'cpu':
dtype = torch.get_autocast_cpu_dtype()
else:
raise NotImplementedError()
return tensor.to(dtype=dtype)
return tensor
class LPLayerNorm(nn.LayerNorm):
def __init__(
self,
normalized_shape,
eps=1e-05,
elementwise_affine=True,
device=None,
dtype=None,
):
super().__init__(
normalized_shape=normalized_shape,
eps=eps,
elementwise_affine=elementwise_affine,
device=device,
dtype=dtype
)
def forward(self, x):
module_device = x.device
downcast_x = _cast_if_autocast_enabled(x)
downcast_weight = _cast_if_autocast_enabled(
self.weight) if self.weight is not None else self.weight
downcast_bias = _cast_if_autocast_enabled(
self.bias) if self.bias is not None else self.bias
with torch.autocast(enabled=False, device_type=module_device.type):
return torch.nn.functional.layer_norm(
downcast_x,
self.normalized_shape,
downcast_weight,
downcast_bias,
self.eps,
)
def rms_norm(x, weight=None, eps=1e-5):
output = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
if weight is not None:
return output * weight
return output
class RMSNorm(nn.Module):
def __init__(
self,
normalized_shape,
eps=1e-5,
weight=True,
dtype=None,
device=None,
):
super().__init__()
self.eps = eps
if weight:
self.weight = torch.nn.Parameter(
torch.ones(normalized_shape, dtype=dtype, device=device)
)
else:
self.register_parameter('weight', None)
def forward(self, x):
return rms_norm(x.float(), self.weight, self.eps).to(dtype=x.dtype)
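# Illustrative shape check (arbitrary values): RMSNorm normalizes over the last dim only.
#   norm = RMSNorm(64)
#   norm(torch.randn(2, 16, 64)).shape   # torch.Size([2, 16, 64])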
class LPRMSNorm(RMSNorm):
def __init__(
self,
normalized_shape,
eps=1e-5,
weight=True,
dtype=None,
device=None,
):
super().__init__(
normalized_shape=normalized_shape,
eps=eps,
weight=weight,
dtype=dtype,
device=device,
)
def forward(self, x):
downcast_x = _cast_if_autocast_enabled(x)
downcast_weight = _cast_if_autocast_enabled(
self.weight) if self.weight is not None else self.weight
        with torch.autocast(enabled=False, device_type=x.device.type):
return rms_norm(downcast_x, downcast_weight,
self.eps).to(dtype=x.dtype)
# registries mapping config names to implementations
FC_CLASS_REGISTRY = {
'torch': nn.Linear,
}
NORM_CLASS_REGISTRY = {
    'layernorm': nn.LayerNorm,
'low_precision_layernorm': LPLayerNorm,
    'rmsnorm': RMSNorm,
'low_precision_rmsnorm': LPRMSNorm,
}
def _reset_causal(num_query_tokens: int, num_key_tokens: int,
original_causal: bool):
# disable causal when it is not needed
# necessary for flash & triton for generation with kv_cache
if original_causal and num_query_tokens != num_key_tokens:
if num_query_tokens != 1:
raise NotImplementedError(
'MPT does not support query and key with different number of tokens, unless number of query tokens is 1.'
)
else:
return False
return original_causal
def scaled_multihead_dot_product_attention(
query,
key,
value,
heads,
past_key_value=None,
softmax_scale=None,
bias=None,
key_padding_mask=None,
causal=False,
dropout=0.0,
training=False,
needs_weights=False,
multiquery=False,
):
q = rearrange(query, 'b s (h d) -> b h s d', h=heads)
kv_heads = 1 if multiquery else heads
k = rearrange(key, 'b s (h d) -> b h d s', h=kv_heads)
v = rearrange(value, 'b s (h d) -> b h s d', h=kv_heads)
if past_key_value is not None:
# attn_impl: flash & triton use kernels which expect input shape [b, s, h, d_head].
# kv_cache is therefore stored using that shape.
# attn_impl: torch stores the kv_cache in the ordering which is most advantageous
# for its attn computation ie
# keys are stored as tensors with shape [b, h, d_head, s] and
# values are stored as tensors with shape [b, h, s, d_head]
if len(past_key_value) != 0:
k = torch.cat([past_key_value[0], k], dim=3)
v = torch.cat([past_key_value[1], v], dim=2)
past_key_value = (k, v)
b, _, s_q, d = q.shape
s_k = k.size(-1)
if softmax_scale is None:
softmax_scale = 1 / math.sqrt(d)
attn_weight = q.matmul(k) * softmax_scale
if bias is not None:
# clamp to 0 necessary for torch 2.0 compile()
_s_q = max(0, bias.size(2) - s_q)
_s_k = max(0, bias.size(3) - s_k)
bias = bias[:, :, _s_q:, _s_k:]
if (bias.size(-1) != 1 and
bias.size(-1) != s_k) or (bias.size(-2) != 1 and
bias.size(-2) != s_q):
raise RuntimeError(
f'bias (shape: {bias.shape}) is expected to broadcast to shape: {attn_weight.shape}.'
)
attn_weight = attn_weight + bias
min_val = torch.finfo(q.dtype).min
if key_padding_mask is not None:
if bias is not None:
warnings.warn(
                'Propagating key_padding_mask to the attention module ' +\
                'and applying it within the attention module can cause ' +\
                'unnecessary computation/memory usage. Consider integrating ' +\
                'into bias once and passing that to each attention ' +\
                'module instead.'
)
attn_weight = attn_weight.masked_fill(
~key_padding_mask.view((b, 1, 1, s_k)), min_val)
if causal and (not q.size(2) == 1):
s = max(s_q, s_k)
causal_mask = attn_weight.new_ones(s, s, dtype=torch.float32)
causal_mask = causal_mask.tril()
causal_mask = causal_mask.to(torch.bool)
causal_mask = ~causal_mask
causal_mask = causal_mask[-s_q:, -s_k:]
attn_weight = attn_weight.masked_fill(causal_mask.view(1, 1, s_q, s_k),
min_val)
attn_weight = torch.softmax(attn_weight, dim=-1)
if dropout:
attn_weight = torch.nn.functional.dropout(attn_weight,
p=dropout,
training=training,
inplace=True)
out = attn_weight.to(v.dtype).matmul(v)
out = rearrange(out, 'b h s d -> b s (h d)')
if needs_weights:
return out, attn_weight, past_key_value
return out, None, past_key_value
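# Usage sketch for scaled_multihead_dot_product_attention (illustrative values,
# CPU-friendly; self-attention with q = k = v):
#   q = torch.randn(2, 16, 128)          # (batch, seq, heads * head_dim)
#   out, attn, _ = scaled_multihead_dot_product_attention(q, q, q, heads=4, causal=True)
#   out.shape                            # torch.Size([2, 16, 128]); attn is None unless needs_weights=True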
def check_valid_inputs(*tensors, valid_dtypes=[torch.float16, torch.bfloat16]):
for tensor in tensors:
if tensor.dtype not in valid_dtypes:
raise TypeError(f'{tensor.dtype=} must be in {valid_dtypes=}.')
if not tensor.is_cuda:
raise TypeError(f'Inputs must be cuda tensors ({tensor.is_cuda=}).')
def flash_attn_fn(
query,
key,
value,
heads,
past_key_value=None,
softmax_scale=None,
bias=None,
key_padding_mask=None,
causal=False,
dropout=0.0,
training=False,
needs_weights=False,
multiquery=False,
):
try:
from flash_attn import bert_padding, flash_attn_interface # type: ignore # yapf: disable # isort: skip
    except ImportError:
raise RuntimeError('Please install flash-attn==1.0.3.post0')
check_valid_inputs(query, key, value)
if past_key_value is not None:
if len(past_key_value) != 0:
key = torch.cat([past_key_value[0], key], dim=1)
value = torch.cat([past_key_value[1], value], dim=1)
past_key_value = (key, value)
if bias is not None:
# clamp to 0 necessary for torch 2.0 compile()
_s_q = max(0, bias.size(2) - query.size(1))
_s_k = max(0, bias.size(3) - key.size(1))
bias = bias[:, :, _s_q:, _s_k:]
if bias is not None:
raise NotImplementedError('bias not implemented for flash attn.')
batch_size, seqlen = query.shape[:2]
if key_padding_mask is None:
key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool)
query_padding_mask = key_padding_mask[:, -query.size(1):]
query_unpad, indices_q, cu_seqlens_q, max_seqlen_q = bert_padding.unpad_input(
query, query_padding_mask)
query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=heads)
key_unpad, _, cu_seqlens_k, max_seqlen_k = bert_padding.unpad_input(
key, key_padding_mask)
key_unpad = rearrange(key_unpad,
'nnz (h d) -> nnz h d',
h=1 if multiquery else heads)
value_unpad, _, _, _ = bert_padding.unpad_input(value, key_padding_mask)
value_unpad = rearrange(value_unpad,
'nnz (h d) -> nnz h d',
h=1 if multiquery else heads)
if multiquery:
key_unpad = key_unpad.expand(key_unpad.size(0), heads,
key_unpad.size(-1))
value_unpad = value_unpad.expand(value_unpad.size(0), heads,
value_unpad.size(-1))
dropout = dropout if training else 0.0
reset_causal = _reset_causal(query.size(1), key.size(1), causal)
output_unpad = flash_attn_interface.flash_attn_unpadded_func(
query_unpad,
key_unpad,
value_unpad,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
dropout,
softmax_scale=softmax_scale,
causal=reset_causal,
return_attn_probs=needs_weights)
output = bert_padding.pad_input(
rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size,
seqlen)
return output, None, past_key_value
def attn_bias_shape(attn_impl, heads, seq_len, alibi, prefix_lm, causal,
use_sequence_id):
if attn_impl == 'flash':
return None
elif attn_impl in ['torch', 'triton']:
if alibi:
if (prefix_lm or not causal) or use_sequence_id:
return (1, heads, seq_len, seq_len)
return (1, heads, 1, seq_len)
elif prefix_lm or use_sequence_id:
return (1, 1, seq_len, seq_len)
return None
else:
raise ValueError(f'{attn_impl=} is an invalid setting.')
def build_attn_bias(
attn_impl,
bias,
heads,
seq_len,
causal=False,
alibi=False,
alibi_bias_max=8,
):
if attn_impl == 'flash':
return None
elif attn_impl in ['torch', 'triton']:
if alibi:
# in place add alibi to attn bias
device, dtype = bias.device, bias.dtype
bias = bias.add(
build_alibi_bias(
heads,
seq_len,
full=not causal,
alibi_bias_max=alibi_bias_max,
device=device,
dtype=dtype,
))
return bias
else:
raise ValueError(f'{attn_impl=} is an invalid setting.')
# alibi helpers
def gen_slopes(heads, alibi_bias_max=8, device=None):
_heads = 2**math.ceil(math.log2(heads))
m = torch.arange(1, _heads + 1, dtype=torch.float32, device=device)
m = m.mul(alibi_bias_max / _heads)
slopes = (1. / torch.pow(2, m))
if _heads != heads:
# if heads is not a power of two,
# Huggingface and FasterTransformer calculate slopes normally,
# then return this strided concatenation of slopes
slopes = torch.concat([slopes[1::2], slopes[::2]])[:heads]
return slopes.view(1, heads, 1, 1)
def build_alibi_bias(
heads,
seq_len,
full=False,
alibi_bias_max=8,
device=None,
dtype=None,
):
alibi_bias = torch.arange(1 - seq_len, 1, dtype=torch.int32,
device=device).view(1, 1, 1, seq_len)
if full:
# generate 1 x Heads x SeqLen x SeqLen alibi bias mask
# otherwise the mask is 1 x Heads x 1 x SeqLen (which is broadcast to the appropriate size)
alibi_bias = alibi_bias - torch.arange(
1 - seq_len, 1, dtype=torch.int32, device=device).view(
1, 1, seq_len, 1)
alibi_bias = alibi_bias.abs().mul(-1)
slopes = gen_slopes(heads, alibi_bias_max, device=device)
alibi_bias = alibi_bias * slopes
return alibi_bias.to(dtype=dtype)
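# Shape sketch (illustrative values): without full=True the ALiBi bias broadcasts over queries.
#   build_alibi_bias(heads=4, seq_len=8).shape             # torch.Size([1, 4, 1, 8])
#   build_alibi_bias(heads=4, seq_len=8, full=True).shape  # torch.Size([1, 4, 8, 8])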
def triton_flash_attn_fn(
query,
key,
value,
heads,
past_key_value=None,
softmax_scale=None,
bias=None,
key_padding_mask=None,
causal=False,
dropout=0.0,
training=False,
needs_weights=False,
multiquery=False,
):
try:
from llmfoundry.models.layers.flash_attn_triton import flash_attn_func
    except ImportError:
_installed = False
if version.parse(torch.__version__) < version.parse('2.0.0'):
_installed = True
# if torch1.13.1 revert to using triton flash attn from HazyResearch
# with flash-attn==1.0.3.post0 and triton==2.0.0.dev20221202
try:
from flash_attn.flash_attn_triton import flash_attn_func
        except ImportError:
_installed = False
if not _installed:
# installing triton-pre-mlir works for both torch1.13.1 and torch2.0+
# default recommendation is to install this variant
raise RuntimeError(
'Requirements for `attn_impl: triton` not installed. Either (1) have a CUDA-compatible GPU '
'and `pip install .[gpu]` if installing from source or '
'`pip install triton-pre-mlir@git+https://github.com/vchiley/triton.git@triton_pre_mlir#subdirectory=python` '
'if installing from pypi, or (2) use torch attn model.attn_config.attn_impl=torch (torch attn_impl will be slow). '
'Note: (1) requires you have CMake and PyTorch already installed.'
)
check_valid_inputs(query, key, value)
if past_key_value is not None:
if len(past_key_value) != 0:
key = torch.cat([past_key_value[0], key], dim=1)
value = torch.cat([past_key_value[1], value], dim=1)
past_key_value = (key, value)
if bias is not None:
# clamp to 0 necessary for torch 2.0 compile()
_s_q = max(0, bias.size(2) - query.size(1))
_s_k = max(0, bias.size(3) - key.size(1))
bias = bias[:, :, _s_q:, _s_k:]
if dropout:
raise NotImplementedError(
'Dropout not implemented for attn_impl: triton.')
if needs_weights:
raise NotImplementedError(
'attn_impl: triton cannot return attn weights.')
if key_padding_mask is not None:
warnings.warn(
'Propagating key_padding_mask to the attention module ' +\
'and applying it within the attention module can cause ' +\
'unnecessary computation/memory usage. Consider integrating ' +\
'into bias once and passing that to each attention ' +\
'module instead.'
)
b_size, s_k = key_padding_mask.shape[:2]
if bias is None:
bias = query.new_zeros(b_size, 1, 1, s_k)
bias = bias.masked_fill(
~key_padding_mask.view((b_size, 1, 1, s_k)),
torch.finfo(query.dtype).min)
query = rearrange(query, 'b s (h d) -> b s h d', h=heads)
key = rearrange(key, 'b s (h d) -> b s h d', h=1 if multiquery else heads)
value = rearrange(value,
'b s (h d) -> b s h d',
h=1 if multiquery else heads)
if multiquery:
# necessary to repeat instead of expand tensor because
# output contains NaN in edge cases such as with head dimension = 8
key = key.repeat(1, 1, heads, 1)
value = value.repeat(1, 1, heads, 1)
reset_causal = _reset_causal(query.size(1), key.size(1), causal)
attn_output = flash_attn_func(query, key, value, bias, reset_causal,
softmax_scale)
output = attn_output.view(*attn_output.shape[:2], -1)
return output, None, past_key_value
class MultiHeadAttention(nn.Module):
"""Multi-head self attention.
    Using the torch or triton attention implementation enables the user to also use
additive bias.
"""
def __init__(
self,
d_model: int,
heads: int,
attn_impl: str = 'triton',
clip_qkv: Optional[float] = None,
qk_ln: bool = False,
softmax_scale: Optional[float] = None,
attn_pdrop: float = 0.0,
norm_type: str = 'low_precision_layernorm',
fc_type: str = 'torch',
verbose: int = 0,
device: Optional[str] = None,
):
super().__init__()
self.attn_impl = attn_impl
self.clip_qkv = clip_qkv
self.qk_ln = qk_ln
self.d_model = d_model
self.heads = heads
self.softmax_scale = softmax_scale
if self.softmax_scale is None:
self.softmax_scale = 1 / math.sqrt(self.d_model / self.heads)
self.attn_dropout = attn_pdrop
fc_kwargs = {}
if fc_type != 'te':
fc_kwargs['device'] = device
self.Wqkv = FC_CLASS_REGISTRY[fc_type](
self.d_model,
3 * self.d_model,
**fc_kwargs,
)
# for param init fn; enables shape based init of fused layers
fuse_splits = (d_model, 2 * d_model)
self.Wqkv._fused = (0, fuse_splits) # type: ignore
if self.qk_ln:
norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
self.q_ln = norm_class(self.d_model, device=device)
self.k_ln = norm_class(self.d_model, device=device)
if self.attn_impl == 'flash':
self.attn_fn = flash_attn_fn
elif self.attn_impl == 'triton':
self.attn_fn = triton_flash_attn_fn
if verbose:
warnings.warn(
'While `attn_impl: triton` can be faster than `attn_impl: flash` ' +\
'it uses more memory. When training larger models this can trigger ' +\
'alloc retries which hurts performance. If encountered, we recommend ' +\
'using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.'
)
elif self.attn_impl == 'torch':
self.attn_fn = scaled_multihead_dot_product_attention
if torch.cuda.is_available() and verbose:
warnings.warn(
'Using `attn_impl: torch`. If your model does not use `alibi` or ' +\
'`prefix_lm` we recommend using `attn_impl: flash` otherwise ' +\
'we recommend using `attn_impl: triton`.'
)
else:
raise ValueError(f'{attn_impl=} is an invalid setting.')
self.out_proj = FC_CLASS_REGISTRY[fc_type](
self.d_model,
self.d_model,
**fc_kwargs,
)
self.out_proj._is_residual = True # type: ignore
def forward(
self,
x,
past_key_value=None,
bias=None,
mask=None,
causal=True,
needs_weights=False,
):
qkv = self.Wqkv(x)
if self.clip_qkv:
qkv = qkv.clamp(min=-self.clip_qkv, max=self.clip_qkv)
query, key, value = qkv.chunk(3, dim=2)
key_padding_mask = mask
if self.qk_ln:
# Applying layernorm to qk
dtype = query.dtype
query = self.q_ln(query).to(dtype)
key = self.k_ln(key).to(dtype)
context, attn_weights, past_key_value = self.attn_fn(
query,
key,
value,
self.heads,
past_key_value=past_key_value,
softmax_scale=self.softmax_scale,
bias=bias,
key_padding_mask=key_padding_mask,
causal=causal,
dropout=self.attn_dropout,
training=self.training,
needs_weights=needs_weights,
)
return self.out_proj(context), attn_weights, past_key_value
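# Usage sketch for MultiHeadAttention (illustrative values; attn_impl='torch' runs on CPU):
#   mha = MultiHeadAttention(d_model=128, heads=4, attn_impl='torch')
#   x = torch.randn(2, 16, 128)
#   out, attn_weights, past_kv = mha(x, causal=True)
#   out.shape                            # torch.Size([2, 16, 128])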
class MultiQueryAttention(BaseAttention):
"""Multi-Query self attention.
    Using the torch or triton attention implementation enables the user to
    also use additive bias.
"""
def __init__(
self,
d_model: int,
heads: int,
attn_impl: str = 'torch',
clip_qkv: Optional[float] = None,
qk_ln: bool = False,
softmax_scale: Optional[float] = None,
attn_pdrop: float = 0.0,
norm_type: str = 'low_precision_layernorm',
fc_type: str = 'torch',
verbose: int = 0,
device: Optional[str] = None,
):
super().__init__()
self.attn_impl = attn_impl
self.clip_qkv = clip_qkv
self.qk_ln = qk_ln
self.d_model = d_model
self.heads = heads
self.head_dim = d_model // heads
self.softmax_scale = softmax_scale
if self.softmax_scale is None:
self.softmax_scale = 1 / math.sqrt(self.head_dim)
self.attn_dropout = attn_pdrop
fc_kwargs = {}
if fc_type != 'te':
fc_kwargs['device'] = device
self.Wqkv = FC_CLASS_REGISTRY[fc_type](
d_model,
d_model + 2 * self.head_dim,
**fc_kwargs,
)
# for param init fn; enables shape based init of fused layers
fuse_splits = (d_model, d_model + self.head_dim)
self.Wqkv._fused = (0, fuse_splits) # type: ignore
if self.qk_ln:
norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
self.q_ln = norm_class(d_model, device=device)
self.k_ln = norm_class(self.head_dim, device=device)
if self.attn_impl == 'flash':
self.attn_fn = flash_attn_fn
elif self.attn_impl == 'triton':
self.attn_fn = triton_flash_attn_fn
if verbose:
warnings.warn(
'While `attn_impl: triton` can be faster than `attn_impl: flash` ' +\
'it uses more memory. When training larger models this can trigger ' +\
'alloc retries which hurts performance. If encountered, we recommend ' +\
'using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.'
)
elif self.attn_impl == 'torch':
self.attn_fn = scaled_multihead_dot_product_attention
if torch.cuda.is_available() and verbose:
warnings.warn(
'Using `attn_impl: torch`. If your model does not use `alibi` or ' +\
'`prefix_lm` we recommend using `attn_impl: flash` otherwise ' +\
'we recommend using `attn_impl: triton`.'
)
else:
raise ValueError(f'{attn_impl=} is an invalid setting.')
self.out_proj = FC_CLASS_REGISTRY[fc_type](
self.d_model,
self.d_model,
**fc_kwargs,
)
self.out_proj._is_residual = True # type: ignore
def forward(
self,
x,
past_key_value=None,
bias=None,
mask=None,
causal=True,
needs_weights=False,
):
qkv = self.Wqkv(x)
if self.clip_qkv:
qkv = qkv.clamp(min=-self.clip_qkv, max=self.clip_qkv)
query, key, value = qkv.split(
[self.d_model, self.head_dim, self.head_dim], dim=2)
key_padding_mask = mask
if self.qk_ln:
# Applying layernorm to qk
dtype = query.dtype
query = self.q_ln(query).to(dtype)
key = self.k_ln(key).to(dtype)
context, attn_weights, past_key_value = self.attn_fn(
query,
key,
value,
self.heads,
past_key_value=past_key_value,
softmax_scale=self.softmax_scale,
bias=bias,
key_padding_mask=key_padding_mask,
causal=causal,
dropout=self.attn_dropout,
training=self.training,
needs_weights=needs_weights,
multiquery=True,
)
        return self.out_proj(context), attn_weights, past_key_value

# ---- file: zeta/nn/attention/multiquery_attention.py (zetascale-0.4.4) ----
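# Usage sketch for MultiQueryAttention above (illustrative values; the default
# attn_impl='torch' runs on CPU, and k/v are shared across all query heads):
#   mqa = MultiQueryAttention(d_model=128, heads=4)
#   x = torch.randn(2, 16, 128)
#   out, _, _ = mqa(x, causal=True)
#   out.shape                            # torch.Size([2, 16, 128])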
import torch
import torch.nn as nn
import torch.nn.functional as F
from abc import ABC, abstractmethod
import bitsandbytes as bnb
from zeta.nn.utils.tensor_helpers import l2norm
from zeta.nn.utils.helpers import exists
class BaseEmbedding(ABC):
@abstractmethod
def forward(self, num_tokens: int, dim: int) -> nn.Module:
#custom embedding function
embedding = ...
return embedding
#Other embedding
class AndromedaEmbedding(BaseEmbedding):
def forward(self, num_tokens: int, dim: int) -> nn.Module:
embedding = nn.Embedding(num_tokens, dim)
return embedding
class AndromedaBnBEmbedding(BaseEmbedding):
    def forward(self, num_tokens: int, dim: int, padding_idx) -> nn.Module:
embedding = bnb.nn.modules.Embedding(num_tokens, dim, padding_idx)
return embedding
class TextEmbedding(nn.Embedding):
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0, std=self.embedding_dim**-0.5)
self._fill_padding_idx_with_zero()
class PositionalEmbedding(nn.Embedding):
def forward(
self,
x,
positions=None,
**kwargs,
):
if positions is None:
# being consistent with Fairseq, which starts from 2.
positions = (
torch.arange(2, x.size(1) + 2, device=x.device).long().unsqueeze(0)
)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, l2norm_embed=False):
super().__init__()
self.scale = dim ** -0.5 if not l2norm_embed else 1.
self.max_seq_len = max_seq_len
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x, pos=None):
seq_len, device = x.shape[-1], x.device
        assert seq_len <= self.max_seq_len, f"You are passing in a sequence length of {seq_len}, but your absolute positional embedding has a maximum length of {self.max_seq_len}"
if not exists(pos):
pos = torch.arange(seq_len, device=device)
pos_emb = self.emb(pos)
pos_emb = pos_emb * self.scale
return l2norm(pos_emb) if self.l2norm_embed else pos_emb
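# Illustrative shape check (arbitrary values): input is token ids, output broadcasts over the batch.
#   pos_emb = AbsolutePositionalEmbedding(dim=64, max_seq_len=128)
#   ids = torch.randint(0, 1000, (2, 32))
#   pos_emb(ids).shape                   # torch.Size([32, 64])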
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_tokens is None:
return self.vision_embed(visual_tokens)
if visual_tokens is None:
return self.text_embed(textual_tokens)
x1 = self.vision_embed(visual_tokens)
x2 = self.text_embed(textual_tokens)
return torch.cat([x1, x2], dim=1)
class VisionEmbedding(nn.Module):
"""Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
contain_mask_token=False,
prepend_cls_token=False,
):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
if contain_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.mask_token = None
if prepend_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.cls_token = None
def num_position_embeddings(self):
if self.cls_token is None:
return self.num_patches
else:
return self.num_patches + 1
def forward(self, x, masked_position=None, **kwargs):
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
batch_size, seq_len, _ = x.size()
if masked_position is not None:
assert self.mask_token is not None
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
w = masked_position.unsqueeze(-1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(
batch_size, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
        return x

# ---- file: zeta/nn/embeddings/embedding.py (zetascale-0.4.4) ----
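# Usage sketch for VisionEmbedding above (illustrative values):
#   patch_embed = VisionEmbedding(img_size=224, patch_size=16, embed_dim=768)
#   imgs = torch.randn(2, 3, 224, 224)
#   patch_embed(imgs).shape              # torch.Size([2, 196, 768]); 196 = (224 // 16) ** 2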
import torch
import torch.nn as nn
def fixed_pos_embedding(x):
"""
Generates fixed positional embeddings for the input tensor.
Args:
- x: Input tensor of shape (seq_len, dim)
Returns:
- sin: Sine positional embeddings of shape (seq_len, dim)
- cos: Cosine positional embeddings of shape (seq_len, dim)
"""
seq_len, dim = x.shape
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim) / dim))
sinusoid_inp = (
torch.einsum("i , j -> i j", torch.arange(0, seq_len, dtype=torch.float), inv_freq).to(x)
)
return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)
def rotate_every_two(x):
"""
Rearranges the elements of the input tensor by rotating every two elements.
Args:
- x: Input tensor of shape (batch_size, seq_len, dim)
Returns:
- x: Rearranged tensor of shape (batch_size, seq_len, dim)
"""
x1 = x[:, :, ::2]
x2 = x[:, :, 1::2]
x = torch.stack((-x2, x1), dim=-1)
return x.flatten(-2)
def duplicate_interleave(m):
"""
Duplicates a matrix while interleaving the copy.
Args:
- m: Input matrix
Returns:
- m: Duplicated and interleaved matrix
"""
dim0 = m.shape[0]
m = m.view(-1, 1)
m = m.repeat(1, 2)
m = m.view(dim0, -1)
return m
def apply_rotary_pos_emb(x, sin, cos, scale=1):
"""
Applies rotary positional embeddings to the input tensor.
Args:
- x: Input tensor of shape (batch_size, seq_len, dim)
- sin: Sine positional embeddings of shape (seq_len, dim)
- cos: Cosine positional embeddings of shape (seq_len, dim)
- scale: Scaling factor for the positional embeddings
Returns:
- x: Tensor with applied rotary positional embeddings
"""
sin, cos = map(lambda t: duplicate_interleave(t * scale), (sin, cos))
return (x * cos) + (rotate_every_two(x) * sin)
class XPOS(nn.Module):
def __init__(
self,
head_dim: int = None,
scale_base: int = 512
):
super().__init__()
self.head_dim = head_dim
self.scale_base = scale_base
self.register_buffer(
"scale", (torch.arange(0, head_dim, 2) + 0.4 * head_dim) / (1.4 * head_dim)
)
def forward(self,
x,
offset=0,
downscale=False):
"""
Forward pass of the XPOS module.
Args:
- x: Input tensor of shape (batch_size, seq_len, dim)
- offset: Offset value for positional embeddings
- downscale: Boolean indicating whether to downscale the positional embeddings
Returns:
- x: Tensor with applied rotary positional embeddings
"""
length = x.shape[1]
min_pos = -(length + offset) // 2
max_pos = length + offset + min_pos
scale = self.scale ** torch.arange(min_pos, max_pos, 1).to(self.scale).div(self.scale_base)[:, None]
sin, cos = fixed_pos_embedding(scale)
if scale.shape[0] > length:
scale = scale[-length:]
sin = sin[-length:]
cos = cos[-length:]
if downscale:
scale = 1 / scale
x = apply_rotary_pos_emb(x, sin, cos, scale)
        return x

# ---- file: zeta/nn/embeddings/xpos_relative_position.py (zetascale-0.4.4) ----
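# Usage sketch for XPOS above (illustrative values; the last dim must equal head_dim):
#   xpos = XPOS(head_dim=64)
#   q = torch.randn(1, 32, 64)
#   xpos(q).shape                        # torch.Size([1, 32, 64])
#   xpos(q, downscale=True)              # inverted scale, typically applied to keys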
import math
import torch
import torch.nn as nn
from zeta.nn.biases.base import BaseBias
class RelativePositionBias(BaseBias):
def __init__(
self,
        bidirectional: bool = True,
        num_buckets: int = 32,
max_distance: int = 128,
num_heads: int = 1
):
super().__init__()
self.bidirectional = bidirectional
self.num_buckets = num_buckets
self.max_distance = max_distance
self.num_heads = num_heads
self.relative_attention_bias = nn.Embedding(self.num_buckets, self.num_heads)
@staticmethod
def _relative_position_bucket(
relative_position, bidirectional=True, num_buckets=32, max_distance=128
):
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += (n < 0).to(torch.long) * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(
val_if_large, torch.full_like(val_if_large, num_buckets - 1)
)
ret += torch.where(is_small, n, val_if_large)
return ret
def compute_bias(self, qlen, klen, step=None):
step = 0 if step is None else step
context_position = torch.arange(
step,
step + qlen,
dtype=torch.long,
device=self.relative_attention_bias.weight.device,
)[:, None]
memory_position = torch.arange(
klen, dtype=torch.long, device=self.relative_attention_bias.weight.device
)[None, :]
relative_position = memory_position - context_position # shape (qlen, klen)
rp_bucket = self._relative_position_bucket(
relative_position, # shape (qlen, klen)
bidirectional=self.bidirectional,
num_buckets=self.num_buckets,
max_distance=self.max_distance,
)
rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)
values = self.relative_attention_bias(
rp_bucket
) # shape (qlen, klen, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(
0
) # shape (1, num_heads, qlen, klen)
return values
def forward(self, batch_size, qlen, klen, step=None):
# shape (batch * num_heads, qlen, klen)
return (
self.compute_bias(qlen, klen, step)
.repeat(batch_size, 1, 1, 1)
.view(-1, qlen, klen)
        )

# ---- file: zeta/nn/biases/relative_position_bias.py (zetascale-0.4.4) ----
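# Usage sketch for RelativePositionBias above (illustrative values):
#   rpb = RelativePositionBias(num_heads=4)
#   rpb(batch_size=2, qlen=8, klen=8).shape   # torch.Size([8, 8, 8]) = (batch * heads, qlen, klen)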
import math
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from zeta.nn.biases.base import BaseBias
from einops import rearrange
######## Helpers
def exists(val):
return val is not None
def pad_at_dim(t, pad, dim=-1, value=0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value=value)
class AlibiPositionalBias(BaseBias):
def __init__(self, heads, num_heads, **kwargs):
super().__init__()
self.heads = heads
self.num_heads = num_heads
        slopes = Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent=False)
self.register_buffer('bias', None, persistent=False)
    def get_bias(self, i, j, device):
        i_arange = torch.arange(j - i, j, device=device)
        j_arange = torch.arange(j, device=device)
        bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
        return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
@property
def device(self):
return next(self.buffers()).device
def forward(self, i, j):
h, device = self.num_heads, self.device
if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
        bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = pad_at_dim(bias, (0, num_heads_unalibied), dim=0)
self.register_buffer('bias', bias, persistent=False)
return self.bias
class LearnedAlibiPositionalBias(AlibiPositionalBias):
def __init__(self, heads, num_heads):
super().__init__(heads, num_heads)
log_slopes = torch.log(self.slopes)
self.learned_logslopes = nn.Parameter(log_slopes)
def forward(self, i, j):
h, device = self.heads, self.device
def get_slopes(param):
return pad_at_dim(param.exp(), (0, h - param.shape[0]), dim=-2)
if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
bias = self.bias[..., :i, :j]
else:
bias = self.get_bias(i, j, device)
self.register_buffer('bias', bias, persistent=False)
slopes = get_slopes(self.learned_logslopes)
bias = bias * slopes
        return bias

# ---- file: zeta/nn/biases/alibi.py (zetascale-0.4.4) ----
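# Usage sketch for AlibiPositionalBias above (illustrative values):
#   alibi = AlibiPositionalBias(heads=4, num_heads=4)
#   alibi(8, 8).shape                    # torch.Size([4, 8, 8]) = (heads, q_len, k_len)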
import numpy as np
import math
import torch
import einops
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from typing import Callable, List, Optional, Tuple
####
def max_neg_values(tensor):
    return -torch.finfo(tensor.dtype).max
def l2norm(t, groups=1):
t = rearrange(t, '... (g d) -> ... g d', g=groups)
t = F.normalize(t, p=2, dim=-1)
return rearrange(t, '... g d -> ... (g d)')
def pad_at_dim(t, pad, dim=-1, value=0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value=value)
def or_reduce(masks):
head, *body = masks
for rest in body:
head = head | rest
return head
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device)* -emb)
emb = x[:, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
def upsample(dim):
return nn.ConvTranspose3d(dim, dim, (1, 4, 4), (1, 2, 2), (0, 1, 1))
def downsample(dim):
return nn.Conv3d(dim, dim, (1, 4, 4), (1, 2, 2), (0, 1, 1))
class LayerNorm(nn.Module):
def __init__(self,
dim,
eps=1e-5):
super().__init__()
self.eps = eps
self.gamma = nn.Parameter(torch.ones(1, dim, 1, 1, 1))
def forward(self, x):
        var = torch.var(x, dim=1, unbiased=False, keepdim=True)
mean = torch.mean(x, dim=1, keepdim=True)
return (x - mean) / (var + self.eps).sqrt() * self.gamma
class PreNorm(nn.Module):
    def __init__(self,
                 dim,
                 fn):
        super().__init__()
        self.fn = fn
        self.norm = LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
def cosine_beta_schedule(timesteps, s=0.008):
steps = timesteps + 1
x = torch.linspace(0,
timesteps,
steps,
dtype=torch.float64)
alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.9999)
class Normalize(nn.Module):
def __init__(self, dim: int) -> None:
super().__init__()
self.dim = dim
def forward(self, x):
return torch.nn.functional.normalize(x, dim=self.dim, p=2)
class LearnableLogitScaling(nn.Module):
def __init__(
self,
logit_scale_init: float = 1 / 0.07,
learnable: bool = True,
max_logit_scale: float = 100,
) -> None:
super().__init__()
self.max_logit_scale = max_logit_scale
self.logit_scale_init = logit_scale_init
self.learnable = learnable
log_logit_scale = torch.ones([]) * np.log(self.logit_scale_init)
if learnable:
self.log_logit_scale = nn.Parameter(log_logit_scale)
else:
            self.register_buffer("log_logit_scale", log_logit_scale)
def forward(self, x):
        return torch.clip(self.log_logit_scale.exp(),
                          max=self.max_logit_scale) * x
def extra_repr(self):
st = f"logit_scale_init={self.logit_scale_init}, learnable={self.learnable}," \
f"max_logit_scale={self.max_logit_scale}"
return st
class EinOpsRearrange(nn.Module):
def __init__(self, rearrange_expr: str,
**kwargs) -> None:
super().__init__()
self.rearrange_expr = rearrange_expr
self.kwargs = kwargs
def forward(self, x):
assert isinstance(x, torch.Tensor)
return einops.rearrange(x, self.rearrange_expr, **self.kwargs)
def cast_if_src_dtype(
tensor: torch.Tensor,
src_dtype: torch.dtype,
tgt_dtype: torch.dtype
):
updated = False
if tensor.dtype == src_dtype:
tensor = tensor.to(dtype=tgt_dtype)
updated = True
return tensor, updated
class SelectElements(nn.Module):
def __init__(self,
index) -> None:
super().__init__()
self.index = index
def forward(self, x):
assert x.ndim >= 3
return x[:, self.index, ...]
class SelectEOSAndProject(nn.Module):
def __init__(self, proj: nn.Module) -> None:
super().__init__()
self.proj = proj
def forward(self, x, seq_len):
assert x.ndim == 3
x = x[torch.arange(x.shape[0]), seq_len]
x = self.proj(x)
return x
##################
def get_sinusoid_encoding_table(n_position, d_hid):
def get_position_angle_vec(position):
return [
position / np.power(10000, 2 * (hid_j // 2) / d_hid)
for hid_j in range(d_hid)
]
sinusoid_table = np.array(
[get_position_angle_vec(pos_i) for pos_i in range(n_position)]
)
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
return torch.FloatTensor(sinusoid_table).unsqueeze(0)
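# Illustrative shape check (arbitrary values): even columns hold sin, odd columns cos.
#   table = get_sinusoid_encoding_table(n_position=196, d_hid=768)
#   table.shape                          # torch.Size([1, 196, 768])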
def interpolate_pos_encoding_2d(target_spatial_size, pos_embed):
N = pos_embed.shape[1]
if N == target_spatial_size:
return pos_embed
dim = pos_embed.shape[-1]
pos_embed, updated = cast_if_src_dtype(pos_embed, torch.bfloat16, torch.float32)
pos_embed = nn.functional.interpolate(
pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(
0, 3, 1, 2
),
scale_factor = math.sqrt(target_spatial_size / N),
mode="bicubic",
)
if updated:
pos_embed, _ = cast_if_src_dtype(pos_embed, torch.float32, torch.bfloat16)
pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
    return pos_embed

# ---- file: zeta/nn/utils/tensor_helpers.py (zetascale-0.4.4) ----
from math import ceil
import torch
import torch.nn.functional as F
from torch import nn
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float("-inf")
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres=0.9):
k = ceil((1 - thres) * logits.shape[-1])
    val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float("-inf"))
probs.scatter_(1, ind, val)
return probs
def top_a(
logits,
min_p_pow=2.0,
min_p_ratio=0.02
):
probs = F.softmax(logits, dim=-1)
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = float("-inf")
logits[probs >= limit] = 1
return logits
def log(t, eps=1e-20):
return torch.log(t.clamp(min=eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature=1., dim=-1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim=dim)
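# Sampling sketch (illustrative values): filter logits, then draw with Gumbel noise.
#   logits = torch.randn(2, 1000)
#   filtered = top_k(logits, thres=0.9)                     # keep the top ~10% of logits
#   token_ids = gumbel_sample(filtered, temperature=0.8)    # shape (2,)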
class ContrastiveTopK(nn.Module):
def __init__(self,
alpha,
k):
super(ContrastiveTopK, self).__init__()
self.alpha = alpha
self.k = k
def top_k(self, logits):
k = ceil((1 - self.alpha) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
def forward(self,
logits_exp,
logits_ama):
logits_exp_topk = self.top_k(logits_exp)
logits_ama_topk = self.top_k(logits_ama)
#probabilities
p_exp = F.softmax(logits_exp_topk, dim=-1)
p_ama = F.softmax(logits_ama_topk, dim=-1)
        # mask: restrict to top-k tokens whose expert probability clears an
        # alpha-scaled threshold of the k-th largest probability
        topk_vals, ind = torch.topk(p_exp, self.k, dim=-1)
        mask = torch.zeros_like(p_exp, dtype=torch.bool)
        mask.scatter_(1, ind, topk_vals >= self.alpha * topk_vals[..., -1:])
#scores
scores = torch.where(mask.bool(), torch.log(p_exp / (p_ama + 1e-8)),
torch.tensor(-float('inf')))
return scores
# example usage:
#   alpha, k = 0.5, 10
#   cdk = ContrastiveTopK(alpha, k)
#   logits_exp = torch.randn(100, 50)
#   logits_ama = torch.randn(100, 50)
#   scores = cdk(logits_exp, logits_ama)

# ---- file: zeta/nn/utils/inference_helpers.py (zetascale-0.4.4) ----
from functools import partial, wraps
from torch import nn
def exists(val):
"""
Check if the value is not None.
Args:
val: The value to check.
Returns:
bool: True if value exists (is not None), False otherwise.
"""
return val is not None
def default(val, d):
"""
Return the value if it exists, otherwise return a default value.
Args:
val: The value to check.
d: The default value to return if val is None.
Returns:
The value if it exists, otherwise the default value.
"""
return val if exists(val) else d
def once(fn):
"""
Decorator to ensure the function is only called once.
Args:
fn (function): The function to wrap.
Returns:
function: The wrapped function.
"""
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
def eval_decorator(fn):
"""
Decorator to ensure a method switches to eval mode before execution
and returns to its original mode afterwards. For torch.nn.Module objects.
Args:
fn (function): The function to wrap.
Returns:
function: The wrapped function.
"""
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
def cast_tuple(val, depth):
"""
Cast a value to a tuple of a specific depth.
Args:
val: Value to be cast.
depth (int): Depth of the tuple.
Returns:
tuple: Tuple of the given depth with repeated val.
"""
return val if isinstance(val, tuple) else (val,) * depth
def maybe(fn):
"""
Decorator that calls a function if the first argument exists.
Args:
fn (function): The function to wrap.
Returns:
function: The wrapped function.
"""
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
class always():
"""
Class that always returns a specified value when called.
"""
def __init__(self, val):
"""
Initialize the always class with a value.
Args:
val: The value to always return.
"""
self.val = val
def __call__(self, *args, **kwargs):
"""
Return the specified value.
Returns:
The specified value.
"""
return self.val
class not_equals():
"""
Class that checks if a value does not equal the specified value.
"""
def __init__(self, val):
"""
Initialize with a value.
Args:
val: The value to compare against.
"""
self.val = val
def __call__(self, x, *args, **kwargs):
"""
Compare the input x with the specified value.
Returns:
bool: True if x is not equal to the specified value, False otherwise.
"""
return x != self.val
class equals():
"""
Class that checks if a value equals the specified value.
"""
def __init__(self, val):
"""
Initialize with a value.
Args:
val: The value to compare against.
"""
self.val = val
def __call__(self, x, *args, **kwargs):
"""
Compare the input x with the specified value.
Returns:
bool: True if x is equal to the specified value, False otherwise.
"""
return x == self.val
def init_zero_(layer):
"""
Initialize the weights and bias of a torch layer to zero.
Args:
layer (torch.nn.Module): The layer to initialize.
"""
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
def pick_and_pop(keys, d):
"""
Remove and return values from a dictionary based on provided keys.
Args:
keys (list): List of keys to remove from the dictionary.
d (dict): The dictionary to pick from.
Returns:
dict: A dictionary with the specified keys and their values.
"""
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
"""
Group dictionary keys based on a condition.
Args:
cond (function): Condition to split dictionary.
d (dict): The dictionary to group.
Returns:
tuple: Two dictionaries split based on the condition.
"""
return_val = [dict(), dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
"""
Check if a string begins with a specific prefix.
Args:
prefix (str): The prefix to check for.
str (str): The string to check.
Returns:
bool: True if string starts with prefix, False otherwise.
"""
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
"""
Group dictionary items by keys that start with a specific prefix.
Args:
prefix (str): The prefix to check for.
d (dict): The dictionary to group.
Returns:
tuple: Two dictionaries split based on the prefix condition.
"""
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
"""
Group dictionary items by keys that start with a specific prefix and remove the prefix.
Args:
prefix (str): The prefix to check for.
d (dict): The dictionary to group.
Returns:
tuple: Dictionary with the prefix removed and another dictionary with remaining items.
"""
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
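# Illustrative example (arbitrary keys):
#   groupby_prefix_and_trim('ff_', {'ff_mult': 4, 'dropout': 0.1})
#   # -> ({'mult': 4}, {'dropout': 0.1})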
def divisible_by(num, den):
    return (num % den) == 0

# ---- file: zeta/nn/utils/helpers.py (zetascale-0.4.4) ----
import torch
import torch.nn as nn
from accelerate import Accelerator
from einops import rearrange
from zeta.nn.utils.helpers import exists
def print_num_params(model, accelerator: Accelerator):
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
accelerator.print(f"Number of parameters in model: {n_params}")
class Block(nn.Module):
def __init__(self,
dim,
dim_out,
groups=8):
super().__init__()
self.proj = nn.Conv3d(dim, dim_out, (1, 3, 3), padding=(0, 1, 1))
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(self,
x,
scale_shift=None):
x = self.proj(x)
x = self.norm(x)
if exists(scale_shift):
scale, shift = scale_shift
            x = x * (scale + 1) + shift
return self.act(x)
class ResnetBlock(nn.Module):
def __init__(self,
dim,
dim_out,
*,
time_emb_dim=None,
groups=8):
super().__init__()
self.mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_emb_dim, dim_out * 2)
) if exists(time_emb_dim) else None
self.block1 = Block(dim, dim_out, groups=groups)
self.block2 = Block(dim_out, dim_out, groups=groups)
self.res_conv = nn.Conv3d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self,
x,
time_emb=None):
scale_shift = None
if exists(self.mlp):
assert exists(time_emb), 'time_emb must be passed in'
time_emb = self.mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1 1 1')
scale_shift = time_emb.chunk(2, dim=1)
h = self.block1(x, scale_shift=scale_shift)
h = self.block2(h)
return h + self.res_conv(x)
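# Usage sketch for ResnetBlock (illustrative values): the (frames, h, w) extent is preserved.
#   block = ResnetBlock(64, 128, time_emb_dim=32)
#   x = torch.randn(1, 64, 4, 16, 16)    # (batch, channels, frames, height, width)
#   t = torch.randn(1, 32)               # time embedding
#   block(x, t).shape                    # torch.Size([1, 128, 4, 16, 16])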
def load_model(path):
with open(path, 'rb') as f:
        return torch.load(f, map_location=torch.device('cpu'))

# ---- file: zeta/nn/utils/model_utils.py (zetascale-0.4.4) ----
import math
import numpy as np
import torch
import torch.nn as nn
from fairscale.nn import checkpoint_wrapper, wrap
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
from zeta.nn.utils import init_bert_params
from zeta.nn.modules.droppath import DropPath
from zeta.nn.modules.feedforward_network import FeedForwardNetwork, make_experts
from zeta.nn.utils.attention.multihead_attention import MultiheadAttention
from zeta.nn.utils.module.multiway_network import MultiwayWrapper, set_split_position
from zeta.nn.utils.module.relative_position_bias import RelativePositionBias
from zeta.nn.utils.xmoe.moe_layer import MOELayer
from zeta.nn.utils.xmoe.routing import Top1Gate, Top2Gate
class EncoderLayer(nn.Module):
def __init__(self, args, depth, is_moe_layer=False, is_encoder_decoder=False):
super().__init__()
self.args = args
self.embed_dim = args.encoder_embed_dim
self.self_attn = self.build_self_attention(self.embed_dim, args)
self.self_attn_layer_norm = MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))
self.dropout_module = torch.nn.Dropout(args.dropout)
if args.drop_path_rate > 0:
drop_path_prob = np.linspace(0, args.drop_path_rate, args.encoder_layers)[
depth
]
self.drop_path = DropPath(drop_path_prob)
else:
self.drop_path = None
self.normalize_before = args.encoder_normalize_before
self.is_moe_layer = is_moe_layer
self.ffn_dim = args.encoder_ffn_embed_dim
if not self.is_moe_layer:
self.ffn = MultiwayWrapper(
args,
self.build_ffn(
self.embed_dim,
self.args,
),
)
else:
assert not self.args.multiway
if args.moe_top1_expert:
gate = Top1Gate(
self.embed_dim,
args.moe_expert_count,
use_fp32=args.moe_gating_use_fp32,
moe_eval_capacity_token_fraction=args.moe_eval_capacity_token_fraction,
use_xmoe=args.use_xmoe,
)
else:
gate = Top2Gate(
self.embed_dim,
args.moe_expert_count,
args.moe_gating_use_fp32,
args.moe_second_expert_policy,
args.moe_normalize_gate_prob_before_dropping,
args.moe_eval_capacity_token_fraction,
use_xmoe=args.use_xmoe,
)
experts = make_experts(args, self.embed_dim, self.ffn_dim)
self.moe_layer = MOELayer(gate, experts, args)
self.final_layer_norm = MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))
if args.deepnorm:
if is_encoder_decoder:
self.alpha = (
math.pow(
math.pow(args.encoder_layers, 4) * args.decoder_layers, 0.0625
)
* 0.81
)
else:
self.alpha = math.pow(2.0 * args.encoder_layers, 0.25)
else:
self.alpha = 1.0
def build_ffn(self, embed_dim, args):
return FeedForwardNetwork(
embed_dim,
self.ffn_dim,
args.activation_fn,
args.dropout,
args.activation_dropout,
args.layernorm_eps,
args.subln,
)
def build_self_attention(self, embed_dim, args):
return MultiheadAttention(
args,
embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
encoder_decoder_attention=False,
subln=args.subln,
)
def residual_connection(self, x, residual):
return residual * self.alpha + x
def forward(self, x, encoder_padding_mask, attn_mask=None, rel_pos=None, multiway_split_position=None, incremental_state=None):
if multiway_split_position is not None:
assert self.args.multiway
self.apply(set_split_position(multiway_split_position))
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
attn_mask=attn_mask,
rel_pos=rel_pos,
incremental_state=incremental_state,
)
x = self.dropout_module(x)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
if not self.is_moe_layer:
x = self.ffn(x)
l_aux = None
else:
x = x.transpose(0, 1)
x, l_aux = self.moe_layer(x)
x = x.transpose(0, 1)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x, l_aux
class Encoder(nn.Module):
def __init__(
self,
args,
embed_tokens=None,
embed_positions=None,
output_projection=None,
is_encoder_decoder=False,
**kwargs
):
self.args = args
super().__init__(**kwargs)
self.dropout_module = torch.nn.Dropout(args.dropout)
embed_dim = args.encoder_embed_dim
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_tokens = embed_tokens
self.embed_positions = embed_positions
if (
output_projection is None
and not is_encoder_decoder
and not args.no_output_layer
and args.vocab_size > 0
):
self.output_projection = self.build_output_projection(args)
else:
self.output_projection = output_projection
if args.layernorm_embedding:
self.layernorm_embedding = MultiwayWrapper(
args, LayerNorm(embed_dim, eps=args.layernorm_eps), dim=1
)
else:
self.layernorm_embedding = None
self.layers = nn.ModuleList([])
moe_freq = args.moe_freq
for i in range(args.encoder_layers):
is_moe_layer = moe_freq != 0 and (i + 1) % moe_freq == 0
self.layers.append(
self.build_encoder_layer(
args,
depth=i,
is_moe_layer=is_moe_layer,
is_encoder_decoder=is_encoder_decoder,
)
)
self.num_layers = len(self.layers)
if args.encoder_normalize_before and args.normalize_output:
self.layer_norm = MultiwayWrapper(args, LayerNorm(embed_dim, eps=args.layernorm_eps))
else:
self.layer_norm = None
if args.rel_pos_buckets > 0 and args.max_rel_pos > 0:
self.relative_position = RelativePositionBias(
num_buckets=args.rel_pos_buckets,
max_distance=args.max_rel_pos,
n_heads=args.encoder_attention_heads,
)
else:
self.relative_position = None
if args.bert_init:
self.apply(init_bert_params)
if args.deepnorm:
if is_encoder_decoder:
init_scale = (
math.pow(
math.pow(args.encoder_layers, 4) * args.decoder_layers, 0.0625
)
/ 1.15
)
else:
init_scale = math.pow(8.0 * args.encoder_layers, 0.25)
for name, p in self.named_parameters():
if (
"fc1" in name
or "fc2" in name
or "out_proj" in name
or "v_proj" in name
):
p.data.div_(init_scale)
if args.subln:
if is_encoder_decoder:
init_scale = math.sqrt(
math.log(3 * args.decoder_layers)
* math.log(2 * args.encoder_layers)
/ 3
)
else:
init_scale = math.sqrt(math.log(args.encoder_layers * 2))
for name, p in self.named_parameters():
if (
"fc1" in name
or "fc2" in name
or "out_proj" in name
or "v_proj" in name
):
p.data.mul_(init_scale)
def build_output_projection(
self,
args,
):
if args.share_encoder_input_output_embed:
assert args.encoder_embedding_type == "language"
output_projection = torch.nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
output_projection.weight = self.embed_tokens.weight
else:
output_projection = torch.nn.Linear(
args.encoder_embed_dim, args.vocab_size, bias=False
)
torch.nn.init.normal_(
output_projection.weight, mean=0, std=args.encoder_embed_dim**-0.5
)
return output_projection
def build_encoder_layer(
self, args, depth, is_moe_layer=False, is_encoder_decoder=False
):
layer = EncoderLayer(
args,
depth,
is_moe_layer=is_moe_layer,
is_encoder_decoder=is_encoder_decoder,
)
if args.checkpoint_activations:
layer = checkpoint_wrapper(layer)
if args.fsdp:
layer = wrap(layer)
return layer
def forward_embedding(
self,
src_tokens,
token_embedding=None,
positions=None,
):
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
if src_tokens is not None:
x = embed + self.embed_positions(src_tokens, positions=positions)
else:
x = embed + self.embed_positions(x, positions=positions)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
return x, embed
def forward(
self,
src_tokens,
encoder_padding_mask=None,
attn_mask=None,
return_all_hiddens=False,
token_embeddings=None,
multiway_split_position=None,
features_only=False,
incremental_state=None,
positions=None,
**kwargs
):
assert src_tokens is not None or token_embeddings is not None
if encoder_padding_mask is None:
if src_tokens is not None:
encoder_padding_mask = torch.zeros_like(
src_tokens, device=src_tokens.device
).bool()
else:
encoder_padding_mask = torch.zeros(
[token_embeddings.size(0), token_embeddings.size(1)],
device=token_embeddings.device,
).bool()
if multiway_split_position is not None:
assert self.args.multiway
self.apply(set_split_position(multiway_split_position))
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings, positions)
x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
encoder_states = []
if return_all_hiddens:
encoder_states.append(x)
rel_pos_bias = None
if self.relative_position is not None:
rel_pos_bias = self.relative_position(
batch_size=x.size(0), qlen=x.size(1), klen=x.size(1)
)
# incremental_state is not None during inference if we use the bidirectional encoder as a generator as in s2s-ft (https://arxiv.org/abs/2110.13640)
l_aux = []
for idx, layer in enumerate(self.layers):
x, l_aux_i = layer(
x,
encoder_padding_mask=encoder_padding_mask if incremental_state is None else None,
attn_mask=attn_mask,
rel_pos=rel_pos_bias,
multiway_split_position=multiway_split_position,
incremental_state=incremental_state[idx] if incremental_state is not None else None,
)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
l_aux.append(l_aux_i)
if self.layer_norm is not None:
x = self.layer_norm(x)
if not features_only and self.output_projection is not None:
x = self.output_projection(x)
return {
"encoder_out": x,
"encoder_embedding": encoder_embedding,
"encoder_padding_mask": encoder_padding_mask,
"encoder_states": encoder_states,
"l_aux": l_aux,
        }

# ---- file: zeta/nn/architecture/encoder.py (zetascale-0.4.4) ----
from inspect import isfunction
import math
from abc import ABC, abstractmethod
from collections import namedtuple
from dataclasses import dataclass
from functools import partial, wraps
from random import random
from typing import Callable, List, Optional
import torch
import torch.nn.functional as F
from einops import rearrange, reduce, repeat
from torch import Tensor, einsum, nn
from zeta.nn.attention.attend import Attend, Intermediates
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
DEFAULT_DIM_HEAD = 64
@dataclass
class LayerIntermediates:
hiddens: Optional[List[Tensor]] = None
attn_intermediates: Optional[List[Intermediates]] = None
layer_hiddens: Optional[List[Tensor]] = None
attn_z_loss: Optional[Tensor] = None
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else (val,) * depth
def divisible_by(num, den):
return (num % den) == 0
def maybe(fn):
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
class always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class not_equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x != self.val
class equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x == self.val
def Sequential(*modules):
return nn.Sequential(*filter(exists, modules))
# tensor helpers
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def l2norm(t, groups = 1):
t = rearrange(t, '... (g d) -> ... g d', g = groups)
t = F.normalize(t, p = 2, dim = -1)
return rearrange(t, '... g d -> ... (g d)')
def pad_at_dim(t, pad, dim = -1, value = 0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value = value)
def or_reduce(masks):
head, *body = masks
for rest in body:
head = head | rest
return head
# auxiliary loss helpers
def calc_z_loss(
pre_softmax_attns: List[Tensor],
mask = None,
weight = 1.
):
# the same loss applied to the mixture of experts router logits in https://arxiv.org/abs/2202.08906
# in the paper, in a tiny footnote, they mention using it on attention logits with stabilizing effects
# also used in PaLM as one of the measures
lse = 0.
for attn in pre_softmax_attns:
lse = lse + attn.logsumexp(dim = -1)
loss = torch.square(lse)
loss = reduce(loss, 'b h n -> b n', 'sum')
if not exists(mask):
return loss.mean() * weight
loss = loss[mask].sum() / mask.sum().clamp(min = 1e-5)
return loss * weight
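# Illustrative usage sketch (not part of the original file): computing the
# z-loss from a list of pre-softmax attention logits of shape (b, h, n, n).
# All names and shapes here are hypothetical.
def _calc_z_loss_example():
    pre_softmax_attns = [torch.randn(2, 4, 8, 8) for _ in range(3)]
    mask = torch.ones(2, 8).bool()
    # returns a scalar loss, already scaled by the given weight
    return calc_z_loss(pre_softmax_attns, mask = mask, weight = 1e-4)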
# init helpers
def init_zero_(layer):
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# initializations
def deepnorm_init(
transformer,
beta,
module_name_match_list = ['.ff.', '.to_v', '.to_out']
):
for name, module in transformer.named_modules():
if type(module) != nn.Linear:
continue
needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list))
gain = beta if needs_beta_gain else 1
nn.init.xavier_normal_(module.weight.data, gain = gain)
if exists(module.bias):
nn.init.constant_(module.bias.data, 0)
# structured dropout, more effective than traditional attention dropouts
def dropout_seq(seq, mask, dropout):
b, n, *_, device = *seq.shape, seq.device
logits = torch.randn(b, n, device = device)
if exists(mask):
mask_value = max_neg_value(logits)
logits = logits.masked_fill(~mask, mask_value)
keep_prob = 1. - dropout
num_keep = max(1, int(keep_prob * n))
keep_indices = logits.topk(num_keep, dim = 1).indices
batch_indices = torch.arange(b, device = device)
batch_indices = rearrange(batch_indices, 'b -> b 1')
seq = seq[batch_indices, keep_indices]
if exists(mask):
seq_counts = mask.sum(dim = -1)
seq_keep_counts = torch.ceil(seq_counts * keep_prob).int()
keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1')
mask = mask[batch_indices, keep_indices] & keep_mask
return seq, mask
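# Illustrative usage sketch (not part of the original file): dropping half of
# a (batch, seq, dim) sequence along the sequence dimension. Hypothetical shapes.
def _dropout_seq_example():
    seq = torch.randn(2, 10, 16)
    mask = torch.ones(2, 10).bool()
    seq, mask = dropout_seq(seq, mask, dropout = 0.5)
    return seq.shape  # torch.Size([2, 5, 16])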
# activations
class ReluSquared(nn.Module):
def forward(self, x):
return F.relu(x) ** 2
# embedding
class TokenEmbedding(nn.Module):
def __init__(self, dim, num_tokens, l2norm_embed = False):
super().__init__()
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(num_tokens, dim)
def forward(self, x):
token_emb = self.emb(x)
return l2norm(token_emb) if self.l2norm_embed else token_emb
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, l2norm_embed = False):
super().__init__()
self.scale = dim ** -0.5 if not l2norm_embed else 1.
self.max_seq_len = max_seq_len
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}'
if not exists(pos):
pos = torch.arange(seq_len, device = device)
pos_emb = self.emb(pos)
pos_emb = pos_emb * self.scale
return l2norm(pos_emb) if self.l2norm_embed else pos_emb
class ScaledSinusoidalEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
assert divisible_by(dim, 2)
self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5)
half_dim = dim // 2
freq_seq = torch.arange(half_dim).float() / half_dim
inv_freq = theta ** -freq_seq
self.register_buffer('inv_freq', inv_freq, persistent = False)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
if not exists(pos):
pos = torch.arange(seq_len, device = device)
emb = einsum('i, j -> i j', pos, self.inv_freq)
emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
return emb * self.scale
class RelativePositionBias(nn.Module):
def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
device = self.device
q_pos = torch.arange(j - i, j, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> h i j')
return bias * self.scale
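# Illustrative usage sketch (not part of the original file): a T5-style
# relative position bias to be added to (h, i, j) attention logits.
def _relative_position_bias_example():
    rel_pos = RelativePositionBias(scale = 8 ** 0.5, heads = 8)
    bias = rel_pos(16, 16)
    return bias.shape  # torch.Size([8, 16, 16])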
class DynamicPositionBias(nn.Module):
def __init__(self, dim, *, heads, depth, log_distance = False, norm = False):
super().__init__()
assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1'
self.log_distance = log_distance
self.mlp = nn.ModuleList([])
self.mlp.append(Sequential(
nn.Linear(1, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
for _ in range(depth - 1):
self.mlp.append(Sequential(
nn.Linear(dim, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
self.mlp.append(nn.Linear(dim, heads))
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
assert i == j
n, device = j, self.device
# get the (n x n) matrix of distances
seq_arange = torch.arange(n, device = device)
context_arange = torch.arange(n, device = device)
indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j')
indices += (n - 1)
# input to continuous positions MLP
pos = torch.arange(-n + 1, n, device = device).float()
pos = rearrange(pos, '... -> ... 1')
if self.log_distance:
pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
for layer in self.mlp:
pos = layer(pos)
# get position biases
bias = pos[indices]
bias = rearrange(bias, 'i j h -> h i j')
return bias
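# Illustrative usage sketch (not part of the original file): the MLP-based
# dynamic position bias; note that forward asserts square attention (i == j).
def _dynamic_position_bias_example():
    dpb = DynamicPositionBias(dim = 64, heads = 8, depth = 2)
    bias = dpb(16, 16)
    return bias.shape  # torch.Size([8, 16, 16])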
class AlibiPositionalBias(nn.Module):
def __init__(self, heads, total_heads, **kwargs):
super().__init__()
self.heads = heads
self.total_heads = total_heads
slopes = Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(j - i, j, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
@property
def device(self):
return next(self.buffers()).device
def forward(self, i, j):
h, device = self.total_heads, self.device
if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0)
self.register_buffer('bias', bias, persistent = False)
return self.bias
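# Illustrative usage sketch (not part of the original file): ALiBi biases for
# all 8 heads over a 16-token sequence; the result is added to attention logits.
def _alibi_example():
    alibi = AlibiPositionalBias(heads = 8, total_heads = 8)
    bias = alibi(16, 16)
    return bias.shape  # torch.Size([8, 16, 16])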
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
use_xpos = False,
scale_base = 512,
interpolation_factor = 1.,
base = 10000,
base_rescale_factor = 1.
):
super().__init__()
# proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
# has some connection to NTK literature
# https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
base *= base_rescale_factor ** (dim / (dim - 2))
inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
assert interpolation_factor >= 1.
self.interpolation_factor = interpolation_factor
if not use_xpos:
self.register_buffer('scale', None)
return
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.scale_base = scale_base
self.register_buffer('scale', scale)
def forward(self, seq_len, device):
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
t = t / self.interpolation_factor
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not exists(self.scale):
return freqs, 1.
power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(t, freqs, scale = 1):
seq_len = t.shape[-2]
freqs = freqs[-seq_len:, :]
return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
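# Illustrative usage sketch (not part of the original file): rotating the
# first rotary_dim channels of a (b, h, n, d) query tensor, mirroring how
# Attention.forward splits at l = freqs.shape[-1]. Hypothetical shapes.
def _rotary_example():
    rotary = RotaryEmbedding(dim = 32)
    q = torch.randn(1, 8, 128, 64)
    freqs, scale = rotary(128, q.device)
    q_rot, q_pass = q[..., :32], q[..., 32:]
    q_rot = apply_rotary_pos_emb(q_rot, freqs, scale)
    return torch.cat((q_rot, q_pass), dim = -1).shape  # torch.Size([1, 8, 128, 64])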
# norms
class Scale(nn.Module):
def __init__(self, value, fn):
super().__init__()
self.value = value
self.fn = fn
def forward(self, x, **kwargs):
out = self.fn(x, **kwargs)
scale_fn = lambda t: t * self.value
if not isinstance(out, tuple):
return scale_fn(out)
return (scale_fn(out[0]), *out[1:])
class ScaleNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True)
return x / norm.clamp(min = self.eps) * self.g
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.g
class SimpleRMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale
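# Illustrative usage sketch (not part of the original file): RMSNorm rescales
# each token to unit RMS, then applies the learned per-channel gain g.
def _rmsnorm_example():
    norm = RMSNorm(dim = 64)
    x = torch.randn(2, 10, 64)
    return norm(x).shape  # torch.Size([2, 10, 64])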
# residual and residual gates
class Residual(nn.Module):
def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.):
super().__init__()
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
self.scale_residual_constant = scale_residual_constant
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
if self.scale_residual_constant != 1:
residual = residual * self.scale_residual_constant
return x + residual
class GRUGating(nn.Module):
def __init__(self, dim, scale_residual = False, **kwargs):
super().__init__()
self.gru = nn.GRUCell(dim, dim)
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
gated_output = self.gru(
rearrange(x, 'b n d -> (b n) d'),
rearrange(residual, 'b n d -> (b n) d')
)
return gated_output.reshape_as(x)
# token shifting
def shift(t, amount, mask = None):
if amount == 0:
return t
else:
amount = min(amount, t.shape[1])
if exists(mask):
t = t.masked_fill(~mask[..., None], 0.)
return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.)
class ShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
mask = kwargs.get('mask', None)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
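# Illustrative usage sketch (not part of the original file): token shifting,
# where half the feature channels are shifted one position back in time before
# the wrapped feedforward (FeedForward is defined further down in this module,
# which is fine since the name is only looked up at call time).
def _shift_tokens_example():
    ff = ShiftTokens(range(0, 2), FeedForward(dim = 64))
    x = torch.randn(2, 10, 64)
    return ff(x).shape  # torch.Size([2, 10, 64])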
# feedforward
class GLU(nn.Module):
def __init__(
self,
dim_in,
dim_out,
activation: Callable,
mult_bias = False
):
super().__init__()
self.act = activation
self.proj = nn.Linear(dim_in, dim_out * 2)
self.mult_bias = nn.Parameter(torch.ones(dim_out)) if mult_bias else 1.
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim = -1)
return x * self.act(gate) * self.mult_bias
class FeedForward(nn.Module):
def __init__(
self,
dim,
dim_out = None,
mult = 4,
glu = False,
glu_mult_bias = False,
swish = False,
relu_squared = False,
post_act_ln = False,
dropout = 0.,
no_bias = False,
zero_init_output = False
):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
if relu_squared:
activation = ReluSquared()
elif swish:
activation = nn.SiLU()
else:
activation = nn.GELU()
if glu:
project_in = GLU(dim, inner_dim, activation, mult_bias = glu_mult_bias)
else:
project_in = nn.Sequential(
nn.Linear(dim, inner_dim, bias = not no_bias),
activation
)
self.ff = Sequential(
project_in,
nn.LayerNorm(inner_dim) if post_act_ln else None,
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out, bias = not no_bias)
)
# init last linear layer to 0
if zero_init_output:
init_zero_(self.ff[-1])
def forward(self, x):
return self.ff(x)
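# Illustrative usage sketch (not part of the original file): a SwiGLU-style
# feedforward, i.e. a GLU projection gated by SiLU.
def _feedforward_example():
    ff = FeedForward(dim = 512, mult = 4, glu = True, swish = True)
    x = torch.randn(2, 128, 512)
    return ff(x).shape  # torch.Size([2, 128, 512])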
# attention. it is all we need
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = DEFAULT_DIM_HEAD,
heads = 8,
causal = False,
flash = False,
talking_heads = False,
head_scale = False,
sparse_topk = None,
num_mem_kv = 0,
dropout = 0.,
on_attn = False,
gate_values = False,
zero_init_output = False,
max_attend_past = None,
qk_norm = False,
qk_norm_groups = 1,
qk_norm_scale = 10,
qk_norm_dim_scale = False,
one_kv_head = False,
kv_heads = None,
shared_kv = False,
value_dim_head = None,
tensor_product = False, # https://arxiv.org/abs/2208.06061
cascading_heads = False,
add_zero_kv = False, # same as add_zero_attn in pytorch
onnxable = False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.max_attend_past = max_attend_past
assert not (exists(kv_heads) and one_kv_head), 'either attn_one_kv_head is set to True (in which case kv_heads is set to 1), or attn_kv_heads is set, but not both'
value_dim_head = default(value_dim_head, dim_head)
kv_heads = default(kv_heads, heads)
kv_heads = 1 if one_kv_head else kv_heads
assert divisible_by(heads, kv_heads)
self.kv_heads = kv_heads
q_dim = dim_head * heads
k_dim = dim_head * kv_heads
v_dim = value_dim_head * kv_heads
out_dim = value_dim_head * heads
self.to_q = nn.Linear(dim, q_dim, bias = False)
self.to_k = nn.Linear(dim, k_dim, bias = False)
# shared key / values, for further memory savings during inference
assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values'
self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None
# relations projection from tp-attention
self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None
# add GLU gating for aggregated values, from alphafold2
self.to_v_gate = None
if gate_values:
self.to_v_gate = nn.Linear(dim, out_dim)
nn.init.constant_(self.to_v_gate.weight, 0)
nn.init.constant_(self.to_v_gate.bias, 1)
# cosine sim attention
self.qk_norm = qk_norm
self.qk_norm_groups = qk_norm_groups
self.qk_norm_scale = qk_norm_scale
# whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442
self.qk_norm_dim_scale = qk_norm_dim_scale
self.qk_norm_q_scale = self.qk_norm_k_scale = 1
if qk_norm and qk_norm_dim_scale:
self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head))
self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head))
assert (not qk_norm) or divisible_by(dim_head, qk_norm_groups), 'dimension per attention head must be divisible by the qk norm groups'
assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)'
# attend class - includes core attention algorithm + talking heads
self.attend = Attend(
heads = heads,
causal = causal,
talking_heads = talking_heads,
dropout = dropout,
sparse_topk = sparse_topk,
qk_norm = qk_norm,
scale = qk_norm_scale if qk_norm else self.scale,
add_zero_kv = add_zero_kv,
flash = flash,
onnxable = onnxable
)
# if cascading_heads:
# # cascading heads - wrap the Attend logic
# self.attend = CascadingHeads(self.attend)
# head scaling
self.head_scale = head_scale
if head_scale:
self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1))
# explicit topk sparse attention
self.sparse_topk = sparse_topk
# add memory key / values
self.num_mem_kv = num_mem_kv
if num_mem_kv > 0:
self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
# attention on attention
self.attn_on_attn = on_attn
self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False)
# init output projection 0
if zero_init_output:
init_zero_(self.to_out)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
rel_pos = None,
rotary_pos_emb = None,
prev_attn = None,
mem = None
):
b, n, _, h, kv_h, head_scale, device, has_context = *x.shape, self.heads, self.kv_heads, self.head_scale, x.device, exists(context)
kv_input = default(context, x)
q_input = x
k_input = kv_input
v_input = kv_input
r_input = x
if exists(mem):
k_input = torch.cat((mem, k_input), dim = -2)
v_input = torch.cat((mem, v_input), dim = -2)
q = self.to_q(q_input)
k = self.to_k(k_input)
v = self.to_v(v_input) if exists(self.to_v) else k
r = self.to_r(r_input) if exists(self.to_r) else None
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = kv_h), (k, v, r))
if self.qk_norm:
qk_l2norm = partial(l2norm, groups = self.qk_norm_groups)
q, k = map(qk_l2norm, (q, k))
scale = self.qk_norm_scale
q = q * self.qk_norm_q_scale
k = k * self.qk_norm_k_scale
if exists(rotary_pos_emb) and not has_context:
freqs, xpos_scale = rotary_pos_emb
l = freqs.shape[-1]
q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.)
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v))
ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale)))
q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr)))
input_mask = context_mask if has_context else mask
if self.num_mem_kv > 0:
mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v))
if self.qk_norm:
mem_k = l2norm(mem_k)
mem_k = mem_k * self.qk_norm_k_scale
k = torch.cat((mem_k, k), dim = -2)
v = torch.cat((mem_v, v), dim = -2)
if exists(input_mask):
input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True)
i, j = map(lambda t: t.shape[-2], (q, k))
# determine masking
mask_value = max_neg_value(q)
masks = []
final_attn_mask = None
if exists(input_mask):
input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
masks.append(~input_mask)
if exists(attn_mask):
            assert 2 <= attn_mask.ndim <= 4, 'attention mask must have at least 2 and at most 4 dimensions'
if attn_mask.ndim == 2:
attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j')
elif attn_mask.ndim == 3:
attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j')
masks.append(~attn_mask)
if exists(self.max_attend_past):
range_q = torch.arange(j - i, j, device = device)
range_k = torch.arange(j, device = device)
dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j')
max_attend_past_mask = dist > self.max_attend_past
masks.append(max_attend_past_mask)
if len(masks) > 0:
final_attn_mask = ~or_reduce(masks)
# prepare relative positional bias, if needed
attn_bias = None
if exists(rel_pos):
attn_bias = rel_pos(i, j)
# attention is all we need
out, intermediates = self.attend(
q, k, v,
mask = final_attn_mask,
attn_bias = attn_bias,
prev_attn = prev_attn
)
# https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients
if exists(r):
out = out * r + out
# normformer scaling of heads
if head_scale:
out = out * self.head_scale_params
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# alphafold2 styled gating of the values
if exists(self.to_v_gate):
gates = self.to_v_gate(x)
out = out * gates.sigmoid()
        # final output projection (with GLU if attention-on-attention is enabled)
out = self.to_out(out)
if exists(mask):
mask = rearrange(mask, 'b n -> b n 1')
out = out.masked_fill(~mask, 0.)
return out, intermediates
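# Illustrative usage sketch (not part of the original file): a causal
# self-attention call with a boolean key padding mask. Hypothetical shapes;
# the heavy lifting is delegated to the Attend module imported above.
def _attention_example():
    attn = Attention(dim = 512, heads = 8, causal = True)
    x = torch.randn(2, 128, 512)
    mask = torch.ones(2, 128).bool()
    out, intermediates = attn(x, mask = mask)
    return out.shape  # torch.Size([2, 128, 512])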
class AttentionLayers(nn.Module):
def __init__(
self,
dim,
depth,
heads = 8,
causal = False,
cross_attend = False,
only_cross = False,
use_scalenorm = False,
use_rmsnorm = False,
use_simple_rmsnorm = False,
alibi_pos_bias = False,
alibi_num_heads = None,
rel_pos_bias = False,
rel_pos_num_buckets = 32,
rel_pos_max_distance = 128,
dynamic_pos_bias = False,
dynamic_pos_bias_log_distance = False,
dynamic_pos_bias_mlp_depth = 2,
dynamic_pos_bias_norm = False,
rotary_pos_emb = False,
rotary_emb_dim = None,
rotary_xpos = False,
rotary_interpolation_factor = 1.,
rotary_xpos_scale_base = 512,
rotary_base_rescale_factor = 1.,
custom_layers = None,
sandwich_coef = None,
par_ratio = None,
residual_attn = False,
cross_residual_attn = False,
macaron = False,
pre_norm = True,
pre_norm_has_final_norm = True,
gate_residual = False,
scale_residual = False,
scale_residual_constant = 1.,
deepnorm = False,
shift_tokens = 0,
sandwich_norm = False,
resi_dual = False,
resi_dual_scale = 1.,
zero_init_branch_output = False,
layer_dropout = 0.,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
rotary_pos_emb = rotary_pos_emb or rotary_xpos
ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs)
dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
self.dim = dim
self.depth = depth
self.layers = nn.ModuleList([])
self.has_pos_emb = rel_pos_bias or rotary_pos_emb
rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32)
assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention'
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None
assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both'
        assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than or equal to the relative position max distance'
# relative positional bias
flash_attn = attn_kwargs.get('flash', False)
assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias'
self.rel_pos = None
if rel_pos_bias:
assert not flash_attn, 'flash attention not compatible with t5 relative positional bias'
self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance)
elif dynamic_pos_bias:
assert not flash_attn, 'flash attention not compatible with dynamic positional bias'
self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm)
elif alibi_pos_bias:
alibi_num_heads = default(alibi_num_heads, heads)
            assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than or equal to the total number of heads'
self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads)
# determine deepnorm and residual scale
if deepnorm:
assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings'
pre_norm = sandwich_norm = resi_dual = False
scale_residual = True
scale_residual_constant = (2 * depth) ** 0.25
assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or resiDual is selected, but not both'
assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm'
if resi_dual:
pre_norm = False
self.pre_norm = pre_norm
self.sandwich_norm = sandwich_norm
self.resi_dual = resi_dual
assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.'
self.resi_dual_scale = resi_dual_scale
self.residual_attn = residual_attn
self.cross_residual_attn = cross_residual_attn
assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention'
self.cross_attend = cross_attend
        assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use at most one of scalenorm, rmsnorm, or simple rmsnorm'
if use_scalenorm:
norm_class = ScaleNorm
elif use_rmsnorm:
norm_class = RMSNorm
elif use_simple_rmsnorm:
norm_class = SimpleRMSNorm
else:
norm_class = nn.LayerNorm
norm_fn = partial(norm_class, dim)
if cross_attend and not only_cross:
default_block = ('a', 'c', 'f')
elif cross_attend and only_cross:
default_block = ('c', 'f')
else:
default_block = ('a', 'f')
if macaron:
default_block = ('f',) + default_block
# zero init
if zero_init_branch_output:
attn_kwargs = {**attn_kwargs, 'zero_init_output': True}
ff_kwargs = {**ff_kwargs, 'zero_init_output': True}
# calculate layer block order
if exists(custom_layers):
layer_types = custom_layers
elif exists(par_ratio):
par_depth = depth * len(default_block)
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
default_block = tuple(filter(not_equals('f'), default_block))
par_attn = par_depth // par_ratio
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
par_width = (depth_cut + depth_cut // par_attn) // par_attn
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
par_block = default_block + ('f',) * (par_width - len(default_block))
par_head = par_block * par_attn
layer_types = par_head + ('f',) * (par_depth - len(par_head))
elif exists(sandwich_coef):
            assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient must be greater than 0 and less than or equal to the depth'
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
else:
layer_types = default_block * depth
self.layer_types = layer_types
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
# stochastic depth
self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types))
# structured dropout for cross attending
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout
# calculate token shifting
shift_tokens = cast_tuple(shift_tokens, len(layer_types))
# whether it has post norm
self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity()
# iterate and construct layers
for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)):
is_last_layer = ind == (len(self.layer_types) - 1)
if layer_type == 'a':
layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs)
elif layer_type == 'c':
layer = Attention(dim, heads = heads, **attn_kwargs)
elif layer_type == 'f':
layer = FeedForward(dim, **ff_kwargs)
layer = layer if not macaron else Scale(0.5, layer)
else:
raise Exception(f'invalid layer type {layer_type}')
if layer_shift_tokens > 0:
shift_range_upper = layer_shift_tokens + 1
shift_range_lower = -layer_shift_tokens if not causal else 0
layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer)
residual_fn = GRUGating if gate_residual else Residual
residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant)
pre_branch_norm = norm_fn() if pre_norm else None
post_branch_norm = norm_fn() if sandwich_norm else None
post_main_norm = norm_fn() if not pre_norm else None
norms = nn.ModuleList([
pre_branch_norm,
post_branch_norm,
post_main_norm
])
self.layers.append(nn.ModuleList([
norms,
layer,
residual
]))
if deepnorm:
init_gain = (8 * depth) ** -0.25
deepnorm_init(self, init_gain)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
self_attn_context_mask = None,
mems = None,
return_hiddens = False
):
        assert not (self.cross_attend ^ exists(context)), 'context must be passed in if and only if cross_attend is set to True'
hiddens = []
layer_hiddens = []
intermediates = []
prev_attn = None
prev_cross_attn = None
mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
rotary_pos_emb = None
if exists(self.rotary_pos_emb):
max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems)))
rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device)
outer_residual = x * self.resi_dual_scale
for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)):
is_last = ind == (len(self.layers) - 1)
if self.training and layer_dropout > 0. and random() < layer_dropout:
continue
if layer_type == 'a':
if return_hiddens:
hiddens.append(x)
layer_mem = mems.pop(0) if mems else None
if layer_type == 'c':
if self.training and self.cross_attn_tokens_dropout > 0.:
context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout)
inner_residual = x
if return_hiddens:
layer_hiddens.append(x)
pre_norm, post_branch_norm, post_main_norm = norm
if exists(pre_norm):
x = pre_norm(x)
if layer_type == 'a':
out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem)
elif layer_type == 'c':
out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn)
elif layer_type == 'f':
out = block(x)
if self.resi_dual:
outer_residual = outer_residual + out * self.resi_dual_scale
if exists(post_branch_norm):
out = post_branch_norm(out)
x = residual_fn(out, inner_residual)
if layer_type in ('a', 'c') and return_hiddens:
intermediates.append(inter)
if layer_type == 'a' and self.residual_attn:
prev_attn = inter.pre_softmax_attn
elif layer_type == 'c' and self.cross_residual_attn:
prev_cross_attn = inter.pre_softmax_attn
if exists(post_main_norm):
x = post_main_norm(x)
if return_hiddens:
layer_hiddens.append(x)
if self.resi_dual:
x = x + self.final_norm(outer_residual)
else:
x = self.final_norm(x)
if return_hiddens:
intermediates = LayerIntermediates(
hiddens = hiddens,
attn_intermediates = intermediates,
layer_hiddens = layer_hiddens
)
return x, intermediates
return x
class Encoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal = False, **kwargs)
class Decoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on decoder'
super().__init__(causal = True, **kwargs)
class CrossAttender(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend = True, only_cross = True, **kwargs)
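# Illustrative usage sketch (not part of the original file): a small causal
# decoder stack with rotary embeddings, applied to pre-embedded inputs.
def _decoder_stack_example():
    layers = Decoder(dim = 512, depth = 6, heads = 8, rotary_pos_emb = True)
    x = torch.randn(2, 128, 512)
    return layers(x).shape  # torch.Size([2, 128, 512])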
class ViTransformerWrapper(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
attn_layers,
channels = 3,
num_classes = None,
post_emb_norm = False,
emb_dropout = 0.
):
super().__init__()
assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder'
assert divisible_by(image_size, patch_size), 'image dimensions must be divisible by the patch size'
dim = attn_layers.dim
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))
self.patch_to_embedding = nn.Sequential(
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim)
)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.dropout = nn.Dropout(emb_dropout)
self.attn_layers = attn_layers
        self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else None
def forward(
self,
img,
return_embeddings = False
):
p = self.patch_size
x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
x = self.patch_to_embedding(x)
n = x.shape[1]
x = x + self.pos_embedding[:, :n]
x = self.post_emb_norm(x)
x = self.dropout(x)
x = self.attn_layers(x)
if not exists(self.mlp_head) or return_embeddings:
return x
x = x.mean(dim = -2)
return self.mlp_head(x)
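# Illustrative usage sketch (not part of the original file): a ViT classifier
# over 256x256 images with 32x32 patches. All hyperparameters are hypothetical.
def _vit_example():
    vit = ViTransformerWrapper(
        image_size = 256,
        patch_size = 32,
        num_classes = 10,
        attn_layers = Encoder(dim = 512, depth = 6, heads = 8),
    )
    img = torch.randn(1, 3, 256, 256)
    return vit(img).shape  # torch.Size([1, 10])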
class Transformer(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
attn_layers,
emb_dim = None,
max_mem_len = 0,
shift_mem_down = 0,
emb_dropout = 0.,
post_emb_norm = False,
num_memory_tokens = None,
tie_embedding = False,
logits_dim = None,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False,
l2norm_embed = False,
emb_frac_gradient = 1., # GLM-130B and Cogview successfully used this, set at 0.1
attn_z_loss_weight = 1e-4
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
emb_dim = default(emb_dim, dim)
self.emb_dim = emb_dim
self.num_tokens = num_tokens
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
self.shift_mem_down = shift_mem_down
self.l2norm_embed = l2norm_embed
self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed)
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(emb_dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed)
self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290
self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
self.attn_layers = attn_layers
self.init_()
logits_dim = default(logits_dim, num_tokens)
self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t()
# memory tokens (like [cls]) from Memory Transformers paper
num_memory_tokens = default(num_memory_tokens, 0)
self.num_memory_tokens = num_memory_tokens
if num_memory_tokens > 0:
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
def init_(self):
if self.l2norm_embed:
nn.init.normal_(self.token_emb.emb.weight, std = 1e-5)
if not isinstance(self.pos_emb, always):
nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5)
return
nn.init.kaiming_normal_(self.token_emb.emb.weight)
def forward(
self,
x,
return_embeddings = False,
return_logits_and_embeddings = False,
return_intermediates = False,
mask = None,
return_mems = False,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
sum_embeds = None,
return_attn_z_loss = False,
attn_z_loss_weight = 1e-4,
**kwargs
):
b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient
return_hiddens = return_mems | return_attn | return_intermediates | return_attn_z_loss
# absolute positional embedding
external_pos_emb = exists(pos) and pos.dtype != torch.long
pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos
x = self.token_emb(x) + pos_emb
# for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training
if exists(sum_embeds):
x = x + sum_embeds
# post embedding norm, purportedly leads to greater stabilization
x = self.post_emb_norm(x)
        # whether to prepend embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
prepend_seq, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
# whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model
if emb_frac_gradient < 1:
assert emb_frac_gradient > 0
x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient)
# embedding dropout
x = self.emb_dropout(x)
x = self.project_emb(x)
if num_mem > 0:
mem = repeat(self.memory_tokens, 'n d -> b n d', b = b)
x = torch.cat((mem, x), dim = 1)
            # auto-handle masking after prepending memory tokens
if exists(mask):
mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True)
if self.shift_mem_down and exists(mems):
mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:]
mems = [*mems_r, *mems_l]
if return_hiddens:
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
else:
x = self.attn_layers(x, mask = mask, mems = mems, **kwargs)
mem, x = x[:, :num_mem], x[:, num_mem:]
if return_logits_and_embeddings:
out = (self.to_logits(x), x)
elif return_embeddings:
out = x
else:
out = self.to_logits(x)
if return_attn_z_loss:
pre_softmax_attns = list(map(lambda t: t.pre_softmax_attn, intermediates.attn_intermediates))
intermediates.attn_z_loss = calc_z_loss(pre_softmax_attns, weight = attn_z_loss_weight)
return_intermediates = True
if return_intermediates:
return out, intermediates
if return_mems:
hiddens = intermediates.hiddens
new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
return out, new_mems
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
        return out
# ==== end of file: zeta/nn/architecture/transformer.py ====
class EncoderConfig(object):
def __init__(self, **kwargs):
self.encoder_embed_dim = kwargs.pop("encoder_embed_dim", 768)
self.encoder_attention_heads = kwargs.pop("encoder_attention_heads", 12)
self.encoder_ffn_embed_dim = kwargs.pop("encoder_ffn_embed_dim", 3072)
self.encoder_layers = kwargs.pop("encoder_layers", 12)
self.encoder_normalize_before = kwargs.pop("encoder_normalize_before", True)
self.normalize_output = kwargs.pop("normalize_output", True)
self.activation_fn = kwargs.pop("activation_fn", "gelu")
self.dropout = kwargs.pop("dropout", 0.0)
self.drop_path_rate = kwargs.pop("drop_path_rate", 0.0)
self.attention_dropout = kwargs.pop("attention_dropout", 0.0)
self.activation_dropout = kwargs.pop("activation_dropout", 0.0)
self.no_scale_embedding = kwargs.pop("no_scale_embedding", True)
self.layernorm_embedding = kwargs.pop("layernorm_embedding", False)
self.moe_freq = kwargs.pop("moe_freq", 0)
self.moe_top1_expert = kwargs.pop("moe_top1_expert", False)
self.moe_expert_count = kwargs.pop("moe_expert_count", 0)
self.moe_gating_use_fp32 = kwargs.pop("moe_gating_use_fp32", True)
self.moe_eval_capacity_token_fraction = kwargs.pop(
"moe_eval_capacity_token_fraction", 0.25
)
self.moe_second_expert_policy = kwargs.pop("moe_second_expert_policy", "random")
self.moe_normalize_gate_prob_before_dropping = kwargs.pop(
"moe_normalize_gate_prob_before_dropping", False
)
self.use_xmoe = kwargs.pop("use_xmoe", False)
self.rel_pos_buckets = kwargs.pop("rel_pos_buckets", 0)
self.max_rel_pos = kwargs.pop("max_rel_pos", 0)
self.deepnorm = kwargs.pop("deepnorm", False)
self.subln = kwargs.pop("subln", True)
self.bert_init = kwargs.pop("bert_init", False)
self.multiway = kwargs.pop("multiway", False)
self.share_encoder_input_output_embed = kwargs.pop(
"share_encoder_input_output_embed", False
)
self.max_source_positions = kwargs.pop("max_source_positions", 1024)
self.no_output_layer = kwargs.pop("no_output_layer", False)
self.layernorm_eps = kwargs.pop("layernorm_eps", 1e-5)
# Text
self.vocab_size = kwargs.pop("vocab_size", -1)
# Vision
self.img_size = kwargs.pop("img_size", 224)
self.patch_size = kwargs.pop("patch_size", 16)
self.in_chans = kwargs.pop("in_chans", 3)
# Fairscale
self.checkpoint_activations = kwargs.pop("checkpoint_activations", False)
self.fsdp = kwargs.pop("fsdp", False)
self.ddp_rank = kwargs.pop("ddp_rank", 0)
self.xpos_rel_pos = kwargs.pop("xpos_rel_pos", False)
self.xpos_scale_base = kwargs.pop("xpos_scale_base", 512)
if self.deepnorm:
self.encoder_normalize_before = False
self.subln = False
if self.subln:
self.encoder_normalize_before = True
self.deepnorm = False
if self.use_xmoe:
self.moe_normalize_gate_prob_before_dropping = True
self.moe_second_expert_policy = "random"
assert self.moe_freq > 0 and self.moe_expert_count > 0
def override(self, args):
for hp in self.__dict__.keys():
if getattr(args, hp, None) is not None:
self.__dict__[hp] = getattr(args, hp, None)
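# Illustrative usage sketch (not part of the original file): constructing a
# config and overriding a field from an argparse-style namespace. The values
# here are hypothetical; fields absent from the namespace are left untouched.
def _encoder_config_example():
    from argparse import Namespace
    config = EncoderConfig(vocab_size=32000, encoder_layers=6)
    config.override(Namespace(dropout=0.1))
    return config.encoder_layers, config.dropout  # (6, 0.1)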
class DecoderConfig(object):
def __init__(self, **kwargs):
self.decoder_embed_dim = kwargs.pop("decoder_embed_dim", 768)
self.decoder_attention_heads = kwargs.pop("decoder_attention_heads", 12)
self.decoder_ffn_embed_dim = kwargs.pop("decoder_ffn_embed_dim", 3072)
self.decoder_layers = kwargs.pop("decoder_layers", 12)
self.decoder_normalize_before = kwargs.pop("decoder_normalize_before", True)
self.activation_fn = kwargs.pop("activation_fn", "gelu")
self.dropout = kwargs.pop("dropout", 0.0)
self.drop_path_rate = kwargs.pop("drop_path_rate", 0.0)
self.attention_dropout = kwargs.pop("attention_dropout", 0.0)
self.activation_dropout = kwargs.pop("activation_dropout", 0.0)
self.no_scale_embedding = kwargs.pop("no_scale_embedding", True)
self.layernorm_embedding = kwargs.pop("layernorm_embedding", False)
self.moe_freq = kwargs.pop("moe_freq", 0)
self.moe_top1_expert = kwargs.pop("moe_top1_expert", False)
self.moe_expert_count = kwargs.pop("moe_expert_count", 0)
self.moe_gating_use_fp32 = kwargs.pop("moe_gating_use_fp32", True)
self.moe_eval_capacity_token_fraction = kwargs.pop(
"moe_eval_capacity_token_fraction", 0.25
)
self.moe_second_expert_policy = kwargs.pop("moe_second_expert_policy", "random")
self.moe_normalize_gate_prob_before_dropping = kwargs.pop(
"moe_normalize_gate_prob_before_dropping", False
)
self.use_xmoe = kwargs.pop("use_xmoe", False)
self.rel_pos_buckets = kwargs.pop("rel_pos_buckets", 0)
self.max_rel_pos = kwargs.pop("max_rel_pos", 0)
self.deepnorm = kwargs.pop("deepnorm", False)
self.subln = kwargs.pop("subln", True)
self.bert_init = kwargs.pop("bert_init", False)
self.multiway = kwargs.pop("multiway", False)
self.share_decoder_input_output_embed = kwargs.pop(
"share_decoder_input_output_embed", False
)
self.max_target_positions = kwargs.pop("max_target_positions", 1024)
self.no_output_layer = kwargs.pop("no_output_layer", False)
self.layernorm_eps = kwargs.pop("layernorm_eps", 1e-5)
# Text
self.vocab_size = kwargs.pop("vocab_size", -1)
# Fairscale
self.checkpoint_activations = kwargs.pop("checkpoint_activations", False)
self.fsdp = kwargs.pop("fsdp", False)
self.ddp_rank = kwargs.pop("ddp_rank", 0)
self.xpos_rel_pos = kwargs.pop("xpos_rel_pos", False)
self.xpos_scale_base = kwargs.pop("xpos_scale_base", 512)
if self.deepnorm:
self.decoder_normalize_before = False
self.subln = False
if self.subln:
self.decoder_normalize_before = True
self.deepnorm = False
if self.use_xmoe:
self.moe_normalize_gate_prob_before_dropping = True
self.moe_second_expert_policy = "random"
assert self.moe_freq > 0 and self.moe_expert_count > 0
def override(self, args):
for hp in self.__dict__.keys():
if getattr(args, hp, None) is not None:
self.__dict__[hp] = getattr(args, hp, None)
class EncoderDecoderConfig(object):
def __init__(self, **kwargs):
self.encoder_embed_dim = kwargs.pop("encoder_embed_dim", 768)
self.encoder_attention_heads = kwargs.pop("encoder_attention_heads", 12)
self.encoder_ffn_embed_dim = kwargs.pop("encoder_ffn_embed_dim", 3072)
self.encoder_layers = kwargs.pop("encoder_layers", 12)
self.encoder_normalize_before = kwargs.pop("encoder_normalize_before", True)
self.decoder_embed_dim = kwargs.pop("decoder_embed_dim", 768)
self.decoder_attention_heads = kwargs.pop("decoder_attention_heads", 12)
self.decoder_ffn_embed_dim = kwargs.pop("decoder_ffn_embed_dim", 3072)
self.decoder_layers = kwargs.pop("decoder_layers", 12)
self.decoder_normalize_before = kwargs.pop("decoder_normalize_before", True)
self.activation_fn = kwargs.pop("activation_fn", "gelu")
self.dropout = kwargs.pop("dropout", 0.0)
self.drop_path_rate = kwargs.pop("drop_path_rate", 0.0)
self.attention_dropout = kwargs.pop("attention_dropout", 0.0)
self.activation_dropout = kwargs.pop("activation_dropout", 0.0)
self.no_scale_embedding = kwargs.pop("no_scale_embedding", True)
self.layernorm_embedding = kwargs.pop("layernorm_embedding", False)
self.moe_freq = kwargs.pop("moe_freq", 0)
self.moe_top1_expert = kwargs.pop("moe_top1_expert", False)
self.moe_expert_count = kwargs.pop("moe_expert_count", 0)
self.moe_gating_use_fp32 = kwargs.pop("moe_gating_use_fp32", True)
self.moe_eval_capacity_token_fraction = kwargs.pop(
"moe_eval_capacity_token_fraction", 0.25
)
self.moe_second_expert_policy = kwargs.pop("moe_second_expert_policy", "random")
self.moe_normalize_gate_prob_before_dropping = kwargs.pop(
"moe_normalize_gate_prob_before_dropping", False
)
self.use_xmoe = kwargs.pop("use_xmoe", False)
self.rel_pos_buckets = kwargs.pop("rel_pos_buckets", 0)
self.max_rel_pos = kwargs.pop("max_rel_pos", 0)
self.deepnorm = kwargs.pop("deepnorm", False)
self.subln = kwargs.pop("subln", True)
self.bert_init = kwargs.pop("bert_init", False)
self.multiway = kwargs.pop("multiway", False)
self.share_all_embeddings = kwargs.pop("share_all_embeddings", False)
self.share_decoder_input_output_embed = kwargs.pop(
"share_decoder_input_output_embed", False
)
self.max_source_positions = kwargs.pop("max_source_positions", 1024)
self.max_target_positions = kwargs.pop("max_target_positions", 1024)
self.no_output_layer = kwargs.pop("no_output_layer", False)
self.layernorm_eps = kwargs.pop("layernorm_eps", 1e-5)
# Text
self.vocab_size = kwargs.pop("vocab_size", -1)
# Fairscale
self.checkpoint_activations = kwargs.pop("checkpoint_activations", False)
self.fsdp = kwargs.pop("fsdp", False)
self.ddp_rank = kwargs.pop("ddp_rank", 0)
self.xpos_rel_pos = kwargs.pop("xpos_rel_pos", False)
self.xpos_scale_base = kwargs.pop("xpos_scale_base", 512)
if self.deepnorm:
self.encoder_normalize_before = False
self.decoder_normalize_before = False
self.subln = False
if self.subln:
self.encoder_normalize_before = True
self.decoder_normalize_before = True
self.deepnorm = False
if self.use_xmoe:
self.moe_normalize_gate_prob_before_dropping = True
self.moe_second_expert_policy = "random"
assert self.moe_freq > 0 and self.moe_expert_count > 0
def override(self, args):
for hp in self.__dict__.keys():
if getattr(args, hp, None) is not None:
                self.__dict__[hp] = getattr(args, hp, None)
# ==== end of file: zeta/nn/architecture/config.py ====
import math
import numpy as np
import torch
import torch.nn as nn
from fairscale.nn import checkpoint_wrapper, wrap
from zeta.nn.utils import init_bert_params
from zeta.utils.droppath import DropPath
from zeta.utils.feedforward_network import FeedForwardNetwork, make_experts
from zeta.utils.attention.multihead_attention import MultiheadAttention
from zeta.utils.module.relative_position_bias import RelativePositionBias
from zeta.utils.xmoe.moe_layer import MOELayer
from zeta.utils.xmoe.routing import Top1Gate, Top2Gate
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
class DecoderLayer(nn.Module):
def __init__(
self,
args,
depth,
is_moe_layer=False,
is_encoder_decoder=False,
):
super().__init__()
self.args = args
self.embed_dim = args.decoder_embed_dim
self.dropout_module = torch.nn.Dropout(args.dropout)
if args.drop_path_rate > 0:
drop_path_prob = np.linspace(0, args.drop_path_rate, args.decoder_layers)[
depth
]
self.drop_path = DropPath(drop_path_prob)
else:
self.drop_path = None
self.self_attn = self.build_self_attention(self.embed_dim, args)
self.normalize_before = args.decoder_normalize_before
self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=args.layernorm_eps)
if not is_encoder_decoder:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, eps=args.layernorm_eps)
self.is_moe_layer = is_moe_layer
self.ffn_dim = args.decoder_ffn_embed_dim
if not self.is_moe_layer:
self.ffn = self.build_ffn(
self.embed_dim,
self.args,
)
else:
if args.moe_top1_expert:
gate = Top1Gate(
self.embed_dim,
args.moe_expert_count,
use_fp32=args.moe_gating_use_fp32,
moe_eval_capacity_token_fraction=args.moe_eval_capacity_token_fraction,
use_xmoe=args.use_xmoe,
)
else:
gate = Top2Gate(
self.embed_dim,
args.moe_expert_count,
args.moe_gating_use_fp32,
args.moe_second_expert_policy,
args.moe_normalize_gate_prob_before_dropping,
args.moe_eval_capacity_token_fraction,
use_xmoe=args.use_xmoe,
)
experts = make_experts(args, self.embed_dim, self.ffn_dim)
self.moe_layer = MOELayer(gate, experts, args)
self.final_layer_norm = LayerNorm(self.embed_dim, eps=args.layernorm_eps)
if args.deepnorm:
if is_encoder_decoder:
self.alpha = math.pow(3.0 * args.decoder_layers, 0.25)
else:
self.alpha = math.pow(2.0 * args.decoder_layers, 0.25)
else:
self.alpha = 1.0
def build_ffn(self, embed_dim, args):
return FeedForwardNetwork(
embed_dim,
self.ffn_dim,
args.activation_fn,
args.dropout,
args.activation_dropout,
args.layernorm_eps,
args.subln,
)
def build_self_attention(self, embed_dim, args):
return MultiheadAttention(
args,
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
encoder_decoder_attention=False,
subln=args.subln,
)
def build_encoder_attention(self, embed_dim, args):
return MultiheadAttention(
args,
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
self_attention=False,
encoder_decoder_attention=True,
subln=args.subln,
)
def residual_connection(self, x, residual):
return residual * self.alpha + x
def forward(
self,
x,
encoder_out=None,
encoder_padding_mask=None,
incremental_state=None,
self_attn_mask=None,
self_attn_padding_mask=None,
self_attn_rel_pos=None,
cross_attn_rel_pos=None,
is_first_step=False,
):
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
attn_mask=self_attn_mask,
rel_pos=self_attn_rel_pos,
is_first_step=is_first_step,
)
x = self.dropout_module(x)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=None,
rel_pos=cross_attn_rel_pos,
)
x = self.dropout_module(x)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
if not self.is_moe_layer:
x = self.ffn(x)
l_aux = None
else:
x, l_aux = self.moe_layer(x)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x, attn, None, l_aux
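# Illustrative usage sketch (not part of the original file): running a single
# decoder layer in decoder-only mode. Assumes DecoderConfig from the sibling
# config.py module; the import path and the expected output shape are
# assumptions, not guarantees.
def _decoder_layer_example():
    from zeta.nn.architecture.config import DecoderConfig
    args = DecoderConfig(vocab_size=32000)
    layer = DecoderLayer(args, depth=0)
    x = torch.randn(2, 16, args.decoder_embed_dim)
    out, attn, _, l_aux = layer(x)
    return out.shape  # expected torch.Size([2, 16, 768])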
class Decoder(nn.Module):
def __init__(
self,
args,
embed_tokens=None,
embed_positions=None,
output_projection=None,
is_encoder_decoder=False,
**kwargs
):
super().__init__(**kwargs)
self.args = args
self.dropout_module = torch.nn.Dropout(args.dropout)
embed_dim = args.decoder_embed_dim
self.embed_dim = embed_dim
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_tokens = embed_tokens
self.embed_positions = embed_positions
if (
output_projection is None
and not args.no_output_layer
and args.vocab_size > 0
):
self.output_projection = self.build_output_projection(args)
else:
self.output_projection = output_projection
if args.layernorm_embedding:
self.layernorm_embedding = LayerNorm(embed_dim, eps=args.layernorm_eps)
else:
self.layernorm_embedding = None
self.layers = nn.ModuleList([])
moe_freq = args.moe_freq
for i in range(args.decoder_layers):
is_moe_layer = moe_freq != 0 and (i + 1) % moe_freq == 0
self.layers.append(
self.build_decoder_layer(
args,
depth=i,
is_moe_layer=is_moe_layer,
is_encoder_decoder=is_encoder_decoder,
)
)
self.num_layers = len(self.layers)
if args.decoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim, eps=args.layernorm_eps)
else:
self.layer_norm = None
self.self_attn_relative_position = None
self.cross_attn_relative_position = None
if args.rel_pos_buckets > 0 and args.max_rel_pos > 0:
self.self_attn_relative_position = RelativePositionBias(
num_buckets=args.rel_pos_buckets,
max_distance=args.max_rel_pos,
n_heads=args.decoder_attention_heads,
)
if is_encoder_decoder:
self.cross_attn_relative_position = RelativePositionBias(
num_buckets=args.rel_pos_buckets,
max_distance=args.max_rel_pos,
n_heads=args.decoder_attention_heads,
)
if args.bert_init:
self.apply(init_bert_params)
if args.deepnorm:
if is_encoder_decoder:
init_scale = math.pow(12.0 * args.decoder_layers, 0.25)
else:
init_scale = math.pow(8.0 * args.decoder_layers, 0.25)
for name, p in self.named_parameters():
if (
"fc1" in name
or "fc2" in name
or "out_proj" in name
or "v_proj" in name
):
p.data.div_(init_scale)
if args.subln:
if is_encoder_decoder:
init_scale = math.sqrt(math.log(args.decoder_layers * 3))
else:
init_scale = math.sqrt(math.log(args.decoder_layers * 2))
for name, p in self.named_parameters():
if "encoder_attn" in name:
continue
if (
"fc1" in name
or "fc2" in name
or "out_proj" in name
or "v_proj" in name
):
p.data.mul_(init_scale)
def build_output_projection(
self,
args,
):
if args.share_decoder_input_output_embed:
output_projection = torch.nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
output_projection.weight = self.embed_tokens.weight
else:
output_projection = torch.nn.Linear(
args.decoder_embed_dim, args.vocab_size, bias=False
)
torch.nn.init.normal_(
output_projection.weight, mean=0, std=args.decoder_embed_dim**-0.5
)
return output_projection
def build_decoder_layer(
self, args, depth, is_moe_layer=False, is_encoder_decoder=False
):
layer = DecoderLayer(
args,
depth,
is_moe_layer=is_moe_layer,
is_encoder_decoder=is_encoder_decoder,
)
if args.checkpoint_activations:
layer = checkpoint_wrapper(layer)
if args.fsdp:
layer = wrap(layer)
return layer
def forward_embedding(
self,
tokens,
token_embedding=None,
incremental_state=None,
):
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
tokens, incremental_state=incremental_state
)
if incremental_state is not None and not self.is_first_step(incremental_state):
tokens = tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
if token_embedding is None:
token_embedding = self.embed_tokens(tokens)
x = embed = self.embed_scale * token_embedding
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
return x, embed
def is_first_step(self, incremental_state):
if incremental_state is None:
return False
return incremental_state.get("is_first_step", False)
def forward(
self,
prev_output_tokens,
self_attn_padding_mask=None,
encoder_out=None,
incremental_state=None,
features_only=False,
return_all_hiddens=False,
token_embeddings=None,
**kwargs
):
# embed tokens and positions
x, _ = self.forward_embedding(
prev_output_tokens, token_embeddings, incremental_state
)
is_first_step = self.is_first_step(incremental_state)
# relative position
self_attn_rel_pos_bias = None
slen = prev_output_tokens.size(1)
if self.self_attn_relative_position is not None:
self_attn_rel_pos_bias = self.self_attn_relative_position(
batch_size=x.size(0), qlen=slen, klen=slen
)
if incremental_state is not None and not is_first_step:
self_attn_rel_pos_bias = self_attn_rel_pos_bias[-1:, :, :]
cross_attn_rel_pos_bias = None
if self.cross_attn_relative_position is not None:
cross_attn_rel_pos_bias = self.cross_attn_relative_position(
batch_size=x.size(0),
qlen=slen,
klen=encoder_out["encoder_out"].size(1),
)
if incremental_state is not None and not is_first_step:
cross_attn_rel_pos_bias = cross_attn_rel_pos_bias[-1:, :, :]
# decoder layers
inner_states = [x]
if encoder_out is None:
l_aux = []
else:
l_aux = encoder_out["l_aux"] if "l_aux" in encoder_out else []
for idx, layer in enumerate(self.layers):
if incremental_state is None or is_first_step:
self_attn_mask = torch.triu(
torch.zeros([x.size(1), x.size(1)])
.float()
.fill_(float("-inf"))
.type_as(x),
1,
)
if is_first_step and incremental_state is not None:
if idx not in incremental_state:
incremental_state[idx] = {}
else:
self_attn_mask = None
if idx not in incremental_state:
incremental_state[idx] = {}
x, layer_attn, _, l_aux_i = layer(
x,
encoder_out["encoder_out"] if encoder_out is not None else None,
encoder_out["encoder_padding_mask"]
if encoder_out is not None
else None,
incremental_state[idx] if incremental_state is not None else None,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
self_attn_rel_pos=self_attn_rel_pos_bias,
cross_attn_rel_pos=cross_attn_rel_pos_bias,
is_first_step=is_first_step,
)
l_aux.append(l_aux_i)
inner_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
if not features_only:
x = self.output_layer(x)
return x, {
"inner_states": inner_states,
"l_aux": l_aux,
"attn": None,
}
def output_layer(self, features):
        return self.output_projection(features)

# [end of file: zeta/nn/architecture/decoder.py, package: zetascale]
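
# Usage sketch (illustrative): exercising the Decoder above with a tiny config.
# `DecoderConfig` is the torchscale-style config this package re-exports (see
# zeta/models/kosmos.py); defaults for the unlisted fields are assumed here.
if __name__ == "__main__":
    from zeta import DecoderConfig

    config = DecoderConfig(
        decoder_layers=2,
        decoder_embed_dim=64,
        decoder_ffn_embed_dim=128,
        decoder_attention_heads=4,
        vocab_size=1000,
    )
    embed = torch.nn.Embedding(1000, 64)
    decoder = Decoder(config, embed_tokens=embed)
    tokens = torch.randint(0, 1000, (2, 16))
    logits, extras = decoder(tokens)
    print(logits.shape)  # expected: torch.Size([2, 16, 1000])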
import torch
import torch.nn.functional as F
from einops import pack, rearrange, unpack
from torch import nn
from zeta.nn.utils.helpers import ( # noqa: E402
eval_decorator,
exists,
once, # noqa: F401
)
from zeta.nn.utils.inference_helpers import top_a, top_k, top_p
class AutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
ignore_index = -100,
pad_value = 0,
mask_prob = 0.
):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
# paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432
assert mask_prob < 1.
self.mask_prob = mask_prob
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
min_p_pow = 2.0,
min_p_ratio = 0.02,
**kwargs
):
start_tokens, ps = pack([start_tokens], '* n')
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1]
if filter_logits_fn in {top_k, top_p}:
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
elif filter_logits_fn is top_a:
filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
out, = unpack(out, ps, '* n')
return out
def forward(self, x, return_loss=True, **kwargs):
seq, ignore_index = x.shape[1], self.ignore_index
inp, target = x[:, :-1], x[:, 1:]
if self.mask_prob > 0.:
rand = torch.randn(inp.shape, device = x.device)
rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out
num_mask = min(int(seq * self.mask_prob), seq - 1)
indices = rand.topk(num_mask, dim = -1).indices
mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
kwargs.update(self_attn_context_mask = mask)
logits = self.net(inp, **kwargs)
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
target,
ignore_index = ignore_index
)
if return_loss:
return logits, loss
        return logits

# [end of file: zeta/nn/architecture/auto_regressive_wrapper.py, package: zetascale]
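
# Usage sketch (illustrative): AutoregressiveWrapper only assumes the wrapped
# net exposes `max_seq_len` and maps (batch, seq) token ids to logits.
if __name__ == "__main__":
    class ToyNet(nn.Module):
        def __init__(self, vocab=256, dim=32, max_seq_len=64):
            super().__init__()
            self.max_seq_len = max_seq_len
            self.emb = nn.Embedding(vocab, dim)
            self.to_logits = nn.Linear(dim, vocab)

        def forward(self, x, **kwargs):
            return self.to_logits(self.emb(x))

    model = AutoregressiveWrapper(ToyNet())
    x = torch.randint(0, 256, (2, 16))
    logits, loss = model(x)  # next-token cross-entropy for training
    loss.backward()
    sampled = model.generate(x[:, :4], seq_len=8)  # autoregressive sampling
    print(sampled.shape)  # expected: torch.Size([2, 8])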
import logging
import torch
from transformers import CLIPProcessor, AutoTokenizer
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
class MultiModalTokenizer:
"""
A tokenizer class for the kosmos model
Attributes:
        processor (CLIPProcessor): The processor used to tokenize images.
        tokenizer (AutoTokenizer): The tokenizer used to tokenize text.
        im_idx (int): The index of the "<image>" token.
        im_end_idx (int): The index of the "</image>" token.
"""
def __init__(self,
max_length: int = 8192):
self.max_length = max_length
try:
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
self.tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/gpt-neox-20b",
additional_special_tokens=["<image>", "</image>"],
eos_token="<eos>",
pad_token="<pad>",
extra_ids=0,
model_max_length=self.max_length
)
except Exception as e:
logging.error(f"Failed to initialize KosmosTokenizer: {e}")
raise
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids(["<image>", "</image>"])
def tokenize_texts(self, texts: str):
"""
Tokenize given texts.
        Args:
            texts (str): The text to be tokenized.
Returns:
A tuple containing the tokenized texts and only the text tokens.
"""
try:
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
# Add image tokens to text as "<s> <image> </image> text </s>"
image_tokens = torch.tensor([[self.im_idx, self.im_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], image_tokens, texts[:, 1:]], dim=1), texts
except Exception as e:
logging.error(f"Failed to tokenize texts: {e}")
raise
def tokenize_images(self, images):
"""
Tokenizes given images.
Args:
images: The images to be tokenized
Returns:
The tokenized images.
"""
try:
return self.processor(images=images, return_tensors="pt").pixel_values
except Exception as e:
logging.error(f"Failed to tokenize images: {e}")
raise
def tokenize(self, sample):
"""
Tokenizes given sample.
        Args:
            sample (dict): The sample to be tokenized.
Returns:
A dictionary containing the tokenized text tokens, images, labels, and attention mask.
"""
try:
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
}
except Exception as e:
logging.error(f"Failed to tokenize sample: {e}")
            raise

# [end of file: zeta/tokenizers/multi_modal_tokenizer.py, package: zetascale]
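
# Usage sketch (illustrative; downloads the CLIP processor and GPT-NeoX
# tokenizer from the HuggingFace hub on first run).
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    tokenizer = MultiModalTokenizer(max_length=512)
    sample = {
        "target_text": ["a photo of a cat"],
        "image": Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8)),
    }
    out = tokenizer.tokenize(sample)
    print(out["text_tokens"].shape, out["images"].shape)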
import os
from logging import getLogger
from typing import List, Optional
from sentencepiece import SentencePieceProcessor
logger = getLogger()
class SentencePieceTokenizer:
"""
A SentencePieceTokenizer is a tokenizer that uses a pretrained SentencePiece model to convert text into tokens and vice versa.
It includes the ability to add special tokens for infilling tasks and provides functionality to encode and decode text with or without implicit leading spaces.
Parameters:
- model_path (str): Path to the pretrained SentencePiece model file.
Attributes:
- n_words (int): Vocabulary size of the SentencePiece model.
- bos_id (int): Token ID of the beginning-of-sentence (BOS) token.
- eos_id (int): Token ID of the end-of-sentence (EOS) token.
- pad_id (int): Token ID of the padding (PAD) token.
- prefix_id (int, optional): Token ID of the prefix token. Default: None.
- middle_id (int, optional): Token ID of the middle token. Default: None.
- suffix_id (int, optional): Token ID of the suffix token. Default: None.
- eot_id (int, optional): Token ID of the end-of-turn (EOT) token. Default: None.
"""
def __init__(self, model_path: str):
# reload tokenizer
assert os.path.isfile(model_path), model_path
self.sp_model = SentencePieceProcessor(model_file=model_path)
logger.info(f"Reloaded SentencePiece model from {model_path}")
# BOS / EOS token IDs
self.n_words: int = self.sp_model.vocab_size()
self.bos_id: int = self.sp_model.bos_id()
self.eos_id: int = self.sp_model.eos_id()
self.pad_id: int = self.sp_model.pad_id()
# token IDs for special infilling tokens
        self.prefix_id: Optional[int] = self.sp_model.piece_to_id("▁<PRE>") or None
        self.middle_id: Optional[int] = self.sp_model.piece_to_id("▁<MID>") or None
        self.suffix_id: Optional[int] = self.sp_model.piece_to_id("▁<SUF>") or None
        self.eot_id: Optional[int] = self.sp_model.piece_to_id("▁<EOT>") or None
logger.info(
f"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id} "
f"- PRE ID: {self.prefix_id} - MID ID: {self.middle_id} - SUF ID: {self.suffix_id} - EOT ID: {self.eot_id}"
)
assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
assert type(s) is str
t = self.sp_model.encode(s)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t
def decode(self, t: List[int]) -> str:
return self.sp_model.decode(t)
def encode_infilling(self, s: str) -> List[int]:
"""Encode a string without an implicit leading space."""
        return self.sp_model.encode("☺" + s)[2:]
def decode_infilling(self, t: List[int]) -> str:
"""Decode a string without an implicit leading space."""
        return self.sp_model.decode([self.sp_model.piece_to_id("☺")] + t)[1:]

# [end of file: zeta/tokenizers/sentence_piece.py, package: zetascale]
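
# Usage sketch (illustrative; "tokenizer.model" is a placeholder path to any
# pretrained SentencePiece model file).
if __name__ == "__main__":
    tok = SentencePieceTokenizer("tokenizer.model")
    ids = tok.encode("Hello, world!", bos=True, eos=True)
    print(ids)
    print(tok.decode(ids))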
import torch
import torch.nn as nn
from zeta.nn.architecture.encoder import Encoder
from zeta.utils.embedding import (
PositionalEmbedding,
TextEmbedding,
VisionEmbedding,
)
from zeta.utils.module.multiway_network import MutliwayEmbedding
class BEiT3(nn.Module):
def __init__(self, args, **kwargs):
super().__init__()
self.args = args
assert args.multiway
assert args.vocab_size > 0
assert not args.share_encoder_input_output_embed
self.text_embed = TextEmbedding(args.vocab_size, args.encoder_embed_dim)
self.vision_embed = VisionEmbedding(
args.img_size,
args.patch_size,
args.in_chans,
args.encoder_embed_dim,
contain_mask_token=True,
prepend_cls_token=True,
)
# being consistent with Fairseq, which starts from 2 for position embedding
embed_positions = MutliwayEmbedding(
modules=[
PositionalEmbedding(self.vision_embed.num_position_embeddings() + 2, args.encoder_embed_dim),
PositionalEmbedding(args.max_source_positions, args.encoder_embed_dim),
],
dim=1,
)
self.encoder = Encoder(
args,
embed_tokens=None,
embed_positions=embed_positions,
output_projection=None,
is_encoder_decoder=False,
)
def forward(
self,
textual_tokens=None,
visual_tokens=None,
text_padding_position=None,
attn_mask=None,
vision_masked_position=None,
incremental_state=None,
positions=None,
):
assert textual_tokens is not None or visual_tokens is not None
if textual_tokens is None:
x = self.vision_embed(visual_tokens, vision_masked_position)
encoder_padding_mask = None
multiway_split_position = -1
elif visual_tokens is None:
x = self.text_embed(textual_tokens)
encoder_padding_mask = text_padding_position
multiway_split_position = 0
else:
x1 = self.vision_embed(visual_tokens, vision_masked_position)
multiway_split_position = x1.size(1)
x2 = self.text_embed(textual_tokens)
x = torch.cat([x1, x2], dim=1)
if text_padding_position is not None:
encoder_padding_mask = torch.cat(
[
torch.zeros(x1.shape[:-1]).to(x1.device).bool(),
text_padding_position,
],
dim=1,
)
else:
encoder_padding_mask = None
encoder_out = self.encoder(
src_tokens=None,
encoder_padding_mask=encoder_padding_mask,
attn_mask=attn_mask,
token_embeddings=x,
multiway_split_position=multiway_split_position,
incremental_state=incremental_state,
positions=positions,
)
encoder_out["multiway_split_position"] = multiway_split_position
        return encoder_out

# [end of file: zeta/models/BEiT3.py, package: zetascale]
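
# Usage sketch (illustrative): BEiT3 takes a torchscale-style EncoderConfig.
# The import path and the exact field set below are assumptions; adjust them
# to wherever this package exposes its encoder config.
if __name__ == "__main__":
    from torchscale.architecture.config import EncoderConfig  # assumed import path

    args = EncoderConfig(
        multiway=True,
        vocab_size=64010,
        share_encoder_input_output_embed=False,
        img_size=224,
        patch_size=16,
        in_chans=3,
        encoder_embed_dim=768,
        max_source_positions=1024,
    )
    model = BEiT3(args)
    text = torch.randint(0, 64010, (2, 32))
    images = torch.randn(2, 3, 224, 224)
    out = model(textual_tokens=text, visual_tokens=images)
    print(out["encoder_out"].shape)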
import torch
from zeta import DecoderConfig, Decoder
from zeta.utils.embedding import PositionalEmbedding
from transformers import CLIPProcessor, CLIPModel, AutoTokenizer
from flamingo_pytorch import PerceiverResampler
from torch.nn import Module
import bitsandbytes
class KosmosTokenizer:
def __init__(self):
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
self.tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/gpt-neox-20b",
additional_special_tokens=["<image>", "</image>"],
eos_token ="<eos>",
pad_token="<pad>",
extra_ids=0,
model_max_length=8192
)
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids(["<image>", "</image>"])
def tokenize_texts(self, texts):
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
# Add image tokens to text as "<s> <image> </image> text </s>"
image_tokens = torch.tensor([[self.im_idx, self.im_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], image_tokens, texts[:, 1:]], dim=1), texts
def tokenize_images(self, images):
return self.processor(images=images, return_tensors="pt").pixel_values
def tokenize(self, sample):
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
}
class Kosmos(Module):
def __init__(self):
super().__init__()
# Instantiate Clip Vit-l/14
self.clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K").vision_model
self.embed = bitsandbytes.nn.modules.Embedding(
32002,
2048,
padding_idx=1
)
self.embed_positions= PositionalEmbedding(
2048,
2048,
1
)
self.output_projection = torch.nn.Linear(
2048, 32002, bias=False
)
torch.nn.init.normal_(
self.output_projection.weight, mean=0, std=2048**-0.5
)
# Config following KOSMOS-1 paper (https://arxiv.org/pdf/2302.14045.pdf)
self.config = DecoderConfig(
decoder_layers=24,
decoder_embed_dim=2048,
decoder_ffn_embed_dim=8192,
decoder_attention_heads=32,
dropout=0.1,
activation_fn="gelu",
attention_dropout=0.1,
vocab_size=64007,
subln=True,
xpos_rel_pos=True,
multiway=True,
max_rel_pos=2048,
)
self.decoder = Decoder(
self.config,
embed_tokens=self.embed,
embed_positions=self.embed_positions,
output_projection=self.output_projection
)
self.perceive = PerceiverResampler(
dim = 1024,
depth = 2,
dim_head = 64,
heads = 8,
num_latents = 64,
num_media_embeds = 257
)
self.image_proj = torch.nn.Linear(1024, 2048, bias=False)
torch.nn.init.normal_(
self.image_proj.weight, mean=0, std=2048**-0.5
)
def forward(self, text_tokens, images, **kwargs):
images = self.clip_model(pixel_values=images)["last_hidden_state"]
images = self.perceive(images).squeeze(1)
images = self.image_proj(images)
model_input = self.decoder.forward_embedding(text_tokens)[1]
model_input = torch.cat([model_input[:, 0:2], images, model_input[:, 2:]], dim=1)
model_input = self.decoder.forward_embedding(model_input, token_embedding=model_input)[0]
        return self.decoder(model_input, passed_x=model_input)[0]

# [end of file: zeta/models/kosmos.py, package: zetascale]
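
# Usage sketch (illustrative; instantiating Kosmos downloads the CLIP ViT-L/14
# weights and builds a KOSMOS-1-sized decoder, so this is not a toy run).
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    tokenizer = KosmosTokenizer()
    model = Kosmos()
    batch = tokenizer.tokenize({
        "target_text": ["an example caption"],
        "image": Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8)),
    })
    logits = model(batch["text_tokens"], batch["images"])
    print(logits.shape)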
import torch
from zeta.nn.architecture.auto_regressive_wrapper import AutoregressiveWrapper
from zeta.nn.architecture.transformer import (
Decoder,
Encoder,
Transformer,
ViTransformerWrapper,
)
class PalmE(torch.nn.Module):
def __init__(self,
image_size=256,
patch_size=32,
encoder_dim=512,
encoder_depth=6,
encoder_heads=8,
num_tokens=20000,
max_seq_len=1024,
decoder_dim=512,
decoder_depth=6,
decoder_heads=8,
alibi_num_heads=4,
use_abs_pos_emb=False,
cross_attend=True,
alibi_pos_bias=True,
rotary_xpos=True,
attn_flash=True,
qk_norm=True):
super(PalmE, self).__init__()
self.encoder = ViTransformerWrapper(
image_size=image_size,
patch_size=patch_size,
attn_layers=Encoder(
dim=encoder_dim,
depth=encoder_depth,
heads=encoder_heads
)
)
self.decoder = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=decoder_dim,
depth=decoder_depth,
heads=decoder_heads,
cross_attend=cross_attend,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
qk_norm=qk_norm,
)
)
self.decoder = AutoregressiveWrapper(self.decoder)
def forward(self, img, text):
try:
encoded = self.encoder(img, return_embeddings=True)
return self.decoder(text, context=encoded)
except Exception as error:
print(f"Failed in forward method: {error}")
            raise

# [end of file: zeta/models/palme.py, package: zetascale]
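
# Usage sketch (illustrative): image -> ViT encoder, tokens -> autoregressive
# decoder with cross-attention; the AutoregressiveWrapper returns (logits, loss).
if __name__ == "__main__":
    model = PalmE()
    img = torch.randn(1, 3, 256, 256)
    text = torch.randint(0, 20000, (1, 1024))
    logits, loss = model(img, text)
    print(logits.shape, loss.item())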
from torch.nn import Module
from zeta.nn.architecture.auto_regressive_wrapper import AutoregressiveWrapper
from zeta.nn.architecture.transformer import (
Decoder,
Transformer,
)
class Andromeda(Module):
"""
Andromeda is a transformer-based model architecture. It initializes with
a Transformer and AutoregressiveWrapper with default or user-specified parameters.
"""
def __init__(self,
num_tokens=50432,
max_seq_len=8192,
dim=2560,
depth=32,
dim_head=128,
heads=24,
use_abs_pos_emb=False,
alibi_pos_bias=True,
alibi_num_heads=12,
rotary_xpos=True,
attn_flash=True,
attn_kv_heads = 2,
qk_norm=True,
attn_qk_norm=True,
attn_qk_norm_dim_scale=True, ):
"""
Initialize the model with specified or default parameters.
        Args:
        - num_tokens: Number of tokens in the vocabulary
        - max_seq_len: Maximum sequence length
        - dim: Dimension of the model
        - depth: Depth of the model
        - dim_head: Dimension of the model head
        - heads: Number of heads
        - use_abs_pos_emb: Whether to use absolute position embedding
        - alibi_pos_bias: Alibi position bias
        - alibi_num_heads: Number of alibi heads
        - rotary_xpos: Rotary xpos position embedding
        - attn_flash: Whether to use flash attention
        - attn_kv_heads: Number of key/value heads (multi-query attention)
        - qk_norm: Query-key normalization
        - attn_qk_norm: Attention query-key normalization
        - attn_qk_norm_dim_scale: Attention query-key normalization dimension scale
"""
super().__init__()
try:
self.Andromeda = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=dim,
depth=depth,
dim_head=dim_head,
heads=heads,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
attn_kv_heads=attn_kv_heads,
qk_norm=qk_norm,
attn_qk_norm=attn_qk_norm,
attn_qk_norm_dim_scale=attn_qk_norm_dim_scale
)
)
self.decoder = AutoregressiveWrapper(self.Andromeda)
except Exception as e:
print("Failed to initialize Andromeda: ", e)
raise
def forward(self, text_tokens, **kwargs):
"""
Forward pass through the model. It expects the input text_tokens.
Args:
- text_tokens: Input tokens
- kwargs: Other arguments
Returns:
- output from the decoder
"""
        try:
            return self.decoder(text_tokens, **kwargs)
except Exception as e:
print("Failed in forward method: ", e)
            raise

# [end of file: zeta/models/andromeda.py, package: zetascale]
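
# Usage sketch (illustrative): a shrunken configuration so the forward pass is
# cheap; the defaults above build a model with roughly a few billion parameters.
if __name__ == "__main__":
    import torch

    model = Andromeda(
        num_tokens=1000,
        max_seq_len=128,
        dim=64,
        depth=2,
        dim_head=32,
        heads=4,
        alibi_num_heads=2,
    )
    tokens = torch.randint(0, 1000, (1, 64))
    logits, loss = model(tokens)
    print(logits.shape, loss.item())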
import torch
from torch import nn
from zeta.nn.architecture.transformer import (
Decoder,
Encoder,
Transformer,
ViTransformerWrapper,
)
from zeta.nn.architecture.auto_regressive_wrapper import AutoregressiveWrapper
class GPT4(nn.Module):
"""
GPT4 is a transformer-based model architecture. It initializes with
a Transformer and AutoregressiveWrapper with default or user-specified parameters.
Initialize the model with specified or default parameters.
Args:
- num_tokens: Number of tokens in the vocabulary
- max_seq_len: Maximum sequence length
- dim: Dimension of the model
- depth: Depth of the model
- dim_head: Dimension of the model head
- heads: Number of heads
- use_abs_pos_emb: Whether to use absolute position embedding
- alibi_pos_bias: Alibi position bias
- alibi_num_heads: Number of alibi heads
- rotary_xpos: Rotary position
- attn_flash: Attention flash
    - attn_one_kv_head: Use a single key/value head (multi-query attention)
    - qk_norm: Query-key normalization
    - attn_qk_norm: Attention query-key normalization
    - attn_qk_norm_dim_scale: Attention query-key normalization dimension scale
"""
def __init__(self,
num_tokens=50432,
max_seq_len=8192,
dim=2560,
depth=32,
dim_head=128,
heads=24,
use_abs_pos_emb=False,
alibi_pos_bias=True,
alibi_num_heads=12,
rotary_xpos=True,
attn_flash=True,
attn_one_kv_head=True, # multiquery attention
qk_norm=True,
attn_qk_norm=True,
attn_qk_norm_dim_scale=True,
):
super().__init__()
try:
self.decoder = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=dim,
depth=depth,
dim_head=dim_head,
heads=heads,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
attn_one_kv_head=attn_one_kv_head,
qk_norm=qk_norm,
attn_qk_norm=attn_qk_norm,
attn_qk_norm_dim_scale=attn_qk_norm_dim_scale
)
)
self.decoder = AutoregressiveWrapper(self.decoder)
except Exception as e:
print("Failed to initialize Andromeda: ", e)
raise
def forward(self, text_tokens, **kwargs):
        try:
            return self.decoder(text_tokens, **kwargs)
except Exception as e:
print("Failed in forward method: ", e)
raise
class GPT4MultiModal(torch.nn.Module):
def __init__(self,
image_size=256,
patch_size=32,
encoder_dim=512,
encoder_depth=6,
encoder_heads=8,
num_tokens=20000,
max_seq_len=1024,
decoder_dim=512,
decoder_depth=6,
decoder_heads=8,
alibi_num_heads=4,
use_abs_pos_emb=False,
cross_attend=True,
alibi_pos_bias=True,
rotary_xpos=True,
attn_flash=True,
qk_norm=True):
super(GPT4MultiModal, self).__init__()
self.encoder = ViTransformerWrapper(
image_size=image_size,
patch_size=patch_size,
attn_layers=Encoder(
dim=encoder_dim,
depth=encoder_depth,
heads=encoder_heads
)
)
self.decoder = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=decoder_dim,
depth=decoder_depth,
heads=decoder_heads,
cross_attend=cross_attend,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
qk_norm=qk_norm,
)
)
def forward(self, img, text):
try:
encoded = self.encoder(img, return_embeddings=True)
return self.decoder(text, context=encoded)
except Exception as error:
print(f"Failed in forward method: {error}")
            raise

# [end of file: zeta/models/gpt4.py, package: zetascale]
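
# Usage sketch (illustrative): the multi-modal variant with the constructor
# defaults above; the decoder here returns raw logits (no wrapper).
if __name__ == "__main__":
    model = GPT4MultiModal()
    img = torch.randn(1, 3, 256, 256)
    text = torch.randint(0, 20000, (1, 1024))
    logits = model(img, text)
    print(logits.shape)  # expected: torch.Size([1, 1024, 20000])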
import math
import os
from datetime import timedelta
import torch
from accelerate import Accelerator
from accelerate.utils import InitProcessGroupKwargs
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import default_data_collator, set_seed
from zeta.training.dataloader import build_dataloaders, build_pre_tokenized
from zeta.training.fsdp import fsdp
from zeta.training.optimizers.decoupled_optimizer import decoupled_optimizer
from zeta.training.scheduler import get_lr_scheduler_with_warmup
from zeta.training.activation_checkpoint import activation_checkpointing
def print_num_params(model, accelerator: Accelerator):
# n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
accelerator.print(f"Number of parameters in model: {n_params}")
def Trainer(
gradient_accumulate_every: int = None,
batch_size: int = None,
seq_len: int = None,
entity_name: str = None,
model = None,
use_fsdp: bool = False,
use_activation_checkpointing: bool = False,
learning_rate = None,
seed = None,
use_pretokenized: bool = False,
resume_from_checkpoint = None,
checkpointing_steps = None,
output_dir = None,
weight_decay = None,
use_deepspeed = None
):
# accelerator
timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1_000_000))
accelerator = Accelerator(
gradient_accumulation_steps=gradient_accumulate_every,
mixed_precision="fp16",
log_with="wandb",
kwargs_handlers=[timeout],
)
# AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu'] = 4 #??????
accelerator.init_trackers(
project_name="LongNet",
config={
"batch_size": batch_size,
"gradient_accumulate_every": gradient_accumulate_every,
"learning_rate": learning_rate,
"seq_len": seq_len,
},
init_kwargs={"wandb": {"entity": entity_name}},
)
accelerator.print(f"Total GPUS: {accelerator.num_processes}")
# set seed
set_seed(seed)
model = model().to(accelerator.device)
print_num_params(model, accelerator)
if use_fsdp:
model = fsdp(
model,
mp="fp16",
shard_strat="SHARD_GRAD"
)
if use_activation_checkpointing:
activation_checkpointing(model, accelerator)
model = accelerator.prepare(model)
# dataloaders
if use_pretokenized:
train_dataset = build_pre_tokenized()
else:
train_dataset = build_dataloaders()
train_loader = DataLoader(
train_dataset, batch_size=batch_size, collate_fn=default_data_collator,
)
# optimizer
optim = decoupled_optimizer(
model=model,
learning_rate=learning_rate,
weight_decay=weight_decay,
beta_1=0.90,
beta_2=0.95,
optimizer_type='Adam8bit',
use_fsdp=True,
accelerator=accelerator
)
# Determine number of training steps
max_train_steps = math.ceil(len(train_loader) / gradient_accumulate_every)
accelerator.print(f"Max train steps: {max_train_steps}")
# lr scheduler
NUM_WARMUP_STEPS = int(max_train_steps * 0.01)
accelerator.print(f"Num warmup steps: {NUM_WARMUP_STEPS}")
lr_scheduler = get_lr_scheduler_with_warmup(
optimizer=optim,
scheduler_type="cosine",
num_warmup_steps=NUM_WARMUP_STEPS,
max_train_steps=max_train_steps,
grad_accumulate_every=gradient_accumulate_every,
)
# prepare
optim, train_loader, lr_scheduler = accelerator.prepare(
optim, train_loader, lr_scheduler
)
# checkpoint scheduler
accelerator.register_for_checkpointing(lr_scheduler)
# I do not know why Huggingface recommends recalculation of max_train_steps
max_train_steps = math.ceil(len(train_loader) / gradient_accumulate_every)
accelerator.print(f"Max train steps recalculated: {max_train_steps}")
# Total batch size for logging
total_batch_size = (
batch_size * accelerator.num_processes * gradient_accumulate_every
)
accelerator.print(f"Total batch size: {total_batch_size}")
# resume training
progress_bar = tqdm(
range(max_train_steps), disable=not accelerator.is_local_main_process
)
completed_steps = 0
if resume_from_checkpoint:
        if resume_from_checkpoint is not None and resume_from_checkpoint != "":
accelerator.print(f"Resuming from checkpoint {resume_from_checkpoint}")
accelerator.load_state(resume_from_checkpoint)
path = os.path.basename(resume_from_checkpoint)
training_difference = os.path.splitext(path)[0]
# need to multiply `gradient_accumulation_steps` to reflect real steps
resume_step = (
int(training_difference.replace("step_", ""))
* gradient_accumulate_every
)
if resume_from_checkpoint and resume_step is not None:
train_loader = accelerator.skip_first_batches(train_loader, resume_step)
completed_steps += resume_step
progress_bar.update(resume_step)
# training
model.train()
for step, batch in enumerate(train_loader):
with accelerator.accumulate(model):
inputs = batch["input_ids"].to(accelerator.device)
loss = model(inputs, return_loss=True)
accelerator.backward(loss)
accelerator.log({"loss": loss.item()}, step=step)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optim.step()
lr_scheduler.step()
optim.zero_grad()
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
            if isinstance(checkpointing_steps, int):
                if completed_steps % checkpointing_steps == 0:
                    checkpoint_dir = f"step_{completed_steps}"
                    if output_dir is not None:
                        checkpoint_dir = os.path.join(output_dir, checkpoint_dir)
                    accelerator.save_state(checkpoint_dir)
if completed_steps >= max_train_steps:
break
# end training
# accelerator.print(f"Training Finished")
accelerator.end_training()
# save final model
# accelerator.print(f"Saving model to {output_dir}")
if output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
with accelerator.main_process_first():
accelerator.save(
unwrapped_model.state_dict(), f"{output_dir}/final/final_model.pt"
)
def train(
        MASTER_ADDR=None,
        MASTER_PORT=None,
        RANK=None,
        WORLD_SIZE=None):
    os.environ['MASTER_ADDR'] = MASTER_ADDR or 'localhost'
    os.environ['MASTER_PORT'] = MASTER_PORT or '9994'
    # [CRITICAL] Pay attention to these when scaling to multiple GPUs and
    # clusters; prefer configuring them via "accelerate config".
    os.environ['RANK'] = RANK or str(0)  # rank of this process/node
    os.environ['WORLD_SIZE'] = WORLD_SIZE or str(torch.cuda.device_count())
    torch.distributed.init_process_group()
    Trainer()

# [end of file: zeta/training/train.py, package: zetascale]
import torch
from accelerate import Accelerator
from transformers import (get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup)
def get_lr_scheduler_with_warmup(
optimizer: torch.optim.Optimizer,
scheduler_type: str,
num_warmup_steps: int,
max_train_steps: int,
grad_accumulate_every: int = 1,
accelerator: Accelerator = None,
):
"""
Get a learning rate scheduler with warmup.
Args:
optimizer (Optimizer): The optimizer for which to create the learning rate scheduler.
scheduler_type (str): The type of learning rate scheduler to create, either "linear" or "cosine".
num_warmup_steps (int): The number of warmup steps for the learning rate scheduler.
max_train_steps (int): The maximum number of training steps.
grad_accumulate_every (int, optional): The gradient accumulation factor. Defaults to 1.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
Returns:
The learning rate scheduler with warmup.
Raises:
ValueError: If scheduler_type is not "linear" or "cosine".
"""
NUM_WARMUP_STEPS = num_warmup_steps
GRADIENT_ACCUMULATE_EVERY = grad_accumulate_every
if accelerator is not None:
accelerator.print(f"Using {scheduler_type} lr scheduler")
if scheduler_type == "linear":
return get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
elif scheduler_type == "cosine":
return get_cosine_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'linear' or 'cosine', got: {}".format(
scheduler_type
)
        )

# [end of file: zeta/training/scheduler.py, package: zetascale]
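
# Usage sketch (illustrative): warmup is specified in optimizer steps; both
# counts are scaled by grad_accumulate_every internally.
if __name__ == "__main__":
    model = torch.nn.Linear(10, 10)
    optim = torch.optim.AdamW(model.parameters(), lr=3e-4)
    scheduler = get_lr_scheduler_with_warmup(
        optimizer=optim,
        scheduler_type="cosine",
        num_warmup_steps=100,
        max_train_steps=10_000,
    )
    for _ in range(5):
        optim.step()
        scheduler.step()
    print(scheduler.get_last_lr())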
from functools import partial
import torch
from torch.distributed.fsdp import (
FullyShardedDataParallel,
MixedPrecision,
BackwardPrefetch,
ShardingStrategy,
)
from torch.distributed.fsdp.wrap import (
transformer_auto_wrap_policy
)
def fsdp(
model: torch.nn.Module,
auto_wrap: bool = False,
mp: str = "fp32",
shard_strat: str = "NO_SHARD",
TransformerBlock = None
):
"""
This function wraps a given PyTorch model with the FullyShardedDataParallel (FSDP) wrapper to enable efficient data parallelism and model sharding.
Args:
model (torch.nn.Module): The original PyTorch model to be wrapped with FSDP.
auto_wrap (bool, optional): If True, it enables automatic wrapping of the model's layers according to the transformer_auto_wrap_policy. Default is False.
mp (str, optional): The mixed precision mode to be used. Can be 'bf16' for BFloat16, 'fp16' for Float16 or 'fp32' for Float32 precision. Default is 'fp32'.
shard_strat (str, optional): The sharding strategy to be used. Can be 'SHARD_GRAD' for sharding at gradient computation, 'FULL_SHARD' for full model sharding or 'NO_SHARD' for no sharding. Default is 'NO_SHARD'.
Raises:
ValueError: If the provided mp (mixed precision mode) is not 'bf16', 'fp16' or 'fp32'.
ValueError: If the provided shard_strat (sharding strategy) is not 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD'.
Returns:
torch.nn.Module: The input model wrapped with FSDP.
"""
if auto_wrap:
LongNet_auto_wrap_policy = partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
TransformerBlock,
},
)
else:
LongNet_auto_wrap_policy = None
if mp == "bf16":
mp_fsdp = MixedPrecision(
param_dtype=torch.bfloat16,
# Gradient communication precision.
reduce_dtype=torch.bfloat16,
# Buffer precision.
buffer_dtype=torch.bfloat16,
)
elif mp == "fp16":
mp_fsdp = MixedPrecision(
param_dtype=torch.float16,
# Gradient communication precision.
reduce_dtype=torch.float16,
# Buffer precision.
buffer_dtype=torch.float16,
)
elif mp == "fp32":
mp_fsdp = MixedPrecision(
param_dtype=torch.float32,
# Gradient communication precision.
reduce_dtype=torch.float32,
# Buffer precision.
buffer_dtype=torch.float32,
)
else:
        raise ValueError(
            "Invalid mp. Expected 'bf16', 'fp16' or 'fp32', got: {}".format(
                mp
            )
        )
if shard_strat == "SHARD_GRAD":
sharding_strat_fsdp = ShardingStrategy.SHARD_GRAD_OP
elif shard_strat == "FULL_SHARD":
sharding_strat_fsdp = ShardingStrategy.FULL_SHARD
elif shard_strat == "NO_SHARD":
sharding_strat_fsdp = ShardingStrategy.NO_SHARD
else:
        raise ValueError(
            "Invalid shard_strat. Expected 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD', got: {}".format(
                shard_strat
            )
        )
model = FullyShardedDataParallel(
model,
auto_wrap_policy=LongNet_auto_wrap_policy,
mixed_precision=mp_fsdp,
backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
sharding_strategy=sharding_strat_fsdp,
forward_prefetch=True,
use_orig_params=True,
)
    return model

# [end of file: zeta/training/fsdp.py, package: zetascale]
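
# Usage sketch (illustrative): FSDP requires an initialized process group and
# CUDA devices; launch with e.g. `torchrun --nproc_per_node=2 fsdp.py`.
if __name__ == "__main__":
    import torch.distributed as dist

    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(dist.get_rank() % torch.cuda.device_count())
    model = torch.nn.Sequential(torch.nn.Linear(512, 512), torch.nn.ReLU()).cuda()
    model = fsdp(model, mp="bf16", shard_strat="FULL_SHARD")
    print(model)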
from itertools import chain
from datasets import load_dataset
from transformers import (AutoTokenizer)
def build_dataloaders(
seq_len: int = None,
num_cpu: int = None
):
"""
Build data loaders for training.
This function performs the following steps:
1. Load the tokenizer from the pretrained "EleutherAI/gpt-neox-20b" model.
2. Load the "openwebtext" dataset.
3. Tokenize the dataset, adding the end-of-sentence token to each text.
4. Process the tokenized dataset into chunks of a specified block size.
Returns:
Dataset: The processed dataset ready for training.
"""
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
dataset = load_dataset("openwebtext", split="train")
tokenized_dataset = dataset.map(
lambda example: tokenizer([t + tokenizer.eos_token for t in example["text"]]),
batched=True,
        num_proc=num_cpu,
remove_columns=["text"],
)
block_size = seq_len
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
train_dataset = tokenized_dataset.map(
group_texts, batched=True, num_proc=num_cpu,
)
return train_dataset
def build_pre_tokenized(
dataset_name: str = None
):
d0 = load_dataset(dataset_name)
# d1 = load_dataset("conceptofmind/c4_21-to-40_neox_with_eos_8k", split="train")
# d2 = load_dataset("conceptofmind/c4_41-to-60_neox_with_eos_8k", split="train")
# d3 = load_dataset("conceptofmind/c4_61-to-80_neox_with_eos_8k", split="train")
# d4 = load_dataset("conceptofmind/c4_81-to-100_neox_with_eos_8k", split="train")
# train_dataset = concatenate_datasets([d0, d1, d2, d3, d4])
    return d0

# [end of file: zeta/training/dataloader.py, package: zetascale]
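
# Usage sketch (illustrative): note that build_dataloaders downloads and
# tokenizes the full openwebtext corpus, which is large.
if __name__ == "__main__":
    train_dataset = build_dataloaders(seq_len=1024, num_cpu=4)
    print(train_dataset)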
import torch
class StableAdamWUnfused(torch.optim.Optimizer):
def __init__(
self,
params,
lr=0.002,
weight_decay=0.2,
betas=(0.9, 0.99),
eps=1e-8,
clip_thresh=1.0,
precision="amp_bfloat16",
custom_scalar=65536,
):
beta1, beta2 = betas[0], betas[1]
defaults = dict(lr=lr, weight_decay=weight_decay, beta1=beta1, beta2=beta2)
super(StableAdamWUnfused, self).__init__(params, defaults)
self.eps = eps
self.d = clip_thresh
# Set precision to "custom_fp16" if you want to use a fixed loss scalar, custom_scalar, which is divided out in the update step.
# If you do this, call (custom_scalar * loss).backward() instead of loss.backward().
self.precision = precision
self.custom_scaler = custom_scalar
for group in self.param_groups:
group["step"] = 1.0
print("Using StableAdamWUnfused-v1")
def __setstate__(self, state):
super(StableAdamWUnfused, self).__setstate__(state)
def step(self, closure=None):
if closure is not None:
closure()
for group in self.param_groups:
lr = group["lr"]
weight_decay = group["weight_decay"]
beta1 = group["beta1"]
beta2 = group["beta2"]
step = group["step"]
for p in group["params"]:
if p.grad is None:
continue
theta = p.data
param_state = self.state[p]
if self.precision == "custom_fp16":
g = p.grad.data / self.custom_scaler
if torch.any(torch.isnan(g) | torch.isinf(g)):
continue
else:
g = p.grad.data
if "exp_avg" not in param_state:
v = param_state["exp_avg"] = torch.zeros_like(theta)
u = param_state["exp_avg_sq"] = torch.zeros_like(theta)
else:
v = param_state["exp_avg"]
u = param_state["exp_avg_sq"]
beta1hat = beta1 * (1 - beta1 ** (step - 1)) / (1 - beta1**step)
beta2hat = beta2 * (1 - beta2 ** (step - 1)) / (1 - beta2**step)
v = v.mul_(beta1hat).add_(g, alpha=1.0 - beta1hat)
u = u.mul_(beta2hat).addcmul_(g, g, value=1.0 - beta2hat)
denominator = u.sqrt().add_(self.eps)
# StableAdamW = AdamW + update clipping (https://arxiv.org/abs/1804.04235) applied tensor-wise.
rms = (
torch.div(
g.pow(2), torch.maximum(u, (self.eps**2) * torch.ones_like(u))
)
.mean()
.sqrt()
.item()
)
theta = theta.mul_(1.0 - lr * weight_decay).addcdiv_(
v, denominator, value=-lr * (1.0 / max(1.0, rms / self.d))
)
# save current params
param_state["exp_avg"] = v
param_state["exp_avg_sq"] = u
group["step"] = step + 1 | zetascale | /zetascale-0.4.4.tar.gz/zetascale-0.4.4/zeta/training/optimizers/stable_adam.py | stable_adam.py |
import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer
from typing import List
class SophiaG(Optimizer):
"""
SophiaG optimizer class.
"""
def __init__(self, params, lr=1e-4, betas=(0.965, 0.99), rho = 0.04,
weight_decay=1e-1, *, maximize: bool = False,
capturable: bool = False, dynamic: bool = False):
"""
Initialize the optimizer.
"""
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= rho:
raise ValueError("Invalid rho parameter at index 1: {}".format(rho))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, rho=rho,
weight_decay=weight_decay,
maximize=maximize, capturable=capturable, dynamic=dynamic)
super(SophiaG, self).__init__(params, defaults)
def __setstate__(self, state):
"""
Set the state of the optimizer.
"""
super().__setstate__(state)
for group in self.param_groups:
group.setdefault('maximize', False)
group.setdefault('capturable', False)
group.setdefault('dynamic', False)
state_values = list(self.state.values())
step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step'])
if not step_is_tensor:
for s in state_values:
s['step'] = torch.tensor(float(s['step']))
@torch.no_grad()
def update_hessian(self):
"""
Update the hessian.
"""
for group in self.param_groups:
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
state['step'] = torch.zeros((1,), dtype=torch.float, device=p.device) \
if self.defaults['capturable'] else torch.tensor(0.)
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if 'hessian' not in state.keys():
state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['hessian'].mul_(beta2).addcmul_(p.grad, p.grad, value=1 - beta2)
@torch.no_grad()
def update_exp_avg(self):
"""
Update the exponential average.
"""
for group in self.param_groups:
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
state['exp_avg'].mul_(beta1).add_(p.grad, alpha=1 - beta1)
@torch.no_grad()
def step(self, closure=None, bs=5120):
"""
Perform a step of the optimizer.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
self.update_hessian()
self.update_exp_avg()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
state_steps = []
hessian = []
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError('Hero does not support sparse gradients')
grads.append(p.grad)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = torch.zeros((1,), dtype=torch.float, device=p.device) \
if self.defaults['capturable'] else torch.tensor(0.)
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if 'hessian' not in state.keys():
state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
state_steps.append(state['step'])
hessian.append(state['hessian'])
if self.defaults['capturable']:
bs = torch.ones((1,), dtype=torch.float, device=p.device) * bs
self._sophiag(params_with_grad,
grads,
exp_avgs,
hessian,
state_steps,
bs=bs,
beta1=beta1,
beta2=beta2,
rho=group['rho'],
lr=group['lr'],
weight_decay=group['weight_decay'],
maximize=group['maximize'],
capturable=group['capturable'])
return loss
def _sophiag(self, params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
hessian: List[Tensor],
state_steps: List[Tensor],
capturable: bool = False,
*,
bs: int,
beta1: float,
beta2: float,
rho: float,
lr: float,
weight_decay: float,
maximize: bool):
"""
SophiaG function.
"""
if not all(isinstance(t, torch.Tensor) for t in state_steps):
raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")
self._single_tensor_sophiag(params,
grads,
exp_avgs,
hessian,
state_steps,
bs=bs,
beta1=beta1,
beta2=beta2,
rho=rho,
lr=lr,
weight_decay=weight_decay,
maximize=maximize,
capturable=capturable)
def _single_tensor_sophiag(self, params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
hessian: List[Tensor],
state_steps: List[Tensor],
*,
bs: int,
beta1: float,
beta2: float,
rho: float,
lr: float,
weight_decay: float,
maximize: bool,
capturable: bool):
"""
SophiaG function for single tensor.
"""
for i, param in enumerate(params):
grad = grads[i] if not maximize else -grads[i]
exp_avg = exp_avgs[i]
hess = hessian[i]
step_t = state_steps[i]
if capturable:
assert param.is_cuda and step_t.is_cuda and bs.is_cuda
if torch.is_complex(param):
grad = torch.view_as_real(grad)
exp_avg = torch.view_as_real(exp_avg)
hess = torch.view_as_real(hess)
param = torch.view_as_real(param)
# update step
step_t += 1
# Perform stepweight decay
param.mul_(1 - lr * weight_decay)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
if capturable:
step_size = lr
step_size_neg = step_size.neg()
ratio = (exp_avg.abs() / (rho * bs * hess + 1e-15)).clamp(None,1)
param.addcmul_(exp_avg.sign(), ratio, value=step_size_neg)
else:
step_t.item()
step_size_neg = - lr
ratio = (exp_avg.abs() / (rho * bs * hess + 1e-15)).clamp(None,1)
                param.addcmul_(exp_avg.sign(), ratio, value=step_size_neg)

# [end of file: zeta/training/optimizers/decoupled_sophia.py, package: zetascale]
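
# Usage sketch (illustrative): step() refreshes the Hessian EMA internally;
# `bs` should be the total effective batch size (per the Sophia paper's scaling).
if __name__ == "__main__":
    model = torch.nn.Linear(8, 8)
    optim = SophiaG(model.parameters(), lr=1e-4, rho=0.04)
    loss = model(torch.randn(16, 8)).pow(2).mean()
    loss.backward()
    optim.step(bs=16)
    optim.zero_grad()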
import bitsandbytes as bnb
import torch
from accelerate import Accelerator
from lion_pytorch import Lion
from torch.nn import LayerNorm
from torch.optim import AdamW
from zeta.training.optimizers.stable_adam import StableAdamWUnfused
def decoupled_optimizer(
model: torch.nn.Module,
learning_rate: float,
weight_decay: float,
beta_1: float,
beta_2: float,
optimizer_type: str,
use_fsdp: bool = True,
accelerator: Accelerator = None,
):
"""
Decouples the optimizer from the training process.
This function sets up the optimizer for the model by creating two groups of parameters:
one for weight decay and one without weight decay. Then, it initializes the optimizer
with these two groups of parameters.
Args:
model (Module): The model whose parameters are optimized.
learning_rate (float): The learning rate for the optimizer.
weight_decay (float): The weight decay for the optimizer.
beta_1 (float): The exponential decay rate for the 1st moment estimates.
beta_2 (float): The exponential decay rate for the 2nd moment estimates.
optimizer_type (str): The type of the optimizer. Can be 'lion', 'adamw', or 'stable_adamw'.
use_fsdp (bool, optional): If True, the optimizer will work with fully sharded data parallelism. Defaults to True.
accelerator (Accelerator, optional): The accelerator from HuggingFace's Accelerate library. Defaults to None.
Returns:
Optimizer: The initialized optimizer.
Raises:
ValueError: If the optimizer type is not 'lion', 'adamw' or 'stable_adamw'.
"""
accelerator.print(f"Using {optimizer_type} optimizer")
# Create an empty dictionary called param_dict to store the model's named parameters.
param_dict = {}
# Iterate over the model's named parameters and populate the param_dict with key-value pairs.
for param_name, param in model.named_parameters():
param_dict[param_name] = param
# Separate the model's named modules into two groups: decay and no_decay.
# Create an empty list to store the names of the LayerNorm and Embedding layer weights with no weight decay.
no_decay = []
if use_fsdp:
exclude_module = "_fsdp_wrapped_module.token_emb"
else:
exclude_module = "token_emb"
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of any of the desired types (LayerNorm or torch.nn.Embedding).
for ndim in [LayerNorm, torch.nn.Embedding]:
if isinstance(module, ndim):
# If torch.nn.Embedding, append its name with a ".weight" suffix to the no_decay list.
if module_name == exclude_module:
no_decay.append(f"{module_name}.weight")
else:
# If the module is an instance of LayerNorm
no_decay.append(f"{module_name}.gamma")
# Exit the inner loop since the desired module has been found.
break
# Create an empty list to store the names of the Linear layer weights with weight decay.
decay = []
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of the desired type (torch.nn.Linear).
for ndim in [torch.nn.Linear]:
if isinstance(module, ndim):
# If the module is an instance of torch.nn.Linear, append its name with a ".weight" suffix to the decay list.
decay.append(f"{module_name}.weight")
# Exit the inner loop since the desired module has been found.
break
# Create two separate lists of model parameters: decay_param and no_decay_param.
# The decay_param list contains the parameters that should have weight decay applied.
# The no_decay_param list contains the parameters that should not have weight decay applied, excluding the 'to_logits.weight' parameter.
# Create an empty list called decay_param to store the parameters with weight decay.
decay_param = []
if use_fsdp:
exclude_param = "_fsdp_wrapped_module.to_logits.weight"
else:
exclude_param = "to_logits.weight"
# Iterate over the decay list, which contains the names of the parameters with weight decay.
for param in decay:
# Check if the current parameter is not 'to_logits.weight'.
# Append the corresponding parameter from param_dict to the decay_param list.
if param != exclude_param:
decay_param.append(param_dict[param])
# Create an empty list called no_decay_param to store the parameters without weight decay.
no_decay_param = []
# Iterate over the no_decay list, which contains the names of the parameters without weight decay.
for param in no_decay:
# Append the corresponding parameter from param_dict to the no_decay_param list.
no_decay_param.append(param_dict[param])
# Create a list called grouped_params that contains two dictionaries.
# The first dictionary has the decay_param list and the corresponding weight_decay value.
# The second dictionary has the no_decay_param list and a weight_decay value of 0.0.
grouped_params = [
{"params": decay_param, "weight_decay": weight_decay},
{"params": no_decay_param, "weight_decay": 0.0},
]
# Create a variable called optimizer that stores an instance of the optimizer.
if optimizer_type == "lion":
optimizer = Lion(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "adamw":
optimizer = AdamW(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "stable_adamw":
optimizer = StableAdamWUnfused(
grouped_params, lr=learning_rate, betas=(beta_1, beta_2),
)
elif optimizer_type=="Adam8bit":
optimizer = bnb.optim.Adam8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
elif optimizer_type=="Lion8Bit":
optimizer = bnb.optim.Lion8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
else:
        raise ValueError(
            "Invalid optimizer_type. Expected 'lion', 'adamw', 'stable_adamw', 'Adam8bit' or 'Lion8Bit', got: {}".format(
                optimizer_type
            )
        )
# Return the optimizer.
    return optimizer

# [end of file: zeta/training/optimizers/decoupled_optimizer.py, package: zetascale]
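
# Usage sketch (illustrative): the grouping logic above looks for LayerNorms
# exposing a `.gamma` parameter (x-transformers style) and an embedding named
# `token_emb`, so this toy model sticks to those conventions. Biases fall into
# neither group and are left out of the optimizer, mirroring the code above.
if __name__ == "__main__":
    from accelerate import Accelerator

    class Toy(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.token_emb = torch.nn.Embedding(100, 8)
            self.fc = torch.nn.Linear(8, 8)

    optim = decoupled_optimizer(
        model=Toy(),
        learning_rate=1e-4,
        weight_decay=0.1,
        beta_1=0.9,
        beta_2=0.95,
        optimizer_type="adamw",
        use_fsdp=False,
        accelerator=Accelerator(),
    )
    print(optim)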
import logging
import math
from typing import Callable, Optional, Tuple
import torch
from torch.optim.optimizer import Optimizer
log = logging.getLogger(__name__)
class DecoupledLionW(Optimizer):
"""
DecoupledLionW is an optimizer designed to improve training performance and convergence for deep learning models.
It is an extension of the Lion optimizer, incorporating decoupled weight decay and a momentum-based update rule.
The optimizer utilizes the Adam-like update rule, where the weight decay is applied separately from the gradient update.
The update rule consists of three steps: weight decay, momentum update, and momentum decay.
Weight decay reduces the magnitude of the model's weights, preventing overfitting and improving generalization.
The momentum update is an interpolation between the current gradient and the previous momentum state, allowing for faster convergence and smoother optimization.
Momentum decay gradually reduces the momentum term over time, preventing it from becoming too large and destabilizing the optimization process.
The optimizer supports both single-node and multi-node distributed training, enabling efficient training on parallel computing environments.
It provides various metric functions to track the optimization process, such as L2 norm of moments, parameters, updates, and gradients, as well as cosine similarity between updates and gradients.
The optimizer allows reporting per-parameter metrics to analyze the behavior of individual model parameters during training.
"""
metric_functions = {
'l2_norm/moment': lambda param, optim_state, step_tensor: torch.linalg.vector_norm(optim_state['exp_avg']),
'l2_norm/param': lambda param, optim_state, step_tensor: torch.linalg.vector_norm(param.data),
'l2_norm/update': lambda param, optim_state, step_tensor: torch.linalg.vector_norm(step_tensor),
'l2_norm/grad': lambda param, optim_state, step_tensor: torch.linalg.vector_norm(param.grad),
'cosine/update_grad': lambda param, optim_state, step_tensor: torch.nn.functional.cosine_similarity(param.grad.flatten(), step_tensor.flatten(), dim=0),
'cosine/moment_grad': lambda param, optim_state, step_tensor: torch.nn.functional.cosine_similarity(param.grad.flatten(), optim_state['exp_avg'].flatten(), dim=0),
}
def __init__(
self,
params,
lr: float = 1e-4,
betas: Tuple[float, float] = (0.9, 0.99),
weight_decay: float = 0.0,
):
if lr <= 0.:
raise Exception(f'Invalid LR: {lr}. LR must be > 0')
if not all([0. <= beta <= 1. for beta in betas]):
raise Exception(f'Invalid beta values: {betas}. All betas must be between 0 and 1.')
if weight_decay >= 1e-3:
log.warning(f'You are using a high value of `weight_decay={weight_decay}` for the `DecoupledLionW` optimizer. Are you sure you want to do this? Your model\'s weights will be multiplied by {1.0 - weight_decay} on every step!')
defaults = {'lr': lr, 'betas': betas, 'weight_decay': weight_decay}
super().__init__(params, defaults)
for group in self.param_groups:
group['initial_lr'] = group['lr']
@staticmethod
def lionw(p, grad, exp_avg, lr, initial_lr, wd, beta1, beta2) -> None:
if wd != 0:
decay_factor = (lr / initial_lr) if initial_lr else 1.0
p.data.mul_(1 - decay_factor * wd)
update = exp_avg.lerp(grad, 1 - beta1).sign_()
p.add_(update, alpha=-lr)
exp_avg.lerp_(grad, 1 - beta2)
@torch.no_grad()
def step(self, closure: Optional[Callable] = None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in filter(lambda p: p.grad is not None and p.requires_grad, group['params']):
grad, lr, initial_lr, wd, beta1, beta2, state = p.grad, group['lr'], group['initial_lr'], group['weight_decay'], *group['betas'], self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
exp_avg = state['exp_avg']
self.lionw(p, grad, exp_avg, lr, initial_lr, wd, beta1, beta2)
return loss
def pre_reduce_metrics(self, optimizer_metrics):
metrics = optimizer_metrics.keys()
metrics = sorted(metrics, key=lambda metric: 0 if 'l2_norm' in metric else 1)
for metric in metrics:
if metric.startswith('l2_norm'):
optimizer_metrics[metric] = optimizer_metrics[metric]**2
elif metric.startswith('cosine'):
_, vectors, layer = tuple(metric.split('/'))
A, B = tuple(vectors.split('_'))
A_rank_subset_norm = math.sqrt(optimizer_metrics[f'l2_norm/{A}/{layer}'])
B_rank_subset_norm = math.sqrt(optimizer_metrics[f'l2_norm/{B}/{layer}'])
optimizer_metrics[metric] *= A_rank_subset_norm * B_rank_subset_norm
return optimizer_metrics
def report_per_parameter_metrics(self, param: torch.Tensor, name: str, optimizer_metrics: dict):
lr = self.param_groups[0]['lr']
weight_decay = self.param_groups[0]['weight_decay']
initial_lr = self.param_groups[0]['initial_lr']
beta1, _ = self.param_groups[0]['betas']
if param in self.state:
param_optim_state = self.state[param]
step_tensor = param_optim_state['exp_avg'].clone().lerp_(param.grad, 1 - beta1).sign_().mul_(lr)
decay_factor = (lr / initial_lr) if initial_lr else 1.0
step_tensor.add_(param, alpha=-weight_decay * decay_factor)
for metric in self.metric_functions:
optimizer_metrics[f'{metric}/{name}'] = self.metric_functions[metric](param, param_optim_state, step_tensor)
return optimizer_metrics | zetascale | /zetascale-0.4.4.tar.gz/zetascale-0.4.4/zeta/training/optimizers/decoupled_lion.py | decoupled_lion.py |
import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import logging
# Helpers
def one_hot_encoding(y_true, num_classes):
    # build the one-hot target on the same device as the labels
    y_true_one_hot = torch.zeros(y_true.size(0), num_classes, device=y_true.device)
    y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
def is_multi_label_classification(y_true: torch.Tensor) -> bool:
return len(y_true.shape) > 1 and y_true.shape[1] > 1 and y_true.dtype == torch.float
def contains_non_negative_integers(y_true):
return torch.all(y_true >= 0) and torch.all(y_true == y_true.to(torch.int64))
def are_probability_distributions(y_pred, y_true):
return torch.all(y_pred >= 0) and torch.all(y_pred <= 1) and torch.all(y_true >= 0) and torch.all(y_true <= 1)
def are_log_probabilities(y_pred):
return torch.all(y_pred <= 0)
class HashableTensorWrapper:
def __init__(self, tensor):
self.tensor = tensor
self.tensor_shape = tensor.shape
self.tensor_dtype = tensor.dtype
def __hash__(self):
return hash((self.tensor_shape, self.tensor_dtype))
def __eq__(self, other):
return isinstance(other, HashableTensorWrapper) and self.tensor_shape == other.tensor_shape and self.tensor_dtype == other.tensor_dtype
def generate_tensor_key(tensor):
    # key a tensor by its shape and dtype (both hashable)
    return (tuple(tensor.shape), str(tensor.dtype))
# Losses
class LossFunction:
    def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented!")
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class SmoothL1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.SmoothL1Loss()
    def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MultiLabelSoftMarginLoss(LossFunction):
def __init__(self):
self.loss_function = nn.MultiLabelSoftMarginLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class PoissonNLLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.PoissonNLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class KLDivLoss(LossFunction):
def __init__(self):
self.loss_function = nn.KLDivLoss()
def compute_loss(self, y_pred, y_true):
        return self.loss_function(F.log_softmax(y_pred, dim=1), y_true)
class NLLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.NLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class Nebula(LossFunction):
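    """
    Nebula is a meta loss function: it inspects y_pred and y_true, heuristically
    decides whether the task is classification or regression, picks a concrete
    loss accordingly, and caches that choice per dataset id.

    Illustrative sketch (hypothetical usage; assumes 3-class logits and integer labels):
        >>> criterion = Nebula(domain_knowledge="classification")
        >>> loss = criterion(torch.randn(4, 3), torch.tensor([0, 2, 1, 0]))
    """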
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
self.unique_values_cache = {}
self.class_balance_cache = {}
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
self.logger.addHandler(handler)
def determine_loss_function(self, y_pred, y_true):
self.logger.info("Determining the loss function")
is_classification = None
dataset_id = id(y_true)
# Cache unique values
if dataset_id not in self.unique_values_cache:
self.unique_values_cache[dataset_id] = torch.unique(y_true)
unique_values = self.unique_values_cache[dataset_id]
# Cache class balance
if dataset_id not in self.class_balance_cache:
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
self.class_balance_cache[dataset_id] = value_counts / torch.sum(value_counts)
class_balance = self.class_balance_cache[dataset_id]
        # count label occurrences with torch.bincount (avoids a NumPy round trip)
        value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
if is_classification is None:
if len(unique_values) <= 10 and torch.all(torch.eq(unique_values % 1, 0)):
is_classification = True
if is_classification is None:
if torch.all(value_counts > 0):
is_classification = True
        if y_pred.ndim > 2:
            # higher-dimensional predictions are not treated specially yet
            pass
if is_classification is None:
sparsity = torch.count_nonzero(y_true) / y_true.numel()
if sparsity < 0.5:
self.loss_function = torch.nn.BCEWithLogitsLoss()
self.compute_loss = self.loss_function
return
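        # estimate the linear correlation between predictions and targets
        # (computed for diagnostics; not used by the heuristics below)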
y_pred_flat = y_pred.flatten()
y_true_flat = y_true.flatten()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.numel()]
correlation = torch.tensor(np.corrcoef(y_pred_flat.cpu().numpy(), y_true_flat.cpu().numpy())[0, 1])
if is_classification is None:
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
if is_classification is None:
if torch.max(y_pred) > 0.9:
is_classification = True
if is_classification is None:
if torch.any(class_balance < 0.1):
is_classification = True
if is_classification is None:
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
        # Multi-label classification
if is_multi_label_classification(y_true):
self.loss_function = MultiLabelSoftMarginLoss()
        # PoissonNLLLoss
if contains_non_negative_integers(y_true):
            self.loss_function = PoissonNLLLoss()
        # KLDivLoss
if are_probability_distributions(y_pred, y_true):
self.loss_function = KLDivLoss()
#NLLLoss
if is_classification and are_log_probabilities(y_pred):
self.loss_function = NLLLoss()
        # SmoothL1Loss
        if is_classification is None:
            # check the range of values in y_true
if torch.min(y_true) >= 0 and torch.max(y_true) <= 1:
self.loss_function = SmoothL1Loss()
# Set the loss function based on the determined problem type
if is_classification:
self.logger.info("Determined problem as classification. Using CrossEntropyLoss")
self.loss_function = CrossEntropyLoss()
else:
self.logger.info("Determining loss function for this dataset")
self.loss_function = MSELoss()
def __call__(self, y_pred, y_true):
dataset_id = id(y_true)
if dataset_id not in self.loss_function_cache:
self.logger.info("Determining loss function for the dataset")
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
cached_loss_function = self.loss_function_cache[dataset_id]
        if hasattr(cached_loss_function, "compute_loss"):
            return cached_loss_function.compute_loss(y_pred, y_true)
        # the sparse-target branch caches a raw nn.Module (BCEWithLogitsLoss), so call it directly
        return cached_loss_function(y_pred, y_true) | zetascale | /zetascale-0.4.4.tar.gz/zetascale-0.4.4/zeta/training/loss/nebula.py | nebula.py |
<p align="center">
<img src="https://raw.githubusercontent.com/lens-biophotonics/ZetaStitcher/master/doc/_static/zetastitcher.svg", height="150">
</p>
ZetaStitcher is a tool designed to stitch large volumetric images such as
those produced by Light-Sheet Fluorescence Microscopes.
Key features:
* able to handle datasets as big as 10<sup>12</sup> voxels
* multichannel images
* powerful and simple Python API to query arbitrary regions within the fused
volume (see the sketch below)
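
As a sketch of what such a query can look like (the `VirtualFusedVolume` class and the
`stitch.yml` file name follow the project's documentation and are assumptions here, not
taken from this README):

```
from zetastitcher import VirtualFusedVolume

vfv = VirtualFusedVolume('stitch.yml')     # metadata produced by the stitch commands
print(vfv.shape)                           # whole fused volume, never loaded in full
roi = vfv[0, 100:200, 512:1024, 512:1024]  # numpy-like slicing of an arbitrary region
```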
## How to install
On Ubuntu 20.04 LTS, run these commands:
```
sudo apt-get install python3-pip libgl1 libglib2.0-0
pip3 install zetastitcher
```
## Docker image
To build a docker image with ZetaStitcher:
```
make docker
```
You can call the stitching commands using an ephemeral container like this:
```
docker run -it -v`pwd`:/home --rm zetastitcher stitch-align -h
docker run -it -v`pwd`:/home --rm zetastitcher stitch-fuse -h
```
## Documentation
Please read the documentation and follow the tutorial at this page:<br/>
https://lens-biophotonics.github.io/ZetaStitcher/
## Acknowledgements
This open source software code was developed in whole in the Human
Brain Project, funded from the European Union's Horizon 2020 Framework
Programme for Research and Innovation under Specific Grant Agreements
No. 720270 and No. 785907 (Human Brain Project SGA1 and SGA2).
<p align="center">
<img height="100" style="max-height: 100px" src="https://europa.eu/european-union/sites/europaeu/files/docs/body/flag_yellow_low.jpg">
Co-funded by the European Union
</p>
| zetastitcher | /zetastitcher-0.6.0.tar.gz/zetastitcher-0.6.0/README.md | README.md |
A simple ETL framework for Python, SQL and BAT files which uses a Postgres database for activity logging.
The zetl framework requires Python and, for versions before v2, Postgres to run.
---
### 1. Install Python
Download and install python (https://www.python.org/downloads/) to your local computer.
### 2. Install Postgres
> **Note:** zetl v2+ uses a SQLite backend instead of Postgres, so installing Postgres is not mandatory.
Download and install postgres (https://www.postgresql.org/download/) to your local computer. Remember the password.
When you run zetl it will prompt you for database connection details. At the end of the prompts, it asks whether you want
to save the connection details (y/n). If you select y, the details are saved in that folder and you aren't prompted again
unless the details fail on connect.
Here are the defaults for Postgres:
> - host: localhost
> - port: 1532
> - name: postgres
> - schema: public
> - Username: postgres
> - Password: <whatever_you_supplied>
### 3. Install zetl
Just install with pip
```
pip install zetl
```
Wherever you run zetl, it will look for a folder called zetl_scripts, where all your ETL folders are stored.
> zetl_scripts
In the tests folder on git hub you can see examples of etl folders, and etl scripts under the zetl_scripts folder.
>
> zetl_scripts\demo1
> zetl_scripts\demo2
> zetl_scripts\demo3
> zetl_scripts\empty_log
> zetl_scripts\view_log
>
### 4. Run zetl
```
py -m zetl.run
```
This prompts for connection details to the Postgres database you just installed.
Hit enter to accept the defaults and enter the password you entered during the database setup.
### 5. Run zetl commands
To run any zetl commands, go to the command line and change to the zetl directory. eg. CD \zetl
If your setup is successful, when you run zetl with no parameters, it will connect and list the ETLs available to run, such as:
> - demo1
> - demo2
> - demo3
> - view_log
> - empty_log
---
### Usage
---
### What is an ETL in the zetl framework?
An ETL exists in the form of a directory under zetl_scripts, containing files that follow a specific naming convention and are either Python, Windows BAT, or SQL files. The file naming convention is as follows: step_number.activity.extension
> - **step_number** is any integer unique in the immediate folder
> - **activity** is any alphanumeric name for the activity of the file
> - **extension** must be either py, bat or sql
#### For example:
> - zetl\zetl_scripts\demo1\1.hello.py
> - zetl\zetl_scripts\demo1\2.something.sql
> - zetl\zetl_scripts\demo1\3.hello.bat
### create an ETL
Create a folder under zetl_scripts and add files that follow the naming convention step_number.activity.extension.
For example:
- 1.anything.sql
- 2.anythingelses.bat
- 3.something.py
### run an ETL
Go to the command line and change to the zetl directory. eg. CD \zetl
Pass the ETL folder name as a parameter to zetl,
for example:
> zetl demo1
### View the ETL Log
Every time an ETL runs, the z_log table is updated with the activity. To view the log, query the z_log table directly or run the ETL view_log.
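One direct way to query z_log, as a minimal sketch for the v2+ SQLite backend (the database file name below is an assumption; adjust it to whatever your zetl install created):
```
# inspect_zlog.py -- print every row of the zetl activity log
import sqlite3

con = sqlite3.connect("zetl.db")  # hypothetical file name
for row in con.execute("SELECT * FROM z_log"):
    print(row)
con.close()
```
Or run the bundled ETL: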
> zetl view_log
| zetl | /zetl-3.0.1.tar.gz/zetl-3.0.1/README.md | README.md |
# Zetsubou
[](https://github.com/BentouDev/Zetsubou/actions/workflows/python-ci.yml) [](https://badge.fury.io/py/zetsubou)
### FASTbuild project generator for the helpless
High-level wrapper around the FASTbuild build system, written in Python. Generates a Visual Studio solution from a simple yaml description. Supports the Conan package manager. Provides commands for common operations, like setting up a dev environment, building or cleaning (and many more in the future).
_Currently only Windows and msvc are supported, but clang and Linux are planned._
---
## Install
```
pip install zetsubou
```
## Usage
```cmd
zetsubou [COMMAND] [PROJECT] [OPTIONS...]
```
```cmd
zetsubou regen project.yml --verbose
```
## Commands
- clean - removes the generated build folder and sln
- install - sets up a virtual environment based on your build_tools.ini
- gen - generates bff files, creates the Visual Studio project and solution
- regen - clean, install and gen in one command
- build - builds the generated project
- create - (WIP) emits a new project from a template
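
A typical first-time flow might look like this (a sketch; the project file name is taken from the example below):

```cmd
zetsubou install project.yml
zetsubou gen project.yml
zetsubou build project.yml
```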
---
## Example Project
### project.yml
```yml
project: MyTest
config:
verbose_build: false
platforms:
- 'platform/windows.yml'
rules:
- 'configurations/MsvcRules.yml'
configurations:
- 'configurations/Debug.yml'
- 'configurations/Release.yml'
config_string: '{platform}-{configuration}-{toolchain}'
conan:
build_tools: build_tools.ini
dependencies: dependencies.ini
targets:
- 'my_app/my_app.yml'
```
### my_app.yml
```yml
target: 'MyApp'
config:
kind: EXECUTABLE
source:
paths: 'src'
patterns:
- '*.cpp'
```
### Directory structure
```ini
my_project/
βββ build/ # generated
β βββ conan/ # conan dependencies install output
β βββ fbuild/ # generated fastbuild files (bff)
β βββ projects/ # generated vcxproj files
β βββ scripts/ # command scripts
β βββ venv/ # virtual environment, with activate and deactivate scripts
β
βββ my_app/
β βββ src/
β β βββ main.cpp
β βββ my_app.yml
β
βββ my_project.sln # generated
βββ build_tools.ini
βββ dependencies.ini
βββ project.yml
```
| zetsubou | /zetsubou-0.7.1.tar.gz/zetsubou-0.7.1/README.md | README.md |
# ZettaSQL
ZettaSQL - the most popular Open Source SQL database management system, is developed, distributed, and supported by [Ahens](https://ahens.rf.gd) Corporation.
- ZettaSQL is a database management system.
- ZettaSQL databases are relational.
- ZettaSQL software is Open Source.
- The ZettaSQL Database Server is very fast, reliable, scalable, and easy to use.
- ZettaSQL Server works in client/server or embedded systems.
- ZettaSQL commands are not case sensitive: everything is stored and retrieved in lowercase.
# Installation
- Open your command line (Command Prompt / Powershell / Bash / Terminal etc.)
- Run this command `pip install zettasql`.
- It will install all the dependencies.
# Usage
Learn more about ZettaSQL usage [here](https://soumadeepchoudhury.github.io/zettasql/). All commands are to be run in command line.
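
Judging from the client code, a session is opened from the command line with the root user created on first server start (the exact entry-point name and username here are assumptions):
```
zettasql -u root -p
```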
# Documentation
Learn more about ZettaSQL documentation [here](https://soumadeepchoudhury.github.io/zettasql/).
# Issues
You are free to raise issues for bugs [here](https://github.com/SoumadeepChoudhury/zettasql/issues).
# Contribution
You are free to contribute to this open source project. | zettasql | /zettasql-1.0.0.tar.gz/zettasql-1.0.0/README.md | README.md |
def displayTable(field: list = None, records: list = None, sep: str = None):
    '''Display the output in tabular format.'''
maxLength = [] # To determine the maximum length/width of each field/column
for i in range(len(field)): # Finding maxlength for each fields
length = len(field[i])
for j in records:
if length < len(str(j[i])):
length = len(str(j[i]))
maxLength.append(length+4)
    length = 0  # build the horizontal outline row, e.g. +----+----+
outliner = ''
while length < len(maxLength):
outliner += f"+{'-'*maxLength[length]}"
length += 1
outliner += "+"
for i in range(len(records)+1): # Building Table format
if i == 0:
print(outliner)
for j in range(len(maxLength)):
if j == 0:
print("|", end='')
if i == 0:
print(f"{field[j]:^{maxLength[j]}}|", end='')
else:
if sep != None:
print(
f"{records[i-1][j].replace(sep,''):^{maxLength[j]}}|", end='')
else:
print(
f"{records[i-1][j]:^{maxLength[j]}}|", end='')
print()
if i == 0 or i == len(records):
print(outliner)
print(f"{len(records)} rows in set ", end='')
def validParenthesis(userInput: str):
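    '''Return True if the input uses only round brackets and they are balanced.'''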
if "{" in userInput or "}" in userInput or '[' in userInput or ']' in userInput:
return False
listStack = []
res = True
    for i in userInput:
        if i == '(':
            listStack.append(i)
        if i == ')':
            if '(' in listStack:
                listStack.pop()
            else:
                res = False
                break
if listStack == [] and len(userInput) >= 2 and res:
return True
return False
def getData(file: object):
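    '''Unpickle and return every record stored in the given database file.'''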
data: list = []
import pickle
file.seek(0)
while True:
try:
data.append(str(pickle.load(file)))
except:
break
return data
def checkForDefaultToken(tokens: list, re: object):
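    '''Return (True, default_value) if a default token is present (value None when malformed), else False.'''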
try:
present = list(filter(re.compile("default*").match, tokens))[0].strip()
if re.fullmatch(r"^default\s?=\s?[0-9A-Za-z]+$", present) and present != "":
defaultValue = present.split("=")[1].strip()
if defaultValue != None:
return True, defaultValue
return True, None
except:
return False
def getConstraints(tokens: list, constraints: tuple):
allConstraint: list = []
for constraint in constraints:
if tokens.count(constraint) == 1:
allConstraint.append(constraint)
allConstraint = ','.join(allConstraint)
return allConstraint
def ifMultipleDatatype(tokens: list, datatype: list):
count: int = 0
for token in tokens:
if 'default' in token:
token = 'default'
if datatype.count(token) == 1:
count += 1
if count > 1:
return True
return False
def getLength(tokens: list, datatypes: dict, constraints: tuple):
if len(tokens) > 2:
if tokens[2] in constraints:
return datatypes[tokens[1]][1]
if tokens[1] in datatypes and int(tokens[2]) in range(*datatypes[tokens[1]]):
return int(tokens[2])
return None
def getTableData(file: object, tableName: str):
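    '''Return the pickled table record whose name matches tableName, or None.'''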
existingTable = getData(file)
for table in existingTable:
if table.startswith(tableName):
return table
return None
def isValidEntry(tableData: list, input: list):
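    '''Check that every supplied value matches its column's datatype and maximum length.'''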
def checkLengthRange(inputLength: int, datatypeElement: str, datatype: str):
if datatype in ('varchar', 'char', 'blob', 'int'):
try:
datatypeMaxRange: int = int(
datatypeElement[datatypeElement.rfind('(')+1:datatypeElement.find(")")])
if inputLength-2 <= datatypeMaxRange:
return True
except:
return True
return True
validity: dict = {'int': 1, 'varchar': "", 'char': '',
'blob': '', 'date': '', 'decimal': 2.0, 'bool': True}
if len(tableData) == len(input):
for index in range(len(tableData)):
datatype: str = tableData[index].split(",")[0]
datatypeElement: str = datatype
datatype = datatype[datatype.find("(")+1:datatype.rfind(")")]
datatype = datatype.split("(")[0]
if datatype == 'int' and input[index] == "''":
input[index] = "null"
continue
if not type(eval(input[index])) == type(validity[datatype]) or not checkLengthRange(len(input[index]), datatypeElement, datatype):
return False
return True
def insertDefaultValue(keys: list, value: list, input: str):
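    '''Substitute the column's default value when the supplied input is empty.'''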
if 'default' in keys and input == "''":
startIndex: int = keys.find("default(")+8
endIndex: int = keys[startIndex:].find(")")+startIndex
try:
return eval(keys[startIndex:endIndex])
except:
return keys[startIndex:endIndex]
if input == 'null':
return input
return eval(input)
def getIndexPos_selectedItems(data: list, items: list):
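    '''Map each selected column name to its positional index in the table definition.'''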
newList_Item: dict = {}
for item in items:
for element in data:
if element.startswith(item):
newList_Item[item] = data.index(element)
return newList_Item | zettasql | /zettasql-1.0.0.tar.gz/zettasql-1.0.0/zclient/HelperModule.py | HelperModule.py |
from .HelperModule import *
import re
import os
FILE: object = None # Holds the database file object
DATABASE_NAME: str = None # Holds the database name
TABLE_DISPLAYED: bool = False # Flag for if table is displayed
ERROR: bool = False # Flag for any error
# KEYS,EXTRAS -> Used in desc table for showing the structure
KEYS: list = ["primary_key", "foreign_key", "unique_key"]
EXTRAS: list = ["auto_increment"]
DATATYPES: dict = {'int': [0, 4294967295], 'varchar': [1, 256], 'blob': [0, 65535], 'char': [1, 256], 'date': [],
'decimal': [], 'bool': []} # Contains the datatypes
CONSTRAINTS = ('auto_increment', 'primary_key',
'unique_key', 'foreign key', 'default')
PATH = __file__ if '/' in __file__ else __file__.replace("\\", "/")
PATH = PATH[:PATH.rfind("/")]
PATH = PATH[:PATH.rfind("/")]
DATATYPE_CONSTRAINT_MATCH: dict = {'int': ['primary_key', 'auto_increment', 'foreign_key', 'unique_key', 'deafult'],
'varchar': ['default', 'primary_key', 'foreign_key', 'unique_key'],
'char': ['default', 'primary_key', 'foreign_key', 'unique_key'],
'blob': ['default', 'primary_key', 'foreign_key', 'unique_key'],
'date': ['default', 'primary_key', 'foreign_key', 'unique_key'],
'decimal': ['default', 'primary_key', 'foreign_key', 'unique_key', 'auto_increment'],
'bool': ['default']}
# NOTE: records are stored row-wise, one list per row, e.g. [[row1 values], [row2 values], ...]
def use_Database(cmd: str):
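    '''Handle `use <database>;`: open the database file and keep the handle in the module-global FILE.'''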
global ERROR, PATH
if re.fullmatch(r"^use (\S*);$", cmd):
file: str = cmd.replace("use ", "").strip().replace(";", "").strip()
global FILE, DATABASE_NAME
try:
# Globally opens the file for future use.
if not os.path.exists(f"{PATH}/zclient/databases"):
os.mkdir(f"{PATH}/zclient/databases")
if FILE != None:
print("Database changed")
FILE = open(f"{PATH}/zclient/databases/{file}.zdb", 'rb+')
DATABASE_NAME = file
except FileNotFoundError:
ERROR = True
print(f"ERROR 1020: Unknown database '{file}' ")
else:
ERROR = True
bug: list = re.split(r'^use (\S*);$', cmd)
print(
f"ERROR 1011: Syntax Error in ZettaSQL Commands near \'{bug[0].strip()}\'.")
def create_Database(cmd: str):
present: bool = False
global ERROR, PATH
if re.fullmatch(r"^create database (\S*);$", cmd) or re.fullmatch(r"^create database if not exists ([\S]*);$", cmd):
if "if not exists" in cmd:
present = True
# Getting the file (database) Name
file: str = cmd.replace(";", "").strip().split()[-1]
if os.path.exists(f"{PATH}/zclient/databases/{file}.zdb"):
if not present:
ERROR = True
print(
f"ERROR 1021: Can't create database '{file}'; database exists")
else:
# Creates the file Instance (Temporary stored)
_ = open(f"{PATH}/zclient/databases/{file}.zdb", 'wb+')
_.close()
else:
ERROR = True
bug: list = re.split(
r'^create database (\S*);$' if not present else r'^create database if not exists (\S*);$', cmd)
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near \'{bug[len(bug)//2]}\'")
def show_Database(cmd: str):
global ERROR, PATH
if re.fullmatch(r"^show databases;$", cmd):
global TABLE_DISPLAYED
files: list = list()
if not os.path.exists(f"{PATH}/zclient/databases"):
os.mkdir(f"{PATH}/zclient/databases")
filesInDirectory: list = os.listdir(f"{PATH}/zclient/databases/")
for i in filesInDirectory:
if not i.startswith("."):
files.append([i])
displayTable(field=["Databases"], records=files, sep=".zdb")
TABLE_DISPLAYED = True
else:
ERROR = True
bug: list = re.split(r'^show databases;$', cmd)
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near \'{bug[len(bug)//2]}\'")
def show_Tables(cmd: str):
global ERROR
if re.fullmatch(r"^show tables;$", cmd):
global FILE, TABLE_DISPLAYED
if FILE == None:
ERROR = True
print("ERROR 1022 : ZettaSQL Database not selected.")
else:
import pickle
FILE.seek(0)
tables: list = []
while True:
try:
data = str(pickle.load(FILE)).split('=')[0].strip()
tables.append([data])
except:
break
displayTable(field=[
f'Table_in_{DATABASE_NAME}'], records=tables)
TABLE_DISPLAYED = True
else:
ERROR = True
bug: list = re.split(r'^show tables;$', cmd)
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near \'{bug[len(bug)//2]}\'")
def create_Table(cmd: str):
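    '''Handle `create table ...;`: parse the column definitions, validate datatypes and constraints, and pickle the new table into the open database file.'''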
global ERROR, DATATYPES, CONSTRAINTS, FILE, DATATYPE_CONSTRAINT_MATCH
if FILE == None:
ERROR = True
print(f"ERROR 1022: Database not selected.")
return
if re.fullmatch(r"^create table (\w+\s?\((\S{1,}\s\S{1,},?\s*)+\));$", cmd):
        cmd = str(
            (cmd.replace("create table ", "").strip()).replace(";", ""))  # strip the keyword and the trailing semicolon
items: list = cmd[
cmd.find("(")+1:cmd.rfind(")")].split(",") # getting the name,datatype and value of arguments passed
tableName: str = cmd[:cmd.find(
'(')].strip() # Contain the table name
tableData: str = f"{tableName}="+"{"
for item in items:
if re.fullmatch("^\S+\s\S+(\s*(\(?)([1-9][0-9]*)(\)?))?(\s?\S*)+$", item.strip()) and validParenthesis(item):
# getting the names, datatypes and values from command in 'item'
itemModified: str = re.sub("[\(\)]", " ", item).strip()
tokens: list = itemModified.split(' ')
# removing empty element from list of 'tokens' by list comprehension
tokens = [i.strip() for i in tokens if i != '']
if ifMultipleDatatype(tokens, list(DATATYPES.keys())):
ERROR = True
tableData = ""
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near '{item}'")
break
tableNameAlreadyExists: bool = False # To check if table_name already exists
if tokens[1] in DATATYPES:
name: str = tokens[0]
datatype: str = tokens[1]
try:
length: int | None = getLength(
tokens, DATATYPES, CONSTRAINTS)
constraints: str | list = getConstraints(
tokens, CONSTRAINTS)
except:
ERROR = True
tableData = ""
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near '{item}'")
break
defaultValue: int | float | str | bool | None = None
defaultPresent: bool = checkForDefaultToken(tokens, re)
if defaultPresent:
# default value entered might be of wrong type
try:
# If default value is not given
if defaultPresent[1] == None:
raise
if datatype not in ('varchar', 'char', 'date'):
defaultValue = eval(defaultPresent[1])
else:
defaultValue = defaultPresent[1]
constraints += f',default({defaultValue})' if constraints != '' else f'default({defaultValue})'
except:
ERROR = True
tableData = ""
print(
f"ERROR 1024: Value error in default constraint near default={defaultPresent[1]}")
break
data = getData(FILE)
for names in data: # Checking for if tableName exists
if tableName.lower() == names.split("=")[0].strip().lower():
tableData = ""
print(
f"ERROR 1023: Cannot create table. '{tableName}' already exists.")
tableNameAlreadyExists = True
ERROR = True
break
if tableNameAlreadyExists:
break
# Format of table --> Table_Name={"col_name(datatype,constrainst)":[...],"col_name(datatype,constraint)":[...]}
tableData += f"\"{name}({datatype}{'('+str(length)+')' if length!=None else ''}{(','+constraints) if constraints!='' else ''})\":[]{',' if item!=items[-1] else ''}"
else:
ERROR = True
tableData = ""
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near '{tokens[1]}'")
break
else:
ERROR = True
tableData = ""
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near '{item}'")
break
tableData += '}' if tableData != "" else ''
if FILE != None and tableData != "":
import pickle
FILE.seek(0)
for i in data:
pickle.dump(i, FILE)
pickle.dump(tableData, FILE)
FILE.flush()
else:
ERROR = True
bug: list = re.split(r'^create table (\S*)\(\S* \S*\);$', cmd)
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near \'{bug[len(bug)//2]}\'")
def desc(cmd: str):
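    '''Handle `desc <table>;`: print the table structure (Field, Type, Null, Key, Default, Extra).'''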
global ERROR, TABLE_DISPLAYED, FILE, DATATYPES
if re.fullmatch(r"^desc (\S+)\s?;$", cmd):
cmd = cmd[:-1]
        preset: dict | None = None
try:
cmd = cmd.split(" ")[1].strip()
existingTables: list = getData(FILE)
for tableName in existingTables:
if cmd.lower() == tableName.split("=")[0].strip().lower():
preset: dict = eval(tableName.split("=")[1].strip())
break
if preset == None:
ERROR = True
print(f"ERROR 1019: Unknown table '{cmd}'")
else:
fields: list = ["Field", "Type",
"Null", "Key", "Default", "Extra"]
records: list = []
added: bool = False
for key in preset.keys():
added = False
dataPreset: list = [key[:key.find("(")]]
defaultPresent: bool = False
key = key[key.find('(')+1:key.rfind(')')]
keyElements = key.split(",")
for element in keyElements:
field = re.split("[\(\)]", element)
fieldName = field[0]
if 'default' in field:
defaultPresent = True
break
if len(field) > 1:
if int(field[1]) == DATATYPES[fieldName][1]:
dataPreset.append(fieldName)
else:
dataPreset.append(f"{fieldName}({field[1]})")
added = True
if fieldName in DATATYPES and not added:
dataPreset.append(fieldName)
if defaultPresent:
default = field[1]
extras = list(set(EXTRAS) & set(keyElements))
if extras == []:
extras = ''
specialKey = list(set(KEYS) & set(keyElements))
if specialKey == []:
specialKey = ''
dataPreset.extend([
'no' if specialKey != '' else 'yes', specialKey[0][0:3] if specialKey != '' else '', default if defaultPresent else "null", extras[0] if extras != '' else ''])
records.append(dataPreset)
displayTable(field=fields, records=records)
TABLE_DISPLAYED = True
except:
ERROR = True
bug: list = re.split(r'^desc (\S+);$', cmd)
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near \'{bug[len(bug)//2]}\'")
else:
ERROR = True
bug: list = re.split(r'^desc (\S+);$', cmd)
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near \'{bug[len(bug)//2]}\'")
def drop_database(cmd: str):
global ERROR, FILE, PATH
if re.fullmatch(r"^drop database (\S+);$", cmd):
file = re.split(r"^drop database (\S+);$", cmd)[1]
if os.path.exists(f"{PATH}/zclient/databases/{file}.zdb"):
FILE_name: str = str(FILE.name if FILE != None else '')
if file == FILE_name[FILE_name.rfind("/")+1:FILE_name.rfind(".")]:
FILE.close()
FILE = None
os.remove(f"{PATH}/zclient/databases/{file}.zdb")
else:
ERROR = True
print(f"ERROR 1020: Unknown database '{file}'")
else:
ERROR = True
bug: list = re.split(r'^drop database (\S+);$', cmd)
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near \'{bug[len(bug)//2]}\'")
def drop_table(cmd: str):
global ERROR, FILE
if re.fullmatch(r"^drop table (\S+);$", cmd):
existingTables: list = getData(FILE)
tableName: str = re.split(r"^drop table (\S+);$", cmd)[1].strip()
tableFound: bool = False
import pickle
FILE.seek(0)
FILE.truncate(0)
for table in existingTables:
if table.startswith(f"{tableName}="):
tableFound = True
continue
pickle.dump(table, FILE)
FILE.flush()
if not tableFound:
ERROR = True
print(f"ERROR 1019: Unknown table '{tableName}'")
else:
ERROR = True
bug: list = re.split(r'^drop table (\S+);$', cmd)
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near \'{bug[len(bug)//2]}\'")
def delete(cmd: str):
global ERROR, FILE
if re.fullmatch(r"^delete from table (\S+);$", cmd):
existingTables: list = getData(FILE)
tableName: str = re.split(r"^delete from table (\S+);$", cmd)[1]
tableFound: bool = False
import pickle
FILE.seek(0)
for table in existingTables:
if table.startswith(f"{tableName}="):
tableFound = True
table = eval(table.split("=")[1].strip())
table = {x: [] for x in table}
table = tableName+'='+str(table)
pickle.dump(table, FILE)
FILE.flush()
if not tableFound:
ERROR = True
print(f"ERROR 1019: Unknown table '{tableName}'")
else:
ERROR = True
bug: list = re.split(r'^delete from table (\S+);$', cmd)
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near \'{bug[len(bug)//2]}\'")
def insert(cmd: str):
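    '''Handle `insert into <table> values(...);`: validate the values against the table definition and append the row.'''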
global ERROR, FILE
# insert into table2(no,sname) values(1,"Hi Hello");
if re.fullmatch(r"^insert into (\S+)\svalues\s?\(\S+\);$", cmd):
tableName = re.split(
r"^insert into (\S+)\svalues\s?\(\S+\);$", cmd)[1].strip().lower()
cmd = cmd[:-1].strip() # removing the semicolon
cmd = cmd[cmd.rfind("(")+1:cmd.rfind(")")] # getting the values
cmd = cmd.split(",") # splitting the values
refinedInput = [i for i in cmd if i != ''] # removing blank spaces
tableData: str = eval(getTableData(
FILE, tableName).split("=")[1].strip())
try:
if len(list(tableData.keys())) == len(refinedInput) and isValidEntry(list(tableData.keys()), refinedInput):
index = 0
for key, value in tableData.items():
refinedInput[index] = insertDefaultValue(
key, value, refinedInput[index])
if refinedInput[index] == '' or refinedInput[index] == "''":
refinedInput[index] = 'null'
value.append(refinedInput[index])
index += 1
if FILE != None:
existingTables: list = getData(FILE)
import pickle
FILE.seek(0)
for table in existingTables:
if not table.startswith(tableName):
pickle.dump(table, FILE)
continue
pickle.dump(f"{tableName}={tableData}", FILE)
FILE.flush()
else:
raise ValueError("Entry doesn't satisfy")
except Exception as e:
ERROR = True
print(
f"ERROR 1011: Syntax Error in ZettaSQL command. \"{e}\"")
if tableData == None:
ERROR = True
print(f"ERROR 1019: Unknown Table '{tableName}'")
else:
ERROR = True
bug: list = re.split(r"^insert into (\S+)\svalues\s?\(\S+\);$", cmd)
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near \'{bug[len(bug)//2]}\'")
def select(cmd: str):
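    '''Handle `select ... from <table>;`: fetch the requested columns and display them in tabular form.'''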
global ERROR, FILE, TABLE_DISPLAYED
try:
if re.fullmatch(r"^select [\*|(\S+\s?,?\s?)*]+ from \S+;$", cmd):
cmd = cmd[:-1]
cmd = cmd.split(" ")
items: list = cmd[cmd.index("select")+1:cmd.index("from")]
items = items[0].split(",") if len(items) == 1 else items
items = [x.replace(",", "").strip()
for x in items if x.strip() not in (",", "", " ")]
tableName: str = cmd[-1]
if '*' in items and len(items) > 1:
ERROR = True
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near \'{items[1]}\'")
return
returnedData: str = getTableData(FILE, tableName)
data: dict = eval(returnedData.split(
"=")[1].strip() if returnedData != None else {})
if data == {}:
raise ValueError
fields: list = []
records: list = []
for keys in data.keys():
fields.append(keys[:keys.find("(")])
if '*' in items:
for times in range(len(list(data.values())[0])):
recordLine: list = []
for values in data.values():
# print(times, "-->", values[times])
recordLine.append(values[times])
records.append(recordLine)
displayTable(field=fields, records=records)
TABLE_DISPLAYED = True
else:
updateField: list = []
for item in items:
if item in fields:
updateField.append(item)
else:
ERROR = True
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near '{item}'")
return
fields = updateField
itemIndexed: dict = getIndexPos_selectedItems(
list(data.keys()), items)
for times in range(len(list(data.values())[0])):
recordLine: list = []
for itemIndex in itemIndexed.values():
recordLine.append(list(data.values())
[itemIndex][times])
records.append(recordLine)
displayTable(field=fields, records=records)
TABLE_DISPLAYED = True
else:
ERROR = True
bug: list = re.split(
r"^select [\s?\*\s?|(\S+\s?,?\s?)*]+ from \S+;$", cmd)
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near \'{bug[len(bug)//2]}\'")
except ValueError:
ERROR = True
print(f"ERROR 1019: Unknown table '{tableName}'")
COMMANDS: dict = {"use": use_Database, "create database": create_Database,
"show databases": show_Database, "show tables": show_Tables, "create table": create_Table, "desc": desc, "drop database": drop_database, "drop table": drop_table, "delete from": delete, "insert into": insert, "select": select}
def main():
try:
from getpass import getpass
import sys
import signal
try:
import readline
except:
pass
global TABLE_DISPLAYED, COMMANDS, ERROR, PATH
flag: bool = False
def signal_control(SignalNumber: object, Frame: object):
            # handles keyboard interrupts (CTRL+C) so the prompt is redrawn instead of the session dying
if flag:
print("\n\nzettasql> ", end='')
else:
print("\rPassword: ", end='')
# handling singal event from keyboard.
signal.signal(signal.SIGINT, signal_control)
arg: list = sys.argv # getting the command line tags
if len(arg) >= 4 and arg[1] == '-u' and arg[3] == '-p':
# checking for login criteria
if os.path.exists(f"{PATH}/zserver/.config"):
# .config files stores users data like username and password.
with open(f"{PATH}/zserver/.config", 'rb') as file:
import pickle
data: dict = pickle.load(file)
if data['username@admin'] != arg[2]:
print(
f"Access Denied for {arg[2]} :- Not registered user.")
sys.exit()
else:
print(f"Access Denied for {arg[2]} :- Not registered user.")
sys.exit()
pas: str = getpass("Password: ")
if data['password@admin'] != pas:
print(f"Access Denied for {arg[2]} (with password : YES) ")
sys.exit()
else:
flag = True
if not os.path.exists(f"{PATH}/zserver/.log"):
# .log file contains server information like it's state of connection
print("ERROR 1000 : Can't connect to ZettaSQL server")
sys.exit()
with open(f'{PATH}/zclient/info.log', 'r+') as file:
# info.log file contains application information
# checks for connection id and edits the info.log file for new connection id
items: list = file.readlines()
present: bool = False
file.seek(0)
data: str = file.read()
if "id :" not in data:
_id: int = 1
else:
present = True
# finding the location of "id :" and getting the value from that by splicing.
_id = int(str([i.split(":")[1].strip()
for i in items if "id :" in i][0]))
file.seek(0)
if not present:
# rewriting new data with connection id
file.write(data+"id : "+str(_id+1))
else:
# updating the connection id
file.write(data.replace(
str(f"id : {_id}"), str(f"id : {_id+1}")))
                    version: str = str([i for i in items if "version :" in i][0]).split(
                        ":")[1].strip()  # fetching the version code
print("""
______ _ _ _____ _____ _
|___ / | | | | / ___|| _ | |
/ / ___| |_| |_ __ _\ `--. | | | | |
/ / / _ \ __| __/ _` |`--. \| | | | |
./ /__| __/ |_| || (_| /\__/ /\ \/' / |____
\_____/\___|\__|\__\__,_\____/ \_/\_\_____/
""")
print(f"Welcome to the ZettaSQL monitor. Commands end with ; or \g.\nYour ZettaSQL connection id is {_id} \nZettaSQL Server version: {version} Ahens | An Initiative to Initial. \n \nCopyright (c) 2023, Ahens | An Initiative to Initial and/or its affiliates. \n\nAhens | An Initiative to Initial is a registered trademark of Ahens | An Initiative to Initial Corporation and/or its \n affiliates. Other names may be trademarks of their respective owners.\n\nType 'help;' or '\h' for help. Type '\c' to clear the current input statement.")
import time
while True:
ERROR = False
cmd: str = input("\nzettasql> ").lower()
if cmd == "":
continue
if cmd[-1] != ';':
print(
f"ERROR 1011: Syntax Error in ZettaSQL command near '{cmd[-5:]}'")
continue
if (cmd in ('exit;', 'quit;', 'bye;')):
global FILE
# Close the FILE (database) instance.
                if FILE is not None:
                    FILE.close()
print('Bye')
break
else:
try:
for item in COMMANDS.keys():
if item in cmd:
# Calling respective functions
start: float = time.time() # Getting execution time
COMMANDS[item](cmd)
end: float = time.time()
if not ERROR:
if TABLE_DISPLAYED:
print(
f"({round((end-start),3)} sec)")
TABLE_DISPLAYED = False
else:
print(
f"Query ran in {round((end-start),3)} sec.")
break
else:
print(
f"ERROR 1012: Unknown ZettaSQL command : '{cmd.split()[0]}'")
except:
pass
else:
print("Access denied.")
sys.exit()
except:
sys.exit()
if __name__ == "__main__":
main() | zettasql | /zettasql-1.0.0.tar.gz/zettasql-1.0.0/zclient/main.py | main.py |
def start():
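    '''Start a lightweight local HTTP server to stand in as the ZettaSQL server process, and record its PID and port in zserver/.log.'''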
import subprocess
import platform
import os
import random
import pickle
PATH = __file__ if '/' in __file__ else __file__.replace("\\", "/")
PATH = PATH[:PATH.rfind("/")]
PATH = PATH[:PATH.rfind("/")]
if not os.path.exists(f"{PATH}/zserver/.NULL"):
os.mkdir(f"{PATH}/zserver/.NULL")
PID = 0
process = None
    PORT = str(random.randint(1024, 65535))  # a valid, non-privileged TCP port (65336 exceeded the 65535 maximum)
if not os.path.exists(f"{PATH}/zserver/.log"):
# .log file contains the server information.
try:
if not os.path.exists(f"{PATH}/zserver/.config"):
                # .config file contains the client details.
with open(f"{PATH}/zserver/.config", 'wb') as file:
# Upload the user credentials in .config
from getpass import getpass
print("[*] Create root User")
username = input("[*] Enter root username: ")
password = getpass("[*] Enter root password: ")
pickle.dump({'username@admin': username,
'password@admin': password}, file)
if platform.system() in ("Darwin", "Linux"):
# Starting the server in Linux Based OS
process = subprocess.Popen(["python3", "-m", "http.server", PORT, "--directory",
".NULL"], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
PID = process.pid # Getting the PID in order to kill it later
elif platform.system() == 'Windows':
# Start server in Windows OS
                with open(os.devnull, 'w') as nullVal:
                    process = subprocess.Popen(
                        ["python", "-m", "http.server", PORT, "--directory", f"{PATH}/zserver/.NULL"], stdout=nullVal, stderr=nullVal)
PID = process.pid
except Exception as e:
print(f"Error {e.args[0]}. Unable to connect to ZettaSQL serer.")
if PID != 0:
with open(f"{PATH}/zserver/.log", 'wb') as file:
try:
# Updating server information in .log file
data = str(PID)+"%"+str(PORT)
pickle.dump(data, file)
except:
print('Unable to connect to ZettaSQL server.')
process.terminate()
else:
print("ZettaSQL server is already on.")
if __name__ == "__main__":
start() | zettasql | /zettasql-1.0.0.tar.gz/zettasql-1.0.0/zserver/start.py | start.py |