from .models.iam_account import IamAccount
from .models.iam_account_permissions import IamAccountPermissions
from .models.iam_api_key import IamApiKey
from .models.iam_app_registration import IamAppRegistration
from .models.iam_certificate import IamCertificate
from .models.iam_certificate_request import IamCertificateRequest
from .models.iam_client_meta import IamClientMeta
from .models.iam_domain_group import IamDomainGroup
from .models.iam_end_point_password_properties import IamEndPointPasswordProperties
from .models.iam_end_point_privilege import IamEndPointPrivilege
from .models.iam_end_point_role import IamEndPointRole
from .models.iam_end_point_user import IamEndPointUser
from .models.iam_end_point_user_role import IamEndPointUserRole
from .models.iam_group_permission_to_roles import IamGroupPermissionToRoles
from .models.iam_idp import IamIdp
from .models.iam_idp_reference import IamIdpReference
from .models.iam_ldap_base_properties import IamLdapBaseProperties
from .models.iam_ldap_dns_parameters import IamLdapDnsParameters
from .models.iam_ldap_group import IamLdapGroup
from .models.iam_ldap_provider import IamLdapProvider
from .models.iam_local_user_password import IamLocalUserPassword
from .models.iam_o_auth_token import IamOAuthToken
from .models.iam_permission import IamPermission
from .models.iam_permission_reference import IamPermissionReference
from .models.iam_permission_to_roles import IamPermissionToRoles
from .models.iam_private_key_spec import IamPrivateKeySpec
from .models.iam_privilege import IamPrivilege
from .models.iam_privilege_set import IamPrivilegeSet
from .models.iam_qualifier import IamQualifier
from .models.iam_resource_limits import IamResourceLimits
from .models.iam_resource_permission import IamResourcePermission
from .models.iam_resource_roles import IamResourceRoles
from .models.iam_role import IamRole
from .models.iam_security_holder import IamSecurityHolder
from .models.iam_service_provider import IamServiceProvider
from .models.iam_session import IamSession
from .models.iam_session_limits import IamSessionLimits
from .models.iam_sso_session_attributes import IamSsoSessionAttributes
from .models.iam_system import IamSystem
from .models.iam_trust_point import IamTrustPoint
from .models.iam_user import IamUser
from .models.iam_user_group import IamUserGroup
from .models.iam_user_preference import IamUserPreference
from .models.inventory_base import InventoryBase
from .models.inventory_dn_mo_binding import InventoryDnMoBinding
from .models.inventory_inventory_mo import InventoryInventoryMo
from .models.inventory_request import InventoryRequest
from .models.license_account_license_data import LicenseAccountLicenseData
from .models.license_customer_op import LicenseCustomerOp
from .models.license_license_info import LicenseLicenseInfo
from .models.license_smartlicense_token import LicenseSmartlicenseToken
from .models.meta_access_privilege import MetaAccessPrivilege
from .models.meta_definition import MetaDefinition
from .models.meta_prop_definition import MetaPropDefinition
from .models.meta_relationship_definition import MetaRelationshipDefinition
from .models.mo_mo_ref import MoMoRef
from .models.mo_tag import MoTag
from .models.mo_version_context import MoVersionContext
from .models.niaapi_detail import NiaapiDetail
from .models.niaapi_field_notice import NiaapiFieldNotice
from .models.niaapi_file_downloader import NiaapiFileDownloader
from .models.niaapi_hardware_eol import NiaapiHardwareEol
from .models.niaapi_maintained_release import NiaapiMaintainedRelease
from .models.niaapi_new_release_detail import NiaapiNewReleaseDetail
from .models.niaapi_new_release_post import NiaapiNewReleasePost
from .models.niaapi_nia_metadata import NiaapiNiaMetadata
from .models.niaapi_release_recommend import NiaapiReleaseRecommend
from .models.niaapi_revision_info import NiaapiRevisionInfo
from .models.niaapi_software_eol import NiaapiSoftwareEol
from .models.niaapi_software_regex import NiaapiSoftwareRegex
from .models.niaapi_version_regex import NiaapiVersionRegex
from .models.niaapi_version_regex_platform import NiaapiVersionRegexPlatform
from .models.niatelemetry_diskinfo import NiatelemetryDiskinfo
from .models.niatelemetry_nia_inventory import NiatelemetryNiaInventory
from .models.niatelemetry_nia_license_state import NiatelemetryNiaLicenseState
from .models.onprem_image_package import OnpremImagePackage
from .models.onprem_schedule import OnpremSchedule
from .models.onprem_upgrade_note import OnpremUpgradeNote
from .models.onprem_upgrade_phase import OnpremUpgradePhase
from .models.organization_organization import OrganizationOrganization
from .models.os_answers import OsAnswers
from .models.os_base_install_config import OsBaseInstallConfig
from .models.os_catalog import OsCatalog
from .models.os_configuration_file import OsConfigurationFile
from .models.os_operating_system_parameters import OsOperatingSystemParameters
from .models.os_os_support import OsOsSupport
from .models.os_place_holder import OsPlaceHolder
from .models.os_template_file import OsTemplateFile
from .models.pkix_distinguished_name import PkixDistinguishedName
from .models.pkix_key_generation_spec import PkixKeyGenerationSpec
from .models.pkix_subject_alternate_name import PkixSubjectAlternateName
from .models.policy_abstract_config_change_detail import PolicyAbstractConfigChangeDetail
from .models.policy_abstract_config_result import PolicyAbstractConfigResult
from .models.policy_abstract_config_result_entry import PolicyAbstractConfigResultEntry
from .models.policy_abstract_policy import PolicyAbstractPolicy
from .models.policy_abstract_profile import PolicyAbstractProfile
from .models.policy_config_change import PolicyConfigChange
from .models.policy_config_context import PolicyConfigContext
from .models.policy_config_result_context import PolicyConfigResultContext
from .models.policyinventory_abstract_device_info import PolicyinventoryAbstractDeviceInfo
from .models.policyinventory_job_info import PolicyinventoryJobInfo
from .models.recovery_abstract_backup_info import RecoveryAbstractBackupInfo
from .models.recovery_backup_schedule import RecoveryBackupSchedule
from .models.recovery_config_params import RecoveryConfigParams
from .models.recovery_restore import RecoveryRestore
from .models.resource_group import ResourceGroup
from .models.resource_group_member import ResourceGroupMember
from .models.resource_license_resource_count import ResourceLicenseResourceCount
from .models.resource_membership import ResourceMembership
from .models.resource_membership_holder import ResourceMembershipHolder
from .models.resource_per_type_combined_selector import ResourcePerTypeCombinedSelector
from .models.resource_selector import ResourceSelector
from .models.sdcard_partition import SdcardPartition
from .models.sdcard_virtual_drive import SdcardVirtualDrive
from .models.sdwan_network_configuration_type import SdwanNetworkConfigurationType
from .models.sdwan_router_node import SdwanRouterNode
from .models.sdwan_template_inputs_type import SdwanTemplateInputsType
from .models.search_search_item import SearchSearchItem
from .models.search_suggest_item import SearchSuggestItem
from .models.search_tag_item import SearchTagItem
from .models.server_config_import import ServerConfigImport
from .models.snmp_trap import SnmpTrap
from .models.snmp_user import SnmpUser
from .models.softwarerepository_authorization import SoftwarerepositoryAuthorization
from .models.softwarerepository_catalog import SoftwarerepositoryCatalog
from .models.softwarerepository_file import SoftwarerepositoryFile
from .models.softwarerepository_file_server import SoftwarerepositoryFileServer
from .models.softwarerepository_import_result import SoftwarerepositoryImportResult
from .models.storage_capacity import StorageCapacity
from .models.storage_host import StorageHost
from .models.storage_host_lun import StorageHostLun
from .models.storage_initiator import StorageInitiator
from .models.storage_local_disk import StorageLocalDisk
from .models.storage_physical_port import StoragePhysicalPort
from .models.storage_protection_group import StorageProtectionGroup
from .models.storage_protection_group_snapshot import StorageProtectionGroupSnapshot
from .models.storage_remote_key_setting import StorageRemoteKeySetting
from .models.storage_replication_blackout import StorageReplicationBlackout
from .models.storage_replication_schedule import StorageReplicationSchedule
from .models.storage_snapshot import StorageSnapshot
from .models.storage_snapshot_schedule import StorageSnapshotSchedule
from .models.storage_span_group import StorageSpanGroup
from .models.storage_virtual_drive_config import StorageVirtualDriveConfig
from .models.storage_volume import StorageVolume
from .models.syslog_local_client_base import SyslogLocalClientBase
from .models.syslog_remote_client_base import SyslogRemoteClientBase
from .models.tam_action import TamAction
from .models.tam_advisory import TamAdvisory
from .models.tam_advisory_count import TamAdvisoryCount
from .models.tam_advisory_info import TamAdvisoryInfo
from .models.tam_advisory_instance import TamAdvisoryInstance
from .models.tam_base_data_source import TamBaseDataSource
from .models.tam_identifiers import TamIdentifiers
from .models.tam_query_entry import TamQueryEntry
from .models.tam_severity import TamSeverity
from .models.task_pure_storage_scoped_inventory import TaskPureStorageScopedInventory
from .models.telemetry_time_series import TelemetryTimeSeries
from .models.terminal_audit_log import TerminalAuditLog
from .models.ucsd_connector_pack import UcsdConnectorPack
from .models.views_view import ViewsView
from .models.vmedia_mapping import VmediaMapping
from .models.vnic_arfs_settings import VnicArfsSettings
from .models.vnic_cdn import VnicCdn
from .models.vnic_completion_queue_settings import VnicCompletionQueueSettings
from .models.vnic_eth_if import VnicEthIf
from .models.vnic_eth_interrupt_settings import VnicEthInterruptSettings
from .models.vnic_eth_rx_queue_settings import VnicEthRxQueueSettings
from .models.vnic_eth_tx_queue_settings import VnicEthTxQueueSettings
from .models.vnic_fc_error_recovery_settings import VnicFcErrorRecoverySettings
from .models.vnic_fc_if import VnicFcIf
from .models.vnic_fc_interrupt_settings import VnicFcInterruptSettings
from .models.vnic_fc_queue_settings import VnicFcQueueSettings
from .models.vnic_flogi_settings import VnicFlogiSettings
from .models.vnic_nvgre_settings import VnicNvgreSettings
from .models.vnic_placement_settings import VnicPlacementSettings
from .models.vnic_plogi_settings import VnicPlogiSettings
from .models.vnic_roce_settings import VnicRoceSettings
from .models.vnic_scsi_queue_settings import VnicScsiQueueSettings
from .models.vnic_tcp_offload_settings import VnicTcpOffloadSettings
from .models.vnic_usnic_settings import VnicUsnicSettings
from .models.vnic_vlan_settings import VnicVlanSettings
from .models.vnic_vmq_settings import VnicVmqSettings
from .models.vnic_vsan_settings import VnicVsanSettings
from .models.vnic_vxlan_settings import VnicVxlanSettings
from .models.workflow_api import WorkflowApi
from .models.workflow_array_item import WorkflowArrayItem
from .models.workflow_base_data_type import WorkflowBaseDataType
from .models.workflow_batch_api_executor import WorkflowBatchApiExecutor
from .models.workflow_build_task_meta import WorkflowBuildTaskMeta
from .models.workflow_build_task_meta_owner import WorkflowBuildTaskMetaOwner
from .models.workflow_catalog import WorkflowCatalog
from .models.workflow_constraints import WorkflowConstraints
from .models.workflow_custom_data_property import WorkflowCustomDataProperty
from .models.workflow_custom_data_type_definition import WorkflowCustomDataTypeDefinition
from .models.workflow_decision_case import WorkflowDecisionCase
from .models.workflow_default_value import WorkflowDefaultValue
from .models.workflow_dynamic_workflow_action_task_list import WorkflowDynamicWorkflowActionTaskList
from .models.workflow_enum_entry import WorkflowEnumEntry
from .models.workflow_expect_prompt import WorkflowExpectPrompt
from .models.workflow_internal_properties import WorkflowInternalProperties
from .models.workflow_message import WorkflowMessage
from .models.workflow_mo_reference_property import WorkflowMoReferenceProperty
from .models.workflow_pending_dynamic_workflow_info import WorkflowPendingDynamicWorkflowInfo
from .models.workflow_primitive_data_property import WorkflowPrimitiveDataProperty
from .models.workflow_properties import WorkflowProperties
from .models.workflow_target_property import WorkflowTargetProperty
from .models.workflow_task_constraints import WorkflowTaskConstraints
from .models.workflow_task_definition import WorkflowTaskDefinition
from .models.workflow_task_info import WorkflowTaskInfo
from .models.workflow_task_meta import WorkflowTaskMeta
from .models.workflow_task_retry_info import WorkflowTaskRetryInfo
from .models.workflow_validation_error import WorkflowValidationError
from .models.workflow_validation_information import WorkflowValidationInformation
from .models.workflow_wait_task_prompt import WorkflowWaitTaskPrompt
from .models.workflow_workflow_definition import WorkflowWorkflowDefinition
from .models.workflow_workflow_info import WorkflowWorkflowInfo
from .models.workflow_workflow_info_properties import WorkflowWorkflowInfoProperties
from .models.workflow_workflow_meta import WorkflowWorkflowMeta
from .models.workflow_workflow_properties import WorkflowWorkflowProperties
from .models.workflow_workflow_task import WorkflowWorkflowTask
from .models.x509_certificate import X509Certificate
from .models.aaa_abstract_audit_record_ref import AaaAbstractAuditRecordRef
from .models.aaa_audit_record import AaaAuditRecord
from .models.aaa_audit_record_local_ref import AaaAuditRecordLocalRef
from .models.aaa_audit_record_ref import AaaAuditRecordRef
from .models.adapter_adapter_config_ref import AdapterAdapterConfigRef
from .models.adapter_config_policy import AdapterConfigPolicy
from .models.adapter_config_policy_ref import AdapterConfigPolicyRef
from .models.adapter_dce_interface_settings_ref import AdapterDceInterfaceSettingsRef
from .models.adapter_deploy_task_ref import AdapterDeployTaskRef
from .models.adapter_eth_settings_ref import AdapterEthSettingsRef
from .models.adapter_ext_eth_interface_ref import AdapterExtEthInterfaceRef
from .models.adapter_fc_settings_ref import AdapterFcSettingsRef
from .models.adapter_host_eth_interface_ref import AdapterHostEthInterfaceRef
from .models.adapter_host_fc_interface_ref import AdapterHostFcInterfaceRef
from .models.adapter_host_iscsi_interface_ref import AdapterHostIscsiInterfaceRef
from .models.adapter_port_channel_settings_ref import AdapterPortChannelSettingsRef
from .models.adapter_unit_ref import AdapterUnitRef
from .models.adapter_validation_task_ref import AdapterValidationTaskRef
from .models.ansibletaskexec_initialize_ansible_playbook_input_ref import AnsibletaskexecInitializeAnsiblePlaybookInputRef
from .models.ansibletaskexec_start_remote_ansible_playbook_ref import AnsibletaskexecStartRemoteAnsiblePlaybookRef
from .models.appliance_api_status_ref import ApplianceApiStatusRef
from .models.appliance_app_status_ref import ApplianceAppStatusRef
from .models.appliance_backup import ApplianceBackup
from .models.appliance_backup_base_ref import ApplianceBackupBaseRef
from .models.appliance_backup_policy import ApplianceBackupPolicy
from .models.appliance_backup_policy_ref import ApplianceBackupPolicyRef
from .models.appliance_backup_ref import ApplianceBackupRef
from .models.appliance_certificate_setting_ref import ApplianceCertificateSettingRef
from .models.appliance_claim_device_task_ref import ApplianceClaimDeviceTaskRef
from .models.appliance_configure_idp_task_ref import ApplianceConfigureIdpTaskRef
from .models.appliance_configure_ldap_task_ref import ApplianceConfigureLdapTaskRef
from .models.appliance_connection_state_verification_task_ref import ApplianceConnectionStateVerificationTaskRef
from .models.appliance_data_export_policy_ref import ApplianceDataExportPolicyRef
from .models.appliance_deploy_dns_task_ref import ApplianceDeployDnsTaskRef
from .models.appliance_deploy_ntp_task_ref import ApplianceDeployNtpTaskRef
from .models.appliance_deploy_trust_store_task_ref import ApplianceDeployTrustStoreTaskRef
from .models.appliance_device_bundle_ref import ApplianceDeviceBundleRef
from .models.appliance_device_claim_ref import ApplianceDeviceClaimRef
from .models.appliance_device_file_ref import ApplianceDeviceFileRef
from .models.appliance_device_state_ref import ApplianceDeviceStateRef
from .models.appliance_device_tier_ref import ApplianceDeviceTierRef
from .models.appliance_device_upgrade_ref import ApplianceDeviceUpgradeRef
from .models.appliance_device_upload_ref import ApplianceDeviceUploadRef
from .models.appliance_diag_setting_ref import ApplianceDiagSettingRef
from .models.appliance_domain_permission_ref import ApplianceDomainPermissionRef
from .models.appliance_endpoint_login_task_ref import ApplianceEndpointLoginTaskRef
from .models.appliance_endpoint_logout_task_ref import ApplianceEndpointLogoutTaskRef
from .models.appliance_file_gateway_ref import ApplianceFileGatewayRef
from .models.appliance_file_system_status_ref import ApplianceFileSystemStatusRef
from .models.appliance_group_status_ref import ApplianceGroupStatusRef
from .models.appliance_image_bundle_ref import ApplianceImageBundleRef
from .models.appliance_key_value_pair_ref import ApplianceKeyValuePairRef
from .models.appliance_manifest_ref import ApplianceManifestRef
from .models.appliance_node_info_ref import ApplianceNodeInfoRef
from .models.appliance_node_status_ref import ApplianceNodeStatusRef
from .models.appliance_proxy_end_point_ref import ApplianceProxyEndPointRef
from .models.appliance_release_note_ref import ApplianceReleaseNoteRef
from .models.appliance_resolve_device_identifier_task_ref import ApplianceResolveDeviceIdentifierTaskRef
from .models.appliance_resolve_device_token_task_ref import ApplianceResolveDeviceTokenTaskRef
from .models.appliance_restore import ApplianceRestore
from .models.appliance_restore_ref import ApplianceRestoreRef
from .models.appliance_setup_info_ref import ApplianceSetupInfoRef
from .models.appliance_signing_key_ref import ApplianceSigningKeyRef
from .models.appliance_status_check_ref import ApplianceStatusCheckRef
from .models.appliance_status_upload_data_ref import ApplianceStatusUploadDataRef
from .models.appliance_status_upload_ref import ApplianceStatusUploadRef
from .models.appliance_system_info_ref import ApplianceSystemInfoRef
from .models.appliance_system_status_ref import ApplianceSystemStatusRef
from .models.appliance_update_certificate_task_ref import ApplianceUpdateCertificateTaskRef
from .models.appliance_upgrade_policy_ref import ApplianceUpgradePolicyRef
from .models.appliance_upgrade_ref import ApplianceUpgradeRef
from .models.appliance_wait_for_upgrade_task_ref import ApplianceWaitForUpgradeTaskRef
from .models.artifactdownload_save_artifacts_ref import ArtifactdownloadSaveArtifactsRef
from .models.asset_address_information_ref import AssetAddressInformationRef
from .models.asset_cluster_member import AssetClusterMember
from .models.asset_cluster_member_ref import AssetClusterMemberRef
from .models.asset_confirm_intersight_assist_connectivity_ref import AssetConfirmIntersightAssistConnectivityRef
from .models.asset_connection_control_message_ref import AssetConnectionControlMessageRef
from .models.asset_contract_information_ref import AssetContractInformationRef
from .models.asset_customer_information_ref import AssetCustomerInformationRef
from .models.asset_device_claim_ref import AssetDeviceClaimRef
from .models.asset_device_configuration_ref import AssetDeviceConfigurationRef
from .models.asset_device_connection_ref import AssetDeviceConnectionRef
from .models.asset_device_connector_manager_ref import AssetDeviceConnectorManagerRef
from .models.asset_device_contract_information_ref import AssetDeviceContractInformationRef
from .models.asset_device_registration import AssetDeviceRegistration
from .models.asset_device_registration_ref import AssetDeviceRegistrationRef
from .models.asset_global_ultimate_ref import AssetGlobalUltimateRef
from .models.asset_managed_device_ref import AssetManagedDeviceRef
from .models.asset_managed_device_status_ref import AssetManagedDeviceStatusRef
from .models.asset_parent_connection_signature_ref import AssetParentConnectionSignatureRef
from .models.asset_product_information_ref import AssetProductInformationRef
from .models.asset_register_managed_device_ref import AssetRegisterManagedDeviceRef
from .models.asset_security_token_ref import AssetSecurityTokenRef
from .models.asset_send_managed_device_details_ref import AssetSendManagedDeviceDetailsRef
from .models.asset_sudi_info_ref import AssetSudiInfoRef
from .models.asset_unregister_managed_device_ref import AssetUnregisterManagedDeviceRef
from .models.asset_wait_for_managed_device_deletion_ref import AssetWaitForManagedDeviceDeletionRef
from .models.asset_wait_for_managed_device_end_point_connection_ref import AssetWaitForManagedDeviceEndPointConnectionRef
from .models.asset_wait_for_managed_device_registration_ref import AssetWaitForManagedDeviceRegistrationRef
from .models.bios_boot_mode import BiosBootMode
from .models.bios_boot_mode_ref import BiosBootModeRef
from .models.bios_deploy_task_ref import BiosDeployTaskRef
from .models.bios_policy import BiosPolicy
from .models.bios_policy_ref import BiosPolicyRef
from .models.bios_unit_ref import BiosUnitRef
from .models.bios_validation_task_ref import BiosValidationTaskRef
from .models.boot_bootloader_ref import BootBootloaderRef
from .models.boot_device_base_ref import BootDeviceBaseRef
from .models.boot_device_boot_mode import BootDeviceBootMode
from .models.boot_device_boot_mode_ref import BootDeviceBootModeRef
from .models.boot_iscsi import BootIscsi
from .models.boot_iscsi_ref import BootIscsiRef
from .models.boot_local_cdd import BootLocalCdd
from .models.boot_local_cdd_ref import BootLocalCddRef
from .models.boot_local_disk import BootLocalDisk
from .models.boot_local_disk_ref import BootLocalDiskRef
from .models.boot_nvme import BootNvme
from .models.boot_nvme_ref import BootNvmeRef
from .models.boot_pch_storage import BootPchStorage
from .models.boot_pch_storage_ref import BootPchStorageRef
from .models.boot_precision_deploy_task_ref import BootPrecisionDeployTaskRef
from .models.boot_precision_policy import BootPrecisionPolicy
from .models.boot_precision_policy_ref import BootPrecisionPolicyRef
from .models.boot_precision_validation_task_ref import BootPrecisionValidationTaskRef
from .models.boot_pxe import BootPxe
from .models.boot_pxe_ref import BootPxeRef
from .models.boot_san import BootSan
from .models.boot_san_ref import BootSanRef
from .models.boot_sd_card import BootSdCard
from .models.boot_sd_card_ref import BootSdCardRef
from .models.boot_uefi_shell import BootUefiShell
from .models.boot_uefi_shell_ref import BootUefiShellRef
from .models.boot_usb import BootUsb
from .models.boot_usb_ref import BootUsbRef
from .models.boot_virtual_media import BootVirtualMedia
from .models.boot_virtual_media_ref import BootVirtualMediaRef
from .models.cep_download_artifacts_ref import CepDownloadArtifactsRef
from .models.cep_initiate_cluster_bringup_ref import CepInitiateClusterBringupRef
from .models.cep_kv_pair_ref import CepKvPairRef
from .models.cep_spark_cluster_ref import CepSparkClusterRef
from .models.cep_verify_cluster_bringup_ref import CepVerifyClusterBringupRef
from .models.cmrf_cm_rf_ref import CmrfCmRfRef
from .models.comm_concrete_credential import CommConcreteCredential
from .models.comm_concrete_credential_ref import CommConcreteCredentialRef
from .models.comm_credential_ref import CommCredentialRef
from .models.comm_ip_v4_interface_ref import CommIpV4InterfaceRef
from .models.comm_ip_v6_interface_ref import CommIpV6InterfaceRef
from .models.comm_vmware_credential import CommVmwareCredential
from .models.comm_vmware_credential_ref import CommVmwareCredentialRef
from .models.compute_blade_ref import ComputeBladeRef
from .models.compute_board_ref import ComputeBoardRef
from .models.compute_ip_address_ref import ComputeIpAddressRef
from .models.compute_locator_led_operation_task_ref import ComputeLocatorLedOperationTaskRef
from .models.compute_one_time_boot_configuration_task_ref import ComputeOneTimeBootConfigurationTaskRef
from .models.compute_operation_result_handler_ref import ComputeOperationResultHandlerRef
from .models.compute_physical_ref import ComputePhysicalRef
from .models.compute_physical_summary import ComputePhysicalSummary
from .models.compute_physical_summary_ref import ComputePhysicalSummaryRef
from .models.compute_rack_unit_ref import ComputeRackUnitRef
from .models.compute_server_config_ref import ComputeServerConfigRef
from .models.compute_server_operation_task_ref import ComputeServerOperationTaskRef
from .models.compute_server_setting import ComputeServerSetting
from .models.compute_server_setting_ref import ComputeServerSettingRef
from .models.cond_alarm_ref import CondAlarmRef
from .models.cond_hcl_status_detail_ref import CondHclStatusDetailRef
from .models.cond_hcl_status_job_ref import CondHclStatusJobRef
from .models.cond_hcl_status_ref import CondHclStatusRef
from .models.connector_auth_message_ref import ConnectorAuthMessageRef
from .models.connector_base_message_ref import ConnectorBaseMessageRef
from .models.connector_close_stream_message_ref import ConnectorCloseStreamMessageRef
from .models.connector_collect_stat_ref import ConnectorCollectStatRef
from .models.connector_command_control_message_ref import ConnectorCommandControlMessageRef
from .models.connector_command_execute_result_ref import ConnectorCommandExecuteResultRef
from .models.connector_command_execute_stream_ref import ConnectorCommandExecuteStreamRef
from .models.connector_command_terminal_stream_ref import ConnectorCommandTerminalStreamRef
from .models.connector_common_inventory_collection_message_ref import ConnectorCommonInventoryCollectionMessageRef
from .models.connector_common_inventory_scoped_collection_ref import ConnectorCommonInventoryScopedCollectionRef
from .models.connector_common_inventory_start_message_ref import ConnectorCommonInventoryStartMessageRef
from .models.connector_common_inventory_trigger_collection_ref import ConnectorCommonInventoryTriggerCollectionRef
from .models.connector_common_inventory_trigger_sync_ref import ConnectorCommonInventoryTriggerSyncRef
from .models.connector_connector_techsupport_collect_ref import ConnectorConnectorTechsupportCollectRef
from .models.connector_control_message_ref import ConnectorControlMessageRef
from .models.connector_data_set_ref import ConnectorDataSetRef
from .models.connector_download_status_ref import ConnectorDownloadStatusRef
from .models.connector_event_channel_control_ref import ConnectorEventChannelControlRef
from .models.connector_event_ref import ConnectorEventRef
from .models.connector_fetch_stream_message_ref import ConnectorFetchStreamMessageRef
from .models.connector_file_checksum_ref import ConnectorFileChecksumRef
from .models.connector_file_download_message_ref import ConnectorFileDownloadMessageRef
from .models.connector_file_info_ref import ConnectorFileInfoRef
from .models.connector_file_message_ref import ConnectorFileMessageRef
from .models.connector_file_server_entry_ref import ConnectorFileServerEntryRef
from .models.connector_file_server_message_ref import ConnectorFileServerMessageRef
from .models.connector_file_upload_message_ref import ConnectorFileUploadMessageRef
from .models.connector_flow_control_ack_ref import ConnectorFlowControlAckRef
from .models.connector_flow_control_message_ref import ConnectorFlowControlMessageRef
from .models.connector_http_request_ref import ConnectorHttpRequestRef
from .models.connector_http_response_ref import ConnectorHttpResponseRef
from .models.connector_internal_event_ref import ConnectorInternalEventRef
from .models.connector_inventory_api_ref import ConnectorInventoryApiRef
from .models.connector_inventory_build_condition_ref import ConnectorInventoryBuildConditionRef
from .models.connector_inventory_build_string_ref import ConnectorInventoryBuildStringRef
from .models.connector_inventory_details_ref import ConnectorInventoryDetailsRef
from .models.connector_inventory_etl_init_ref import ConnectorInventoryEtlInitRef
from .models.connector_inventory_etl_object_ref import ConnectorInventoryEtlObjectRef
# Repository: jacarvalho/SimuRLacra
# File: Pyrado/pyrado/policies/environment_specific.py
import math
import numpy as np
import torch as to
import torch.nn as nn
import pyrado
from pyrado.utils.data_types import EnvSpec
from pyrado.environments.sim_base import SimEnv
from pyrado.environments.pysim.quanser_ball_balancer import QBallBalancerSim
from pyrado.environments.pysim.quanser_cartpole import QCartPoleSim
from pyrado.environments.pysim.quanser_qube import QQubeSim
from pyrado.environment_wrappers.utils import inner_env
from pyrado.policies.base import Policy
from pyrado.policies.features import FeatureStack, identity_feat, RBFFeat
from pyrado.policies.linear import LinearPolicy
from pyrado.utils.math import clamp_symm
from pyrado.utils.tensor import insert_tensor_col
class DualRBFLinearPolicy(LinearPolicy):
"""
A linear policy with RBF features which are also used to get the derivative of the features. The use-case in mind
is a simple policy which generates the joint position and joint velocity commands for the internal PD-controller
of a robot (e.g. <NAME>). By re-using the RBF, we reduce the number of parameters, while we can at the same
time get the velocity information from the features, i.e. the derivative of the normalized Gaussians.
"""
name: str = 'dual_rbf'
def __init__(self,
spec: EnvSpec,
rbf_hparam: dict,
dim_mask: int = 2,
init_param_kwargs: dict = None,
use_cuda: bool = False):
"""
Constructor
:param spec: specification of environment
:param rbf_hparam: hyper-parameters for the RBF-features, see `RBFFeat`
:param dim_mask: number of RBF features to mask out at the beginning and the end of every dimension,
pass 1 to remove the first and the last features for the policy, pass 0 to use all
RBF features. Masking out RBFs makes sense if you want to obtain a smooth starting behavior.
:param init_param_kwargs: additional keyword arguments for the policy parameter initialization
"""
if not (0 <= dim_mask <= rbf_hparam['num_feat_per_dim']//2):
raise pyrado.ValueErr(
given=dim_mask, ge_constraint='0', le_constraint=f"{rbf_hparam['num_feat_per_dim']//2}"
)
# Construct the RBF features
self._feats = RBFFeat(**rbf_hparam)
# Call LinearPolicy's constructor (custom parts will be overridden later)
super().__init__(spec, FeatureStack([self._feats]), init_param_kwargs, use_cuda)
if not self._num_act%2 == 0:
            raise pyrado.ShapeErr(msg='DualRBFLinearPolicy only works with an even number of actions, '
                                      'since we are using the time derivative of the features to create the second '
                                      'half of the outputs. This is done to use forward() in order to obtain '
                                      'the joint position and the joint velocities.')
# Override custom parts
self._feats = RBFFeat(**rbf_hparam)
self.dim_mask = dim_mask
if self.dim_mask > 0:
self.num_active_feat = self._feats.num_feat - 2*self.dim_mask*spec.obs_space.flat_dim
else:
self.num_active_feat = self._feats.num_feat
self.net = nn.Linear(self.num_active_feat, self._num_act//2, bias=False)
# Create mask to deactivate first and last feature of every input dimension
self.feats_mask = to.ones(self._feats.centers.shape, dtype=to.bool)
self.feats_mask[:self.dim_mask, :] = False
self.feats_mask[-self.dim_mask:, :] = False
self.feats_mask = self.feats_mask.t().reshape(-1, ) # reshape the same way as in RBFFeat
# Call custom initialization function after PyTorch network parameter initialization
init_param_kwargs = init_param_kwargs if init_param_kwargs is not None else dict()
self.init_param(None, **init_param_kwargs)
self.to(self.device)
def forward(self, obs: to.Tensor) -> to.Tensor:
"""
Evaluate the features at the given observation or use given feature values
:param obs: observations from the environment
:return: actions
"""
obs = obs.to(self.device)
        batched = obs.ndimension() == 2  # number of dims is 1 if unbatched; dim > 2 is caught by the features
feats_val = self._feats(obs)
feats_dot = self._feats.derivative(obs)
if self.dim_mask > 0:
# Mask out first and last feature of every input dimension
feats_val = feats_val[:, self.feats_mask]
feats_dot = feats_dot[:, self.feats_mask]
# Inner product between policy parameters and the value of the features
act_pos = self.net(feats_val)
act_vel = self.net(feats_dot)
act = to.cat([act_pos, act_vel], dim=1)
# Return the flattened tensor if not run in a batch mode to be compatible with the action spaces
return act.flatten() if not batched else act
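# Minimal standalone sketch (not part of Pyrado) of the idea behind DualRBFLinearPolicy: a single weight matrix is
# applied to normalized Gaussian RBF features of a phase variable to obtain position commands, and to the features'
# derivative w.r.t. the phase (obtained here via autograd) to obtain the matching velocity commands.
# All names in this sketch are hypothetical and it is not used anywhere in this module.
def _dual_rbf_sketch(z: to.Tensor, centers: to.Tensor, weights: to.Tensor, width: float = 0.05):
    """
    :param z: scalar phase variable, e.g. normalized time, 0-dim tensor
    :param centers: RBF centers, shape (num_feat,)
    :param weights: weight matrix shared by both outputs, shape (num_feat, num_joints)
    :param width: squared bandwidth of the Gaussians
    :return: position-like and velocity-like commands, each of shape (num_joints,)
    """
    def feats(zz):
        phi = to.exp(-(zz - centers)**2/(2*width))
        return phi/phi.sum()  # normalized Gaussians, cf. the class docstring above
    psi = feats(z)
    psi_dot = to.autograd.functional.jacobian(feats, z)  # d psi / d z, shape (num_feat,)
    return psi@weights, psi_dot@weights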
class QBallBalancerPDCtrl(Policy):
"""
PD-controller for the Quanser Ball Balancer.
    The only, but significant, difference between this controller and the other PD controllers is the clipping of the actions.
.. note::
This class's desired state specification deviates from the Pyrado policies which interact with a `Task`.
"""
name: str = 'qbb_pd'
def __init__(self,
env_spec: EnvSpec,
state_des: to.Tensor = to.zeros(2),
kp: to.Tensor = None,
kd: to.Tensor = None):
"""
Constructor
:param env_spec: environment specification
:param state_des: tensor of desired x and y ball position [m]
:param kp: 2x2 tensor of constant controller feedback coefficients for error [V/m]
:param kd: 2x2 tensor of constant controller feedback coefficients for error time derivative [Vs/m]
"""
super().__init__(env_spec)
self.state_des = state_des
self.limit_rad = 0.52360 # limit for angle command; see the saturation block in the Simulink model
        self.kp_servo = 14.  # P-gain for the servo angle; see the saturation block in the Simulink model
self.Kp, self.Kd = None, None
self.init_param(kp, kd)
def forward(self, obs: to.Tensor) -> to.Tensor:
"""
Calculate the controller output.
:param obs: observation from the environment
:return act: controller output [V]
"""
th_x, th_y, x, y, _, _, x_dot, y_dot = obs
err = to.tensor([self.state_des[0] - x, self.state_des[1] - y])
err_dot = to.tensor([0. - x_dot, 0. - y_dot])
th_des = self.Kp.mv(err) + self.Kd.mv(err_dot)
# Saturation for desired angular position
th_des = to.clamp(th_des, -self.limit_rad, self.limit_rad)
err_th = th_des - to.tensor([th_x, th_y])
# Return action, see "Actuator Electrical Dynamics" block in [1]
return err_th*self.kp_servo
def init_param(self, kp: to.Tensor = None, kd: to.Tensor = None, verbose: bool = False, **kwargs):
"""
Initialize controller parameters.
:param kp: 2x2 tensor of constant controller feedback coefficients for error [V/m]
:param kd: 2x2 tensor of constant controller feedback coefficients for error time derivative [Vs/m]
:param verbose: print the controller's gains
"""
self.Kp = to.diag(to.tensor([3.45, 3.45])) if kp is None else kp
self.Kd = to.diag(to.tensor([2.11, 2.11])) if kd is None else kd
if not self.Kp.shape == (2, 2):
raise pyrado.ShapeErr(given=self.Kp, expected_match=(2, 2))
if not self.Kd.shape == (2, 2):
raise pyrado.ShapeErr(given=self.Kd, expected_match=(2, 2))
if verbose:
print(f"Set Kp to\n{self.Kp.numpy()}\nand Kd to\n{self.Kd.numpy()}")
def reset(self, state_des: [np.ndarray, to.Tensor] = None):
"""
Set the controller's desired state.
:param state_des: tensor of desired x and y ball position [m], or None to keep the current desired state
"""
        if state_des is not None:
            if isinstance(state_des, to.Tensor):
                pass
            elif isinstance(state_des, np.ndarray):
                # convert to a tensor first, so the clone below works for both input types
                state_des = to.from_numpy(state_des).type(to.get_default_dtype())
            else:
                raise pyrado.TypeErr(given=state_des, expected_type=[to.Tensor, np.ndarray])
            self.state_des = state_des.clone()
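# A hedged usage sketch for QBallBalancerPDCtrl (kept as a comment; `some_env_spec` is an assumed, pre-built
# EnvSpec, and the call convention assumes Policy is a torch.nn.Module so calling the policy dispatches to forward()):
#
#     ctrl = QBallBalancerPDCtrl(env_spec=some_env_spec, state_des=to.zeros(2))
#     obs = to.tensor([0., 0., 0.1, -0.05, 0., 0., 0.02, 0.01])  # th_x, th_y, x, y, _, _, x_dot, y_dot
#     act = ctrl(obs)  # PD law on the ball error -> desired plate angles, clamped to +/-0.5236 rad, then servo P-gain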
class QCartPoleSwingUpAndBalanceCtrl(Policy):
""" Swing-up and balancing controller for the Quanser Cart-Pole """
name: str = 'qcp_sub'
def __init__(self,
env_spec: EnvSpec,
u_max: float = 18.,
v_max: float = 12.,
long: bool = False):
"""
Constructor
:param env_spec: environment specification
:param u_max: maximum energy gain
:param v_max: maximum voltage the control signal will be clipped to
:param long: flag for long or short pole
"""
super().__init__(env_spec)
# Store inputs
self.u_max = u_max
self.v_max = v_max
self.pd_control = False
self.pd_activated = False
self.long = long
self.dp_nom = QCartPoleSim.get_nominal_domain_param(self.long)
if long:
self.K_pd = to.tensor([-41.833, 189.8393, -47.8483, 28.0941])
else:
self.k_p = to.tensor(8.5) # former: 8.5
self.k_d = to.tensor(0.) # former: 0.
self.k_e = to.tensor(24.5) # former: 19.5 (frequency dependent)
self.K_pd = to.tensor([41., -200., 55., -16.]) # former: [+41.8, -173.4, +46.1, -16.2]
def init_param(self, init_values: to.Tensor = None, **kwargs):
pass
def forward(self, obs: to.Tensor) -> to.Tensor:
"""
Calculate the controller output.
:param obs: observation from the environment
:return act: controller output [V]
"""
x, sin_th, cos_th, x_dot, theta_dot = obs
theta = to.atan2(sin_th, cos_th)
alpha = (theta - math.pi) if theta > 0 else (theta + math.pi)
J_pole = self.dp_nom['l_pole'] ** 2*self.dp_nom['m_pole']/3.
J_eq = self.dp_nom['m_cart'] + (self.dp_nom['eta_g']*self.dp_nom['K_g'] ** 2*
self.dp_nom['J_m'])/self.dp_nom['r_mp'] ** 2
# Energy terms
E_kin = J_pole/2.*theta_dot ** 2
E_pot = self.dp_nom['m_pole']*self.dp_nom['g']*self.dp_nom['l_pole']*(1 - cos_th) # E(0) = 0., E(pi) = E(-pi) = 2 mgl
E_ref = 2.*self.dp_nom['m_pole']*self.dp_nom['g']*self.dp_nom['l_pole']
if to.abs(alpha) < 0.1745 or self.pd_control:
# Stabilize at the top
self.pd_activated = True
u = self.K_pd.dot(to.tensor([x, alpha, x_dot, theta_dot]))
else:
# Swing up
u = self.k_e*(E_kin + E_pot - E_ref)*to.sign(theta_dot*cos_th) + self.k_p*(0. - x) + self.k_d*(0. - x_dot)
u = u.clamp(-self.u_max, self.u_max)
if self.pd_activated:
self.pd_activated = False
act = (J_eq*self.dp_nom['R_m']*self.dp_nom['r_mp']*u)/ \
(self.dp_nom['eta_g']*self.dp_nom['K_g']*self.dp_nom['eta_m']*self.dp_nom['k_m']) + \
self.dp_nom['K_g']*self.dp_nom['k_m']*x_dot/self.dp_nom['r_mp']
# Return the clipped action
act = act.clamp(-self.v_max, self.v_max)
return act.view(1) # such that when act is later converted to numpy it does not become a float
class QQubeSwingUpAndBalanceCtrl(Policy):
""" Hybrid controller (QQubeEnergyCtrl, QQubePDCtrl) switching based on the pendulum pole angle alpha
.. note::
Extracted Quanser's values from q_qube2_swingup.mdl
"""
name: str = 'qq_sub'
def __init__(self,
env_spec: EnvSpec,
ref_energy: float = 0.04, # Quanser's value: 0.02
energy_gain: float = 30., # Quanser's value: 50
energy_th_gain: float = 0.4, # former: 0.4
                 acc_max: float = 5.,  # Quanser's value:
    of sims). The soil moisture anomaly is calculated against the mean(axis=1)
    of the soil moisture deficit array. The units should be in the same format as weather at home. Assumes no
    input data contains nan values.
    SMD is a monthly standardized dataset which assumes a starting soil moisture for each month (see the
    detrended_start_month object in this file).
:param months: integer months (1-12) an array of len(rain) or None if dates is passed
:param days: integer days of the month an array of len(rain) or None if dates is passed
:param rain: precipitation (kg m-2 s-1), np.ndarray
:param radn: radiation (W m-2), np.ndarray
:param tmax: maximum temperature (k), np.ndarray
:param tmin: minimum temperature (k), np.ndarray
    :param rh_min: minimum relative humidity (%), np.ndarray
    :param rh_max: maximum relative humidity (%), np.ndarray
:param wind_10: 10m wind speed (m/s), np.ndarray
:param mslp: mean sea level pressure (Pa), np.ndarray
:param elv: elevation at site (m), float
:return: smd(mm), sma(mm), pet(mm/day)
"""
# check inputs
expected_shape = rain.shape
assert (expected_shape[0] == 366 or
expected_shape[0] == 365 or
            expected_shape[0] == 360), 'axis 0 must be days and it is expected to be a full year (360, 365 or 366 days)'
assert len(expected_shape) == 2, 'expected 2d array shape = (day of year, simulation number)'
for k in ['radn', 'rain', 'tmax', 'tmin', 'rh_min', 'rh_max', 'wind_10', 'mslp']:
assert eval(k).shape == expected_shape, '{} does not match rain shape'.format(k)
assert np.isfinite(
eval(k)).all(), 'nan values passed in {}, please remove otherwise they will impact the sma'.format(k)
# make mean values and convert units
temp = (tmax + tmin) / 2 - 273.15 # to C
rh = (rh_min + rh_max) / 2
rain = rain * 86400 # kg/m2/s to mm/day
radn = radn * 86400 * 1e-6 # from w/m2 to mj/m2/day
mslp = mslp / 1000 # Pa to kpa
# run SMD/SMA
pet = calc_penman_pet(rad=radn, temp=temp, rh=rh, wind_10=wind_10, wind_2=None, psurf=None, mslp=mslp,
elevation=elv)
smd = calc_smd_monthly(rain=rain, pet=pet, dates=None, months=months, days=days,
month_start=detrended_start_month,
h2o_cap=150,
a=0.0073,
p=1, return_drn_aet=False)
sma = smd - smd.mean(axis=1)[:, np.newaxis]
return smd, sma, pet
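# A hedged usage sketch for the function above (its def line sits above this excerpt, so the name calc_smd_sma_wah
# and the keyword arguments are inferred from the docstring; kept as a comment). Synthetic inputs of shape
# (day_of_year, n_sims) in weather-at-home units:
#
#     n_sims = 10
#     dates = pd.date_range('2021-01-01', periods=365)
#     shape = (365, n_sims)
#     smd, sma, pet = calc_smd_sma_wah(
#         months=dates.month.values, days=dates.day.values,
#         rain=np.full(shape, 2./86400.),                            # kg m-2 s-1
#         radn=np.full(shape, 200.),                                 # W m-2
#         tmax=np.full(shape, 293.), tmin=np.full(shape, 283.),      # K
#         rh_min=np.full(shape, 60.), rh_max=np.full(shape, 90.),    # %
#         wind_10=np.full(shape, 3.), mslp=np.full(shape, 101325.),  # m s-1, Pa
#         elv=45.)
#     # each output has shape (365, n_sims); sma is the deficit relative to the across-simulation mean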
def calc_smd_sma_wah_depreciated(rain, radn, tmax, tmin, rh_min, rh_max, wind_10, mslp, elv):
"""
    calculate soil moisture deficit, soil moisture anomaly, and pet for weather at home data. This is a convenience
    function for Bodeker Scientific. The nd-array inputs are expected to be 2d arrays of
    shape (365, num of sims). The soil moisture anomaly is calculated against the mean(axis=1) of the soil moisture
    deficit array. The units should be in the same format as weather at home. Assumes no input data contains nan values.
SMD assumes a starting soil moisture of 75mm and a water holding capacity of 150mm
:param rain: precipitation (kg m-2 s-1), np.ndarray
:param radn: radiation (W m-2), np.ndarray
:param tmax: maximum temperature (k), np.ndarray
:param tmin: minimum temperature (k), np.ndarray
    :param rh_min: minimum relative humidity (%), np.ndarray
    :param rh_max: maximum relative humidity (%), np.ndarray
:param wind_10: 10m wind speed (m/s), np.ndarray
:param mslp: mean sea level pressure (Pa), np.ndarray
:param elv: elevation at site (m), float
:return: smd(mm), sma(mm), pet(mm/day)
"""
    raise ValueError('deprecated')
# check inputs
expected_shape = rain.shape
assert (expected_shape[0] == 366 or
expected_shape[0] == 365), 'axis 0 must be days and it is expected to be a full year (365 or 366 days)'
assert len(expected_shape) == 2, 'expected 2d array (day of year, simulation number)'
for k in ['radn', 'rain', 'tmax', 'tmin', 'rh_min', 'rh_max', 'wind_10', 'mslp']:
assert eval(k).shape == expected_shape, '{} does not match rain shape'.format(k)
assert np.isfinite(
eval(k)).all(), 'nan values passed in {}, please remove otherwise they will impact the sma'.format(k)
# make mean values and convert units
temp = (tmax + tmin) / 2 - 273.15 # to C
rh = (rh_min + rh_max) / 2
rain = rain * 86400 # kg/m2/s to mm/day
radn = radn * 86400 * 1e-6 # from w/m2 to mj/m2/day
mslp = mslp / 1000 # Pa to kpa
# run SMD/SMA
pet = calc_penman_pet(rad=radn, temp=temp, rh=rh, wind_10=wind_10, wind_2=None, psurf=None, mslp=mslp,
elevation=elv)
smd = calc_smd(rain=rain, pet=pet, h2o_cap=150, h2o_start=0.5, a=0.0073,
p=1, return_drn_aet=False)
sma = smd - smd.mean(axis=1)[:, np.newaxis]
return smd, sma, pet
def rough_testing_of_pet():
    # rough testing; looks good enough and matches the external FAO-56 implementation
import matplotlib.pyplot as plt
from Climate_Shocks.note_worthy_events.fao import fao56_penman_monteith, delta_svp, svp_from_t, psy_const, \
avp_from_rhmean
data = pd.read_csv(r"M:\Shared drives\Z2003_SLMACC\event_definition\hamilton_weather.csv").set_index(
['year', 'doy'])
temp = pd.read_csv(r"M:\Shared drives\Z2003_SLMACC\event_definition\penman.csv").set_index(['year', 'doy'])
data.loc[temp.index, 'penman'] = temp.loc[:, 'penman']
for i in data.index:
t = (data.loc[i, 'tmin'] + data.loc[i, 'tmax']) / 2 + 273
svp = svp_from_t(t - 273)
data.loc[i, 'penman_calc_ext'] = fao56_penman_monteith(
net_rad=data.loc[i, 'radn'],
t=t,
ws=data.loc[i, 'wind'],
svp=svp,
avp=avp_from_rhmean(svp, svp, data.loc[i, 'rh']),
delta_svp=delta_svp(t - 273),
psy=psy_const(data.loc[i, 'pmsl'] / 10),
)
data.loc[i, 'iter_penman'] = calc_penman_pet(rad=data.loc[i, 'radn'],
temp=t - 273,
rh=data.loc[i, 'rh'],
wind_2=data.loc[i, 'wind'],
mslp=data.loc[i, 'pmsl'] / 10,
elevation=45)
# my pet
data.loc[:, 'penman_calc'] = calc_penman_pet(rad=data['radn'].values,
temp=(data['tmin'].values + data['tmax'].values) / 2,
rh=data['rh'].values,
wind_2=data['wind'].values,
mslp=data['pmsl'].values / 10,
elevation=45
)
data = data.rolling(10).mean()
data.plot(y=['penman', 'penman_calc', 'penman_calc_ext', 'iter_penman'])
# plt.scatter(data['pet'], data['peyman_pet'])
plt.show()
pass
detrended_start_month = {
1: -50.0, 2: -52.0, 3: -41.0, 4: -36.0, 5: -22.0, 6: -11.0, 7: -7.0, 8: -8.0,
9: -14.0, 10: -30.0, 11: -41.0, 12: -49.0}
# calculated from historical SMD from detrended2 on first day of each month with a 10 day centered rolling window mean
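# A hedged sketch (assumed data layout, not the original derivation script) of how the month-start values above
# could be reproduced from a historical daily SMD series with a DatetimeIndex:
#
#     def derive_month_start(smd_daily: pd.Series) -> dict:
#         smoothed = smd_daily.rolling(10, center=True).mean()  # 10 day centred rolling window mean
#         first_days = smoothed[smoothed.index.day == 1]        # SMD on the first day of each month
#         return first_days.groupby(first_days.index.month).mean().round().to_dict()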
def calc_smd_monthly(rain, pet, dates=None, months=None, days=None,
month_start=detrended_start_month,
h2o_cap=150,
a=0.0073,
p=1, return_drn_aet=False):
"""
    calculate the soil moisture deficit from aet, assumes that if these are arrays axis 0 is time
sets the start of each month to the delta of the month start value and the rain/pet on that day.
:param rain: array of rain fall amounts, mm, shape = (time, *other dimensions (if needed))
:param pet: array of pet amounts, mm, shape = (time, *other dimensions (if needed))
:param dates: array of datetime objects, shape = (time,) or none and then months/days must be passed
:param months: integer months (1-12) an array of len(rain) or None if dates is passed
:param days: integer days of the month an array of len(rain) or None if dates is passed
:param month_start: the SMD value to start each month with
:param h2o_cap: maximum soil water capacity, mm, niwa uses 150mm as a standard
:param a: "readily available" water coefficient (d mm-1)
default value from woodward 2010
    :param p: proportion of readily available water (RAW) able to be extracted in one day (d-1)
default value from woodward, 2010
:param return_drn_aet: boolean, if True return AET and drainage
:return: (soil moisture deficit, mm) or (soil moisture deficit, mm), (drainage, mm), (aet, mm)
"""
assert isinstance(month_start, dict)
assert set(month_start.keys()) == set(range(1, 13))
if dates is not None:
assert months is None and days is None, 'if dates is not none then months/days must be None'
dates = pd.Series(np.atleast_1d(dates))
months = np.array([d.month for d in dates])
days = np.array([d.day for d in dates])
else:
assert months is not None and days is not None, 'if dates is none then months/days must not be none'
months = np.atleast_1d(months)
days = np.atleast_1d(days)
# make this work if float/ndarray passed
if np.atleast_1d(pet).ndim == 1:
array_d = 1 # 1d array or list, return 1d data
pet = np.atleast_1d(pet)[:, np.newaxis]
rain = np.atleast_1d(rain)[:, np.newaxis]
else:
array_d = 2 # 2 or more dimensions return without modification
assert rain.shape == pet.shape, 'rain and PET must be same shape'
assert months.shape == days.shape == pet.shape[0:1], ('dates (or months and days) must be the same shape as '
'pet.shape[0:1]')
smd = np.zeros(pet.shape, float) * np.nan
if return_drn_aet:
drain = np.zeros(pet.shape, float) * np.nan
aet_out = np.zeros(pet.shape, float) * np.nan
iter_shp = pet.shape[1:]
soil_mois = np.zeros((iter_shp)) + month_start[months[0]] + h2o_cap
for i, (r, pe, m, d) in enumerate(zip(rain, pet, months, days)):
if d == 1: # set the soil moisture on the first day of the month
soil_mois[:] = month_start[m] + h2o_cap
aet = calc_aet(pe, p=p, a=a, AWHC=h2o_cap, W=soil_mois - h2o_cap)
soil_mois = soil_mois + r - aet
        soil_mois[soil_mois
port, "submissions/{}/?index=7".format(submissions[0].id))
element_active = lambda browser: utils._get_active_element(browser) == utils._get_score_box(browser, 2)
WebDriverWait(browser, 10).until(element_active)
# Click the third comment box and navigate to the next submission
utils._get_comment_box(browser, 2).click()
utils._send_keys_to_body(browser, Keys.CONTROL, ".")
utils._wait_for_formgrader(browser, port, "submissions/{}/?index=11".format(submissions[1].id))
element_active = lambda browser: utils._get_active_element(browser) == utils._get_score_box(browser, 4)
WebDriverWait(browser, 10).until(element_active)
# Navigate to the previous submission
utils._send_keys_to_body(browser, Keys.CONTROL, ",")
utils._wait_for_formgrader(browser, port, "submissions/{}/?index=11".format(submissions[0].id))
element_active = lambda browser: utils._get_active_element(browser) == utils._get_score_box(browser, 4)
WebDriverWait(browser, 10).until(element_active)
@pytest.mark.nbextensions
def test_keyboard_help(browser, port, gradebook):
utils._load_formgrade(browser, port, gradebook)
# show the help dialog
utils._click_element(browser, ".help")
utils._wait_for_element(browser, "help-dialog")
WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#help-dialog button.btn-primary")))
# close it
utils._click_element(browser, "#help-dialog button.btn-primary")
modal_not_present = lambda browser: browser.execute_script("""return $("#help-dialog").length === 0;""")
WebDriverWait(browser, 10).until(modal_not_present)
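# The "dismiss a Bootstrap modal, then wait until jQuery can no longer find it" idiom above recurs throughout this
# module; a hypothetical helper (an illustration only, not used by the existing tests) could factor it out:
def _wait_until_modal_removed(browser, modal_id, timeout=10):
    """Block until the element with the given DOM id has been removed from the page."""
    modal_not_present = lambda b: b.execute_script('return $("#{}").length === 0;'.format(modal_id))
    WebDriverWait(browser, timeout).until(modal_not_present)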
@pytest.mark.nbextensions
def test_flag(browser, port, gradebook):
utils._load_formgrade(browser, port, gradebook)
# mark as flagged
assert utils._flag(browser) == "Submission flagged"
# mark as unflagged
assert utils._flag(browser) == "Submission unflagged"
# mark as flagged
assert utils._flag(browser) == "Submission flagged"
# mark as unflagged
assert utils._flag(browser) == "Submission unflagged"
@pytest.mark.nbextensions
def test_formgrade_show_hide_names(browser, port, gradebook):
utils._load_formgrade(browser, port, gradebook)
problem = gradebook.find_notebook("Problem 1", "Problem Set 1")
submissions = problem.submissions
submissions.sort(key=lambda x: x.id)
submission = submissions[0]
name = browser.find_elements_by_css_selector(".breadcrumb li")[-1]
hidden = browser.find_element_by_css_selector(".glyphicon.name-hidden")
shown = browser.find_element_by_css_selector(".glyphicon.name-shown")
# check that the name is hidden
assert name.text == "Submission #1"
assert not shown.is_displayed()
assert hidden.is_displayed()
# click the show icon
hidden.click()
# move the mouse to the first breadcrumb so it's not hovering over the tooltip
ActionChains(browser).move_to_element(browser.find_elements_by_css_selector(".breadcrumb li")[0]).perform()
WebDriverWait(browser, 10).until_not(EC.presence_of_element_located((By.CSS_SELECTOR, ".tooltip")))
# check that the name is shown
assert name.text == "{}, {}".format(submission.student.last_name, submission.student.first_name)
assert shown.is_displayed()
assert not hidden.is_displayed()
# click the hide icon
shown.click()
# move the mouse to the first breadcrumb so it's not hovering over the tooltip
ActionChains(browser).move_to_element(browser.find_elements_by_css_selector(".breadcrumb li")[0]).perform()
WebDriverWait(browser, 10).until_not(EC.presence_of_element_located((By.CSS_SELECTOR, ".tooltip")))
# check that the name is hidden
assert name.text == "Submission #1"
assert not shown.is_displayed()
assert hidden.is_displayed()
@pytest.mark.nbextensions
def test_before_add_new_assignment(browser, port, gradebook):
utils._load_gradebook_page(browser, port, "")
assert len(browser.find_elements_by_css_selector("tbody tr")) == 1
@pytest.mark.nbextensions
def test_add_new_assignment(browser, port, gradebook):
utils._load_gradebook_page(browser, port, "")
n = len(browser.find_elements_by_css_selector("tbody tr"))
# click the "add new assignment" button
utils._click_link(browser, "Add new assignment...")
utils._wait_for_element(browser, "add-assignment-modal")
WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#add-assignment-modal .save")))
    # set the name and duedate
elem = browser.find_element_by_css_selector("#add-assignment-modal .name")
elem.click()
elem.send_keys("ps2+a")
elem = browser.find_element_by_css_selector("#add-assignment-modal .duedate")
elem.click()
elem.send_keys("2017-07-05T17:00")
elem = browser.find_element_by_css_selector("#add-assignment-modal .timezone")
elem.click()
elem.send_keys("UTC")
# click save and wait for the error message to appear
utils._click_element(browser, "#add-assignment-modal .save")
WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#create-error")))
# set a valid name
elem = browser.find_element_by_css_selector("#add-assignment-modal .name")
elem.clear()
elem.click()
# check with a name containing whitespace, as this should be stripped
# away and handled by the interface
elem.send_keys("ps2 ")
# click save and wait for the modal to close
utils._click_element(browser, "#add-assignment-modal .save")
modal_not_present = lambda browser: browser.execute_script("""return $("#add-assignment-modal").length === 0;""")
WebDriverWait(browser, 10).until(modal_not_present)
# wait until both rows are present
rows_present = lambda browser: len(browser.find_elements_by_css_selector("tbody tr")) == (n + 1)
WebDriverWait(browser, 10).until(rows_present)
# check that the new row is correct
row = browser.find_elements_by_css_selector("tbody tr")[1]
assert row.find_element_by_css_selector(".name").text == "ps2"
assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 17:00:00 {}".format(tz)
assert row.find_element_by_css_selector(".status").text == "draft"
assert utils._child_exists(row, ".edit a")
assert utils._child_exists(row, ".assign a")
assert not utils._child_exists(row, ".preview a")
assert not utils._child_exists(row, ".release a")
assert not utils._child_exists(row, ".collect a")
assert row.find_element_by_css_selector(".num-submissions").text == "0"
# reload the page and make sure everything is still correct
utils._load_gradebook_page(browser, port, "")
row = browser.find_elements_by_css_selector("tbody tr")[1]
assert row.find_element_by_css_selector(".name").text == "ps2"
assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 17:00:00 {}".format(tz)
assert row.find_element_by_css_selector(".status").text == "draft"
assert utils._child_exists(row, ".edit a")
assert utils._child_exists(row, ".assign a")
assert not utils._child_exists(row, ".preview a")
assert not utils._child_exists(row, ".release a")
assert not utils._child_exists(row, ".collect a")
assert row.find_element_by_css_selector(".num-submissions").text == "0"
@pytest.mark.nbextensions
def test_edit_assignment(browser, port, gradebook):
utils._load_gradebook_page(browser, port, "")
# click on the edit button
row = browser.find_elements_by_css_selector("tbody tr")[1]
row.find_element_by_css_selector(".edit a").click()
utils._wait_for_element(browser, "edit-assignment-modal")
WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#edit-assignment-modal .save")))
# modify the duedate
elem = browser.find_element_by_css_selector("#edit-assignment-modal .modal-duedate")
elem.clear()
elem.click()
elem.send_keys("2017-07-05T18:00")
# click save and wait for the modal to close
utils._click_element(browser, "#edit-assignment-modal .save")
modal_not_present = lambda browser: browser.execute_script("""return $("#edit-assignment-modal").length === 0;""")
WebDriverWait(browser, 10).until(modal_not_present)
# check that the modified row is correct
row = browser.find_elements_by_css_selector("tbody tr")[1]
assert row.find_element_by_css_selector(".name").text == "ps2"
assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
assert row.find_element_by_css_selector(".status").text == "draft"
assert utils._child_exists(row, ".edit a")
assert utils._child_exists(row, ".assign a")
assert not utils._child_exists(row, ".preview a")
assert not utils._child_exists(row, ".release a")
assert not utils._child_exists(row, ".collect a")
assert row.find_element_by_css_selector(".num-submissions").text == "0"
# reload the page and make sure everything is still correct
utils._load_gradebook_page(browser, port, "")
row = browser.find_elements_by_css_selector("tbody tr")[1]
assert row.find_element_by_css_selector(".name").text == "ps2"
assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
assert row.find_element_by_css_selector(".status").text == "draft"
assert utils._child_exists(row, ".edit a")
assert utils._child_exists(row, ".assign a")
assert not utils._child_exists(row, ".preview a")
assert not utils._child_exists(row, ".release a")
assert not utils._child_exists(row, ".collect a")
assert row.find_element_by_css_selector(".num-submissions").text == "0"
@pytest.mark.nbextensions
def test_generate_assignment_fail(browser, port, gradebook):
utils._load_gradebook_page(browser, port, "")
# click on the generate button -- should produce an error because there
# are no notebooks for ps2 yet
row = browser.find_elements_by_css_selector("tbody tr")[1]
row.find_element_by_css_selector(".assign a").click()
utils._wait_for_element(browser, "error-modal")
WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#error-modal .close")))
utils._click_element(browser, "#error-modal .close")
modal_not_present = lambda browser: browser.execute_script("""return $("#error-modal").length === 0;""")
WebDriverWait(browser, 10).until(modal_not_present)
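# The "wait for a modal, dismiss it, then wait until it is removed from the DOM"
# steps above repeat across these tests; a minimal helper sketch capturing the
# pattern (hypothetical -- not part of the original test suite):
def _dismiss_modal(browser, modal_id, button=".close", timeout=10):
    # wait until the modal exists and its dismiss button is visible
    utils._wait_for_element(browser, modal_id)
    WebDriverWait(browser, timeout).until(
        EC.visibility_of_element_located((By.CSS_SELECTOR, "#{} {}".format(modal_id, button))))
    # dismiss it
    utils._click_element(browser, "#{} {}".format(modal_id, button))
    # wait until the modal has been removed from the DOM
    modal_not_present = lambda browser: browser.execute_script(
        """return $("#{}").length === 0;""".format(modal_id))
    WebDriverWait(browser, timeout).until(modal_not_present)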
@pytest.mark.nbextensions
def test_generate_assignment_success(browser, port, gradebook):
utils._load_gradebook_page(browser, port, "")
# add a notebook for ps2
source_path = join(os.path.dirname(__file__), "..", "..", "docs", "source", "user_guide", "source", "ps1", "problem1.ipynb")
shutil.copy(source_path, join("source", "ps2", "Problem 1.ipynb"))
# click on the generate button -- should now succeed
row = browser.find_elements_by_css_selector("tbody tr")[1]
row.find_element_by_css_selector(".assign a").click()
utils._wait_for_element(browser, "success-modal")
WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#success-modal .close")))
utils._click_element(browser, "#success-modal .close")
modal_not_present = lambda browser: browser.execute_script("""return $("#success-modal").length === 0;""")
WebDriverWait(browser, 10).until(modal_not_present)
# check that the modified row is correct
row = browser.find_elements_by_css_selector("tbody tr")[1]
assert row.find_element_by_css_selector(".name").text == "ps2"
assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
assert row.find_element_by_css_selector(".status").text == "draft"
assert utils._child_exists(row, ".edit a")
assert utils._child_exists(row, ".assign a")
assert utils._child_exists(row, ".preview a")
if sys.platform == 'win32':
assert not utils._child_exists(row, ".release a")
else:
assert utils._child_exists(row, ".release a")
assert not utils._child_exists(row, ".collect a")
assert row.find_element_by_css_selector(".num-submissions").text == "0"
# reload the page and make sure everything is still correct
utils._load_gradebook_page(browser, port, "")
row = browser.find_elements_by_css_selector("tbody tr")[1]
assert row.find_element_by_css_selector(".name").text == "ps2"
assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
assert row.find_element_by_css_selector(".status").text == "draft"
assert utils._child_exists(row, ".edit a")
assert utils._child_exists(row, ".assign a")
assert utils._child_exists(row, ".preview a")
if sys.platform == 'win32':
assert not utils._child_exists(row, ".release a")
else:
assert utils._child_exists(row, ".release a")
assert not utils._child_exists(row, ".collect a")
assert row.find_element_by_css_selector(".num-submissions").text == "0"
@notwindows
@pytest.mark.nbextensions
def test_release_assignment(browser, port, gradebook):
utils._load_gradebook_page(browser, port, "")
# click on the release button
row = browser.find_elements_by_css_selector("tbody tr")[1]
row.find_element_by_css_selector(".release a").click()
utils._wait_for_element(browser, "success-modal")
WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#success-modal .close")))
utils._click_element(browser, "#success-modal .close")
modal_not_present = lambda browser: browser.execute_script("""return $("#success-modal").length === 0;""")
WebDriverWait(browser, 10).until(modal_not_present)
# check that the modified row is correct
row = browser.find_elements_by_css_selector("tbody tr")[1]
assert row.find_element_by_css_selector(".name").text == "ps2"
assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
assert row.find_element_by_css_selector(".status").text == "released"
assert utils._child_exists(row, ".edit a")
assert utils._child_exists(row, ".assign a")
assert utils._child_exists(row, ".preview a")
assert utils._child_exists(row, ".release a")
assert utils._child_exists(row, ".collect a")
assert row.find_element_by_css_selector(".num-submissions").text == "0"
# reload the page and make sure everything is still correct
utils._load_gradebook_page(browser, port, "")
row = browser.find_elements_by_css_selector("tbody tr")[1]
assert row.find_element_by_css_selector(".name").text == "ps2"
assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
assert row.find_element_by_css_selector(".status").text == "released"
assert utils._child_exists(row, ".edit a")
assert utils._child_exists(row, ".assign a")
assert utils._child_exists(row, ".preview a")
assert utils._child_exists(row, ".release a")
assert utils._child_exists(row, ".collect a")
assert row.find_element_by_css_selector(".num-submissions").text == "0"
@notwindows
@pytest.mark.nbextensions
def test_collect_assignment(browser, port, gradebook):
run_nbgrader(["fetch_assignment", "ps2"])
run_nbgrader(["submit", "ps2"])
rmtree("ps2")
utils._load_gradebook_page(browser, port, "")
# click on the collect button
row = browser.find_elements_by_css_selector("tbody tr")[1]
row.find_element_by_css_selector(".collect a").click()
utils._wait_for_element(browser, "success-modal")
WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#success-modal .close")))
utils._click_element(browser, "#success-modal .close")
modal_not_present = lambda browser: browser.execute_script("""return $("#success-modal").length === 0;""")
WebDriverWait(browser, 10).until(modal_not_present)
# check that the modified row is correct
row = browser.find_elements_by_css_selector("tbody tr")[1]
assert row.find_element_by_css_selector(".name").text == "ps2"
assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
assert row.find_element_by_css_selector(".status").text == "released"
assert utils._child_exists(row, ".edit a")
assert utils._child_exists(row, ".assign a")
assert utils._child_exists(row, ".preview a")
assert utils._child_exists(row, ".release a")
assert utils._child_exists(row, ".collect a")
assert row.find_element_by_css_selector(".num-submissions").text == "1"
# reload the page and make sure everything is still correct
utils._load_gradebook_page(browser, port, "")
row = browser.find_elements_by_css_selector("tbody tr")[1]
assert row.find_element_by_css_selector(".name").text == "ps2"
assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
assert row.find_element_by_css_selector(".status").text == "released"
assert utils._child_exists(row, ".edit a")
assert utils._child_exists(row, ".assign a")
assert utils._child_exists(row, ".preview a")
assert utils._child_exists(row, ".release a")
assert utils._child_exists(row, ".collect a")
assert row.find_element_by_css_selector(".num-submissions").text == "1"
@notwindows
@pytest.mark.nbextensions
def test_unrelease_assignment(browser, port, gradebook):
utils._load_gradebook_page(browser, port, "")
# click on the unrelease button
row = browser.find_elements_by_css_selector("tbody tr")[1]
row.find_element_by_css_selector(".release a").click()
utils._wait_for_element(browser, "success-modal")
WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#success-modal .close")))
utils._click_element(browser, "#success-modal .close")
modal_not_present = lambda browser: browser.execute_script("""return $("#success-modal").length === 0;""")
# MINLP written by GAMS Convert at 04/21/18 13:51:48
#
# Equation counts
#    Total        E        G        L        N        X        C        B
#     4601        0        1     4600        0        0        0        0
#
# Variable counts
#                 x        b        i      s1s      s2s       sc       si
#    Total     cont   binary  integer     sos1     sos2    scont     sint
#      301        1      300        0        0        0        0        0
# FX     0        0        0        0        0        0        0        0
#
# Nonzero counts
#    Total    const       NL      DLL
#    14101    13801      300        0
from pyomo.environ import *
model = m = ConcreteModel()
m.b1 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b3 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b4 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b5 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b6 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b7 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b8 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b9 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b10 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b11 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b12 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b13 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b14 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b15 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b16 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b17 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b18 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b19 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b20 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b21 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b22 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b23 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b24 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b25 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b26 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b27 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b28 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b29 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b30 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b31 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b32 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b33 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b34 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b35 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b36 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b37 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b38 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b39 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b40 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b41 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b42 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b43 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b44 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b45 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b46 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b47 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b48 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b49 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b50 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b51 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b52 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b53 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b54 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b55 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b56 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b57 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b58 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b59 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b60 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b61 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b62 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b63 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b64 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b65 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b66 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b67 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b68 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b69 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b70 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b71 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b72 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b73 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b74 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b75 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b76 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b77 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b78 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b79 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b80 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b81 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b82 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b83 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b84 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b85 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b86 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b87 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b88 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b89 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b90 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b91 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b92 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b93 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b94 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b95 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b96 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b97 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b98 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b99 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b100 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b101 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b102 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b103 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b104 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b105 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b106 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b107 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b108 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b109 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b110 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b111 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b112 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b113 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b114 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b115 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b116 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b117 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b118 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b119 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b120 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b121 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b122 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b123 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b124 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b125 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b126 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b127 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b128 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b129 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b130 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b131 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b132 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b133 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b134 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b135 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b136 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b137 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b138 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b139 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b140 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b141 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b142 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b143 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b144 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b145 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b146 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b147 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b148 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b149 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b150 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b151 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b152 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b153 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b154 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b155 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b156 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b157 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b158 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b159 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b160 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b161 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b162 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b163 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b164 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b165 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b166 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b167 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b168 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b169 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b170 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b171 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b172 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b173 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b174 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b175 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b176 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b177 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b178 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b179 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b180 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b181 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b182 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b183 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b184 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b185 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b186 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b187 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b188 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b189 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b190 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b191 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b192 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b193 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b194 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b195 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b196 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b197 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b198 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b199 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b200 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b201 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b202 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b203 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b204 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b205 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b206 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b207 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b208 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b209 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b210 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b211 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b212 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b213 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b214 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b215 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b216 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b217 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b218 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b219 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b220 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b221 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b222 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b223 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b224 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b225 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b226 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b227 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b228 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b229 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b230 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b231 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b232 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b233 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b234 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b235 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b236 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b237 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b238 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b239 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b240 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b241 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b242 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b243 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b244 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b245 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b246 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b247 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b248 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b249 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b250 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b251 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b252 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b253 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b254 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b255 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b256 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b257 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b258 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b259 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b260 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b261 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b262 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b263 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b264 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b265 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b266 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b267 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b268 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b269 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b270 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b271 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b272 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b273 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b274 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b275 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b276 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b277 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b278 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b279 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b280 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b281 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b282 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b283 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b284 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b285 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b286 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b287 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b288 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b289 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b290 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b291 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b292 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b293 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b294 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b295 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b296 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b297 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b298 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b299 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b300 = Var(within=Binary,bounds=(0,1),initialize=0)
m.x301 = Var(within=Reals,bounds=(None,None),initialize=0)
m.obj = Objective(expr=m.x301, sense=minimize)
m.c1 = Constraint(expr= m.b1 - m.b2 + m.b25 <= 1)
m.c2 = Constraint(expr= m.b1 - m.b3 + m.b26 <= 1)
m.c3 = Constraint(expr= m.b1 - m.b4 + m.b27 <= 1)
m.c4 = Constraint(expr= m.b1 - m.b5 + m.b28 <= 1)
m.c5 = Constraint(expr= m.b1 - m.b6 + m.b29 <= 1)
m.c6 = Constraint(expr= m.b1 - m.b7 + m.b30 <= 1)
m.c7 = Constraint(expr= m.b1 - m.b8 + m.b31 <= 1)
m.c8 = Constraint(expr= m.b1 - m.b9 + m.b32 <= 1)
m.c9 = Constraint(expr= m.b1 - m.b10 + m.b33 <= 1)
m.c10 = Constraint(expr= m.b1 - m.b11 + m.b34 <= 1)
m.c11 = Constraint(expr= m.b1 - m.b12 + m.b35 <= 1)
m.c12 = Constraint(expr= m.b1 - m.b13 + m.b36 <= 1)
m.c13 = Constraint(expr= m.b1 - m.b14 + m.b37 <= 1)
m.c14 = Constraint(expr= m.b1 - m.b15 + m.b38 <= 1)
m.c15 = Constraint(expr= m.b1 - m.b16 + m.b39 <= 1)
m.c16 = Constraint(expr= m.b1 - m.b17 + m.b40 <= 1)
m.c17 = Constraint(expr= m.b1 - m.b18 + m.b41 <= 1)
m.c18 = Constraint(expr= m.b1 - m.b19 + m.b42 <= 1)
m.c19 = Constraint(expr= m.b1 - m.b20 + m.b43 <= 1)
m.c20 = Constraint(expr= m.b1 - m.b21 + m.b44 <= 1)
m.c21 = Constraint(expr= m.b1 - m.b22 + m.b45 <= 1)
m.c22 = Constraint(expr= m.b1 - m.b23 + m.b46 <= 1)
m.c23 = Constraint(expr= m.b1 - m.b24 + m.b47 <= 1)
m.c24 = Constraint(expr= m.b2 - m.b3 + m.b48 <= 1)
m.c25 = Constraint(expr= m.b2 - m.b4 + m.b49 <= 1)
m.c26 = Constraint(expr= m.b2 - m.b5 + m.b50 <= 1)
m.c27 = Constraint(expr= m.b2 - m.b6 + m.b51 <= 1)
m.c28 = Constraint(expr= m.b2 - m.b7 + m.b52 <= 1)
m.c29 = Constraint(expr= m.b2 - m.b8 + m.b53 <= 1)
m.c30 = Constraint(expr= m.b2 - m.b9 + m.b54 <= 1)
m.c31 = Constraint(expr= m.b2 - m.b10 + m.b55 <= 1)
m.c32 = Constraint(expr= m.b2 - m.b11 + m.b56 <= 1)
m.c33 = Constraint(expr= m.b2 - m.b12 + m.b57 <= 1)
m.c34 = Constraint(expr= m.b2 - m.b13 + m.b58 <= 1)
m.c35 = Constraint(expr= m.b2 - m.b14 + m.b59 <= 1)
m.c36 = Constraint(expr= m.b2 - m.b15 + m.b60 <= 1)
m.c37 = Constraint(expr= m.b2 - m.b16 + m.b61 <= 1)
m.c38 = Constraint(expr= m.b2 - m.b17 + m.b62 <= 1)
m.c39 = Constraint(expr= m.b2 - m.b18 + m.b63 <= 1)
m.c40 = Constraint(expr= m.b2 - m.b19 + m.b64 <= 1)
m.c41 = Constraint(expr= m.b2 - m.b20 + m.b65 <= 1)
m.c42 = Constraint(expr= m.b2 - m.b21 + m.b66 <= 1)
m.c43 = Constraint(expr= m.b2 - m.b22 + m.b67 <= 1)
m.c44 = Constraint(expr= m.b2 - m.b23 + m.b68 <= 1)
m.c45 = Constraint(expr= m.b2 - m.b24 + m.b69 <= 1)
m.c46 = Constraint(expr= m.b3 - m.b4 + m.b70 <= 1)
m.c47 = Constraint(expr= m.b3 - m.b5 + m.b71 <= 1)
m.c48 = Constraint(expr= m.b3 - m.b6 + m.b72 <= 1)
m.c49 = Constraint(expr= m.b3 - m.b7 + m.b73 <= 1)
m.c50 = Constraint(expr= m.b3 - m.b8 + m.b74 <= 1)
m.c51 = Constraint(expr= m.b3 - m.b9 + m.b75 <= 1)
m.c52 = Constraint(expr= m.b3 - m.b10 + m.b76 <= 1)
m.c53 = Constraint(expr= m.b3 - m.b11 + m.b77 <= 1)
m.c54 = Constraint(expr= m.b3 - m.b12 + m.b78 <= 1)
m.c55 = Constraint(expr= m.b3 - m.b13 + m.b79 <= 1)
m.c56 = Constraint(expr= m.b3 - m.b14 + m.b80 <= 1)
m.c57 = Constraint(expr= m.b3 - m.b15 + m.b81 <= 1)
m.c58 = Constraint(expr= m.b3 - m.b16 + m.b82 <= 1)
m.c59 = Constraint(expr= m.b3 - m.b17 + m.b83 <= 1)
m.c60 = Constraint(expr= m.b3 - m.b18 + m.b84 <= 1)
m.c61 = Constraint(expr= m.b3 - m.b19 + m.b85 <= 1)
m.c62 = Constraint(expr= m.b3 - m.b20 + m.b86 <= 1)
m.c63 = Constraint(expr= m.b3 - m.b21 + m.b87 <= 1)
m.c64 = Constraint(expr= m.b3 - m.b22 + m.b88 <= 1)
m.c65 = Constraint(expr= m.b3 - m.b23 + m.b89 <= 1)
m.c66 = Constraint(expr= m.b3 - m.b24 + m.b90 <= 1)
m.c67 = Constraint(expr= m.b4 - m.b5 + m.b91 <= 1)
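# A model in this form is normally handed to a MINLP-capable solver via Pyomo's
# SolverFactory; a minimal usage sketch, assuming a suitable solver (e.g. Bonmin
# or Couenne) is installed and on the PATH:
#
#   from pyomo.environ import SolverFactory, value
#   opt = SolverFactory("bonmin")
#   results = opt.solve(m, tee=True)
#   print(value(m.obj))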
or h5 object
"""
if type(h5_obj_or_filepath) is str:
h5_obj = _h5py.File(h5_obj_or_filepath, mode)
else:
h5_obj = h5_obj_or_filepath
for key, value in sorted(self.items()):
if isinstance(value, Container):
if key not in h5_obj.keys():
h5_group = h5_obj.create_group(key)
else:
h5_group = h5_obj[key]
value.to_disk_as_hdf5(h5_group, starting_index, mode, max_batch_size)
else:
value_as_np = self._ivy.to_numpy(value)
value_shape = value_as_np.shape
this_batch_size = value_shape[0]
if not max_batch_size:
max_batch_size = starting_index + this_batch_size
if key not in h5_obj.keys():
dataset_shape = [max_batch_size] + list(value_shape[1:])
maxshape = ([None for _ in dataset_shape])
h5_obj.create_dataset(key, dataset_shape, dtype=value_as_np.dtype, maxshape=maxshape)
space_left = max_batch_size - starting_index
amount_to_write = min(this_batch_size, space_left)
h5_obj[key][starting_index:starting_index + amount_to_write] = value_as_np[0:amount_to_write]
def to_disk_as_pickled(self, pickle_filepath):
"""
Save container object to disk, as a pickled file, at the specified filepath.
:param pickle_filepath: Filepath for where to save the container to disk.
:type pickle_filepath: str
"""
if _ivy.wrapped_mode():
_pickle.dump(self.to_native().to_dict(), open(pickle_filepath, 'wb'))
else:
_pickle.dump(self.to_dict(), open(pickle_filepath, 'wb'))
def to_jsonable(self, return_dict=None):
"""
Return container with non-jsonable elements converted to string representations, which are jsonable.
"""
if return_dict is None:
return_dict = self.copy()
for k, v in return_dict.items():
if not _is_jsonable(v):
if isinstance(v, dict):
return_dict[k] = self.to_jsonable(v)
else:
return_dict[k] = str(v)
return return_dict
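# `_is_jsonable` is used above but defined elsewhere in the module; a minimal
# sketch of such a helper, assuming it simply tests JSON-serialisability:
#
#   def _is_jsonable(x):
#       try:
#           _json.dumps(x)
#           return True
#       except (TypeError, OverflowError):
#           return False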
def to_disk_as_json(self, json_filepath):
"""
Save container object to disk, as a JSON file, at the specified filepath.
:param json_filepath: Filepath for where to save the container to disk.
:type json_filepath: str
"""
with open(json_filepath, 'w+') as json_data_file:
_json.dump(self.to_jsonable().to_dict(), json_data_file, indent=4)
def to_list(self):
"""
Return nested list representation of container object.
:return: Container as nested list.
"""
return_list = list()
for key, value in sorted(self.items()):
if isinstance(value, Container):
return_list.append(value.to_list())
elif value is not None and key != '_f':
return_list.append(value)
return return_list
def to_raw(self):
"""
Return nested raw representation of container object. This includes restoring lists and tuples passed in the
constructor to their original form.
:return: Container data in its raw form.
"""
return_item = dict()
for i, (key, value) in enumerate(sorted(self.items())):
if isinstance(value, Container):
return_item[key] = value.to_raw()
elif key[0:3] == 'it_' and tuple(self._types_to_iteratively_nest):
return_item = list([v.to_raw() if isinstance(v, Container) else v for v in self.values()])
break
else:
return_item[key] = value
return return_item
def to_dict(self):
"""
Return nested pure dict representation of container object.
:return: Container as nested dict.
"""
return_dict = dict()
for key, value in sorted(self.items()):
if isinstance(value, Container):
return_dict[key] = value.to_dict()
else:
return_dict[key] = value
return return_dict
def to_iterator(self, key_chain='', leaf_keys_only=False):
"""
Return iterator for traversing through the nested elements of container object.
:return: Iterator for the container elements.
"""
for key, value in sorted(self.items()):
if leaf_keys_only:
kc = key
else:
kc = key_chain + '/' + key if key_chain != '' else key
if isinstance(value, Container):
# noinspection PyCompatibility
yield from value.to_iterator(kc, leaf_keys_only)
else:
yield kc, value
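# Usage sketch (hypothetical container): key chains are joined with '/', so a
# nested entry {'a': {'b': 1}} is yielded as ('a/b', 1):
#
#   for kc, v in Container({'a': {'b': 1}}).to_iterator():
#       print(kc, v)   # -> a/b 1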
def to_iterator_values(self):
"""
Return iterator for traversing through the nested values of container object.
:return: Iterator for the container values.
"""
for key, value in sorted(self.items()):
if isinstance(value, Container):
# noinspection PyCompatibility
yield from value.to_iterator_values()
else:
yield value
def to_iterator_keys(self, key_chain='', leaf_keys_only=False):
"""
Return iterator for traversing through the nested keys of container object.
:return: Iterator for the container elements.
"""
for key, value in sorted(self.items()):
if leaf_keys_only:
kc = key
else:
kc = key_chain + '/' + key if key_chain != '' else key
if isinstance(value, Container):
# noinspection PyCompatibility
yield from value.to_iterator_keys(kc, leaf_keys_only)
else:
yield kc
def to_flat_list(self):
"""
Return flat list representation of container object.
:return: Container as flat list.
"""
return list([item for key, item in self.to_iterator()])
def from_flat_list(self, flat_list):
"""
Return new container object with the same hierarchy, but with values replaced from flat list.
:param flat_list: flat list of values to populate container with.
:type flat_list: sequence of arrays
:return: Container.
"""
new_dict = dict()
for key, value in sorted(self.items()):
if isinstance(value, Container):
new_value = value.from_flat_list(flat_list)
else:
new_value = flat_list.pop(0)
new_dict[key] = new_value
return Container(new_dict, **self._config)
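# Usage sketch (hypothetical values): to_flat_list() and from_flat_list() are
# inverses over the same key hierarchy:
#
#   cont = Container({'a': {'b': 1, 'c': 2}})
#   cont.to_flat_list()          # [1, 2]  (sorted key order)
#   cont.from_flat_list([3, 4])  # new container with a/b == 3 and a/c == 4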
def has_key(self, query_key):
"""
Determine whether container object has specified key somewhere in the nested structure
:return: Boolean
"""
has_key = False
def map_fn(x, kc):
nonlocal has_key
if query_key in kc:
has_key = True
return x
self.map(map_fn)
return has_key
def has_key_chain(self, key_chain):
"""
Determine whether container object has specified key-chain
:return: Boolean
"""
keys = re.split('[/.]', key_chain)
ret = self
for key in keys:
try:
ret = ret[key]
except KeyError:
return False
return True
def has_nans(self, include_infs=True, leafwise=False):
"""
Determine whether arrays in the container contain any nans, as well as infs or -infs if specified.
:param include_infs: Whether to include infs and -infs in the check. Default is True.
:type include_infs: bool, optional
:param leafwise: Whether to apply the check leaf-wise, and return a container of booleans. Default is False,
in which case the check is applied across the entire container, returning a single boolean.
:type leafwise: bool, optional
:return: Whether the container has any nans, applied either leafwise or across the entire container.
"""
leafwise_res = self.map(lambda x, kc: _ivy.has_nans(x, include_infs))
if leafwise:
return leafwise_res
return max([v for k, v in leafwise_res.to_iterator()])
def at_keys(self, queries, ignore_none=True, containing=False, ignore_key_errors=False):
"""
Query container object at specified keys, either as list or nested dict.
:param queries: The keys to query.
:type queries: sequence of strs or single str
:param ignore_none: Whether to ignore None input. Default is True.
:type ignore_none: bool, optional
:param containing: Whether to include keys which only contain the query substrings. Default is False.
:type containing: bool, optional
:param ignore_key_errors: Whether to ignore Key-errors when trying to access the dict. Default is False.
:type ignore_key_errors: bool, optional
:return: sub-container containing only key-chains containing the specified keys.
"""
if queries is None and ignore_none:
return self
key_chains_to_keep = list()
if isinstance(queries, str):
queries = [queries]
def map_fn(x, kc):
nonlocal key_chains_to_keep
kc_split = re.split('[/.]', kc)
for query_key in queries:
if query_key in kc_split or (containing and min([query_key in k for k in kc_split])):
key_chains_to_keep.append(kc)
return x
self.map(map_fn)
return self.at_key_chains(key_chains_to_keep, ignore_key_errors=ignore_key_errors)
def at_key_chain(self, key_chain, ignore_key_errors=False):
"""
Query container object at a specified key-chain
:return: sub-container or value at specified key chain
"""
keys = re.split('[/.]', key_chain)
ret = self
for key in keys:
try:
ret = ret[key]
except KeyError as e:
if ignore_key_errors:
return
raise e
return ret
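# Usage sketch (hypothetical container): both '/' and '.' are accepted as
# key-chain separators:
#
#   c = Container({'a': {'b': {'c': 1}}})
#   c.at_key_chain('a/b/c')                        # 1
#   c.has_key_chain('a.b')                         # True
#   c.at_key_chain('a/x', ignore_key_errors=True)  # None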
def at_key_chains(self, key_chains, ignore_none=True, ignore_key_errors=False):
"""
Query container object at specified key-chains, either as list or nested dict.
:return: sub-container containing only the specified key chains
"""
if key_chains is None and ignore_none:
return self
if isinstance(key_chains, (list, tuple)):
return self._at_key_chains_input_as_seq(key_chains, ignore_key_errors=ignore_key_errors)
elif isinstance(key_chains, dict):
return self._at_key_chains_input_as_dict(key_chains, ignore_key_errors=ignore_key_errors)
elif isinstance(key_chains, str):
return self._at_key_chains_input_as_seq([key_chains], ignore_key_errors=ignore_key_errors)
else:
raise Exception('Invalid type for input key_chains, must either be a list, tuple, dict, or ivy.Container,'
' but found type {}'.format(type(key_chains)))
def set_at_keys(self, target_dict):
"""
Set values of container object at specified keys
:return: new container with updated value at each key
"""
return_dict = dict()
for key, val in self.items():
if key in target_dict:
return_dict[key] = target_dict[key]
elif isinstance(val, Container):
return_dict[key] = val.set_at_keys(target_dict)
else:
return_dict[key] = val
return Container(return_dict, **self._config)
def set_at_key_chain(self, key_chain, val, inplace=False):
"""
Set value of container object at a specified key-chain
:return: new container with updated value at key chain
"""
keys = re.split('[/.]', key_chain)
if inplace:
cont = self
else:
cont = self.copy()
sub_cont = cont
for key in keys[:-1]:
if key not in sub_cont:
sub_cont[key] = Container(**self._config)
sub_cont = sub_cont[key]
sub_cont[keys[-1]] = val
return cont
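# Usage sketch (hypothetical container): intermediate keys that do not yet exist
# are created as empty sub-containers along the way:
#
#   c = Container({'a': {'b': 1}})
#   c2 = c.set_at_key_chain('a/d/e', 2)           # returns a modified copy
#   c.set_at_key_chain('a/d/e', 2, inplace=True)  # mutates and returns c itself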
def overwrite_at_key_chain(self, key_chain, val, inplace=False):
"""
Overwrite value of container object at a specified key-chain
:return: new container with updated value at key chain, provided it existed before.
"""
keys = re.split('[/.]', key_chain)
if inplace:
cont = self
else:
cont = self.copy()
sub_cont = cont
for key in keys[:-1]:
if key not in sub_cont:
raise Exception('key chain must already exist in container in order to call overwrite_at_key_chain')
sub_cont = sub_cont[key]
if keys[-1] not in sub_cont:
raise Exception('key chain must already exist in container in order to call overwrite_at_key_chain')
sub_cont[keys[-1]] = val
return cont
def set_at_key_chains(self, target_dict, return_dict=None, inplace=False):
"""
Set values of container object at specified key-chains
:return: new container with updated values at the specified key chains
0 for seven days and above after."""
frozen = x == np.array([0, 0, 0, 0, 0, 0, 0, 1], bool)
return frozen.all(axis=axis)
tcond = (tas > frz).rolling(time=8).reduce(func)
pcond = pr > t
out = (tcond * pcond * 1).resample(time=freq).sum(dim="time")
return to_agg_units(out, tas, "count")
@declare_units(
pr="[precipitation]",
tas="[temperature]",
pr_thresh="[precipitation]",
tas_thresh="[temperature]",
)
def high_precip_low_temp(
pr: xarray.DataArray,
tas: xarray.DataArray,
pr_thresh: str = "0.4 mm/d",
tas_thresh: str = "-0.2 degC",
freq: str = "YS",
) -> xarray.DataArray: # noqa: D401
"""Number of days with precipitation above threshold and temperature below threshold.
Number of days where precipitation is greater or equal to some threshold, and temperatures are colder than some
threshold. This can be used for example to identify days with the potential for freezing rain or icing conditions.
Parameters
----------
pr : xarray.DataArray
Mean daily precipitation flux.
tas : xarray.DataArray
Daily mean, minimum or maximum temperature.
pr_thresh : str
Precipitation threshold to exceed.
tas_thresh : str
Temperature threshold not to exceed.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [time]
Count of days with high precipitation and low temperatures.
Example
-------
To compute the number of days with intense rainfall while minimum temperatures dip below -0.2C:
>>> pr = xr.open_dataset(path_to_pr_file).pr
>>> tasmin = xr.open_dataset(path_to_tasmin_file).tasmin
>>> high_precip_low_temp(pr, tas=tasmin, pr_thresh="10 mm/d", tas_thresh="-0.2 degC")
"""
pr_thresh = convert_units_to(pr_thresh, pr)
tas_thresh = convert_units_to(tas_thresh, tas)
cond = (pr >= pr_thresh) * (tas < tas_thresh) * 1
out = cond.resample(time=freq).sum(dim="time")
return to_agg_units(out, pr, "count")
@declare_units(pr="[precipitation]", per="[precipitation]", thresh="[precipitation]")
def days_over_precip_thresh(
pr: xarray.DataArray,
per: xarray.DataArray,
thresh: str = "1 mm/day",
freq: str = "YS",
) -> xarray.DataArray: # noqa: D401
r"""Number of wet days with daily precipitation over a given percentile.
Number of days over period where the precipitation is above a threshold defining wet days and above a given
percentile for that day.
Parameters
----------
pr : xarray.DataArray
Mean daily precipitation flux.
per : xarray.DataArray
Daily percentile of wet day precipitation flux.
thresh : str
Precipitation value over which a day is considered wet.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [time]
Count of days with daily precipitation above the given percentile [days].
Examples
--------
>>> from xclim.indices import days_over_precip_thresh
>>> pr = xr.open_dataset(path_to_pr_file).pr
>>> p75 = pr.quantile(.75, dim="time", keep_attrs=True)
>>> r75p = days_over_precip_thresh(pr, p75)
"""
per = convert_units_to(per, pr)
thresh = convert_units_to(thresh, pr)
tp = np.maximum(per, thresh)
if "dayofyear" in per.coords:
# Create time series out of doy values.
tp = resample_doy(tp, pr)
# Compute the days where precip is both over the wet day threshold and the percentile threshold.
out = threshold_count(pr, ">", tp, freq)
return to_agg_units(out, pr, "count")
@declare_units(pr="[precipitation]", per="[precipitation]", thresh="[precipitation]")
def fraction_over_precip_thresh(
pr: xarray.DataArray,
per: xarray.DataArray,
thresh: str = "1 mm/day",
freq: str = "YS",
) -> xarray.DataArray:
r"""Fraction of precipitation due to wet days with daily precipitation over a given percentile.
Percentage of the total precipitation over period occurring in days where the precipitation is above a threshold
defining wet days and above a given percentile for that day.
Parameters
----------
pr : xarray.DataArray
Mean daily precipitation flux.
per : xarray.DataArray
Daily percentile of wet day precipitation flux.
thresh : str
Precipitation value over which a day is considered wet.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [dimensionless]
Fraction of precipitation over threshold during wet days.
"""
per = convert_units_to(per, pr)
thresh = convert_units_to(thresh, pr)
tp = np.maximum(per, thresh)
if "dayofyear" in per.coords:
# Create time series out of doy values.
tp = resample_doy(tp, pr)
# Total precip during wet days over period
total = pr.where(pr > thresh).resample(time=freq).sum(dim="time")
# Compute the days where precip is both over the wet day threshold and the percentile threshold.
over = pr.where(pr > tp).resample(time=freq).sum(dim="time")
out = over / total
out.attrs["units"] = ""
return out
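# Usage sketch (mirroring the days_over_precip_thresh example above; the names
# are illustrative):
#
#   pr = xr.open_dataset(path_to_pr_file).pr
#   p75 = pr.quantile(.75, dim="time", keep_attrs=True)
#   frac_over_p75 = fraction_over_precip_thresh(pr, p75)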
@declare_units(tas="[temperature]", t90="[temperature]")
def tg90p(
tas: xarray.DataArray, t90: xarray.DataArray, freq: str = "YS"
) -> xarray.DataArray: # noqa: D401
r"""Number of days with daily mean temperature over the 90th percentile.
Number of days with daily mean temperature over the 90th percentile.
Parameters
----------
tas : xarray.DataArray
Mean daily temperature.
t90 : xarray.DataArray
90th percentile of daily mean temperature.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [time]
Count of days with daily mean temperature over the 90th percentile [days].
Notes
-----
The 90th percentile should be computed for a 5 day window centered on each calendar day for a reference period.
Examples
--------
>>> from xclim.core.calendar import percentile_doy
>>> from xclim.indices import tg90p
>>> tas = xr.open_dataset(path_to_tas_file).tas
>>> t90 = percentile_doy(tas, per=90).sel(percentiles=90)
>>> hot_days = tg90p(tas, t90)
"""
t90 = convert_units_to(t90, tas)
# Create time series out of doy values.
thresh = resample_doy(t90, tas)
# Identify the days over the 90th percentile
out = threshold_count(tas, ">", thresh, freq)
return to_agg_units(out, tas, "count")
@declare_units(tas="[temperature]", t10="[temperature]")
def tg10p(
tas: xarray.DataArray, t10: xarray.DataArray, freq: str = "YS"
) -> xarray.DataArray: # noqa: D401
r"""Number of days with daily mean temperature below the 10th percentile.
Number of days with daily mean temperature below the 10th percentile.
Parameters
----------
tas : xarray.DataArray
Mean daily temperature.
t10 : xarray.DataArray
10th percentile of daily mean temperature.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [time]
Count of days with daily mean temperature below the 10th percentile [days].
Notes
-----
The 10th percentile should be computed for a 5 day window centered on each calendar day for a reference period.
Examples
--------
>>> from xclim.core.calendar import percentile_doy
>>> from xclim.indices import tg10p
>>> tas = xr.open_dataset(path_to_tas_file).tas
>>> t10 = percentile_doy(tas, per=10).sel(percentiles=10)
>>> cold_days = tg10p(tas, t10)
"""
t10 = convert_units_to(t10, tas)
# Create time series out of doy values.
thresh = resample_doy(t10, tas)
# Identify the days below the 10th percentile
out = threshold_count(tas, "<", thresh, freq)
return to_agg_units(out, tas, "count")
@declare_units(tasmin="[temperature]", t90="[temperature]")
def tn90p(
tasmin: xarray.DataArray, t90: xarray.DataArray, freq: str = "YS"
) -> xarray.DataArray: # noqa: D401
r"""Number of days with daily minimum temperature over the 90th percentile.
Number of days with daily minimum temperature over the 90th percentile.
Parameters
----------
tasmin : xarray.DataArray
Minimum daily temperature.
t90 : xarray.DataArray
90th percentile of daily minimum temperature.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [time]
Count of days with daily minimum temperature above the 90th percentile [days].
Notes
-----
The 90th percentile should be computed for a 5 day window centered on each calendar day for a reference period.
Examples
--------
>>> from xclim.core.calendar import percentile_doy
>>> from xclim.indices import tn90p
>>> tasmin = xr.open_dataset(path_to_tasmin_file).tasmin
>>> t90 = percentile_doy(tasmin, per=90).sel(percentiles=90)
>>> warm_nights = tn90p(tasmin, t90)
"""
t90 = convert_units_to(t90, tasmin)
# Create time series out of doy values.
thresh = resample_doy(t90, tasmin)
# Identify the days with min temp above 90th percentile.
out = threshold_count(tasmin, ">", thresh, freq)
return to_agg_units(out, tasmin, "count")
@declare_units(tasmin="[temperature]", t10="[temperature]")
def tn10p(
tasmin: xarray.DataArray, t10: xarray.DataArray, freq: str = "YS"
) -> xarray.DataArray: # noqa: D401
r"""Number of days with daily minimum temperature below the 10th percentile.
Number of days with daily minimum temperature below the 10th percentile.
Parameters
----------
tasmin : xarray.DataArray
Minimum daily temperature.
t10 : xarray.DataArray
10th percentile of daily minimum temperature.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [time]
Count of days with daily minimum temperature below the 10th percentile [days].
Notes
-----
The 10th percentile should be computed for a 5 day window centered on each calendar day for a reference period.
Examples
--------
>>> from xclim.core.calendar import percentile_doy
>>> from xclim.indices import tn10p
>>> tasmin = xr.open_dataset(path_to_tasmin_file).tasmin
>>> t10 = percentile_doy(tasmin, per=10).sel(percentiles=10)
>>> cold_nights = tn10p(tasmin, t10)
"""
t10 = convert_units_to(t10, tasmin)
# Create time series out of doy values.
thresh = resample_doy(t10, tasmin)
# Identify the days below the 10th percentile
out = threshold_count(tasmin, "<", thresh, freq)
return to_agg_units(out, tasmin, "count")
@declare_units(tasmax="[temperature]", t90="[temperature]")
def tx90p(
tasmax: xarray.DataArray, t90: xarray.DataArray, freq: str = "YS"
) -> xarray.DataArray: # noqa: D401
r"""Number of days with daily maximum temperature over the 90th percentile.
Number of days with daily maximum temperature over the 90th percentile.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature.
t90 : xarray.DataArray
90th percentile of daily maximum temperature.
and no part of the geometry lies in the
exterior of the shape.
This means that a shape does not contain its own boundary, nor points
nor lines lying entirely on the boundary.
A shape does contain itself.
"""
raise NotImplementedError()
def covers(self, other):
"""Return whether this shape covers some geometry.
A shape covers another geometry if no part of that geometry lies in
the exterior of the shape. That is, the entire geometry is within the
interior and/or the boundary of the shape.
"""
raise NotImplementedError()
class BoundingBox(Shape):
__slots__ = ['min_x', 'min_y', 'max_x', 'max_y']
def __init__(self, min_x, min_y, max_x, max_y):
self.min_x = min_x
self.min_y = min_y
self.max_x = max_x
self.max_y = max_y
@property
def bbox(self):
return self
@property
def points(self):
"""Return the corners of this box's boundary as an iterable of Points.
The Points are returned starting with the lower-left corner and
proceeding clockwise.
"""
return [
Point(self.min_x, self.min_y),
Point(self.min_x, self.max_y),
Point(self.max_x, self.max_y),
Point(self.max_x, self.min_y),
]
@property
def boundary(self):
"""Return this box's boundary, as an iterable of Lines.
The Lines are returned starting from the lower-left corner and
proceeding clockwise.
Note that a BoundingBox boundary does not necessarily constitute a valid
Polygon. A Polygon must enclose some amount of interior space, whereas
a BoundingBox need not.
"""
a, b, c, d = self.points
return [
Line(a, b),
Line(b, c),
Line(c, d),
Line(d, a),
]
def disjoint(self, other):
"""Return whether the two geometries are spatially disjoint.
This is False if the geometries have any kind of contact, be it by
overlapping, crossing or touching.
"""
if isinstance(other, Point):
return (
float_lt(other.x, self.min_x) or
float_gt(other.x, self.max_x) or
float_lt(other.y, self.min_y) or
float_gt(other.y, self.max_y))
if isinstance(other, BoundingBox):
return (
float_lt(other.max_x, self.min_x) or
float_gt(other.min_x, self.max_x) or
float_lt(other.max_y, self.min_y) or
float_gt(other.min_y, self.max_y))
return not self.intersects(other)
def intersects(self, other):
"""Return whether this box intersects some other geometry."""
if isinstance(other, (Point, BoundingBox)):
return not self.disjoint(other)
if isinstance(other, (Shape, Collection)):
if self.disjoint(other.bbox):
return False
if self.contains(other):
return True
if isinstance(other, Line):
for line in self.boundary:
if line.intersects(other):
return True
return False
if isinstance(other, Polygon):
if self.covers(other) or other.covers(self):
return True
for box_line in self.boundary:
for poly_line in other.lines:
if box_line.intersects(poly_line):
return True
return False
if isinstance(other, Collection):
return any([self.intersects(x) for x in other])
def contains(self, other):
if isinstance(other, Point):
# Points on the boundary are not contained
return (
float_gt(other.x, self.min_x) and
float_lt(other.x, self.max_x) and
float_gt(other.y, self.min_y) and
float_lt(other.y, self.max_y))
if isinstance(other, Line):
# The line is contained if neither of its points is outside the
# box, and also it doesn't lie on the boundary.
if self.disjoint(other.a) or self.disjoint(other.b):
return False
return not (
(other.is_horizontal and (
float_close(other.a.y, self.min_y) or
float_close(other.a.y, self.max_y))) or
(other.is_vertical and (
float_close(other.a.x, self.min_x) or
float_close(other.a.x, self.max_x))))
if isinstance(other, BoundingBox):
return not (
float_lt(other.min_x, self.min_x) or
float_gt(other.max_x, self.max_x) or
float_lt(other.min_y, self.min_y) or
float_gt(other.max_y, self.max_y))
if isinstance(other, Polygon):
for p in other.points:
if self.disjoint(p):
return False
return True
if isinstance(other, Collection):
return (
self.covers(other) and
any([self.contains(x) for x in other]))
def covers(self, other):
if isinstance(other, Point):
return not self.disjoint(other)
if isinstance(other, Line):
# The line is covered if neither of its points is outside the box.
return not (self.disjoint(other.a) or self.disjoint(other.b))
if isinstance(other, BoundingBox):
return not (
float_lt(other.min_x, self.min_x) or
float_gt(other.max_x, self.max_x) or
float_lt(other.min_y, self.min_y) or
float_gt(other.max_y, self.max_y))
if isinstance(other, Polygon):
for p in other.points:
if self.disjoint(p):
return False
return True
if isinstance(other, Collection):
return all([self.covers(x) for x in other])
def intersection_line(self, other):
"""Return the intersection of this box with a Line.
The result will be either None, a Point or a Line.
"""
if self.disjoint(other):
return None
if self.covers(other):
return other
if other.is_vertical:
x = other.a.x
if other.a.y < other.b.y:
a = (x, max(other.a.y, self.min_y))
b = (x, min(other.b.y, self.max_y))
else:
b = (x, max(other.b.y, self.min_y))
a = (x, min(other.a.y, self.max_y))
return Line(a, b)
a, b = other.a, other.b
for boundary in self.boundary:
sect = boundary.intersection(Line(a, b))
if isinstance(sect, Line):
return sect
if isinstance(sect, Point):
if boundary.in_bound(a) is False:
a = sect
if boundary.in_bound(b) is False:
b = sect
if a.nearly_equal(b):
return a
return Line(a, b)
def intersection_bbox(self, other):
"""Return the intersection of this box with another box.
The result can be None, a Point, a Line or a BoundingBox.
"""
if self == other or self.nearly_equal(other):
return self
if self.disjoint(other):
return None
return BoundingBox(
max(self.min_x, other.min_x),
max(self.min_y, other.min_y),
min(self.max_x, other.max_x),
min(self.max_y, other.max_y))
def intersection(self, other):
if isinstance(other, Point):
return other if self.intersects(other) else None
if isinstance(other, Line):
return self.intersection_line(other)
if isinstance(other, BoundingBox):
return self.intersection_bbox(other)
if isinstance(other, Collection):
return self.intersection_collection(other)
return other.intersection(self)
def as_tuple(self):
return (self.min_x, self.min_y, self.max_x, self.max_y)
def __eq__(self, other):
if not isinstance(other, BoundingBox):
return False
return self.as_tuple() == other.as_tuple()
def __hash__(self):
return hash(tuple('BoundingBox') + self.as_tuple())
def nearly_equal(self, other):
if not isinstance(other, BoundingBox):
return False
z = zip(self.as_tuple(), other.as_tuple())
return all([float_close(a, b) for a, b in z])
def __str__(self):
return f"{self.min_x},{self.min_y},{self.max_x},{self.max_y}"
def __repr__(self):
return f"BoundingBox({self})"
class Polygon(Shape):
"""A shape enclosed by straight line segments.
The polygon must be given as a sequence of points forming a clockwise
exterior linear ring, with the interior of the polygon on the right-hand
side from a perspective travelling along the line.
When a polygon is initialised, we remove any consecutive duplicate or
redundant points, and close the polygon if it is not already closed (i.e.
ensure that the last point in the polygon is the same as the first point).
"""
__slots__ = ['points']
def __init__(self, value):
if isinstance(value, Polygon):
self.points = value.points
return
points = [Point(x) for x in value]
# Filter out consecutive identical points.
last = None
distinct = []
for p in points:
if last is None or p != last:
distinct.append(p)
last = p
points = distinct
# Filter out redundant points.
length = len(points)
boundary = []
for i, p in enumerate(points):
if i > 0 and i < length - 1:
# If the boundary doesn't change angle after this point,
# then it makes no difference to the shape whether it is
# included or not. So don't.
a = Line(points[i-1], p)
b = Line(p, points[i+1])
if a.angle == b.angle:
continue
boundary.append(p)
# If the polygon isn't closed, close it now.
if boundary and boundary[0] != boundary[-1]:
boundary.append(boundary[0])
if len(boundary) < 4:
raise ValueError("Not enough valid points for a closed polygon.")
self.points = tuple(boundary)
# Disallow backtracking along the same line
lines = self.lines
length = len(lines)
for i in range(length - 1):
if (lines[i].angle == (-lines[i+1]).angle):
raise ValueError(
f"Line {lines[i+1]} backtracks "
"along the previous line.")
# Disallow self-intersection
for i in range(length):
for j in range(length):
if i in {j, (j + 1) % length, (j - 1) % length}:
continue
if lines[i].intersects(lines[j]):
raise ValueError(
f"Line {lines[i]} intersects with {lines[j]}.")
def __len__(self):
return len(self.points)
def __getitem__(self, key):
if isinstance(key, (int, slice)):
return self.points[key]
raise TypeError()
def __contains__(self, point):
"""Return whether 'point' is nearly equal to any of the polygon's vertices.
Despite the name of the Python magic method, this doesn't test
"containment" in the geometric sense, but rather it tests *membership*.
To test whether a geometry is spatially contained in the polygon, use
contains().
"""
p = Point(point)
return any([p.nearly_equal(x) for x in self.points])
def __str__(self):
return " β ".join(map(str, self.points))
def __eq__(self, other):
"""Return whether this polygon is equal to another.
The polygons are considered equal if all their vertices are exactly
equal, and appear in the same order. The two polygons do not need to
begin at the same point.
"""
if not isinstance(other, Polygon):
return False
if len(self) != len(other):
return False
return self.points_standard == other.points_standard
def __hash__(self):
        return hash(('Polygon',) + self.points_standard)
@property
def points_standard(self):
"""Return this polygon's points with a standardised starting point.
The result is a linear ring of the points in this polygon, with the
starting point adjusted to be the point with the lowest 'x' value. If
            multiple points tie
canvas.
"""
layer = self.__class__() # Create instance of the derived class, not Layer.
layer.duration = 0 # Copy all transitions instantly.
layer.canvas = canvas
layer.parent = parent
layer.name = self.name
layer._x = self._x.copy()
layer._y = self._y.copy()
layer._width = self._width.copy()
layer._height = self._height.copy()
layer._origin = self._origin
layer._dx = self._dx.copy()
layer._dy = self._dy.copy()
layer._scale = self._scale.copy()
layer._rotation = self._rotation.copy()
layer._opacity = self._opacity.copy()
layer.duration = self.duration
layer.top = self.top
layer.flipped = self.flipped
layer.clipped = self.clipped
layer.hidden = self.hidden
layer.enabled = self.enabled
# Use base Layer.extend(), we don't care about what subclass.extend() does.
Layer.extend(layer, [child.copy() for child in self])
# Inherit all the dynamic properties and methods.
Prototype.inherit(layer, self)
return layer
def __getattr__(self, key):
""" Returns the given property, or the layer with the given name.
"""
if key in self.__dict__:
return self.__dict__[key]
for layer in self:
if layer.name == key:
return layer
raise AttributeError, "%s instance has no attribute '%s'" % (self.__class__.__name__, key)
def _set_container(self, key, value):
# If Layer.canvas is set to None, the canvas should no longer contain the layer.
# If Layer.canvas is set to Canvas, this canvas should contain the layer.
# Remove the layer from the old canvas/parent.
# Append the layer to the new container.
if self in (self.__dict__.get(key) or ()):
self.__dict__[key].remove(self)
if isinstance(value, list) and self not in value:
list.append(value, self)
self.__dict__[key] = value
def _get_canvas(self):
return self.__dict__.get("canvas")
def _get_parent(self):
return self.__dict__.get("parent")
def _set_canvas(self, canvas):
self._set_container("canvas", canvas)
def _set_parent(self, layer):
self._set_container("parent", layer)
canvas = property(_get_canvas, _set_canvas)
parent = property(_get_parent, _set_parent)
@property
def root(self):
return self.parent and self.parent.root or self
@property
def layers(self):
return self
def insert(self, index, layer):
list.insert(self, index, layer)
layer.__dict__["parent"] = self
def append(self, layer):
list.append(self, layer)
layer.__dict__["parent"] = self
def extend(self, layers):
for layer in layers:
Layer.append(self, layer)
def remove(self, layer):
list.remove(self, layer)
layer.__dict__["parent"] = None
def pop(self, index):
layer = list.pop(self, index)
layer.__dict__["parent"] = None
return layer
def _get_x(self):
return self._x.get()
def _get_y(self):
return self._y.get()
def _get_width(self):
return self._width.get()
def _get_height(self):
return self._height.get()
def _get_scale(self):
return self._scale.get()
def _get_rotation(self):
return self._rotation.get()
def _get_opacity(self):
return self._opacity.get()
def _set_x(self, x):
self._transform_cache = None
self._x.set(x, self.duration)
def _set_y(self, y):
self._transform_cache = None
self._y.set(y, self.duration)
def _set_width(self, width):
self._transform_cache = None
self._width.set(width, self.duration)
def _set_height(self, height):
self._transform_cache = None
self._height.set(height, self.duration)
def _set_scale(self, scale):
self._transform_cache = None
self._scale.set(scale, self.duration)
def _set_rotation(self, rotation):
self._transform_cache = None
self._rotation.set(rotation, self.duration)
def _set_opacity(self, opacity):
self._opacity.set(opacity, self.duration)
x = property(_get_x, _set_x)
y = property(_get_y, _set_y)
width = property(_get_width, _set_width)
height = property(_get_height, _set_height)
scaling = property(_get_scale, _set_scale)
rotation = property(_get_rotation, _set_rotation)
opacity = property(_get_opacity, _set_opacity)
def _get_xy(self):
return (self.x, self.y)
def _set_xy(self, (x,y)):
self.x = x
self.y = y
xy = property(_get_xy, _set_xy)
def _get_origin(self, relative=False):
""" Returns the point (x,y) from which all layer transformations originate.
            When relative=True, x and y are defined as fractions (0.0-1.0) of the width and height.
            In some cases x=0 or y=0 is returned:
            - For an infinite layer (width=None or height=None), we can't deduce the absolute origin
            from coordinates stored relatively (e.g. what is infinity*0.5?).
            - Vice versa, for an infinite layer we can't deduce the relative origin from coordinates
            stored absolutely (e.g. what is 200/infinity?).
"""
dx = self._dx.current
dy = self._dy.current
w = self._width.current
h = self._height.current
# Origin is stored as absolute coordinates and we want it relative.
if self._origin == ABSOLUTE and relative:
if w is None: w = 0
if h is None: h = 0
dx = w!=0 and dx/w or 0
dy = h!=0 and dy/h or 0
# Origin is stored as relative coordinates and we want it absolute.
elif self._origin == RELATIVE and not relative:
dx = w is not None and dx*w or 0
dy = h is not None and dy*h or 0
return dx, dy
def _set_origin(self, x, y, relative=False):
""" Sets the transformation origin point in either absolute or relative coordinates.
            For example, if a layer is 400x200 pixels, setting the origin point to (200,100)
            makes all transformations (translate, rotate, scale) originate from its center.
"""
self._transform_cache = None
self._dx.set(x, self.duration)
self._dy.set(y, self.duration)
self._origin = relative and RELATIVE or ABSOLUTE
def origin(self, x=None, y=None, relative=False):
""" Sets or returns the point (x,y) from which all layer transformations originate.
"""
if x is not None:
if x == CENTER:
x, y, relative = 0.5, 0.5, True
if y is not None:
self._set_origin(x, y, relative)
return self._get_origin(relative)
def _get_relative_origin(self):
return self.origin(relative=True)
def _set_relative_origin(self, xy):
self._set_origin(xy[0], xy[1], relative=True)
relative_origin = property(_get_relative_origin, _set_relative_origin)
def _get_absolute_origin(self):
return self.origin(relative=False)
def _set_absolute_origin(self, xy):
self._set_origin(xy[0], xy[1], relative=False)
absolute_origin = property(_get_absolute_origin, _set_absolute_origin)
def _get_visible(self):
return not self.hidden
def _set_visible(self, b):
self.hidden = not b
visible = property(_get_visible, _set_visible)
def translate(self, x, y):
self.x += x
self.y += y
def rotate(self, angle):
self.rotation += angle
def scale(self, f):
self.scaling *= f
def flip(self):
self.flipped = not self.flipped
def _update(self):
""" Called each frame from canvas._update() to update the layer transitions.
"""
done = self._x.update()
done &= self._y.update()
done &= self._width.update()
done &= self._height.update()
done &= self._dx.update()
done &= self._dy.update()
done &= self._scale.update()
done &= self._rotation.update()
if not done: # i.e. the layer is being transformed
self._transform_cache = None
self._opacity.update()
self.update()
for layer in self:
layer._update()
def update(self):
"""Override this method to provide custom updating code.
"""
pass
@property
def done(self):
""" Returns True when all transitions have finished.
"""
return self._x.done \
and self._y.done \
and self._width.done \
and self._height.done \
and self._dx.done \
and self._dy.done \
and self._scale.done \
and self._rotation.done \
and self._opacity.done
def _draw(self):
""" Draws the transformed layer and all of its children.
"""
if self.hidden:
return
glPushMatrix()
# Be careful that the transformations happen in the same order in Layer._transform().
# translate => flip => rotate => scale => origin.
# Center the contents around the origin point.
dx, dy = self.origin(relative=False)
glTranslatef(round(self._x.current), round(self._y.current), 0)
if self.flipped:
glScalef(-1, 1, 1)
glRotatef(self._rotation.current, 0, 0, 1)
glScalef(self._scale.current, self._scale.current, 1)
# Enable clipping mask if Layer.clipped=True.
if self.clipped:
beginclip(self._clipping_mask)
# Draw child layers below.
for layer in self:
if layer.top is False:
layer._draw()
# Draw layer.
global _alpha
_alpha = self._opacity.current # XXX should also affect child layers?
glPushMatrix()
glTranslatef(-round(dx), -round(dy), 0) # Layers are drawn relative from parent origin.
self.draw()
glPopMatrix()
_alpha = 1
# Draw child layers on top.
for layer in self:
if layer.top is True:
layer._draw()
if self.clipped:
endclip()
glPopMatrix()
def draw(self):
"""Override this method to provide custom drawing code for this layer.
At this point, the layer is correctly transformed.
"""
pass
def render(self):
""" Returns the layer as a flattened image.
The layer and all of its children need to have width and height set.
"""
b = self.bounds
if geometry.INFINITE in (b.x, b.y, b.width, b.height):
raise LayerRenderError, "can't render layer of infinite size"
return render(lambda: (translate(-b.x,-b.y), self._draw()), b.width, b.height)
def layer_at(self, x, y, clipped=False, enabled=False, transformed=True, _covered=False):
""" Returns the topmost layer containing the mouse position, None otherwise.
With clipped=True, no parts of child layers outside the parent's bounds are checked.
With enabled=True, only enabled layers are checked (useful for events).
"""
if self.hidden:
# Don't do costly operations on layers the user can't see.
return None
if enabled and not self.enabled:
# Skip disabled layers during event propagation.
return None
if _covered:
# An ancestor is blocking this layer, so we can't select it.
return None
hit = self.contains(x, y, transformed)
if clipped:
# If (x,y) is not inside the clipped bounds, return None.
# If children protruding beyond the layer's bounds are clipped,
# we only need to look at children on top of the layer.
# Each child is drawn on top of the previous child,
            # so we hit test
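# A small, illustrative sketch of the transformation API above (origin(),
# translate(), rotate(), scale(), flip(), copy()). It takes an already constructed
# layer because Layer.__init__ is not shown in this excerpt; CENTER is the constant
# handled by origin().
def _layer_transform_demo(layer):
    layer.origin(CENTER)      # transformations now originate from the layer's center
    layer.translate(50, 20)   # move 50px right and 20px down (animated over layer.duration)
    layer.rotate(45)          # rotate 45 degrees around the origin point
    layer.scale(1.5)          # scale up by 50%
    layer.flip()              # mirror horizontally
    layer.opacity = 0.5       # fades to 50% opacity over layer.duration seconds
    return layer.copy()       # detached copy that keeps the same transformation state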
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.structure.io.pdbqt"
__author__ = "<NAME>, <NAME>"
__all__ = ["PDBQTFile"]
import warnings
import numpy as np
import networkx as nx
from ....file import TextFile, InvalidFileError
from ...error import BadStructureError
from ...atoms import AtomArray, AtomArrayStack
from ...charges import partial_charges
from ...bonds import BondList, BondType, find_connected, find_rotatable_bonds
PARAMETRIZED_ELEMENTS = [
"H", "C", "N", "O", "P", "S",
"F", "CL", "BR", "I",
"MG", "CA", "MN", "FE", "ZN"
]
class PDBQTFile(TextFile):
"""
This class represents an *AutoDock* PDBQT file.
This class only provides rudimentary support for reading/writing
the pure atom information.
EXPERIMENTAL: Future API changes are probable.
Examples
--------
Write biotin as flexible ligand into a PDBQT file:
>>> import os.path
>>> ligand = residue("BTN")
>>> file = PDBQTFile()
>>> mask = file.set_structure(ligand, rotatable_bonds="all")
>>> # Print removed nonpolar hydrogen atoms
>>> print(ligand[~mask])
HET 0 BTN H101 H 3.745 1.171 0.974
HET 0 BTN H102 H 4.071 1.343 -0.767
HET 0 BTN H91 H 2.802 -0.740 -1.211
HET 0 BTN H92 H 2.476 -0.912 0.530
HET 0 BTN H81 H 1.289 1.265 0.523
HET 0 BTN H82 H 1.616 1.437 -1.218
HET 0 BTN H71 H 0.346 -0.646 -1.662
HET 0 BTN H72 H 0.020 -0.818 0.079
HET 0 BTN H2 H -0.838 1.576 -1.627
HET 0 BTN H61 H -3.797 1.837 1.286
HET 0 BTN H62 H -3.367 2.738 -0.205
HET 0 BTN H5 H -4.307 0.812 -1.205
HET 0 BTN H4 H -2.451 -0.038 -2.252
>>> print(file)
ROOT
HETATM 1 C11 BTN 0 5.089 -0.280 0.173 1.00 0.00 0.258 C
HETATM 2 O11 BTN 0 4.956 -1.473 0.030 1.00 0.00 -0.264 OA
ENDROOT
BRANCH 1 3
HETATM 3 O12 BTN 0 6.299 0.233 0.444 1.00 0.00 -0.331 OA
HETATM 17 HO2 BTN 0 7.034 -0.391 0.517 1.00 0.00 0.221 HD
ENDBRANCH 1 3
BRANCH 1 4
HETATM 4 C10 BTN 0 3.896 0.631 0.039 1.00 0.00 0.105 C
BRANCH 4 5
HETATM 5 C9 BTN 0 2.651 -0.200 -0.276 1.00 0.00 0.010 C
BRANCH 5 6
HETATM 6 C8 BTN 0 1.440 0.725 -0.412 1.00 0.00 0.002 C
BRANCH 6 7
HETATM 7 C7 BTN 0 0.196 -0.106 -0.727 1.00 0.00 0.016 C
BRANCH 7 8
HETATM 8 C2 BTN 0 -1.015 0.819 -0.863 1.00 0.00 0.065 C
HETATM 9 S1 BTN 0 -1.419 1.604 0.751 1.00 0.00 -0.154 SA
HETATM 10 C6 BTN 0 -3.205 1.827 0.371 1.00 0.00 0.090 C
HETATM 11 C5 BTN 0 -3.530 0.581 -0.476 1.00 0.00 0.091 C
HETATM 12 N1 BTN 0 -3.970 -0.507 0.412 1.00 0.00 -0.239 NA
HETATM 13 C3 BTN 0 -3.141 -1.549 0.271 1.00 0.00 0.272 C
HETATM 14 O3 BTN 0 -3.271 -2.589 0.888 1.00 0.00 -0.259 OA
HETATM 15 N2 BTN 0 -2.154 -1.343 -0.612 1.00 0.00 -0.239 NA
HETATM 16 C4 BTN 0 -2.289 0.010 -1.175 1.00 0.00 0.093 C
HETATM 18 HN1 BTN 0 -4.738 -0.474 1.004 1.00 0.00 0.132 HD
HETATM 19 HN2 BTN 0 -1.462 -1.982 -0.843 1.00 0.00 0.132 HD
ENDBRANCH 7 8
ENDBRANCH 6 7
ENDBRANCH 5 6
ENDBRANCH 4 5
ENDBRANCH 1 4
TORSDOF 6
>>> file.write(os.path.join(path_to_directory, "1l2y_mod.pdb"))
"""
def get_remarks(self, model=None):
"""
        Get the content of ``REMARK`` lines.
Parameters
----------
model : int, optional
If this parameter is given, the function will return a
string from the remarks corresponding to the given
model number (starting at 1).
Negative values are used to index models starting from the
            last model instead of the first model.
If this parameter is omitted, a list of strings
containing all models will be returned, even if the
structure contains only one model.
Returns
-------
lines : str or list of str
            The content of ``REMARK`` lines, without the leading
            ``'REMARK'``.
"""
# Line indices where a new model starts
model_start_i = np.array([i for i in range(len(self.lines))
if self.lines[i].startswith(("MODEL"))],
dtype=int)
# Line indices with ATOM or HETATM records
remark_line_i = np.array([i for i in range(len(self.lines)) if
self.lines[i].startswith("REMARK")],
dtype=int)
# Structures containing only one model may omit MODEL record
# In these cases model starting index is set to 0
if len(model_start_i) == 0:
model_start_i = np.array([0])
if model is None:
# Add exclusive end of file
model_start_i = np.concatenate((model_start_i, [len(self.lines)]))
model_i = 0
remarks = []
for i in range(len(model_start_i) - 1):
start = model_start_i[i]
stop = model_start_i[i+1]
model_remark_line_i = remark_line_i[
(remark_line_i >= start) & (remark_line_i < stop)
]
remarks.append(
"\n".join([self.lines[i][7:] for i in model_remark_line_i])
)
return remarks
else:
last_model = len(model_start_i)
if model == 0:
raise ValueError("The model index must not be 0")
# Negative models mean index starting from last model
model = last_model + model + 1 if model < 0 else model
if model < last_model:
line_filter = ( ( remark_line_i >= model_start_i[model-1] ) &
( remark_line_i < model_start_i[model ] ) )
elif model == last_model:
line_filter = (remark_line_i >= model_start_i[model-1])
else:
raise ValueError(
f"The file has {last_model} models, "
f"the given model {model} does not exist"
)
remark_line_i = remark_line_i[line_filter]
# Do not include 'REMARK ' itself -> begin from pos 8
return "\n".join([self.lines[i][7:] for i in remark_line_i])
def get_structure(self, model=None):
"""
Get an :class:`AtomArray` or :class:`AtomArrayStack` from the
PDBQT file.
Parameters
----------
model : int, optional
If this parameter is given, the function will return an
:class:`AtomArray` from the atoms corresponding to the given
model number (starting at 1).
Negative values are used to index models starting from the
            last model instead of the first model.
If this parameter is omitted, an :class:`AtomArrayStack`
containing all models will be returned, even if the
structure contains only one model.
Returns
-------
array : AtomArray or AtomArrayStack
The return type depends on the `model` parameter.
"""
# Line indices where a new model starts
model_start_i = np.array([i for i in range(len(self.lines))
if self.lines[i].startswith(("MODEL"))],
dtype=int)
# Line indices with ATOM or HETATM records
atom_line_i = np.array([i for i in range(len(self.lines)) if
self.lines[i].startswith(("ATOM", "HETATM"))],
dtype=int)
# Structures containing only one model may omit MODEL record
# In these cases model starting index is set to 0
if len(model_start_i) == 0:
model_start_i = np.array([0])
if model is None:
depth = len(model_start_i)
length = self._get_model_length(model_start_i, atom_line_i)
array = AtomArrayStack(depth, length)
# Line indices for annotation determination
# Annotation is determined from model 1,
# therefore from ATOM records before second MODEL record
if len(model_start_i) == 1:
annot_i = atom_line_i
else:
annot_i = atom_line_i[atom_line_i < model_start_i[1]]
# Line indices for coordinate determination
coord_i = atom_line_i
else:
last_model = len(model_start_i)
if model == 0:
raise ValueError("The model index must not be 0")
# Negative models mean index starting from last model
model = last_model + model + 1 if model < 0 else model
if model < last_model:
line_filter = ( ( atom_line_i >= model_start_i[model-1] ) &
( atom_line_i < model_start_i[model ] ) )
elif model == last_model:
line_filter = (atom_line_i >= model_start_i[model-1])
else:
raise ValueError(
f"The file has {last_model} models, "
f"the given model {model} does not exist"
)
annot_i = coord_i = atom_line_i[line_filter]
array = AtomArray(len(coord_i))
# Save atom IDs for later sorting into the original atom order
atom_id = np.zeros(array.array_length(), int)
# Create annotation arrays
chain_id = np.zeros(array.array_length(), array.chain_id.dtype)
res_id = np.zeros(array.array_length(), array.res_id.dtype)
ins_code = np.zeros(array.array_length(), array.ins_code.dtype)
res_name = np.zeros(array.array_length(), array.res_name.dtype)
hetero = np.zeros(array.array_length(), array.hetero.dtype)
atom_name = np.zeros(array.array_length(), array.atom_name.dtype)
element = np.zeros(array.array_length(), array.element.dtype)
# Fill annotation array
# i is index in array, line_i is line index
for i, line_i in enumerate(annot_i):
line = self.lines[line_i]
atom_id[i] = int(line[6:11])
chain_id[i] = line[21].upper().strip()
res_id[i] = int(line[22:26])
ins_code[i] = line[26].strip()
res_name[i] = line[17:20].strip()
hetero[i] = (False if line[0:4] == "ATOM" else True)
atom_name[i] = line[12:16].strip()
element[i] = line[76:78].strip()
# Add annotation arrays to atom array (stack)
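# A hedged reading sketch for PDBQTFile: it assumes the TextFile base class
# provides biotite's usual classmethod read(); get_structure() and get_remarks()
# are the methods defined above. The file name is a placeholder.
def _pdbqt_read_demo(path="docked_ligand.pdbqt"):
    pdbqt_file = PDBQTFile.read(path)
    models = pdbqt_file.get_structure()              # AtomArrayStack with all models
    first_model = pdbqt_file.get_structure(model=1)  # AtomArray for the first model
    remarks = pdbqt_file.get_remarks()               # REMARK content, one string per model
    return models, first_model, remarks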
from __future__ import annotations
import asyncio
import logging
import signal
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Dict,
List,
Optional,
overload,
)
from ferris.types import message
from .channel import Channel
from .connection import Connection
from .errors import Reconnect, WebsocketException
from .guild import Guild
from .invite import Invite
from .message import Message
from .user import ClientUser, User
from .utils import sanitize_id
from .websocket import Websocket
if TYPE_CHECKING:
from .types import Id
log = logging.getLogger(__name__)
__all__ = ('Dispatcher', 'Client')
# https://github.com/Rapptz/discord.py/blob/main/discord/client.py#L81
# https://github.com/python/cpython/blob/main/Lib/asyncio/runners.py
def _cancel_tasks(loop: asyncio.AbstractEventLoop) -> None:
tasks = {t for t in asyncio.all_tasks(loop=loop) if not t.done()}
if not tasks:
return
log.info(f'Cancelling {len(tasks)} tasks.')
for task in tasks:
task.cancel()
loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
log.info('Cancelled all tasks.')
for task in tasks:
if task.cancelled():
continue
if task.exception() is not None:
loop.call_exception_handler(
{
'message': 'Unhandled exception during Client.run shutdown.',
'exception': task.exception(),
'task': task,
}
)
def _cleanup_loop(loop: asyncio.AbstractEventLoop) -> None:
try:
_cancel_tasks(loop)
loop.run_until_complete(loop.shutdown_asyncgens())
finally:
log.info('Closing the event loop.')
loop.close()
class Dispatcher:
def __init__(self, loop: asyncio.AbstractEventLoop):
self.loop: asyncio.AbstractEventLoop = loop
self.event_handlers: defaultdict[
str, List[Callable[..., Awaitable]]
] = defaultdict(list)
    async def wrap_event(
        self, coro: Awaitable
    ) -> None:
try:
await coro
except asyncio.CancelledError:
pass
except Exception as exc:
self.dispatch('error', exc)
def dispatch(self, event: str, *args, **kwargs) -> asyncio.Future:
coros = []
if callback := getattr(self, f'on_{event}', False):
coros.append(callback(*args, **kwargs))
if callbacks := self.event_handlers.get(event):
coros += [self.wrap_event(cb(*args, **kwargs)) for cb in callbacks]
return asyncio.ensure_future(asyncio.gather(*coros))
def add_listener(self, event: str, callback: Callable[..., Awaitable]) -> None:
if event.startswith('on_'):
event = event[3:]
self.event_handlers[event].append(callback)
def remove_listener(self, event: str, callback: Callable[..., Awaitable]) -> None:
if event.startswith('on_'):
event = event[3:]
self.event_handlers[event].remove(callback)
def clear_listeners(self) -> None:
self.event_handlers.clear()
def stop_listening_to(self, event: str, /) -> None:
if event.startswith('on_'):
event = event[3:]
self.event_handlers[event].clear()
del self.event_handlers[event]
def listen(self, event: Optional[str] = None):
def decorator(func: Callable[..., Awaitable]) -> Callable[..., Awaitable]:
store = event
if store is None:
store = func.__name__
if store.startswith('on_'):
store = store[3:]
self.add_listener(store, func)
return func
return decorator
def event(self, func: Callable[..., Awaitable]) -> Callable[..., Awaitable]:
event = func.__name__
if not event.startswith('on_'):
event = 'on_' + event
setattr(self, event, func)
return func
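# A minimal sketch of how listen(), event() and dispatch() fit together. The event
# name and handlers are illustrative placeholders; in practice dispatch() is driven
# by the websocket layer.
def _dispatcher_demo(loop: asyncio.AbstractEventLoop) -> Dispatcher:
    dispatcher = Dispatcher(loop)

    @dispatcher.listen('message_create')          # stored under 'message_create'
    async def handle_message(message):
        log.info('received message: %r', message)

    @dispatcher.event                             # becomes dispatcher.on_error
    async def on_error(error):
        log.error('event handler raised: %r', error)

    # The websocket layer would call this when a matching payload arrives.
    dispatcher.dispatch('message_create', {'content': 'hi'})
    return dispatcher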
class EventTemplateMixin:
async def on_login(self) -> None:
"""|coro|
Called when the client has logged in.
"""
pass
async def on_close(self) -> None:
"""|coro|
Called when the client is closing.
This could be used for clean-up/memory freeing.
"""
pass
async def on_connect(self) -> None:
"""|coro|
Called when the client has connected to FerrisChat ws.
"""
pass
async def on_error(self, error: Exception) -> None:
"""|coro|
Called when an exception is raised in an event handler.
"""
pass
class Client(Dispatcher, EventTemplateMixin):
"""Represents a client connection to FerrisChat.
Parameters
----------
loop: Optional[:class:`asyncio.AbstractEventLoop`]
The event loop to use for the client. If not passed, then the default event loop is used.
max_messages_count: Optional[int]
The maximum number of messages to store in the internal message buffer.
Defaults to ``1000``.
max_heartbeat_timeout: Optional[int]
The maximum timeout in seconds between sending a heartbeat to the server.
        If a heartbeat takes longer than this timeout, the client will attempt to reconnect.
"""
def __init__(
self, /, loop: Optional[asyncio.AbstractEventLoop] = None, **options
) -> None:
self.loop = loop or asyncio.get_event_loop()
self._is_closed: bool = False
self._connection: Connection = Connection(self.loop, self.dispatch, **options)
self._is_ready = self._connection._is_ready
super().__init__(self.loop)
@property
def user(self) -> Optional[ClientUser]:
"""Returns the connected :class:`~ClientUser`"""
return self._connection.user
@property
def latency(self) -> float:
"""Returns the websocket latency between the client and the server."""
        if ws := getattr(self, 'ws', None):
            return ws._heartbeat_manager.latency
return float('inf')
@property
def is_ready(self) -> bool:
"""Returns whether the client is ready to use."""
return self._is_ready.done()
@property
def is_closed(self) -> bool:
"""Returns whether the client is closed."""
return self._is_closed
async def wait_until_ready(self) -> None:
"""|coro|
Waits until the client is ready to use.
"""
return await self._is_ready
@property
def guilds(self) -> List[Guild]:
"""Returns a list of the client's guilds."""
return list(self._connection._guilds.values())
async def join_guild(self, code: str) -> None:
"""|coro|
Joins a guild with an invite code.
Parameters
----------
code: str
The invite code to use.
"""
await self._connection.api.invites(code).post()
async def create_bot(self, name: str) -> User:
"""|coro|
Creates a bot account.
Parameters
----------
name: str
The name of the bot.
Returns
-------
:class:`User`
"""
u = await self._connection.api.users(self.user.id).bots.post({'username': name})
return User(u, self._connection)
async def fetch_invite(self, code: str) -> None:
"""|coro|
Fetches an invite by code.
Parameters
----------
code: str
The invite code to fetch.
"""
i = await self._connection.api.invites(code).get()
return Invite(self._connection, i)
async def create_guild(self, name: str) -> Guild:
"""|coro|
Creates a new guild.
Parameters
----------
name: str
The name of the guild.
Returns
-------
:class:`Guild`
The guild created.
"""
g = await self._connection.api.guilds.post(json={'name': name})
return Guild(self._connection, g)
async def fetch_self(self, cache: bool = True) -> ClientUser:
"""|coro|
Fetches the client's user.
Parameters
----------
cache: bool
Whether to cache the user. Defaults to ``True``.
Returns
-------
:class:`ClientUser`
The client's user.
"""
_u = await self._connection.api.users.me.get()
if cache:
u = self._connection.user
u._process_data(_u)
else:
u = ClientUser(self._connection, _u)
return u
def get_message(self, id: Id) -> Optional[Message]:
"""
Gets a message from the internal message buffer.
Parameters
----------
id: int
The ID of the message to get.
Returns
-------
Optional[:class:`Message`]
The message with the given ID, or ``None`` if it does not exist.
"""
id = sanitize_id(id)
return self._connection.get_message(id)
def get_channel(self, id: Id) -> Optional[Channel]:
"""
Gets a channel from the internal channel buffer.
Parameters
----------
id: int
The ID of the channel to get.
Returns
-------
Optional[:class:`Channel`]
The channel with the given ID, or ``None`` if it does not exist.
"""
id = sanitize_id(id)
return self._connection.get_channel(id)
def get_user(self, id: Id) -> Optional[User]:
"""
Gets a user from the internal user buffer.
Parameters
----------
id: int
The ID of the user to get.
Returns
-------
Optional[:class:`User`]
The user with the given ID, or ``None`` if it does not exist.
"""
id = sanitize_id(id)
return self._connection.get_user(id)
async def fetch_message(self, id: Id, *, cache: bool = False) -> Message:
"""|coro|
Fetches a message from the internal message buffer.
Parameters
----------
id: int
The ID of the message to fetch.
cache: Optional[bool]
Whether to cache the message in the internal message cache.
Defaults to ``False``.
Returns
-------
:class:`Message`
The message with the given ID.
"""
id = sanitize_id(id)
m = await self._connection.api.messages(id).get()
m = Message(self._connection, m)
if cache:
self._connection.store_message(m)
return m
async def ping(self) -> None:
"""|coro|
        Pings the server. This pings the REST API, not the websocket.
        If you want the websocket latency, use :attr:`latency` instead.
"""
await self._connection.api.ping.get()
async def fetch_channel(self, id: Id, *, cache: bool = False) -> Channel:
"""|coro|
Fetches a channel by ID.
Parameters
----------
id: int
The ID of the channel to fetch.
cache: Optional[bool]
Whether to cache the channel in the internal channel cache.
Returns
-------
:class:`Channel`
The channel with the given ID.
"""
id = sanitize_id(id)
c = await self._connection.api.channels(id).get()
c = Channel(self._connection, c)
if cache:
self._connection.store_channel(c)
return c
async def fetch_user(self, id: Id, *, cache: bool = False) -> User:
"""|coro|
Fetches a user by ID.
Parameters
----------
id: int
The ID of the user to fetch.
cache: Optional[bool]
Whether to cache the user in the internal user cache.
Returns
-------
:class:`User`
The user with the given ID.
"""
id = sanitize_id(id)
u = await self._connection.api.users(id).get()
u = User(self._connection, u)
if cache:
self._connection.store_user(u)
return u
async def fetch_guild(
self,
id: Id,
*,
fetch_members: bool = False,
fetch_channels: bool = True,
cache: bool = False,
) -> Guild:
"""|coro|
Fetches a guild by ID.
Parameters
----------
id: int
The guild's ID.
fetch_members: Optional[bool], default False
Whether to fetch the guild's members. Defaults to ``False``.
fetch_channels: Optional[bool], default True
Whether to fetch the guild's channels. Defaults to ``True``.
cache: Optional[bool], default False
Whether to cache the guild in the internal guild cache.
Returns
-------
:class:`Guild`
The guild fetched.
Raises
------
:exc:`NotFound`
A guild with the given ID was not found.
"""
id = sanitize_id(id)
g = await self._connection.api.guilds(id).get(
params={
'members': str(fetch_members).lower(),
'channels': str(fetch_channels).lower(),
}
)
g = Guild(self._connection, g)
if cache:
self._connection.store_guild(g)
return g
async def close(self) -> None:
if self.is_closed:
return
self._is_closed = True
if hasattr(self, 'ws'):
await self.ws.close(code=1000)
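# A hedged usage sketch for the Client API documented above. The IDs are
# placeholders; connecting/logging in is not shown in this excerpt, so only a
# handler and a helper coroutine are wired up.
def _client_usage_demo() -> Client:
    client = Client()

    @client.listen('message_create')
    async def on_message(message):
        log.info('message received: %r', message)

    async def inspect(guild_id, channel_id):
        await client.wait_until_ready()
        guild = await client.fetch_guild(guild_id, fetch_members=False, cache=True)
        channel = client.get_channel(channel_id)   # cache lookup; may return None
        return guild, channel

    return client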
numeric_times=args.numeric_times,
age=args.age,
tags=args.tags,
status=args.status))
except Exception as e:
if args.verbosity and args.verbosity > 2:
sys.stderr.write(get_traceback())
env.logger.error(e)
sys.exit(1)
#
# command purge
#
def get_purge_parser(desc_only=False):
parser = argparse.ArgumentParser(
'purge', description='''Remove local or remote tasks''')
if desc_only:
return parser
parser.add_argument(
'tasks',
nargs='*',
help='''ID of the tasks to be removed.
        There is no need to specify complete task IDs because SoS will match the
        specified names with tasks starting with them. If no task ID is specified,
all tasks related to specified workflows (option -w) will be removed.'''
)
parser.add_argument(
'-a',
'--all',
action='store_true',
help='''Clear all task information on local or specified remote task queue,
including tasks created by other workflows.''')
parser.add_argument(
'--age',
help='''Limit to tasks that are created more than
(default) or within specified age. Value of this parameter can be in units
        s (second), m (minute), h (hour), or d (day, default), or in the format of
HH:MM:SS, with optional prefix + for older (default) and - for newer than
specified age.''')
parser.add_argument(
'-s',
'--status',
nargs='+',
help='''Only remove tasks with
specified status, which can be pending, submitted, running, completed, failed,
        and aborted. One or more statuses can be specified.''')
parser.add_argument(
'-t',
'--tags',
nargs='*',
help='''Only remove tasks with
one of the specified tags.''')
parser.add_argument(
'-q',
'--queue',
        help='''Remove tasks on the specified task queue or remote host.
        The queue can be defined in global or local sos
configuration file, or a file specified by option --config. A host is
assumed to be a remote machine with process type if no configuration
is found. ''')
parser.add_argument(
'-c',
'--config',
help='''A configuration file with host
definitions, in case the definitions are not defined in global sos config.yml files.'''
)
parser.add_argument(
'-v',
dest='verbosity',
type=int,
choices=range(5),
default=2,
help='''Output error (0), warning (1), info (2), debug (3) and trace (4)
information to standard output (default to 2).''')
parser.set_defaults(func=cmd_purge)
return parser
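# A small parse example for the purge command line defined above; the task-ID
# prefix and tag are placeholders.
def _purge_args_demo():
    parser = get_purge_parser()
    # Roughly equivalent to: sos purge 7d3a -t scratch --age +7d -s completed
    return parser.parse_args(
        ['7d3a', '-t', 'scratch', '--age', '+7d', '-s', 'completed'])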
def cmd_purge(args, workflow_args):
from .tasks import purge_tasks
from .utils import env, load_config_files, get_traceback
from .hosts import Host
#from .monitor import summarizeExecution
env.verbosity = args.verbosity
try:
if not args.queue:
purge_tasks(args.tasks, args.all, args.age, args.status, args.tags,
args.verbosity)
else:
# remote host?
load_config_files(args.config)
host = Host(args.queue)
print(
host._task_engine.purge_tasks(args.tasks, args.all, args.age,
args.status, args.tags,
args.verbosity))
except Exception as e:
if args.verbosity and args.verbosity > 2:
sys.stderr.write(get_traceback())
env.logger.error(e)
sys.exit(1)
#
# command kill
#
#
def get_kill_parser(desc_only=False):
parser = argparse.ArgumentParser(
'kill', description='''Stop the execution of running task''')
if desc_only:
return parser
parser.add_argument(
'tasks',
nargs='*',
help='''IDs of the tasks
        that will be killed. There is no need to specify complete task IDs because
        SoS will match the specified names with tasks starting with them.''')
parser.add_argument(
'-a',
'--all',
action='store_true',
help='''Kill all tasks in local or specified remote task queue''')
parser.add_argument(
'-q',
'--queue',
        help='''Kill jobs on the specified task queue or remote host.
        The queue can be defined in global or local sos
configuration file, or a file specified by option --config. A host is
assumed to be a remote machine with process type if no configuration
is found.''')
parser.add_argument(
'-t',
'--tags',
nargs='*',
help='''Only kill tasks with
one of the specified tags.''')
parser.add_argument(
'-c',
'--config',
help='''A configuration file with host
definitions, in case the definitions are not defined in global sos config.yml files.'''
)
parser.add_argument(
'-v',
'--verbosity',
type=int,
choices=range(5),
default=2,
help='''Output error (0), warning (1), info (2) and debug (3)
information to standard output (default to 2). More debug information could be
generated by setting environmental variable SOS_DEBUG to comma separated topics
of GENERAL, WORKER, CONTROLLER, STEP, VARIABLE, EXECUTOR, TARGET, ZERONQ, TASK,
DAG, and ACTION, or ALL for all debug information''')
parser.set_defaults(func=cmd_kill)
return parser
def cmd_kill(args, workflow_args):
from .tasks import kill_tasks
from .utils import env, load_config_files
from .hosts import Host
env.verbosity = args.verbosity
if not args.queue:
if args.all:
if args.tasks:
env.logger.warning(
'Task ids "{}" are ignored with option --all'.format(
' '.join(args.tasks)))
if args.tags:
env.logger.warning('Option tags is ignored with option --all')
kill_tasks([])
else:
if not args.tasks and not args.tags:
env.logger.warning(
'Please specify task id, or one of options --all and --tags'
)
else:
kill_tasks(tasks=args.tasks, tags=args.tags)
else:
# remote host?
load_config_files(args.config)
host = Host(args.queue)
print(
host._task_engine.kill_tasks(
tasks=args.tasks, tags=args.tags, all_tasks=args.all))
#
# command remove
#
def get_remove_parser(desc_only=False):
parser = argparse.ArgumentParser(
'remove',
description='''Remove specified files and/or their signatures''')
if desc_only:
return parser
parser.add_argument(
'targets',
nargs='*',
metavar='FILE_OR_DIR',
help='''Files and directories to be removed. Directories will be
        scanned for files to be removed, but no directory itself will be removed.''')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
'-t',
'--tracked',
action='store_true',
default=False,
help='''Limit files to only files tracked by SoS, namely files that are
input, output, or dependent files of steps.''')
group.add_argument(
'-u',
'--untracked',
action='store_true',
default=False,
help='''Limit files to untracked files, namely files that are not
tracked by SoS steps.''')
group.add_argument(
'-s',
'--signature',
action='store_true',
default=False,
help='''Remove signatures of specified files (not files themselves).
As a special case, all local signatures will be removed if this option
is specified without target.''')
group.add_argument(
'-z',
'--zap',
action='store_true',
default=False,
help='''Replace files with their signatures. The file will not be
regenerated by SoS unless is it actually needed by other steps. This
option is usually used to remove large intermediate files from
completed workflows while allowing relevant steps to be skipped
during re-execution of the workflow.''')
group.add_argument(
'-p',
'--placeholders',
action='store_true',
default=False,
help='''Remove placeholder files that might have been left
uncleaned after an interrupted dryrun.''')
parser.add_argument(
'-e',
'--external',
action='store_true',
default=False,
help='''By default the remove command will only remove files and
signatures under the current project directory. This option allows
sos to remove files and/or signature of external files.''')
parser.add_argument(
'--size',
help='''Limit to files that exceed or smaller than specified size.
Value of option should be in unit K, M, KB, MB, MiB, GB, etc, with
optional prefix + for larger than (default), or - for smaller than
specified size.''')
parser.add_argument(
'--age',
help='''Limit to files that are modified more than
(default) or within specified age. Value of this parameter can be in units
        s (second), m (minute), h (hour), or d (day, default), or in the format of
HH:MM:SS, with optional prefix + for older (default) and - for newer than
specified age.''')
parser.add_argument(
'-n',
'--dryrun',
action='store_true',
help='''List files or directories to be removed, without actually
removing them.''')
parser.add_argument(
'-y',
'--yes',
action='store_true',
dest='__confirm__',
help='''Remove files without confirmation, suitable for batch removal
of files.''')
parser.add_argument(
'-v',
'--verbosity',
type=int,
choices=range(5),
default=2,
help='''Output error (0), warning (1), info (2) and debug (3)
information to standard output (default to 2). More debug information could be
generated by setting environmental variable SOS_DEBUG to comma separated topics
of GENERAL, WORKER, CONTROLLER, STEP, VARIABLE, EXECUTOR, TARGET, ZERONQ, TASK,
DAG, and ACTION, or ALL for all debug information''')
parser.set_defaults(func=cmd_remove)
return parser
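# A parse example for the remove command line defined above; -t/-u/-s/-z/-p are
# mutually exclusive, so only one of them may appear. The target path is a
# placeholder.
def _remove_args_demo():
    parser = get_remove_parser()
    # Roughly equivalent to: sos remove output/ -z --size +1G -n -y
    return parser.parse_args(['output/', '-z', '--size', '+1G', '-n', '-y'])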
class AnswerMachine:
def __init__(self, always_yes=False, confirmed=False):
self._always_yes = always_yes
self._confirmed = confirmed
def get(self, msg):
if self._always_yes:
return True
if self._confirmed:
print(msg)
return True
while True:
res = input('{} (y/n/a)? '.format(msg))
if res == 'a':
self._confirmed = True
return True
elif res == 'y':
return True
elif res == 'n':
return False
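# A tiny usage sketch for AnswerMachine; cmd_remove would typically build it from
# the --yes flag, and the prompt text is a placeholder.
def _answer_machine_demo(always_yes=False):
    answer = AnswerMachine(always_yes=always_yes)
    if answer.get('Remove placeholder.txt'):
        return True       # answering 'a' once auto-confirms all later prompts
    return False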
# def get_tracked_files(workflow_id):
# from .workflow_report import WorkflowSig
# sig = WorkflowSig(workflow_id)
# tracked_files = set([x['filename'] for x in sig.tracked_files()])
# placeholder_files = set(sig.placeholders())
# return set(), tracked_files, placeholder_files
def cmd_remove(args, unknown_args):
from .utils import env
from .targets import file_target
from .signatures import StepSignatures, WorkflowSignatures
env.verbosity = args.verbosity
workflow_signatures = WorkflowSignatures()
if args.placeholders:
placeholder_files = workflow_signatures.placeholders()
removed: int = 0
for ph in sorted(placeholder_files):
p = file_target(ph)
if not p.target_exists('any'):
continue
if p.size() == 0:
try:
if 'GENERAL' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('GENERAL',
f'Remove placeholder file {ph}')
p.unlink()
removed += 1
except Exception as e:
env.logger.debug(
f'Failed to remove placeholder file {ph}: {e}')
else:
env.logger.debug(
f'Keep placeholder {ph} because it is non-empty.')
if removed:
env.logger.info(
            f'{removed} placeholder file{"s are" if removed > 1 else " is"} removed'
)
else:
env.logger.info('No remaining placeholder file exists.')
return
sig_files = workflow_signatures.files()
if args.signature:
# a special case where all file and runtime signatures are removed.
# no other options are allowed.
if sig_files:
sig_ids = list(set([x[0] for x in sig_files]))
step_signatures = StepSignatures()
num_removed_steps = step_signatures.remove_many(sig_ids)
if not num_removed_steps:
env.logger.info(
                    'No signature is
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Function(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
The Amazon Resource Name (ARN) identifying your Lambda Function.
"""
dead_letter_config: pulumi.Output[dict]
"""
Nested block to configure the function's *dead letter queue*. See details below.
* `target_arn` (`str`) - The ARN of an SNS topic or SQS queue to notify when an invocation fails. If this
option is used, the function's IAM role must be granted suitable access to write to the target object,
which means allowing either the `sns:Publish` or `sqs:SendMessage` action on this ARN, depending on
which service is targeted.
"""
description: pulumi.Output[str]
"""
Description of what your Lambda Function does.
"""
environment: pulumi.Output[dict]
"""
The Lambda environment's configuration settings. Fields documented below.
* `variables` (`dict`) - A map that defines environment variables for the Lambda function.
"""
code: pulumi.Output[pulumi.Archive]
"""
    The path to the function's deployment package within the local filesystem. If defined, the `s3_`-prefixed options cannot be used.
"""
name: pulumi.Output[str]
"""
A unique name for your Lambda Function.
"""
handler: pulumi.Output[str]
"""
The function [entrypoint][3] in your code.
"""
invoke_arn: pulumi.Output[str]
"""
The ARN to be used for invoking Lambda Function from API Gateway - to be used in [`apigateway.Integration`](https://www.terraform.io/docs/providers/aws/r/api_gateway_integration.html)'s `uri`
"""
kms_key_arn: pulumi.Output[str]
"""
The ARN for the KMS encryption key.
"""
last_modified: pulumi.Output[str]
"""
The date this resource was last modified.
"""
layers: pulumi.Output[list]
"""
List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. See [Lambda Layers][10]
"""
memory_size: pulumi.Output[float]
"""
Amount of memory in MB your Lambda Function can use at runtime. Defaults to `128`. See [Limits][5]
"""
publish: pulumi.Output[bool]
"""
Whether to publish creation/change as new Lambda Function Version. Defaults to `false`.
"""
qualified_arn: pulumi.Output[str]
"""
The Amazon Resource Name (ARN) identifying your Lambda Function Version
(if versioning is enabled via `publish = true`).
"""
reserved_concurrent_executions: pulumi.Output[float]
"""
The amount of reserved concurrent executions for this lambda function. A value of `0` disables lambda from being triggered and `-1` removes any concurrency limitations. Defaults to Unreserved Concurrency Limits `-1`. See [Managing Concurrency][9]
"""
role: pulumi.Output[str]
"""
IAM role attached to the Lambda Function. This governs both who / what can invoke your Lambda Function, as well as what resources our Lambda Function has access to. See [Lambda Permission Model][4] for more details.
"""
runtime: pulumi.Output[str]
"""
See [Runtimes][6] for valid values.
"""
s3_bucket: pulumi.Output[str]
"""
The S3 bucket location containing the function's deployment package. Conflicts with `filename`. This bucket must reside in the same AWS region where you are creating the Lambda function.
"""
s3_key: pulumi.Output[str]
"""
The S3 key of an object containing the function's deployment package. Conflicts with `filename`.
"""
s3_object_version: pulumi.Output[str]
"""
The object version containing the function's deployment package. Conflicts with `filename`.
"""
source_code_hash: pulumi.Output[str]
"""
Used to trigger updates. Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3_key`. The usual way to set this is `filebase64sha256("file.zip")` (this provider 0.11.12 and later) or `base64sha256(file("file.zip"))` (this provider 0.11.11 and earlier), where "file.zip" is the local filename of the lambda function source archive.
"""
source_code_size: pulumi.Output[float]
"""
The size in bytes of the function .zip file.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the object.
"""
timeout: pulumi.Output[float]
"""
The amount of time your Lambda Function has to run in seconds. Defaults to `3`. See [Limits][5]
"""
tracing_config: pulumi.Output[dict]
version: pulumi.Output[str]
"""
Latest published version of your Lambda Function.
"""
vpc_config: pulumi.Output[dict]
"""
Provide this to allow your function to access your VPC. Fields documented below. See [Lambda in VPC][7]
* `security_group_ids` (`list`) - A list of security group IDs associated with the Lambda function.
* `subnet_ids` (`list`) - A list of subnet IDs associated with the Lambda function.
* `vpc_id` (`str`)
"""
def __init__(__self__, resource_name, opts=None, dead_letter_config=None, description=None, environment=None, code=None, name=None, handler=None, kms_key_arn=None, layers=None, memory_size=None, publish=None, reserved_concurrent_executions=None, role=None, runtime=None, s3_bucket=None, s3_key=None, s3_object_version=None, source_code_hash=None, tags=None, timeout=None, tracing_config=None, vpc_config=None, __props__=None, __name__=None, __opts__=None):
"""
Create a Function resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] dead_letter_config: Nested block to configure the function's *dead letter queue*. See details below.
:param pulumi.Input[str] description: Description of what your Lambda Function does.
:param pulumi.Input[dict] environment: The Lambda environment's configuration settings. Fields documented below.
        :param pulumi.Input[pulumi.Archive] code: The path to the function's deployment package within the local filesystem. If defined, the `s3_`-prefixed options cannot be used.
:param pulumi.Input[str] name: A unique name for your Lambda Function.
:param pulumi.Input[str] handler: The function [entrypoint][3] in your code.
:param pulumi.Input[str] kms_key_arn: The ARN for the KMS encryption key.
:param pulumi.Input[list] layers: List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. See [Lambda Layers][10]
:param pulumi.Input[float] memory_size: Amount of memory in MB your Lambda Function can use at runtime. Defaults to `128`. See [Limits][5]
:param pulumi.Input[bool] publish: Whether to publish creation/change as new Lambda Function Version. Defaults to `false`.
:param pulumi.Input[float] reserved_concurrent_executions: The amount of reserved concurrent executions for this lambda function. A value of `0` disables lambda from being triggered and `-1` removes any concurrency limitations. Defaults to Unreserved Concurrency Limits `-1`. See [Managing Concurrency][9]
:param pulumi.Input[str] role: IAM role attached to the Lambda Function. This governs both who / what can invoke your Lambda Function, as well as what resources our Lambda Function has access to. See [Lambda Permission Model][4] for more details.
:param pulumi.Input[str] runtime: See [Runtimes][6] for valid values.
:param pulumi.Input[str] s3_bucket: The S3 bucket location containing the function's deployment package. Conflicts with `filename`. This bucket must reside in the same AWS region where you are creating the Lambda function.
:param pulumi.Input[str] s3_key: The S3 key of an object containing the function's deployment package. Conflicts with `filename`.
:param pulumi.Input[str] s3_object_version: The object version containing the function's deployment package. Conflicts with `filename`.
:param pulumi.Input[str] source_code_hash: Used to trigger updates. Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3_key`. The usual way to set this is `filebase64sha256("file.zip")` (this provider 0.11.12 and later) or `base64sha256(file("file.zip"))` (this provider 0.11.11 and earlier), where "file.zip" is the local filename of the lambda function source archive.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the object.
:param pulumi.Input[float] timeout: The amount of time your Lambda Function has to run in seconds. Defaults to `3`. See [Limits][5]
:param pulumi.Input[dict] vpc_config: Provide this to allow your function to access your VPC. Fields documented below. See [Lambda in VPC][7]
The **dead_letter_config** object supports the following:
* `target_arn` (`pulumi.Input[str]`) - The ARN of an SNS topic or SQS queue to notify when an invocation fails. If this
option is used, the function's IAM role must be granted suitable access to write to the target object,
which means allowing either the `sns:Publish` or `sqs:SendMessage` action on this ARN, depending on
which service is targeted.
The **environment** object supports the following:
* `variables` (`pulumi.Input[dict]`) - A map that defines environment variables for the Lambda function.
The **tracing_config** object supports the following:
* `mode` (`pulumi.Input[str]`) - Can be either `PassThrough` or `Active`. If PassThrough, Lambda will only trace
the request from an upstream service if it contains a tracing header with
"sampled=1". If Active, Lambda will respect any tracing header it receives
from an upstream service. If no tracing header is received, Lambda will call
X-Ray for a tracing decision.
The **vpc_config** object supports the following:
        * `security_group_ids` (`pulumi.Input[list]`) - A list of security group IDs associated with the Lambda function.
        * `subnet_ids` (`pulumi.Input[list]`) - A list of subnet IDs associated with the Lambda function.
        * `vpc_id` (`pulumi.Input[str]`)
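# A hedged construction sketch for the Function resource documented above. The
# role ARN, handler and archive path are placeholders; a real program would pass
# the ARN of an aws.iam.Role and a runtime/handler matching the packaged code.
def _function_resource_demo():
    return Function(
        "example-fn",
        code=pulumi.FileArchive("./app.zip"),                 # local deployment package
        handler="index.handler",                              # module.function entrypoint
        runtime="python3.8",
        role="arn:aws:iam::123456789012:role/lambda-role",    # placeholder role ARN
        memory_size=128,
        timeout=10,
        environment={"variables": {"STAGE": "dev"}},
        tags={"Project": "demo"},
    )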
stage.
input_coords (str, optional): The box coordinate format that the model outputs. Can be either 'centroids'
for the format `(cx, cy, w, h)` (box center coordinates, width, and height), 'minmax' for the format
`(xmin, xmax, ymin, ymax)`, or 'corners' for the format `(xmin, ymin, xmax, ymax)`.
normalize_coords (bool, optional): Set to `True` if the model outputs relative coordinates (i.e. coordinates in [0,1])
and you wish to transform these relative coordinates back to absolute coordinates. If the model outputs
relative coordinates, but you do not want to convert them back to absolute coordinates, set this to `False`.
Do not set this to `True` if the model already outputs absolute coordinates, as that would result in incorrect
coordinates. Requires `img_height` and `img_width` if set to `True`.
img_height (int, optional): The height of the input images. Only needed if `normalize_coords` is `True`.
img_width (int, optional): The width of the input images. Only needed if `normalize_coords` is `True`.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
            to the boxes, but not the other.
Returns:
A python list of length `batch_size` where each list element represents the predicted boxes
for one image and contains a Numpy array of shape `(boxes, 7)` where each row is a box prediction for
a non-background class for the respective image in the format `[box_id, class_id, confidence, xmin, ymin, xmax, ymax]`.
'''
if normalize_coords and ((img_height is None) or (img_width is None)):
raise ValueError("If relative box coordinates are supposed to be converted to absolute coordinates, the decoder needs the image size in order to decode the predictions, but `img_height == {}` and `img_width == {}`".format(img_height, img_width))
# 1: Convert the box coordinates from the predicted anchor box offsets to predicted absolute coordinates
y_pred_decoded_raw = np.copy(y_pred[:,:,:-8]) # Slice out the classes and the four offsets, throw away the anchor coordinates and variances, resulting in a tensor of shape `[batch, n_boxes, n_classes + 4 coordinates]`
if input_coords == 'centroids':
if variance_encoded_in_target:
# Decode the predicted box center x and y coordinates.
y_pred_decoded_raw[:,:,[-4,-3]] = y_pred_decoded_raw[:,:,[-4,-3]] * y_pred[:,:,[-6,-5]] + y_pred[:,:,[-8,-7]]
            # Decode the predicted box width and height.
y_pred_decoded_raw[:,:,[-2,-1]] = np.exp(y_pred_decoded_raw[:,:,[-2,-1]]) * y_pred[:,:,[-6,-5]]
else:
# Decode the predicted box center x and y coordinates.
y_pred_decoded_raw[:,:,[-4,-3]] = y_pred_decoded_raw[:,:,[-4,-3]] * y_pred[:,:,[-6,-5]] * y_pred[:,:,[-4,-3]] + y_pred[:,:,[-8,-7]]
            # Decode the predicted box width and height.
y_pred_decoded_raw[:,:,[-2,-1]] = np.exp(y_pred_decoded_raw[:,:,[-2,-1]] * y_pred[:,:,[-2,-1]]) * y_pred[:,:,[-6,-5]]
y_pred_decoded_raw = convert_coordinates(y_pred_decoded_raw, start_index=-4, conversion='centroids2corners')
elif input_coords == 'minmax':
y_pred_decoded_raw[:,:,-4:] *= y_pred[:,:,-4:] # delta(pred) / size(anchor) / variance * variance == delta(pred) / size(anchor) for all four coordinates, where 'size' refers to w or h, respectively
y_pred_decoded_raw[:,:,[-4,-3]] *= np.expand_dims(y_pred[:,:,-7] - y_pred[:,:,-8], axis=-1) # delta_xmin(pred) / w(anchor) * w(anchor) == delta_xmin(pred), delta_xmax(pred) / w(anchor) * w(anchor) == delta_xmax(pred)
y_pred_decoded_raw[:,:,[-2,-1]] *= np.expand_dims(y_pred[:,:,-5] - y_pred[:,:,-6], axis=-1) # delta_ymin(pred) / h(anchor) * h(anchor) == delta_ymin(pred), delta_ymax(pred) / h(anchor) * h(anchor) == delta_ymax(pred)
y_pred_decoded_raw[:,:,-4:] += y_pred[:,:,-8:-4] # delta(pred) + anchor == pred for all four coordinates
y_pred_decoded_raw = convert_coordinates(y_pred_decoded_raw, start_index=-4, conversion='minmax2corners')
elif input_coords == 'corners':
y_pred_decoded_raw[:,:,-4:] *= y_pred[:,:,-4:] # delta(pred) / size(anchor) / variance * variance == delta(pred) / size(anchor) for all four coordinates, where 'size' refers to w or h, respectively
y_pred_decoded_raw[:,:,[-4,-2]] *= np.expand_dims(y_pred[:,:,-6] - y_pred[:,:,-8], axis=-1) # delta_xmin(pred) / w(anchor) * w(anchor) == delta_xmin(pred), delta_xmax(pred) / w(anchor) * w(anchor) == delta_xmax(pred)
y_pred_decoded_raw[:,:,[-3,-1]] *= np.expand_dims(y_pred[:,:,-5] - y_pred[:,:,-7], axis=-1) # delta_ymin(pred) / h(anchor) * h(anchor) == delta_ymin(pred), delta_ymax(pred) / h(anchor) * h(anchor) == delta_ymax(pred)
y_pred_decoded_raw[:,:,-4:] += y_pred[:,:,-8:-4] # delta(pred) + anchor == pred for all four coordinates
else:
raise ValueError("Unexpected value for `input_coords`. Supported input coordinate formats are 'minmax', 'corners' and 'centroids'.")
# 2: If the model predicts normalized box coordinates and they are supposed to be converted back to absolute coordinates, do that
if normalize_coords:
y_pred_decoded_raw[:,:,[-4,-2]] *= img_width # Convert xmin, xmax back to absolute coordinates
y_pred_decoded_raw[:,:,[-3,-1]] *= img_height # Convert ymin, ymax back to absolute coordinates
# 3: For each batch item, prepend each box's internal index to its coordinates.
y_pred_decoded_raw2 = np.zeros((y_pred_decoded_raw.shape[0], y_pred_decoded_raw.shape[1], y_pred_decoded_raw.shape[2] + 1)) # Expand the last axis by one.
y_pred_decoded_raw2[:,:,1:] = y_pred_decoded_raw
y_pred_decoded_raw2[:,:,0] = np.arange(y_pred_decoded_raw.shape[1]) # Put the box indices as the first element for each box via broadcasting.
y_pred_decoded_raw = y_pred_decoded_raw2
# 4: Apply confidence thresholding and non-maximum suppression per class
n_classes = y_pred_decoded_raw.shape[-1] - 5 # The number of classes is the length of the last axis minus the four box coordinates and minus the index
y_pred_decoded = [] # Store the final predictions in this list
for batch_item in y_pred_decoded_raw: # `batch_item` has shape `[n_boxes, n_classes + 4 coords]`
pred = [] # Store the final predictions for this batch item here
for class_id in range(1, n_classes): # For each class except the background class (which has class ID 0)...
single_class = batch_item[:,[0, class_id + 1, -4, -3, -2, -1]] # ...keep only the confidences for that class, making this an array of shape `[n_boxes, 6]` and...
threshold_met = single_class[single_class[:,1] > confidence_thresh] # ...keep only those boxes with a confidence above the set threshold.
if threshold_met.shape[0] > 0: # If any boxes made the threshold...
maxima = _greedy_nms_debug(threshold_met, iou_threshold=iou_threshold, coords='corners', border_pixels=border_pixels) # ...perform NMS on them.
                maxima_output = np.zeros((maxima.shape[0], maxima.shape[1] + 1)) # Expand the last dimension by one element to have room for the class ID. This is now an array of shape `[n_boxes, 7]`
maxima_output[:,0] = maxima[:,0] # Write the box index to the first column...
maxima_output[:,1] = class_id # ...and write the class ID to the second column...
maxima_output[:,2:] = maxima[:,1:] # ...and write the rest of the maxima data to the other columns...
pred.append(maxima_output) # ...and append the maxima for this class to the list of maxima for this batch item.
# Once we're through with all classes, keep only the `top_k` maxima with the highest scores
pred = np.concatenate(pred, axis=0)
if pred.shape[0] > top_k: # If we have more than `top_k` results left at this point, otherwise there is nothing to filter,...
top_k_indices = np.argpartition(pred[:,2], kth=pred.shape[0]-top_k, axis=0)[pred.shape[0]-top_k:] # ...get the indices of the `top_k` highest-score maxima...
pred = pred[top_k_indices] # ...and keep only those entries of `pred`...
y_pred_decoded.append(pred) # ...and now that we're done, append the array of final predictions for this batch item to the output list
return y_pred_decoded
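# Illustrative sketch (not part of the original decoder): a small, hypothetical helper that
# only labels the columns of one decoded prediction row as produced above, i.e.
# `[internal box index, class id, confidence, xmin, ymin, xmax, ymax]`. The dictionary keys
# are descriptive names chosen for this example, not part of the original API.
def _example_unpack_decoded_row(row):
    '''
    Hypothetical usage sketch: turn one row of `decode_detections_debug()` output into a dict.
    '''
    box_index, class_id, confidence, xmin, ymin, xmax, ymax = row
    return {'box_index': int(box_index),
            'class_id': int(class_id),
            'confidence': confidence,
            'box_corners': (xmin, ymin, xmax, ymax)}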
def _greedy_nms_debug(predictions, iou_threshold=0.45, coords='corners', border_pixels='half'):
'''
The same greedy non-maximum suppression algorithm as above, but slightly modified for use as an internal
function for per-class NMS in `decode_detections_debug()`. The difference is that it keeps the indices of all
left-over boxes for each batch item, which allows you to know which predictor layer predicted a given output
box and is thus useful for debugging.
'''
boxes_left = np.copy(predictions)
maxima = [] # This is where we store the boxes that make it through the non-maximum suppression
while boxes_left.shape[0] > 0: # While there are still boxes left to compare...
maximum_index = np.argmax(boxes_left[:,1]) # ...get the index of the next box with the highest confidence...
maximum_box = np.copy(boxes_left[maximum_index]) # ...copy that box and...
maxima.append(maximum_box) # ...append it to `maxima` because we'll definitely keep it
boxes_left = np.delete(boxes_left, maximum_index, axis=0) # Now remove the maximum box from `boxes_left`
if boxes_left.shape[0] == 0: break # If there are no boxes left after this step, break. Otherwise...
similarities = iou(boxes_left[:,2:], maximum_box[2:], coords=coords, mode='element-wise', border_pixels=border_pixels) # ...compare (IoU) the other left over boxes to the maximum box...
boxes_left = boxes_left[similarities <= iou_threshold] # ...so that we can remove the ones that overlap too much with the maximum box
return np.array(maxima)
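# Illustrative sketch (not part of the original decoder): a minimal, hypothetical example of
# how `_greedy_nms_debug()` keeps the internal box index in column 0, which is what lets you
# trace a surviving detection back to the predictor layer that produced it. The box values
# below are made up for demonstration only; `np` is the module-level numpy import.
def _example_greedy_nms_debug_usage():
    '''
    Hypothetical usage sketch for `_greedy_nms_debug()`.
    Each row is `[box_index, confidence, xmin, ymin, xmax, ymax]`.
    '''
    boxes = np.array([[0, 0.90,  10,  10,  50,  50],    # kept (highest confidence)
                      [1, 0.80,  12,  11,  52,  49],    # suppressed (high IoU with box 0)
                      [2, 0.70, 200, 200, 250, 260]])   # kept (no overlap with box 0)
    maxima = _greedy_nms_debug(boxes, iou_threshold=0.45, coords='corners')
    # `maxima[:, 0]` still holds the original box indices (e.g. [0., 2.]), so each
    # surviving detection can be mapped back to the anchor box that produced it.
    return maxima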
def get_num_boxes_per_pred_layer(predictor_sizes, aspect_ratios, two_boxes_for_ar1):
'''
Returns a list of the number of boxes that each predictor layer predicts.
`aspect_ratios`
<filename>Producten/management/commands/verwerk_opdrachten.py
# -*- coding: utf-8 -*-
# Copyright (c) 2020-2021 <NAME>.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
# update the intermediate standings for sub-competitions that have not been closed
# as soon as there are new ScoreHist records
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.models import F
from Account.models import Account
from Mailer.models import Inbox, mailer_email_is_valide, mailer_queue_email
from Overig.background_sync import BackgroundSync
from Producten.models import (Product, Opdracht, Levering, BerichtTemplate,
get_path_to_product_bestand)
import django.db.utils
import datetime
import logging
import json
import os
my_logger = logging.getLogger('DOF.Opdrachten')
class Command(BaseCommand):
help = "Verwerk opdrachten (achtergrondtaak)"
def __init__(self, stdout=None, stderr=None, no_color=False, force_color=False):
super().__init__(stdout, stderr, no_color, force_color)
self.stop_at = datetime.datetime.now()
self._verbose = False
self._sync = BackgroundSync(settings.BACKGROUND_SYNC__VERWERK_OPDRACHTEN)
self._count_ping = 0
def add_arguments(self, parser):
parser.add_argument('duration', type=int,
choices={1, 2, 5, 7, 10, 15, 20, 30, 45, 60},
help="Aantal minuten actief blijven")
parser.add_argument('--quick', action='store_true') # for testing
def _maak_opdracht(self, inbox, items, order, template_taal):
try:
email = items['E-mail']
naam = items['Naam']
except KeyError:
if self._verbose:
self.stderr.write('[ERROR] Inbox pk=%s heeft niet alle benodigde items' % inbox.pk)
self.stderr.write('Dit zijn de items:')
for key, value in items.items():
self.stderr.write(' %s / %s' % (key, value))
my_logger.error('Inbox pk=%s heeft niet alle benodigde items' % inbox.pk)
return False # failure
email = email.strip()
if not mailer_email_is_valide(email):
self.stderr.write('[ERROR] Inbox pk=%s heeft geen valide e-mail: %s' % (inbox.pk, repr(email)))
my_logger.error('Inbox pk=%s heeft geen valide e-mail: %s' % (inbox.pk, repr(email)))
return False # failure
try:
opdracht = Opdracht.objects.get(bron=inbox)
except Opdracht.DoesNotExist:
# create a new order
opdracht = Opdracht()
opdracht.bron = inbox
else:
# reuse the order (prevents duplicates)
opdracht.producten.clear()
opdracht.eigenaar = Account.objects.get(username=settings.DEFAULT_EIGENAAR)
opdracht.to_email = email
opdracht.to_naam = naam
opdracht.regels = "\n".join([regel for _, regel in order])
opdracht.regels += '\n\nGekozen taal voor de levering: %s' % template_taal
opdracht.save()
opdracht.is_vrijgegeven_voor_levering = True
# find matching products
papieren_product_gevonden = False
prod_links = list()
for taal, regel in order:
# print('taal: %s, regel: %s' % (taal, repr(regel)))
for prod in (Product
.objects
.filter(eigenaar=opdracht.eigenaar,
taal=taal)):
if prod.is_match(regel):
# match!
if prod.papieren_product:
# cannot be delivered digitally
opdracht.producten.add(prod)
papieren_product_gevonden = True
break # from the for
else:
# check that the file exists, otherwise do not deliver
fpath, _ = get_path_to_product_bestand(prod)
if os.path.exists(fpath):
opdracht.producten.add(prod)
# create (or reuse) the delivery
try:
levering = Levering.objects.get(opdracht=opdracht,
product=prod)
except Levering.DoesNotExist:
levering = Levering(opdracht=opdracht,
product=prod,
eigenaar=opdracht.eigenaar,
to_email=email)
levering.maak_url_code()
levering.download_count = settings.DOWNLOAD_CREDITS
levering.save()
url = settings.SITE_URL + '/code/%s/' % levering.url_code
link = '%s: %s' % (prod.korte_beschrijving, url)
if link not in prod_links:
prod_links.append(link)
else:
self.stderr.write('[ERROR] Kan bestand %s niet vinden' % repr(fpath))
if prod.handmatig_vrijgeven:
opdracht.is_vrijgegeven_voor_levering = False
# for
# for
if len(prod_links) == 0:
opdracht.is_vrijgegeven_voor_levering = False
# too often.. my_logger.warning('Opdracht pk=%s niet kunnen koppelen aan een product' % opdracht.pk)
if papieren_product_gevonden:
# we had a match on a paper product
# no further matches, so this one can be closed completely
opdracht.is_papieren_levering = True
opdracht.is_afgehandeld = True
opdracht.save()
return True # success
# could not match any products
opdracht.save()
return False # failure
try:
template = (BerichtTemplate
.objects
.get(eigenaar=opdracht.eigenaar,
taal=template_taal))
except BerichtTemplate.DoesNotExist:
# no template available
opdracht.is_vrijgegeven_voor_levering = False
my_logger.error('Geen template voor taal %s en eigenaar %s' % (
repr(template_taal),
opdracht.eigenaar.get_first_name()))
opdracht.save()
return False
if len(prod_links) > 1:
msg = template.plural
else:
msg = template.singular
msg = msg.replace('%NAME%', opdracht.to_naam)
msg = msg.replace('%LINKS%', "\n".join(prod_links))
opdracht.mail_body = msg
opdracht.subject = template.subject
opdracht.save()
# if released automatically, send the e-mail right away
if opdracht.is_vrijgegeven_voor_levering:
mailer_queue_email(opdracht.to_email,
opdracht.subject,
opdracht.mail_body)
# success
return True
def _verwerk_mail_body(self, inbox, body):
# the body contains a free-text field that we do not want to match on by accident
# nothing useful follows it, so truncate there
pos = body.find('Eventuele opmerkingen')
if pos > 0:
body = body[:pos + 21]
pos = body.find('een nieuwe bestelling met ordernummer')
if pos < 0:
my_logger.info('Inbox pk=%s is geen bestelling' % inbox.pk)
return True # no need to look at this again
# remove garbage
body = body.replace('\xa0', ' ')
for field in ('Naam', 'E-mail', 'Telefoon', 'Straat', 'Postcode', 'Plaats', 'Land'):
body = body.replace(' %s: ' % field, '\n%s:\n' % field)
body = body.replace(' %s:' % field, '\n%s:' % field)
# for
# in which language should we send the e-mail?
if "Zwischensumme" in body and "Insgesamt" in body:
template_taal = 'DU'
elif "Totaal" in body and "Subtotaal" in body:
template_taal = 'NL'
else:
template_taal = 'EN'
# the body consists of lines of text with 'wrong' newlines
# split it up and drop these newlines
lines = body.splitlines()
# delete empty lines
lines = [line for line in lines if len(line) > 0]
# the body contains key-value pairs on separate lines
# the keys end with a colon
items = dict()
line_nr = 0
while line_nr < len(lines):
line = lines[line_nr]
if line[-1] == ':' and line_nr+1 < len(lines):
key = line[:-1] # remove the colon
value = lines[line_nr + 1]
line_nr += 2
if key in items:
my_logger.warning('Inbox pk=%s geeft onverwacht een dupe item (key=%s)' % (inbox.pk, repr(key)))
else:
items[key] = value
else:
line_nr += 1
# while
# an order line is sometimes split over two lines
order = list()
# therefore join everything back together and look for the products
# each product ends with a ":"<space>language<space>
body = " ".join(lines)
for taal_code, taal_label in (('NL', 'Sprache: Nederlands '),
('DU', 'Sprache: Deutsch '),
('EN', 'Sprache: English '),
('NL', 'Taal E-book: Nederlands '),
('DU', 'Taal E-book: Deutsch '),
('EN', 'Taal E-book: English '),
('NL', 'Language: Nederlands '),
('DU', 'Language: Deutsch '),
('EN', 'Language: English '),
):
start = 0
pos = body.find(taal_label, start)
while pos >= 0:
# now find the start of the line: <space>x<space>
sub = body[start:pos + len(taal_label)]
pos2 = sub.rfind(' x ')
if pos2 >= 0:
if pos2 > 3:
pos2 -= 3 # also include the quantity
regel = sub[pos2:]
tup = (taal_code, regel)
order.append(tup)
start = pos + 1
pos = body.find(taal_label, start)
# while
# for
# special case: when no language is specified in the English-language webshop
if len(order) == 0:
taal_code = 'EN'
taal_label = 'Subtotal (incl. VAT)'
start = 0
pos = body.find(taal_label, start)
while pos >= 0:
# now find the start of the line: <space>x<space>
sub = body[start:pos + len(taal_label)]
pos2 = sub.rfind(' x ')
if pos2 >= 0:
if pos2 > 3:
pos2 -= 3 # also include the quantity
regel = sub[pos2:]
tup = (taal_code, regel)
order.append(tup)
start = pos + 1
pos = body.find(taal_label, start)
# while
return self._maak_opdracht(inbox, items, order, template_taal)
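# Illustrative sketch (not part of the original command): a minimal, self-contained
# demonstration of the key/value convention parsed above, where a line ending in ':' is
# treated as a key and the next line as its value. The sample lines are made up for this
# example; real e-mails come from the webshop.
@staticmethod
def _example_parse_items():
    '''Hypothetical usage sketch of the key/value line parsing.'''
    lines = ['Naam:', 'J. Jansen', 'E-mail:', 'jan@example.com', 'Land:', 'Nederland']
    items = dict()
    line_nr = 0
    while line_nr < len(lines):
        line = lines[line_nr]
        if line[-1] == ':' and line_nr + 1 < len(lines):
            items[line[:-1]] = lines[line_nr + 1]   # key without the colon, value from the next line
            line_nr += 2
        else:
            line_nr += 1
    return items  # {'Naam': 'J. Jansen', 'E-mail': 'jan@example.com', 'Land': 'Nederland'}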
def _verwerk_mail_html(self, inbox, html):
html = html.replace('\r\n', ' ')
html = html.replace('\xa0', ' ')
# start at a keyword in a heading
pos1 = html.find('aantal')
if pos1 < 0:
# could not find the important info
return False
# take everything up to the end of a table body
pos2 = html.find('</tbody>', pos1)
body = html[pos1:pos2]
# walk through the selected part of the body
lines = list()
while len(body):
if body[0] != '<':
tag_start = body.find('<')
if tag_start < 0:
body = ''
else:
data = body[:tag_start].strip()
if data:
# print('data: %s' % repr(data))
lines.append(data)
body = body[tag_start:]
continue
tag_end = body.find('>')
# tag = body[:tag_end+1]
body = body[tag_end + 1:]
# while
# print('lines: %s' % repr(lines))
# find the key-value pairs
items = dict()
eerste_nr = len(lines)
for nr in range(len(lines) - 1):
line = lines[nr]
if line[-1] == ':':
eerste_nr = min(eerste_nr, nr)
key = line[:-1] # remove the colon
value = lines[nr + 1]
if key in items:
my_logger.warning('Inbox pk=%s geeft onverwacht een dupe item (key=%s)' % (inbox.pk, repr(key)))
else:
items[key] = value
# for
# print('items: %s' % repr(items))
# cut the item lines away from the lines that contain the products
# print('eerste_nr: %s' % eerste_nr)
if eerste_nr < 1:
return False
lines = lines[:eerste_nr - 1]
# print('lines: %s' % repr(lines))
prods = list()
prod = None
for nr in range(len(lines)):
if lines[nr] in ('1 x', '2 x', '3 x', '4 x'):
# start of a new product
prod = list()
prods.append(prod)
if | |
modulestore().get_item(
BlockUsageLocator(course_locator, 'chapter', block_id='chapter3')
)
assert ch3.has_children_at_depth(0)
assert not ch3.has_children_at_depth(1)
@ddt.ddt
class SplitModuleCourseTests(SplitModuleTest):
'''
Course CRUD operation tests
'''
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_courses(self, _from_json):
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT)
# should have gotten 3 draft courses
assert len(courses) == 3, 'Wrong number of courses'
# check metadata -- NOTE no promised order
course = self.findByIdInResult(courses, "head12345")
assert course.location.org == 'testx'
assert course.category == 'course', 'wrong category'
assert len(course.tabs) == 6, 'wrong number of tabs'
assert course.display_name == 'The Ancient Greek Hero', 'wrong display name'
assert course.advertised_start == 'Fall 2013', 'advertised_start'
assert len(course.children) == 4, 'children'
# check dates and graders--forces loading of descriptor
assert course.edited_by == TEST_ASSISTANT_USER_ID
self.assertDictEqual(course.grade_cutoffs, {"Pass": 0.45})
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_courses_with_same_course_index(self, _from_json):
"""
Test that if two courses point to same course index,
`get_courses` should return both courses.
"""
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT)
# Should have gotten 3 draft courses.
assert len(courses) == 3
course_index = modulestore().get_course_index_info(courses[0].id)
# Creating a new course with same course index of another course.
new_draft_course = modulestore().create_course(
'testX', 'rerun_2.0', 'run_q2', 1, BRANCH_NAME_DRAFT, versions_dict=course_index['versions']
)
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT)
# Should have gotten 4 draft courses.
assert len(courses) == 4
assert new_draft_course.id.version_agnostic() in [c.id for c in courses]
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_org_courses(self, _from_json):
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='guestx')
# should have gotten 1 draft courses
assert len(courses) == 1
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='testx')
# should have gotten 2 draft courses
assert len(courses) == 2
# although this is already covered in other tests, let's
# also not pass in org= parameter to make sure we get back
# 3 courses
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT)
assert len(courses) == 3
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_branch_requests(self, _from_json):
# query w/ branch qualifier (both draft and published)
def _verify_published_course(courses_published):
""" Helper function for verifying published course. """
assert len(courses_published) == 1, len(courses_published)
course = self.findByIdInResult(courses_published, "head23456")
assert course is not None, 'published courses'
assert course.location.course_key.org == 'testx'
assert course.location.course_key.course == 'wonderful'
assert course.category == 'course', 'wrong category'
assert len(course.tabs) == 4, 'wrong number of tabs'
assert course.display_name == 'The most wonderful course', course.display_name
assert course.advertised_start is None
assert len(course.children) == 0, 'children'
_verify_published_course(modulestore().get_courses(branch=BRANCH_NAME_PUBLISHED))
def test_has_course(self):
'''
Test the various calling forms for has_course
'''
check_has_course_method(
modulestore(),
CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_DRAFT),
locator_key_fields=['org', 'course', 'run']
)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_course(self, _from_json):
'''
Test the various calling forms for get_course
'''
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
head_course = modulestore().get_course(locator)
assert head_course.location.version_guid != head_course.previous_version
locator = CourseLocator(version_guid=head_course.previous_version)
course = modulestore().get_course(locator)
assert course.location.course_key.org is None
assert course.location.version_guid == head_course.previous_version
assert course.category == 'course'
assert len(course.tabs) == 6
assert course.display_name == 'The Ancient Greek Hero'
assert course.graceperiod == datetime.timedelta(hours=2)
assert course.advertised_start is None
assert len(course.children) == 0
assert course.definition_locator.definition_id != head_course.definition_locator.definition_id
# check dates and graders--forces loading of descriptor
assert course.edited_by == TEST_ASSISTANT_USER_ID
self.assertDictEqual(course.grade_cutoffs, {"Pass": 0.55})
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(locator)
assert course.location.course_key.org == 'testx'
assert course.location.course_key.course == 'GreekHero'
assert course.location.course_key.run == 'run'
assert course.category == 'course'
assert len(course.tabs) == 6
assert course.display_name == 'The Ancient Greek Hero'
assert course.advertised_start == 'Fall 2013'
assert len(course.children) == 4
# check dates and graders--forces loading of descriptor
assert course.edited_by == TEST_ASSISTANT_USER_ID
self.assertDictEqual(course.grade_cutoffs, {"Pass": 0.45})
locator = CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_PUBLISHED)
course = modulestore().get_course(locator)
published_version = course.location.version_guid
locator = CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(locator)
assert course.location.version_guid != published_version
def test_get_course_negative(self):
# Now negative testing
with pytest.raises(InsufficientSpecificationError):
modulestore().get_course(CourseLocator(org='edu', course='meh', run='blah'))
with pytest.raises(ItemNotFoundError):
modulestore().get_course(CourseLocator(org='edu', course='nosuchthing', run="run", branch=BRANCH_NAME_DRAFT)) # lint-amnesty, pylint: disable=line-too-long
with pytest.raises(ItemNotFoundError):
modulestore().get_course(CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_PUBLISHED)) # lint-amnesty, pylint: disable=line-too-long
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_cache(self, _from_json):
"""
Test that the mechanics of caching work.
"""
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(locator)
block_map = modulestore().cache_items(
course.system, [BlockKey.from_usage_key(child) for child in course.children], course.id, depth=3
)
assert BlockKey('chapter', 'chapter1') in block_map
assert BlockKey('problem', 'problem3_2') in block_map
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_persist_dag(self, _from_json):
"""
try saving temporary xblocks
"""
test_course = modulestore().create_course(
course='course', run='2014', org='testx',
display_name='fun test course', user_id=TEST_OTHER_USER_ID,
master_branch=ModuleStoreEnum.BranchName.draft
)
test_chapter = modulestore().create_xblock(
test_course.system, test_course.id, 'chapter', fields={'display_name': 'chapter n'},
parent_xblock=test_course
)
assert test_chapter.display_name == 'chapter n'
test_def_content = '<problem>boo</problem>'
# create child
new_block = modulestore().create_xblock(
test_course.system, test_course.id,
'problem',
fields={
'data': test_def_content,
'display_name': 'problem'
},
parent_xblock=test_chapter
)
assert new_block.definition_locator is not None
assert isinstance(new_block.definition_locator.definition_id, LocalId)
# better to pass in persisted parent over the subdag so
# subdag gets the parent pointer (otherwise 2 ops, persist dag, update parent children,
# persist parent)
persisted_course = modulestore().persist_xblock_dag(test_course, TEST_OTHER_USER_ID)
assert len(persisted_course.children) == 1
persisted_chapter = persisted_course.get_children()[0]
assert persisted_chapter.category == 'chapter'
assert persisted_chapter.display_name == 'chapter n'
assert len(persisted_chapter.children) == 1
persisted_problem = persisted_chapter.get_children()[0]
assert persisted_problem.category == 'problem'
assert persisted_problem.data == test_def_content
# update it
persisted_problem.display_name = 'altered problem'
persisted_problem = modulestore().update_item(persisted_problem, TEST_OTHER_USER_ID)
assert persisted_problem.display_name == 'altered problem'
@ddt.data(
("course-v1:edx+test_course+test_run", BlockUsageLocator),
("ccx-v1:edX+test_course+test_run+ccx@1", CCXBlockUsageLocator),
)
@ddt.unpack
def test_make_course_usage_key(self, course_id, root_block_cls):
"""Test that we get back the appropriate usage key for the root of a course key.
In particular, we want to make sure that it properly handles CCX courses.
"""
course_key = CourseKey.from_string(course_id)
root_block_key = modulestore().make_course_usage_key(course_key)
assert isinstance(root_block_key, root_block_cls)
assert root_block_key.block_type == 'course'
assert root_block_key.block_id == 'course'
class TestCourseStructureCache(CacheIsolationMixin, SplitModuleTest):
"""Tests for the CourseStructureCache"""
# CacheIsolationMixin will reset the cache between test cases
# We'll use the "default" cache as a valid cache, and the "course_structure_cache" as a dummy cache
ENABLED_CACHES = ["default"]
def setUp(self):
# make a new course:
self.user = random.getrandbits(32)
self.new_course = modulestore().create_course(
'org', 'course', 'test_run', self.user, BRANCH_NAME_DRAFT,
)
super().setUp()
@patch('xmodule.modulestore.split_mongo.mongo_connection.get_cache')
def test_course_structure_cache(self, mock_get_cache):
# force get_cache to return the default cache so we can test
# its caching behavior
enabled_cache = caches['default']
mock_get_cache.return_value = enabled_cache
with check_mongo_calls(1):
not_cached_structure = self._get_structure(self.new_course)
# when cache is warmed, we should have one fewer mongo call
with check_mongo_calls(0):
cached_structure = self._get_structure(self.new_course)
# now make sure that you get the same structure
assert cached_structure == not_cached_structure
# If data is corrupted, get it from mongo again.
cache_key = self.new_course.id.version_guid
enabled_cache.set(cache_key, b"bad_data")
with check_mongo_calls(1):
not_corrupt_structure = self._get_structure(self.new_course)
# now make sure that you get the same structure
assert not_corrupt_structure == not_cached_structure
@patch('xmodule.modulestore.split_mongo.mongo_connection.get_cache')
def test_course_structure_cache_no_cache_configured(self, mock_get_cache):
mock_get_cache.side_effect = InvalidCacheBackendError
with check_mongo_calls(1):
not_cached_structure = self._get_structure(self.new_course)
# if the cache isn't configured, we expect to have to make
# another mongo call here if we want the same course structure
with check_mongo_calls(1):
cached_structure = self._get_structure(self.new_course)
# now make sure that you get the same structure
assert cached_structure == not_cached_structure
def test_dummy_cache(self):
with check_mongo_calls(1):
not_cached_structure = self._get_structure(self.new_course)
# Since the test is using the dummy cache, it's not actually caching
# anything
with check_mongo_calls(1):
cached_structure = self._get_structure(self.new_course)
# now make sure that you get the same structure
assert cached_structure == not_cached_structure
def _get_structure(self, course):
"""
Helper function to get a structure from a course.
"""
return modulestore().db_connection.get_structure(
course.location.as_object_id(course.location.version_guid)
)
class SplitModuleItemTests(SplitModuleTest):
'''
Item read tests including inheritance
'''
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_has_item(self, _from_json):
'''
has_item(BlockUsageLocator)
'''
org = 'testx'
course = 'GreekHero'
run = 'run'
course_locator = CourseLocator(org=org, course=course, run=run, branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(course_locator)
previous_version = course.previous_version
# positive tests of various forms
locator = course.location.map_into_course(CourseLocator(version_guid=previous_version))
assert modulestore().has_item(locator), ("couldn't find in %s" % previous_version)
locator = course.location.version_agnostic()
assert modulestore().has_item(locator)
assert not modulestore()\
.has_item(BlockUsageLocator(locator.course_key.for_branch(BRANCH_NAME_PUBLISHED),
block_type=locator.block_type,
block_id=locator.block_id)), 'found in published head'
# not a course obj
locator = BlockUsageLocator(course_locator, block_type='chapter', block_id='chapter1')
assert modulestore().has_item(locator), "couldn't find chapter1"
# in published course
locator = BlockUsageLocator(
CourseLocator(org="testx", course="wonderful", run="run", branch=BRANCH_NAME_DRAFT),
block_type="course",
block_id="head23456"
)
assert modulestore().has_item(locator.for_branch(BRANCH_NAME_PUBLISHED))
def test_negative_has_item(self):
# negative tests--not found
# no such course or block
locator = BlockUsageLocator(
CourseLocator(org="foo", course="doesnotexist", run="run", branch=BRANCH_NAME_DRAFT),
block_type="course",
block_id="head23456"
)
assert not modulestore().has_item(locator)
locator = BlockUsageLocator(
CourseLocator(org="testx", course="wonderful", run="run", branch=BRANCH_NAME_DRAFT),
block_type="vertical",
block_id="doesnotexist"
)
assert not modulestore().has_item(locator)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_item(self, _from_json):
'''
get_item(blocklocator)
'''
hero_locator = CourseLocator(org="testx", course="GreekHero", run="run", branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(hero_locator)
previous_version = course.previous_version
# positive tests of various forms
locator = course.location.map_into_course(CourseLocator(version_guid=previous_version))
block = modulestore().get_item(locator)
assert isinstance(block, CourseBlock)
assert isinstance(modulestore().get_item(locator), CourseBlock)
def verify_greek_hero(block):
"""
Check contents of block
"""
assert block.location.org == 'testx'
assert block.location.course == 'GreekHero'
assert block.location.run == 'run'
assert len(block.tabs) == 6, 'wrong number of tabs'
assert | |
to list resources of.
embed (list): ?
Returns:
dict: server_response
"""
api_id = AWS.APIGateway.GetId(apig,api_name)
return apig.client.get_resources(restApiId=api_id,embed=embed)['items']
def Create(apig,api_name: str,name: str,parent_name='/'):
"""Creates a resource for an API.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
api_name (str): Name of API to create resource for.
name (str): Name of the resource to create.
parent_name (str): Name of a parent resource to nest this as child.
Returns:
dict: response from server.
TODO:
Return the rest api id from the list so we don't have to call list twice (it's also called in Resource.List)
What about CORs?
"""
#get rest api id from name
api_id = AWS.APIGateway.GetId(apig,api_name)
#get resource list to get parent_id from parent_name
resource_list = AWS.APIGateway.Resource.List(apig,api_name)
#TODO: check if the resource exists already so we won't make duplicates?
resource_id = ''
for resource in resource_list:
if resource['path'] == (parent_name + name):
return resource #return existing resource
if parent_name != '/':
if resource['pathPart'] == parent_name:
resource_id = resource['id']
else:
if resource['path'] == parent_name:
resource_id = resource['id']
return apig.client.create_resource(restApiId=api_id,parentId=resource_id,pathPart=name)
def Delete(apig,api_name: str,resource_name: str):
"""Deletes a resource for an API.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
api_name (str): Name of specified API.
resource_name (str): Name of specified resource to delete.
Returns:
dict: response from server.
TODO:
Return the rest api id from the list so we don't have to call list twice (it's also called in Resource.List)
"""
#get rest api list to get api_id from name
api_id = AWS.APIGateway.GetId(apig,api_name)
resource_list = AWS.APIGateway.Resource.List(apig,api_name)
resource_id = ''
for resource in resource_list:
if resource['pathPart'] == resource_name:
resource_id = resource['id']
return apig.client.delete_resource(restApiId=api_id,resourceId=resource_id)
class Method:
"""Sub class for managing Methods for REST Api Resources.
"""
def Get(apig,api_name: str,resource_name: str,http_method: str,parent_name='/'):
"""Gets a Resource Method.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
api_name (str): Name of API.
resource_name (str): Name of the resource.
http_method (str): 'GET'|'POST'|'PUT'|'PATCH'|'DELETE'|'HEAD'|'OPTIONS'|'ANY'
parent_name (str): Name of the parent resource.
Returns:
dict: method.
"""
api_id = AWS.APIGateway.GetId(apig,api_name)
resource_list = AWS.APIGateway.Resource.List(apig,api_name)
resource_id = ''
for resource in resource_list:
try:
if resource['pathPart'] == resource_name:
resource_id = resource['id']
except KeyError:
# not all resources have a path part (like the / root)
pass
response = apig.client.get_method(
restApiId=api_id,
resourceId=resource_id,
httpMethod=http_method)
return response
def Create(apig,api_name: str,resource_name: str,http_method: str,parent_name='/',key_req=True,authorizationtype='NONE',req_model={'application/json':'Empty'}):
"""Creates a Resource Method.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
api_name (str): Name of the API.
resource_name (str): Name of the resource.
http_method (str): 'GET'|'POST'|'PUT'|'PATCH'|'DELETE'|'HEAD'|'OPTIONS'|'ANY'
key_req (bool): True to require an API key to access.
authorizationtype (str): 'NONE'.
req_model (dict): {'application/json':'Empty'}
Returns:
dict: server response
"""
api_id = AWS.APIGateway.GetId(apig,api_name)
try:
method = AWS.APIGateway.Method.Get(apig,api_name,resource_name,http_method,parent_name)
if method['ResponseMetadata']['HTTPStatusCode'] == 200:
return method
except Exception:
pass # no existing method found (or lookup failed); fall through and create one
resource_list = AWS.APIGateway.Resource.List(apig,api_name)
resource_id = ''
for resource in resource_list:
try:
if resource['pathPart'] == resource_name:
resource_id = resource['id']
except KeyError:
pass # not every resource has a pathPart (e.g. the / root)
#resp_model = {'200':{'responseModels':{'application/json':'Empty'},'statusCode':'200'}}
#authorizerId='string',
#operationName='string',
#requestParameters={'string': True|False},
#requestValidatorId='string',
#authorizationScopes=['string']
return apig.client.put_method(
restApiId=api_id,
resourceId=resource_id,
httpMethod=http_method,
authorizationType=authorizationtype,
apiKeyRequired=key_req,
requestModels=req_model)
def Add_Integration(apig,api_name: str,resource_id: str,http_method: str,lambda_arn: str,integration_type='AWS',enable_CORs=True):
"""Adds an Lambda Function Integration to a Resource Method.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
api_name (str): API specified by name.
resource_id (str): Resource Id.
http_method (str): 'GET'|'POST'|'PUT'|'PATCH'|'DELETE'|'HEAD'|'OPTIONS'|'ANY'
lambda_arn (str): ARN of the lambda function
integration_type (str): 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY'
enable_CORs (bool): Default True enables CORS so Lambda functions may be invoked AND x-api-key headers are accepted for auth
Returns:
dict: server response
Ref:
http://boto3.readthedocs.io/en/latest/reference/services/apigateway.html#APIGateway.Client.put_integration
https://github.com/boto/boto3/issues/572
https://stackoverflow.com/questions/38052953/automating-cors-using-boto3-for-aws-api-gateway
Notes:
Enabling CORs is required to use API for invoking lambda function.
"""
#get the id of the api by name
api_id = AWS.APIGateway.GetId(apig,api_name)
#get the version to use for the method integration
#version = apig.client.meta.service_model.api_version
version = '2015-03-31' #latest 2015-07-09 failed to properly invoke lambda
#remove the $LATEST alias tag from the function ARN
#TODO: why? - didn't work otherwise
lambda_arn = lambda_arn.replace(':$LATEST','')
#build the lambda uri
uri = 'arn:aws:apigateway:' + apig.region + ':lambda:path/' + version + '/functions/' + lambda_arn + '/invocations'
#uri arn:aws:apigateway:$REGION:lambda:path/2015-03-31/functions/arn:aws:lambda:$REGION:$ACCOUNT:function:LambdaFunctionOverHttps/invocations
if enable_CORs:
#add integration
add_response = apig.client.put_integration(
restApiId=api_id,
resourceId=resource_id,
httpMethod=http_method,
integrationHttpMethod='POST',#http_method, #must change to POST as this is how lambda functions are invoked?
uri=uri,
type=integration_type)
#add the method response
method_response = apig.client.put_method_response(
restApiId=api_id,
resourceId=resource_id,
httpMethod=http_method,
statusCode='200',
responseParameters={
'method.response.header.Access-Control-Allow-Origin': False
},
responseModels={
'application/json': 'Empty'
})
#add the integration response
integration_response = apig.client.put_integration_response(
restApiId=api_id,
resourceId=resource_id,
httpMethod=http_method,
statusCode='200',
responseParameters={
'method.response.header.Access-Control-Allow-Origin': '\'*\''
},
responseTemplates={
'application/json': ''
}
)
#add an OPTION method
option_response = apig.client.put_method(
restApiId=api_id,
resourceId=resource_id,
httpMethod='OPTIONS',
authorizationType='NONE'
)
# Set the put integration of the OPTIONS method
opt_int_response = apig.client.put_integration(
restApiId=api_id,
resourceId=resource_id,
httpMethod='OPTIONS',
type='MOCK',
requestTemplates={
'application/json': '{"statusCode": 200}'
}
)
# Set the put method response of the OPTIONS method
opt_resp_response = apig.client.put_method_response(
restApiId=api_id,
resourceId=resource_id,
httpMethod='OPTIONS',
statusCode='200',
responseParameters={
'method.response.header.Access-Control-Allow-Headers': False,
'method.response.header.Access-Control-Allow-Origin': False,
'method.response.header.Access-Control-Allow-Methods': False
},
responseModels={
'application/json': 'Empty'
}
)
# Set the put integration response of the OPTIONS method
opt_int_resp_response = apig.client.put_integration_response(
restApiId=api_id,
resourceId=resource_id,
httpMethod='OPTIONS',
statusCode='200',
responseParameters={
'method.response.header.Access-Control-Allow-Headers': '\'Content-Type,X-Amz-Date,Authorization,X-Api-Key,x-api-key,X-Amz-Security-Token\'',
'method.response.header.Access-Control-Allow-Methods': '\'' + http_method + ',OPTIONS\'',
'method.response.header.Access-Control-Allow-Origin': '\'*\''
},
responseTemplates={
'application/json': ''
}
)
else:
add_response = apig.client.put_integration(
restApiId=api_id,
resourceId=resource_id,
httpMethod=http_method,
integrationHttpMethod=http_method,
uri=uri,
type=integration_type)
resp_response = apig.client.put_integration_response(
restApiId=api_id,
resourceId=resource_id,
httpMethod=http_method,
statusCode='200',
selectionPattern=''
)
# create POST method response
try:
method_response = apig.client.put_method_response(
restApiId=api_id,
resourceId=resource_id,
httpMethod=http_method,
statusCode='200',
responseModels={
'application/json': 'Empty' #TODO: make like in console
})
except Exception:
pass # ignore failures here (e.g. the method response may already exist)
#TODO: update because http_method could change?
return add_response
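# Illustrative sketch (not part of the original module): a hypothetical helper showing the
# typical call order for wiring a CORS-enabled, Lambda-backed endpoint with the helpers
# above. The resource name 'items' and the 'GET' method are placeholders; `lambda_arn`
# must be the ARN of an existing Lambda function.
def _Example_Wire_Lambda_Endpoint(apig, api_name: str, lambda_arn: str):
    """Hypothetical usage sketch only; returns the integration response."""
    resource = AWS.APIGateway.Resource.Create(apig, api_name, 'items')   # create (or reuse) the /items resource
    AWS.APIGateway.Method.Create(apig, api_name, 'items', 'GET')         # add a key-protected GET method
    return AWS.APIGateway.Method.Add_Integration(apig, api_name, resource['id'],
                                                 'GET', lambda_arn)      # hook the method up to the Lambda function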
class UsagePlan:
"""Sub class for managing a UsagePlan
TODO:
Remove_Stage
update, with remaining options like quota/throttle etc
"""
def List(apig):
"""List all usage plans.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
Returns:
list: List of usage plans.
"""
return apig.client.get_usage_plans()['items']
def GetId(apig,usageplan_name: str):
"""Get Id of a usage plan specified by name.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
usageplan_name (str): Name of specified usageplan.
Returns:
str: id
"""
usageplan_list = AWS.APIGateway.UsagePlan.List(apig)
usageplan_id = ''
for usp in usageplan_list:
if usp['name'] == usageplan_name:
usageplan_id = usp['id']
return usageplan_id
def List_Keys(apig,usageplan_name: str):
"""List all usage plan keys.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
usageplan_name (str): Name of specified usageplan.
Returns:
dict: server_response
"""
usage_plan_id = AWS.APIGateway.UsagePlan.GetId(apig,usageplan_name)
return apig.client.get_usage_plan_keys(usagePlanId=usage_plan_id)
def Usage(apig,usageplan_name: str,start: str,end: str):
"""Gets the usage data of a usage plan in a specified time interval.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
usageplan_name (str): Name of specified usageplan.
start (str): '2016-01-01'
end (str): '2016-12-31'
Returns:
list: usage
Notes:
Only past 90 days.
"""
usageplan_id = AWS.APIGateway.UsagePlan.GetId(apig,usageplan_name)
return apig.client.get_usage(usagePlanId=usageplan_id,startDate=start,endDate=end)['items']
def Create(apig,usage_name: str,purpose: str,overwrite=False,throttle_rate=20.0,throttle_burst=50,quota_limit=1000,quota_period='DAY',quota_offset=0):
"""Create a Usage Plan with throttle and quota limits.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
usage_name (str): Name of the usage plan.
purpose (str): Description of the usage plan.
overwrite (bool): True if to overwrite if usage plan already exists.
throttle_rate (float): The steady-state rate limit.
throttle_burst (int): The maximum rate limit over a time ranging from one to a few seconds.
quota_limit (int): The maximum number of requests that can be made in a given time period.
quota_period (str): 'DAY'|'WEEK'|'MONTH'; The time period in which the limit applies.
quota_offset (int): The number of requests subtracted from the given limit in the initial time period.
Returns:
dict: server response to creation, the update response when overwriting, or the existing usage plan.
Note:
If overwrite is True, will overwrite an existing usage plan. If not, then will return the information of that usage plan.
"""
#check if usage plan exists
usage_plan_list = AWS.APIGateway.UsagePlan.List(apig)
active_usages = [x for x in usage_plan_list if x['name'] == usage_name]
if len(active_usages) <= 0: # doesn't exist yet
throttle = {
'burstLimit': throttle_burst,
'rateLimit': throttle_rate
}
quota = {
'limit': quota_limit,
'offset': quota_offset,
'period': quota_period
}
return apig.client.create_usage_plan(name=usage_name,description=purpose,throttle=throttle,quota=quota)
else:
if overwrite:
return AWS.APIGateway.UsagePlan.Update(apig,active_usages[0]['id'],purpose,throttle_rate,throttle_burst,quota_limit,quota_period,quota_offset)
else:
return active_usages[0]
def Update(apig,usageplan_id: str,purpose: str,throttle_rate: float,throttle_burst: int,quota_limit: int,quota_period: str,quota_offset: int):
"""Update an existing usage plan.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
usageplan_id (str): Id of the usage plan to update.
purpose (str): Description of the usage plan.
throttle_rate (float): The steady-state rate limit.
throttle_burst (int): The maximum rate limit over a time ranging from one to a few seconds.
quota_limit (int): The maximum number of requests that can be made in a given time period.
quota_period (str): 'DAY'|'WEEK'|'MONTH'; The time period in which the limit applies.
quota_offset (int): The number of requests subtracted from the given limit in the initial time period.
Returns:
dict: response from server.
Ref:
https://docs.aws.amazon.com/apigateway/api-reference/link-relation/usageplan-update/
"""
response = apig.client.update_usage_plan(
usagePlanId=usageplan_id,
patchOperations=[
{
'op': 'replace',
'path': '/description',
'value': purpose,
},
{
'op': 'replace',
'path': '/throttle/burstLimit',
'value': str(throttle_burst),
},
{
'op': 'replace',
'path': '/throttle/rateLimit',
'value': str(throttle_rate),
},
{
'op': 'replace',
'path': '/quota/limit',
'value': str(quota_limit),
},
{
'op': 'replace',
'path': '/quota/offset',
'value': str(quota_offset),
},
{
'op': 'replace',
'path': '/quota/period',
'value': str(quota_period),
},
]
)
return response
def Delete(apig,usageplan_name: str):
"""Delete a Usage Plan.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
usageplan_name (str): Specified usageplan by name.
Returns:
dict: server response.
"""
usageplan_id = AWS.APIGateway.UsagePlan.GetId(apig,usageplan_name)
return apig.client.delete_usage_plan(usagePlanId=usageplan_id)
def Add_Key(apig,usageplan_id: str,key_id: str,key_type='API_KEY'):
"""Add an existing API Key to a Usage Plan.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
usageplan_id (str): Id of specified usageplan.
key_id (str): Id of specified key.
key_type (str): 'API_KEY' | ??
Returns:
dict: response from server.
"""
try:
return apig.client.create_usage_plan_key(usagePlanId=usageplan_id,keyId=key_id,keyType=key_type)
except Exception as ex:
# adding the key can cause an exception if the key is already subscribed to a/this usage plan
if ex.response['Error']['Code'] == 'ConflictException':
keys = [x for x in apig.client.get_usage_plan_keys(usagePlanId=usageplan_id)['items'] if x['id'] == key_id]
if len(keys) <= 0:
raise ex #unknown conflict?
return keys[0] #this returns the key dict, different than response?
else:
raise ex
def Remove_Key(apig,usageplan_id: str,key_id: str):
"""Remove an existing API Key from a Usage Plan.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
usageplan_id (str): Id of specified usageplan.
key_id (str): Id of specified key.
Returns:
dict: response from server.
"""
return apig.client.delete_usage_plan_key(usagePlanId=usageplan_id,keyId=key_id)
def Add_Stage(apig,usageplan_id: str,rest_api_id:str,stage_name:str):
"""Add a usageplan to a deployment.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
usageplan_id (str): Id of specified usageplan.
rest_api_id (str): Id of specified API.
stage_name (str): Name of the deployment stage.
Returns:
dict: response
Ref:
https://github.com/boto/boto3/issues/825#issuecomment-251234288
https://stackoverflow.com/questions/39523225/update-aws-lambda-api-key-usage-plans-with-boto3
https://docs.aws.amazon.com/apigateway/api-reference/link-relation/usageplan-update/
"""
#TODO: first check if usage plan already has this stage in it?
all_plans = AWS.APIGateway.UsagePlan.List(apig)
this_plan = [x for x in all_plans if x['id'] == usageplan_id][0] #index will fail if plan not already created
stages_in_this_plan = [x for x in this_plan['apiStages'] if x['stage'] == stage_name]
if len(stages_in_this_plan) <= 0:
return apig.client.update_usage_plan(
usagePlanId=usageplan_id,
patchOperations=[
{
'op': 'add',#|'remove'|'replace'|'move'|'copy'|'test',
'path': '/apiStages',
'value': rest_api_id + ':' + stage_name
}
])
return this_plan
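# Illustrative sketch (not part of the original module): a hypothetical sequence for tying
# an API key to a usage plan on an already-deployed stage. 'MyPlan', 'MyKey', 'prod' and
# the purpose strings are placeholders, not values from this project.
def _Example_Setup_Plan_And_Key(apig, api_name: str):
    """Hypothetical usage sketch only; returns the Add_Key response."""
    plan = AWS.APIGateway.UsagePlan.Create(apig, 'MyPlan', 'example usage plan')   # create or reuse the plan
    api_id = AWS.APIGateway.GetId(apig, api_name)
    AWS.APIGateway.UsagePlan.Add_Stage(apig, plan['id'], api_id, 'prod')           # the 'prod' stage must already be deployed
    key = AWS.APIGateway.Key.Create(apig, 'MyKey', 'example key')                  # assumed to return a dict containing an 'id'
    return AWS.APIGateway.UsagePlan.Add_Key(apig, plan['id'], key['id'])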
class Key:
"""Rest API Authorization Keys.
"""
def List(apig):
"""List all API Keys.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
Returns:
list: keys.
"""
return apig.client.get_api_keys()['items']
def Get_Key(apig,key_id: str,include_value=False):
"""Get the API key specified, will return value if to include_value.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
key_id (str): Key Id.
include_value (bool): True to include the value in the response from server.
Returns:
dict: response from server.
"""
return apig.client.get_api_key(apiKey=key_id,includeValue=include_value)
def Create(apig,key_name: str,purpose: str,enabled=True,value='',generate_distict_id=True):
"""Create an API key.
Args:
apig (APIGateway): Instantiated APIGateway credential access object.
key_name (str): Name of the key.
purpose (str): Why the key was needed.
enabled (bool): True if can be used by callers.
value (str): Specified value of the key. TODO: if empty, is it auto-generated?
generate_distinct_id (bool): True to make the key identifier distinct from the created api key value.
Returns:
dict: server response OR already created api key.
"""
api_key_list = AWS.APIGateway.Key.List(apig)
active_api_keys = [x for x in | |
don't really care to have the labels separated from the data
#This is difficult to test separately without a lot of setup but was shown to be accurate
#Checking for multiple items was the most difficult part to do properly; the expression `not any(x in ["MEGA BUY", "BUYBUYBUY", "Good"] for x in worthit_list)` is a generator expression that short-circuits (stops scanning) as soon as a match is found
for salvage_rarity,droprate_x in droprate_dict.items():
itemValues_dct,itemSum_val = compute_result(droprate_x,multiplier_dct,True)
methodprofit=round(itemSum_val - salvageCost_dct[salvage_rarity]-itemCost_dct[itemName_str][buysell],4)
print(formatline.format(*[salvage_rarity,round(methodprofit,4),round(itemSum_val,4)]+[itemValues_dct[x] for x in orderedkeys]))
"%d%%"%(100*(methodprofit/(salvageCost_dct[salvage_rarity]+itemCost_dct[itemName_str][buysell])))
if (methodprofit >= 100):
worthit_list = [itemName_str, "Check Kit", methodprofit, "%d%%"%(100*(methodprofit/(salvageCost_dct[salvage_rarity]+itemCost_dct[itemName_str][buysell]))), "MEGA BUY"]
elif (methodprofit >=50) and ("MEGA BUY" not in worthit_list):
worthit_list = [itemName_str, salvage_rarity, methodprofit, "%d%%"%(100*(methodprofit/(salvageCost_dct[salvage_rarity]+itemCost_dct[itemName_str][buysell]))), "BUYBUYBUY"]
elif (methodprofit >=20) and not any(x in ["MEGA BUY", "BUYBUYBUY"] for x in worthit_list):
worthit_list = [itemName_str, salvage_rarity, methodprofit, "%d%%"%(100*(methodprofit/(salvageCost_dct[salvage_rarity]+itemCost_dct[itemName_str][buysell]))), "Good"]
elif (methodprofit >=7) and not any(x in ["MEGA BUY", "BUYBUYBUY", "Good"] for x in worthit_list):
worthit_list = [itemName_str, salvage_rarity, methodprofit, "%d%%"%(100*(methodprofit/(salvageCost_dct[salvage_rarity]+itemCost_dct[itemName_str][buysell]))), "Consider"]
return worthit_list
#End of salvagePrint function
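# Illustrative sketch (not part of the original script): a tiny, hypothetical helper that
# isolates the `not any(...)` pattern used in salvagePrint above. The generator expression
# short-circuits, so scanning stops as soon as one of the higher-priority labels is found,
# which is why a weaker label never overwrites "MEGA BUY", "BUYBUYBUY" or "Good".
def _example_priority_check(worthit_list):
    '''Hypothetical usage sketch: True only if no stronger recommendation is present yet.'''
    return not any(x in ["MEGA BUY", "BUYBUYBUY", "Good"] for x in worthit_list)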
"""************************************
************ DROP RATES ************
************************************"""
"""New case needs the following information:
droprate dictionary
material IDs added to allAPI list
material IDs added to sort_allAPI function
variable to allAPI output if needed
salvagePrint function call
"""
"""
Drop rates: Metals
"""
""" T1 """
#Brittle Clump of Ore
droprate_BrittleClumpofOre={}
#All Peureki
droprate_BrittleClumpofOre['Copper']={'Copper Ore':1.896}
droprate_BrittleClumpofOre['Runecrafter']={'Copper Ore':1.86}
droprate_BrittleClumpofOre['Rare']={'Copper Ore':1.888}
#Bit of Metal Scrap
droprate_BitofMetalScrap = {}
#All Peureki
droprate_BitofMetalScrap['Copper']={'Copper Ore':1.796}
droprate_BitofMetalScrap['Runecrafter']={'Copper Ore':1.884}
droprate_BitofMetalScrap['Rare']={'Copper Ore':1.856}
""" T2 """
#Weak Clump of Ore
droprate_WeakClumpofOre = {}
#Peu
droprate_WeakClumpofOre['Copper']={'Copper Ore':0.37,'Silver Ore':0.65,'Iron Ore':0.81}
droprate_WeakClumpofOre['Runecrafter']={'Copper Ore':0.25,'Silver Ore':0.78,'Iron Ore':0.75}
droprate_WeakClumpofOre['Rare']={'Copper Ore':0.43,'Silver Ore':0.81,'Iron Ore':0.77}
#Pile of Metal Scrap
droprate_PileofMetalScrap = {}
#Peu
droprate_PileofMetalScrap['Copper']={'Copper Ore':0.608,'Silver Ore':0.748,'Iron Ore':0.504}
droprate_PileofMetalScrap['Runecrafter']={'Copper Ore':0.484,'Silver Ore':0.712,'Iron Ore':0.66}
droprate_PileofMetalScrap['Rare']={'Copper Ore':0.408,'Silver Ore':0.632,'Iron Ore':0.812}
""" T3 """
#Clump of Ore
droprate_ClumpofOre = {}
#Peu
droprate_ClumpofOre['Copper']={'Silver Ore':0.24,'Iron Ore':0.916,'Gold Ore':0.604}
droprate_ClumpofOre['Runecrafter']={'Silver Ore':0.148,'Iron Ore':1.008,'Gold Ore':0.728}
droprate_ClumpofOre['Rare']={'Silver Ore':0.2,'Iron Ore':0.924,'Gold Ore':0.792}
#Jagged Metal Scrap
droprate_JaggedMetalScrap = {}
#Peu
droprate_JaggedMetalScrap['Copper']={'Silver Ore':0.228,'Iron Ore':0.836,'Gold Ore':0.752}
droprate_JaggedMetalScrap['Runecrafter']={'Silver Ore':0.176,'Iron Ore':0.924,'Gold Ore':0.752}
droprate_JaggedMetalScrap['Rare']={'Silver Ore':0.212,'Iron Ore':1.012,'Gold Ore':0.704}
""" T4 """
#Laden Clump of Ore
droprate_LadenClumpofOre = {}
#Peu
droprate_LadenClumpofOre['Copper']={'Iron Ore':0.224,'Gold Ore':0.176,'Platinum Ore':1.484}
droprate_LadenClumpofOre['Runecrafter']={'Iron Ore':0.204,'Gold Ore':0.212,'Platinum Ore':1.436}
droprate_LadenClumpofOre['Rare']={'Iron Ore':0.22,'Gold Ore':0.16,'Platinum Ore':1.424}
#Metal Scrap
droprate_MetalScrap = {}
#Peu
droprate_MetalScrap['Copper']={'Iron Ore':0.212,'Gold Ore':0.276,'Platinum Ore':1.3}
droprate_MetalScrap['Runecrafter']={'Iron Ore':0.176,'Gold Ore':0.164,'Platinum Ore':1.476}
droprate_MetalScrap['Rare']={'Iron Ore':0.184,'Gold Ore':0.136,'Platinum Ore':1.488}
""" T5 """
#Loaded Clump of Ore
droprate_LoadedClumpofOre = {}
#Peu
droprate_LoadedClumpofOre['Copper']={'Platinum Ore':0.524,'Mithril Ore':1.088}
droprate_LoadedClumpofOre['Runecrafter']={'Platinum Ore':0.456,'Mithril Ore':1.312}
droprate_LoadedClumpofOre['Rare']={'Platinum Ore':0.392,'Mithril Ore':1.32}
#Salvageable Metal Scrap
droprate_SalvageableMetalScrap = {}
#Peu
droprate_SalvageableMetalScrap['Copper']={'Platinum Ore':0.53,'Mithril Ore':1.07}
droprate_SalvageableMetalScrap['Runecrafter']={'Platinum Ore':0.51,'Mithril Ore':1.1}
droprate_SalvageableMetalScrap['Rare']={'Platinum Ore':0.39,'Mithril Ore':1.32}
""" T6 """
#Rich Clump of Ore
droprate_RichClumpofOre = {}
#Peu
droprate_RichClumpofOre['Copper']={'Mithril Ore':1.172,'Orichalcum Ore':0.244}
droprate_RichClumpofOre['Runecrafter']={'Mithril Ore':1.472,'Orichalcum Ore':0.192}
droprate_RichClumpofOre['Rare']={'Mithril Ore':1.24,'Orichalcum Ore':0.212}
#Valuable Metal Scrap
droprate_ValuableMetalScrap = {}
#Peu
droprate_ValuableMetalScrap['Copper']={'Mithril Ore':1.216,'Orichalcum Ore':0.196}
droprate_ValuableMetalScrap['Runecrafter']={'Mithril Ore':1.276,'Orichalcum Ore':0.2}
droprate_ValuableMetalScrap['Rare']={'Mithril Ore':1.468,'Orichalcum Ore':0.204}
""" All Tiers """
#Unstable Metal Chunk
droprate_UnstableMetalChunk = {}
#Me
droprate_UnstableMetalChunk['Copper']={'Copper Ore':0.2035,'Iron Ore':0.9506,'Platinum Ore':0.5039,'Mithril Ore':0.1453,'Orichalcum Ore':0.2946}
droprate_UnstableMetalChunk['Runecrafter']={'Copper Ore':0.1531,'Iron Ore':0.911,'Platinum Ore':0.9593,'Mithril Ore':0.1966,'Orichalcum Ore':0.3427}
#Peu
droprate_UnstableMetalChunk['Rare']={'Copper Ore':0.136,'Iron Ore':1.004,'Platinum Ore':0.523,'Mithril Ore':0.151,'Orichalcum Ore':0.31}
"""
Drop rates: Leathers
"""
""" T1 """
#Tattered Hide
droprate_TatteredHide = {}
#Peureki
droprate_TatteredHide['Copper'] = {'Rawhide Leather Section':1.84}
droprate_TatteredHide['Runecrafter'] = {'Rawhide Leather Section':1.79}
droprate_TatteredHide['Rare'] = {'Rawhide Leather Section':1.87}
#Rawhide Leather Strap
droprate_RawhideLeatherStrap = {}
#Peureki
droprate_RawhideLeatherStrap['Copper'] = {'Rawhide Leather Section':1.788}
droprate_RawhideLeatherStrap['Runecrafter'] = {'Rawhide Leather Section':1.848}
droprate_RawhideLeatherStrap['Rare'] = {'Rawhide Leather Section':1.9}
#Tattered Pelt
droprate_TatteredPelt = {}
#Peureki
droprate_TatteredPelt['Copper'] = {'Rawhide Leather Section':1.9}
droprate_TatteredPelt['Runecrafter'] = {'Rawhide Leather Section':1.92}
droprate_TatteredPelt['Rare'] = {'Rawhide Leather Section':1.87}
""" T2 """
#Ripped Hide
droprate_RippedHide = {}
#Peureki
droprate_RippedHide['Copper'] = {'Rawhide Leather Section':0.46,'Thin Leather Section':1.33}
droprate_RippedHide['Runecrafter'] = {'Rawhide Leather Section':0.35,'Thin Leather Section':1.48}
droprate_RippedHide['Rare'] = {'Rawhide Leather Section':0.35,'Thin Leather Section':1.57}
#Thin Leather Strap
droprate_ThinLeatherStrap = {}
#Peureki
droprate_ThinLeatherStrap['Copper'] = {'Rawhide Leather Section':0.55,'Thin Leather Section':1.29}
droprate_ThinLeatherStrap['Runecrafter'] = {'Rawhide Leather Section':0.41,'Thin Leather Section':1.38}
droprate_ThinLeatherStrap['Rare'] = {'Rawhide Leather Section':0.35,'Thin Leather Section':1.59}
#Ripped Pelt
droprate_RippedPelt = {}
#Peureki
droprate_RippedPelt['Copper'] = {'Rawhide Leather Section':0.58,'Thin Leather Section':1.18}
droprate_RippedPelt['Runecrafter'] = {'Rawhide Leather Section':0.45,'Thin Leather Section':1.44}
droprate_RippedPelt['Rare'] = {'Rawhide Leather Section':0.35,'Thin Leather Section':1.56}
""" T3 """
#Torn Hide
droprate_TornHide = {}
#Peureki
droprate_TornHide['Copper'] = {'Thin Leather Section':0.48,'Coarse Leather Section':1.41}
droprate_TornHide['Runecrafter'] = {'Thin Leather Section':0.26,'Coarse Leather Section':1.6}
droprate_TornHide['Rare'] = {'Thin Leather Section':0.32,'Coarse Leather Section':1.6}
#Coarse Leather Strap
droprate_CoarseLeatherStrap = {}
#Peureki
droprate_CoarseLeatherStrap['Copper'] = {'Thin Leather Section':0.422,'Coarse Leather Section':1.38}
droprate_CoarseLeatherStrap['Runecrafter'] = {'Thin Leather Section':0.348,'Coarse Leather Section':1.44}
droprate_CoarseLeatherStrap['Rare'] = {'Thin Leather Section':0.456,'Coarse Leather Section':1.42}
#Torn Pelt
droprate_TornPelt = {}
#Peureki
droprate_TornPelt['Copper'] = {'Thin Leather Section':0.38,'Coarse Leather Section':1.48}
droprate_TornPelt['Runecrafter'] = {'Thin Leather Section':0.26,'Coarse Leather Section':1.6}
droprate_TornPelt['Rare'] = {'Thin Leather Section':0.32,'Coarse Leather Section':1.6}
""" T4 """
#Frayed Hide
droprate_FrayedHide={}
#Peu
droprate_FrayedHide['Copper']={'Coarse Leather Section':0.57,'Rugged Leather Section':1.16}
#mine
droprate_FrayedHide['Runecrafter']={'Coarse Leather Section':0.4167,'Rugged Leather Section':1.4132}
droprate_FrayedHide['Rare']={'Coarse Leather Section':0.3641,'Rugged Leather Section':1.5538}
#Thick Leather Strap
droprate_ThickLeatherStrap = {}
#Peureki
droprate_ThickLeatherStrap['Copper'] = {'Coarse Leather Section':0.52,'Rugged Leather Section':1.24}
droprate_ThickLeatherStrap['Runecrafter'] = {'Coarse Leather Section':0.29,'Rugged Leather Section':1.64}
droprate_ThickLeatherStrap['Rare'] = {'Coarse Leather Section':0.3,'Rugged Leather Section':1.53}
#Frayed Pelt
droprate_FrayedPelt = {}
#Peureki
droprate_FrayedPelt['Copper'] = {'Coarse Leather Section':0.52,'Rugged Leather Section':1.22}
droprate_FrayedPelt['Runecrafter'] = {'Coarse Leather Section':0.36,'Rugged Leather Section':1.4}
droprate_FrayedPelt['Rare'] = {'Coarse Leather Section':0.3,'Rugged Leather Section':1.62}
""" T5 """
#Filthy Hide
droprate_FilthyHIde = {}
#Peureki
droprate_FilthyHIde['Copper'] = {'Rugged Leather Section':1.36,'Thick Leather Section':0.4}
droprate_FilthyHIde['Runecrafter'] = {'Rugged Leather Section':0.7,'Thick Leather Section':0.96}
droprate_FilthyHIde['Rare'] = {'Rugged Leather Section':0.78,'Thick Leather Section':1.08}
#Rugged Leather Strap
droprate_RuggedLeatherStrap = {}
#Peureki
droprate_RuggedLeatherStrap['Copper'] = {'Rugged Leather Section':1.12,'Thick Leather Section':0.62}
droprate_RuggedLeatherStrap['Runecrafter'] = {'Rugged Leather Section':1.02,'Thick Leather Section':0.77}
droprate_RuggedLeatherStrap['Rare'] = {'Rugged Leather Section':0.83,'Thick Leather Section':0.9}
#Filthy Pelt
droprate_FilthyPelt = {}
#Peureki
droprate_FilthyPelt['Copper'] = {'Rugged Leather Section':1.28,'Thick Leather Section':0.48}
droprate_FilthyPelt['Runecrafter'] = {'Rugged Leather Section':1.24,'Thick Leather Section':0.58}
droprate_FilthyPelt['Rare'] = {'Rugged Leather Section':0.98,'Thick Leather Section':0.84}
""" T6 """
#Salvageable Hide
droprate_SalvageableHide = {}
#Peureki
droprate_SalvageableHide['Copper'] = {'Thick Leather Section':1.316,'Hardened Leather Section':0.064}
droprate_SalvageableHide['Runecrafter'] = {'Thick Leather Section':1.3,'Hardened Leather Section':0.076}
droprate_SalvageableHide['Rare'] = {'Thick Leather Section':1.236,'Hardened Leather Section':0.1}
#Hard Leather Strap
droprate_HardLeatherStrap={}
#Mine
droprate_HardLeatherStrap['Copper'] = {'Thick Leather Section':1.2844,'Hardened Leather Section':0.0791}
droprate_HardLeatherStrap['Runecrafter'] = {'Thick Leather Section':1.3045,'Hardened Leather Section':0.0813}
droprate_HardLeatherStrap['Rare'] = {'Thick Leather Section':1.2588,'Hardened Leather Section':0.0975}
#Salvageable Pelt
droprate_SalvageablePelt = {}
#Peureki
droprate_SalvageablePelt['Copper'] = {'Thick Leather Section':1.24,'Hardened Leather Section':0.100}
droprate_SalvageablePelt['Runecrafter'] = {'Thick Leather Section':1.21,'Hardened Leather Section':0.11}
droprate_SalvageablePelt['Rare'] = {'Thick Leather Section':1.22,'Hardened Leather Section':0.11}
""" All Tiers """
#Unstable Hide
droprate_UnstableHide = {}
#My data
droprate_UnstableHide['Copper'] = {'Rawhide Leather Section':0.1822,'Thin Leather Section':0.4846,'Coarse Leather Section':0.4884,'Rugged Leather Section':0.4612,'Thick Leather Section':0.1537,'Hardened Leather Section':0.3004}
droprate_UnstableHide['Runecrafter'] = {'Rawhide Leather Section':0.1746,'Thin Leather Section':0.4780,'Coarse Leather Section':0.4793,'Rugged Leather Section':0.4920,'Thick Leather Section':0.1646,'Hardened Leather Section':0.3170}
droprate_UnstableHide['Rare'] = {'Rawhide Leather Section':0.1747,'Thin Leather Section':0.4603,'Coarse Leather Section':0.4833,'Rugged Leather Section':0.5240,'Thick Leather Section':0.1606,'Hardened Leather Section':0.3366}
#Bloodstone-Warped Hide
droprate_BloodstoneWarpedHide={}
#my data only
droprate_BloodstoneWarpedHide['Copper'] = {'Rawhide Leather Section':0.0462,'Thin Leather Section':0.0533,'Coarse Leather Section':0.0445,'Rugged Leather Section':0.0467,'Thick Leather Section':0.4533,'Hardened Leather Section':0.4714}
droprate_BloodstoneWarpedHide['Runecrafter'] = {'Rawhide Leather Section':0.0483,'Thin Leather Section':0.0463,'Coarse Leather Section':0.0461,'Rugged Leather Section':0.0468,'Thick Leather Section':0.4820,'Hardened Leather Section':0.5337}
droprate_BloodstoneWarpedHide['Rare'] = {'Rawhide Leather Section':0.0534,'Thin Leather Section':0.0647,'Coarse Leather Section':0.0605,'Rugged Leather Section':0.0578,'Thick Leather Section':0.4863,'Hardened Leather Section':0.5581}
"""
Drop rates: Cloth
"""
""" T1 """
#Shredded Garment
droprate_ShreddedGarment = {}
#Peureki
droprate_ShreddedGarment['Copper']={'Jute Scrap':1.884}
droprate_ShreddedGarment['Runecrafter']={'Jute Scrap':1.836}
droprate_ShreddedGarment['Rare']={'Jute Scrap':2.016}
#Half-Eaten Mass
droprate_HalfEatenMass = {}
#Peureki
droprate_HalfEatenMass['Copper']={'Jute Scrap':1.73}
droprate_HalfEatenMass['Runecrafter']={'Jute Scrap':1.74}
droprate_HalfEatenMass['Rare']={'Jute Scrap':1.89}
#Shredded Rag
droprate_ShreddedRag = {}
#Peureki
droprate_ShreddedRag['Copper']={'Jute Scrap':1.784}
droprate_ShreddedRag['Runecrafter']={'Jute Scrap':1.844}
droprate_ShreddedRag['Rare']={'Jute Scrap':1.852}
""" T2 """
#Worn Garment
droprate_WornGarment = {}
#me
droprate_WornGarment['Copper']={'Jute Scrap':0.3560,'Wool Scrap':1.4320}
droprate_WornGarment['Runecrafter']={'Jute Scrap':0.4232,'Wool Scrap':1.4232}
droprate_WornGarment['Rare']={'Jute Scrap':0.3938,'Wool Scrap':1.4831}
#Decaying
droprate_DecayingMass = {}
#Peureki
droprate_DecayingMass['Copper']={'Jute Scrap':0.4,'Wool Scrap':1.42}
droprate_DecayingMass['Runecrafter']={'Jute Scrap':0.68,'Wool Scrap':1.24}
droprate_DecayingMass['Rare']={'Jute Scrap':0.38,'Wool Scrap':1.44}
#Worn Rag
droprate_WornRag = {}
#Me
droprate_WornRag['Copper']={'Jute Scrap':0.4772,'Wool Scrap':1.3423}
droprate_WornRag['Runecrafter']={'Jute Scrap':0.4283,'Wool Scrap':1.3811}
droprate_WornRag['Rare']={'Jute Scrap':0.3742,'Wool Scrap':1.5470}
""" T3 """
#Ragged Garment
droprate_RaggedGarment = {}
#Peu
droprate_RaggedGarment['Copper']={'Wool Scrap':00.492,'Cotton Scrap':1.372}
droprate_RaggedGarment['Runecrafter']={'Wool Scrap':00.416,'Cotton Scrap':1.424}
droprate_RaggedGarment['Rare']={'Wool Scrap':00.34,'Cotton Scrap':1.522}
#Fetid Mass
droprate_FetidMass = {}
#Peu
droprate_FetidMass['Copper']={'Wool Scrap':00.28,'Cotton Scrap':1.44}
droprate_FetidMass['Runecrafter']={'Wool Scrap':00.46,'Cotton Scrap':1.4}
droprate_FetidMass['Rare']={'Wool Scrap':00.26,'Cotton Scrap':1.54}
#Soiled Rag
droprate_SoiledRag = {}
#Peu
droprate_SoiledRag['Copper']={'Wool Scrap':00.36,'Cotton Scrap':1.54}
droprate_SoiledRag['Runecrafter']={'Wool Scrap':00.34,'Cotton Scrap':1.45}
droprate_SoiledRag['Rare']={'Wool Scrap':00.34,'Cotton Scrap':1.38}
""" T4 """
#Frayed Garment
droprate_FrayedGarment = {}
#wiki
droprate_FrayedGarment['Copper']={'Cotton Scrap':00.55,'Linen Scrap':1.25}
#Peu
droprate_FrayedGarment['Runecrafter']={'Cotton Scrap':00.484,'Linen Scrap':1.4}
droprate_FrayedGarment['Rare']={'Cotton Scrap':00.432,'Linen Scrap':0.976}
#Malodorous Mass
droprate_MalodorousMass = {}
#Peu
droprate_MalodorousMass['Copper']={'Cotton Scrap':00.43,'Linen Scrap':1.36}
droprate_MalodorousMass['Runecrafter']={'Cotton Scrap':00.45,'Linen Scrap':1.5}
droprate_MalodorousMass['Rare']={'Cotton Scrap':00.37,'Linen Scrap':1.46}
#Frayed Rag
droprate_FrayedRag = {}
#Peu
droprate_FrayedRag['Copper']={'Cotton Scrap':00.488,'Linen Scrap':1.308}
droprate_FrayedRag['Runecrafter']={'Cotton Scrap':00.424,'Linen Scrap':1.484}
droprate_FrayedRag['Rare']={'Cotton Scrap':00.324,'Linen Scrap':1.556}
""" T5 """
#Torn Garment
droprate_TornGarment = {}
#Peu
droprate_TornGarment['Copper']={'Linen Scrap':00.428,'Silk Scrap':1.4}
droprate_TornGarment['Runecrafter']={'Linen Scrap':00.436,'Silk Scrap':1.356}
droprate_TornGarment['Rare']={'Linen Scrap':00.448,'Silk Scrap':1.46}
#Half-Digested Mass
droprate_HalfDigestedMass = {}
#Peu
droprate_HalfDigestedMass['Copper']={'Linen Scrap':00.32,'Silk Scrap':1.42}
droprate_HalfDigestedMass['Runecrafter']={'Linen Scrap':00.53,'Silk Scrap':1.27}
droprate_HalfDigestedMass['Rare']={'Linen Scrap':00.35,'Silk Scrap':1.51}
#Torn Rag
droprate_TornRag = {}
#Peu
droprate_TornRag['Copper']={'Linen Scrap':00.35,'Silk Scrap':1.47}
droprate_TornRag['Runecrafter']={'Linen Scrap':00.43,'Silk Scrap':1.36}
#wiki
droprate_TornRag['Rare']={'Linen Scrap':00.324,'Silk Scrap':1.596}
""" T6 """
#Discarded Garment
droprate_DiscardedGarment = {}
#Peu
droprate_DiscardedGarment['Copper']={'Silk Scrap':1.31,'Gossamer Scrap':00.098}
droprate_DiscardedGarment['Runecrafter']={'Silk Scrap':1.366,'Gossamer Scrap':00.081}
droprate_DiscardedGarment['Rare']={'Silk Scrap':1.296,'Gossamer Scrap':00.121}
#Regurgitated Mass
droprate_RegurgitatedMass = {}
#Peu
droprate_RegurgitatedMass['Copper']={'Silk Scrap':1.61,'Gossamer Scrap':00.1}
droprate_RegurgitatedMass['Runecrafter']={'Silk Scrap':1.5,'Gossamer Scrap':00.13}
droprate_RegurgitatedMass['Rare']={'Silk Scrap':1.49,'Gossamer Scrap':00.08}
#Rag
droprate_Rag = {}
#Peu
droprate_Rag['Copper']={'Silk Scrap':1.596,'Gossamer Scrap':00.076}
droprate_Rag['Runecrafter']={'Silk Scrap':1.53,'Gossamer Scrap':00.124}
droprate_Rag['Rare']={'Silk Scrap':1.55,'Gossamer Scrap':00.104}
""" Additional Garments """
#Garment 28
droprate_Garment28 = {}
#No data anywhere. Placeholder for completeness
droprate_Garment28['Copper']={'Linen Scrap':00.00,'Silk Scrap':00.00}
droprate_Garment28['Runecrafter']={'Linen Scrap':00.00,'Silk Scrap':00.00}
droprate_Garment28['Rare']={'Linen Scrap':00.00,'Silk Scrap':00.00}
#Garment 32
droprate_Garment32 = {}
#No data anywhere. Placeholder for completeness
droprate_Garment32['Copper']={'Linen Scrap':00.00,'Silk Scrap':00.00}
droprate_Garment32['Runecrafter']={'Linen Scrap':00.00,'Silk Scrap':00.00}
droprate_Garment32['Rare']={'Linen Scrap':00.00,'Silk Scrap':00.00}
""" All Tiers """
#Unstable Cloth
droprate_UnstableRag = {}
#Peu
droprate_UnstableRag['Copper']={'Jute Scrap':0.1855,'Wool Scrap':0.5135,'Cotton Scrap':0.4850,'Linen Scrap':0.5166,'Silk Scrap':0.1855,'Gossamer Scrap':0.1917}
droprate_UnstableRag['Runecrafter']={'Jute Scrap':0.1746,'Wool Scrap':0.5373,'Cotton Scrap':0.5317,'Linen Scrap':0.4857,'Silk Scrap':0.1833,'Gossamer Scrap':0.1825}
droprate_UnstableRag['Rare']={'Jute Scrap':0.1604,'Wool Scrap':0.5076,'Cotton Scrap':0.5761,'Linen Scrap':0.4855,'Silk Scrap':0.2109,'Gossamer Scrap':0.1680}
"""
Drop rates: Wood
"""
#Yes, there's only 1
droprate_ReclaimedWoodChunk={}
#Wiki
droprate_ReclaimedWoodChunk['Copper']={'Green Wood Log':0.102,'Soft Wood Log':0.4703,'Seasoned Wood Log':0.504,'Hard Wood Log':0.5206,'Elder Wood Log':0.163,'Ancient Wood Log':0.277}
#Peu
droprate_ReclaimedWoodChunk['Runecrafter']={'Green Wood Log':0.109,'Soft Wood Log':0.523,'Seasoned Wood Log':0.546,'Hard Wood Log':0.436,'Elder Wood Log':0.178,'Ancient Wood Log':0.344}
droprate_ReclaimedWoodChunk['Rare']={'Green Wood Log':0.12,'Soft Wood Log':0.459,'Seasoned Wood Log':0.511,'Hard Wood Log':0.469,'Elder Wood Log':0.149,'Ancient Wood Log':0.331}
"""
Helper stuff
"""
#All relevant IDs
allIDs = [79423,#Wood salvage
21690,21678,21691,21679,21692,21680,21693,21681,21694,21682,21695,21683,79079,#Metal salvage
21661,21684,21653,21664,21685,21654,21667,21686,21655,21668,21687,21656,21670,21688,21657,22331,21689,21658,79213,80681,#Leather salvage
21669,22325,21659,21671,22326,21660,21672,22327,21662,21673,22328,21663,21674,22329,21665,21675,22330,21666,79138,#Cloth salvage
21676,21677,#The random other Rags
19723,19726,19727,19724,19722,19725,#raw wood
19710,19713,19714,19711,19709,19712,#refined wood
19697,19703,19699,19698,19702,19700,19701,#raw metal
19680,19679,19687,19683,19688,19682,19686,19681,19684,19685,#refined metal
19718,19739,19741,19743,19748,19745,#raw cloth
19720,19740,19742,19744,19747,19746,#refined cloth
19719,19728,19730,19731,19729,19732,#raw leather
19738,19733,19734,19736,19735,19737]#refined leather
#Salvage options
#salvageOptions 'Mystic':10.5, 'Copper':5 , 'Runecrafter':30, 'Silver':60
salvageCost = {'Copper':5 , 'Runecrafter':30, 'Rare':60}
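#Illustrative sketch (not part of the original data): expected value of a single salvage,
#combining a droprate table above with a *hypothetical* price lookup (copper per material).
def expected_salvage_value(droprate, kit, prices):
    #expected material value minus the per-use kit cost; `prices` is an assumed input, not real data
    expected_materials = sum(rate * prices.get(material, 0) for material, rate in droprate[kit].items())
    return expected_materials - salvageCost[kit]
#e.g. expected_salvage_value(droprate_TornRag, 'Rare', {'Linen Scrap':120, 'Silk Scrap':250})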
#Containers
#defaulting to main ingots for refined to avoid problems. generate_multiplier will change as needed
unrefined_to_refined = {'Hardened Leather Section':'Cured Hardened Leather Square','Thick Leather Section':'Cured Thick Leather Square','Rugged Leather Section':'Cured Rugged Leather Square','Coarse Leather Section':'Cured Coarse Leather Square','Thin Leather Section':'Cured Thin Leather Square','Rawhide Leather Section':'Stretched Rawhide Leather Square',
'Copper Ore':'Copper Ingot','Silver Ore':'Silver Ingot','Iron Ore':'Iron Ingot','Gold Ore':'Gold Ingot','Platinum Ore':'Platinum Ingot','Mithril Ore':'Mithril Ingot','Orichalcum Ore':'Orichalcum Ingot',
'Jute Scrap':'Bolt of Jute','Wool Scrap':'Bolt of Wool','Cotton Scrap':'Bolt of Cotton','Linen Scrap':'Bolt of Linen','Silk Scrap':'Bolt of Silk','Gossamer Scrap':'Bolt of Gossamer',
'Green Wood Log':'Green Wood Plank','Soft Wood Log':'Soft Wood Plank','Seasoned Wood Log':'Seasoned Wood Plank','Hard Wood Log':'Hard Wood Plank','Elder Wood Log':'Elder Wood Plank','Ancient Wood Log':'Ancient Wood Plank'}
refined_scalar = {'Stretched Rawhide Leather Square':2,'Cured Thin Leather Square':2,'Cured Coarse Leather Square':2,'Cured Rugged Leather Square':2,'Cured Thick Leather Square':4,'Cured Hardened Leather Square':3,
'Copper Ingot':2,'Bronze Ingot':2,'Silver Ingot':2,'Iron Ingot':3,'Steel Ingot':3,'Gold Ingot':2,'Platinum Ingot':2,'Darksteel Ingot':2,'Mithril Ingot':2,'Orichalcum Ingot':2,
'Bolt of Jute':2,'Bolt of Wool':2,'Bolt of Cotton':2,'Bolt
1. `unit_id` - (randomization) unit id
1. `agg_type` - level of aggregation
1. `goal` - goal name
1. any number of dimensional columns, e.g. column `product` containing values `p_1`
1. `count` - number of observed goals
1. `sum_value` - value of observed goals
Returns:
set of dataframes with evaluation
Usage:
```python
from epstats.toolkit import Experiment, Metric, SrmCheck
experiment = Experiment(
'test-real-valued',
'a',
[Metric(
2,
'Average Bookings',
'value(test_unit_type.unit.conversion)',
'count(test_unit_type.unit.exposure)')
],
[],
unit_type='test_unit_type')
# This gets testing data, use other Dao or get aggregated goals in some other way.
from epstats.toolkit.testing import TestData
goals = TestData.load_goals_by_unit(experiment.id)
# evaluate experiment
ev = experiment.evaluate_by_unit(goals)
# work with results
print(ev.exposures)
print(ev.metrics[ev.metrics == 1])
print(ev.checks[ev.checks == 1])
# this is to assert that this code sample works correctly
from epstats.toolkit.testing import TestDao
assert_experiment(experiment, ev, TestDao(TestData()))
```
Input data frame example:
```
exp_id exp_variant_id unit_type unit_id agg_type goal product count sum_value
test-srm a test_unit_type test_unit_type_1 unit exposure 1 1
test-srm a test_unit_type test_unit_type_1 unit conversion product_1 2 75
test-srm b test_unit_type test_unit_type_2 unit exposure 1 1
test-srm b test_unit_type test_unit_type_3 unit exposure 1 1
test-srm b test_unit_type test_unit_type_3 unit conversion product_2 1 1
```
"""
g = self._fix_missing_by_unit(goals)
        # We need to pivot the table to get all goals per `unit_id` on the same row in the data frame.
        # This is needed to be able to vector-evaluate compound metrics,
        # e.g. `value(test_unit_type.unit.conversion) - value(test_unit_type.unit.refund)`
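        # Illustrative result of the pivot below (values taken from the docstring example above);
        # after swaplevel the columns form a (goal, measure) MultiIndex:
        #
        #   exp_id   exp_variant_id unit_type      agg_type unit_id           conversion        exposure
        #                                                                     count  sum_value  count  sum_value
        #   test-srm a              test_unit_type unit     test_unit_type_1  2      75         1      1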
g = (
pd.pivot_table(
g,
values=["count", "sum_value"],
index=[
"exp_id",
"exp_variant_id",
"unit_type",
"agg_type",
"unit_id",
]
+ self.get_dimension_columns(),
columns="goal",
aggfunc=np.sum,
fill_value=0,
)
.swaplevel(axis=1)
.reset_index()
)
return self._evaluate(
g,
Experiment._metrics_column_fce_by_unit,
Experiment._checks_fce_by_unit,
Experiment._exposures_fce_by_unit,
)
def get_goals(self) -> List[EpGoal]:
"""
List of all goals needed to evaluate all metrics and checks in the experiment.
Returns:
list of parsed structured goals
"""
res = set()
for m in self.metrics:
res = res.union(m.get_goals())
for c in self.checks:
res = res.union(c.get_goals())
res = res.union(self._exposure_goals)
return list(res)
@staticmethod
def _metrics_column_fce_agg(m: Metric, goals: pd.DataFrame):
"""
Gets count, sum_value, sum_sqr_value columns by expression from already aggregated goals.
"""
return m.get_evaluate_columns_agg(goals)
@staticmethod
def _metrics_column_fce_by_unit(m: Metric, goals: pd.DataFrame):
"""
Gets count, sum_value, sum_sqr_value columns by expression from goals grouped by `unit_id`.
"""
return m.get_evaluate_columns_by_unit(goals)
@staticmethod
def _checks_fce_agg(c: Check, goals: pd.DataFrame, control_variant: str):
"""
Evaluates checks from already aggregated goals.
"""
return c.evaluate_agg(goals, control_variant)
@staticmethod
def _checks_fce_by_unit(c: Check, goals: pd.DataFrame, control_variant: str):
"""
Evaluates checks from goals grouped by `unit_id`.
"""
return c.evaluate_by_unit(goals, control_variant)
@staticmethod
def _exposures_fce_agg(goals: pd.DataFrame, exp_id: str, unit_type: str):
"""
        Evaluates exposures from already aggregated goals.
"""
df = (
goals[(goals["unit_type"] == unit_type) & (goals["agg_type"] == "global") & (goals["goal"] == "exposure")]
.groupby("exp_variant_id")
.agg(exposures=("count", "sum"))
.reset_index()
)
df["exp_id"] = exp_id
return df
@staticmethod
def _exposures_fce_by_unit(goals: pd.DataFrame, exp_id: str, unit_type: str):
"""
        Evaluates exposures from goals grouped by `unit_id`.
"""
df = goals[(goals["unit_type"] == unit_type) & (goals["agg_type"] == "unit")][
[("exp_variant_id", ""), ("exposure", "count")]
]
df = df.droplevel(0, axis=1)
df.columns = ["exp_variant_id", "exposures"]
d = df.groupby("exp_variant_id").agg(exposures=("exposures", "sum")).reset_index()
d["exp_id"] = exp_id
return d
def _evaluate(self, goals: pd.DataFrame, metrics_column_fce, checks_fce, exposures_fce):
metrics = self._evaluate_metrics(goals, metrics_column_fce)
checks = self._evaluate_checks(goals, checks_fce)
exposures = self._evaluate_exposures(goals, exposures_fce)
return Evaluation(metrics, checks, exposures)
def _evaluate_exposures(self, goals: pd.DataFrame, exposures_fce) -> pd.DataFrame:
return exposures_fce(goals, self.id, self.unit_type)
def _evaluate_checks(self, goals: pd.DataFrame, check_fce) -> pd.DataFrame:
res = []
for c in self.checks:
try:
r = check_fce(c, goals, self.control_variant)
r["exp_id"] = self.id
res.append(r)
except Exception as e:
                self._logger.warning(f"Cannot evaluate check [{c.id}] in experiment [{self.id}] because of {e}")
self.statsd.incr("errors.check")
c = pd.concat(res, axis=1) if res != [] else pd.DataFrame([], columns=Evaluation.check_columns())
c["timestamp"] = round(get_utc_timestamp(datetime.now()).timestamp())
return c[Evaluation.check_columns()]
def get_dimension_columns(self) -> List[str]:
"""
Returns a list of all dimensions used in all metrics in the experiment.
"""
return list({d for g in self.get_goals() for d in g.dimension_to_value.keys()})
def _set_variants(self, goals):
# what variants and goals there should be from all the goals needed to evaluate all metrics
self.variants = (
self.variants
if self.variants is not None
else np.unique(np.append(goals["exp_variant_id"], self.control_variant))
)
def _fix_missing_agg(self, goals: pd.DataFrame) -> pd.DataFrame:
"""
Adds zero values for missing goals and variants that are needed for metric evaluation.
        Does this on a best-effort basis - fills in `count`, `sum_sqr_count`, `sum_value`, `sum_sqr_value` and `count_unique` with zeros.
"""
# what variants and goals there should be from all the goals needed to evaluate all metrics
self._set_variants(goals)
g = goals[goals.exp_variant_id.isin(self.variants)]
nvs = self.variants
ngs = self.get_goals()
# variants * goals is the number of variant x goals combinations we expect in the data
lnvs = len(nvs)
lngs = len(ngs)
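        # Illustrative layout: for variants ['a', 'b'] and goals [g1, g2], np.tile gives
        # exp_variant_id = [a, b, a, b] and np.repeat gives goal = [g1, g1, g2, g2],
        # i.e. exactly one zero row per (variant, goal) combination.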
# create zero data frame for all variants and goals
empty_df = pd.DataFrame(
{
"exp_id": self.id,
"exp_variant_id": np.tile(nvs, lngs),
"unit_type": np.repeat([g.unit_type for g in ngs], lnvs),
"agg_type": np.repeat([g.agg_type for g in ngs], lnvs),
"goal": np.repeat([g.goal for g in ngs], lnvs),
"count": 0,
"sum_sqr_count": 0,
"sum_value": 0,
"sum_sqr_value": 0,
"count_unique": 0,
}
)
for dimension in self.get_dimension_columns():
empty_df[dimension] = np.repeat([g.dimension_to_value.get(dimension, "") for g in ngs], lnvs)
# join to existing data and use zeros for only missing variants and goals
m = (
pd.concat([g, empty_df], axis=0)
.fillna({d: "" for d in self.get_dimension_columns()})
.groupby(
[
"exp_id",
"exp_variant_id",
"unit_type",
"agg_type",
"goal",
]
+ self.get_dimension_columns(),
# dropna=False,
)
.sum()
.reset_index()
)
return m
def _fix_missing_by_unit(self, goals: pd.DataFrame) -> pd.DataFrame:
"""
Adds zero values for missing goals and variants that are needed for metric evaluation.
        Does this on a best-effort basis - fills in `count` and `sum_value` with zeros.
"""
# what variants and goals there should be from all the goals needed to evaluate all metrics
self._set_variants(goals)
g = goals[goals.exp_variant_id.isin(self.variants)]
nvs = self.variants
ngs = self.get_goals()
# variants * goals is the number of variant x goals combinations we expect in the data
lnvs = len(nvs)
lngs = len(ngs)
# create zero data frame for all variants and goals
empty_df = pd.DataFrame(
{
"exp_id": self.id,
"exp_variant_id": np.tile(nvs, lngs),
"unit_type": np.repeat([g.unit_type for g in ngs], lnvs),
"agg_type": np.repeat([g.agg_type for g in ngs], lnvs),
"goal": np.repeat([g.goal for g in ngs], lnvs),
"unit_id": np.nan,
"count": 0,
"sum_value": 0,
}
)
for dimension in self.get_dimension_columns():
empty_df[dimension] = np.repeat([g.dimension_to_value.get(dimension, "") for g in ngs], lnvs)
# join to existing data and use zeros for only missing variants and goals
m = pd.concat([g, empty_df], axis=0).fillna({d: "" for d in self.get_dimension_columns()})
return m[
[
"exp_id",
"exp_variant_id",
"unit_type",
"agg_type",
"goal",
"unit_id",
"count",
"sum_value",
]
+ self.get_dimension_columns()
]
def _evaluate_metrics(self, goals: pd.DataFrame, column_fce) -> pd.DataFrame:
if not self.metrics:
return pd.DataFrame([], columns=Evaluation.metric_columns())
sts = []
for m in self.metrics:
count, sum_value, sum_sqr_value = column_fce(m, goals)
sts.append([count, sum_value, sum_sqr_value])
stats = np.array(sts).transpose(0, 2, 1)
metrics = stats.shape[0]
variants = stats.shape[1]
count = stats[:, :, 0]
sum_value = stats[:, :, 1]
sum_sqr_value = stats[:, :, 2]
with np.errstate(divide="ignore", invalid="ignore"):
# We fill in zeros, when goal data are missing for some variant.
# There could be division by zero here which is expected as we return
# nan or inf values to the caller.
mean = sum_value / count
std = np.sqrt((sum_sqr_value - sum_value * sum_value / count) / (count - 1))
# sequential testing correction
if self.date_from is not None and self.date_to is not None:
# Parameters
test_length = (self.date_to - self.date_from).days + 1 # test length in days
actual_day = (self.date_for - self.date_from).days + 1 # day(s) since beginning of the test
actual_day = min(actual_day, test_length) # actual day of evaluation must be in interval [1, test_length]
# confidence level adjustment - applied when actual_day < test_length (test is still running)
confidence_level = Statistics.obf_alpha_spending_function(self.confidence_level, test_length, actual_day)
else:
confidence_level = self.confidence_level # no change
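        # Illustrative example: for a 14-day test evaluated on day 7, the O'Brien-Fleming
        # spending function returns a stricter interim confidence level than the configured one;
        # once actual_day == test_length the evaluation falls back to self.confidence_level.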
stats = np.dstack((count, mean, std, sum_value, np.ones(count.shape) * confidence_level))
stats = np.dstack(
(
np.repeat([m.id for m in self.metrics], variants).reshape(metrics, variants, -1),
np.repeat([m.name for m in self.metrics], variants).reshape(metrics, variants, -1),
np.tile(goals["exp_variant_id"].unique(), metrics).reshape(metrics, variants, -1),
stats,
)
)
# dimensions of `stats` array: (metrics, variants, stats)
# elements of `stats` array: metrics_id, exp_variant_id, count, mean, std, sum_value, confidence_level
# hypothesis evaluation (standard way using t-test)
c = Statistics.ttest_evaluation(stats, self.control_variant)
        # multiple variants (comparisons) correction - applied when we have multiple treatment variants
import os
import glob
import shutil
# from scipy.ndimage import binary_fill_holes
import itertools
import numpy as np
import dippykit as dip
import matplotlib.pyplot as plt
from tqdm import tqdm
import cv2
from cv2 import INTER_AREA
from skimage import filters
from scipy.spatial import ConvexHull
from scipy.ndimage.morphology import binary_erosion
sin = lambda ang: np.sin(ang * np.pi / 180)
cos = lambda ang: np.cos(ang * np.pi / 180)
tan = lambda ang: sin(ang) / cos(ang)
default_frag_size = (128, 128)
dir_in = f'data/processed/full_vases/'
dir_out = f'data/processed/vase_fragment_dataset/'
if not os.path.isdir(dir_out):
os.mkdir(dir_out)
out_img = lambda img_id: f'{dir_out}/full_{img_id}.jpg'
out_frag = lambda img_id, n_frag: f'{dir_out}/frag_{img_id}_{n_frag}.jpg'
n_fragments = 10
_pix2pix_counter = 1
_pix2pix_marker_size = 5
_pix2pix_outsize = (256, 256)
_pix2pix_dir = 'data/processed/pix2pix_vase_fragments/train/'
out_pix2pix = lambda img_id, n_frag: f'{_pix2pix_dir}/frag_{img_id}_{n_frag}.jpg'
os.makedirs(_pix2pix_dir, exist_ok=True)
def contiguous(point, shape, range=1):
# p_x = [point[0]-1, point[0], point[0]+1]
# p_y = [point[1]-1, point[1], point[1]+1]
p_x = np.ones((2*range+1), dtype=np.int32)*point[0]
p_x += np.arange(-range, range+1)
p_y = np.ones((2*range+1), dtype=np.int32)*point[1]
p_y += np.arange(-range, range+1)
p_x = [p for p in p_x if 0 <= p < shape[0]]
p_y = [p for p in p_y if 0 <= p < shape[1]]
points = list(itertools.product(p_x, p_y))
return points
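# Example (illustrative): contiguous((5, 5), (10, 10)) returns all 9 points of the 3x3
# neighbourhood around (5, 5); near the border, out-of-range coordinates are simply dropped.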
def space_fill(img, start=None, ):
if start is None:
start = img.shape[0] // 2, img.shape[1] // 2
# mask = np.zeros(img.shape, dtype=int) - 1
# max_count = 10
# mask[start] = max_count
thresh = np.percentile(img, 95)
# mask2 = np.zeros(img.shape, dtype=bool)
# details = img > thresh
# mask2[details] = 1
mask = img > thresh
# trim = 25
# make this general to image shape, not just 512x512
trim = img.shape[0] // 20
mask[:trim, :] = 0
mask[-trim:, :] = 0
mask[:, :trim] = 0
mask[:, -trim:] = 0
mask_inds = np.argwhere(mask)
m_min = np.min(mask_inds[:, 0])
m_max = np.max(mask_inds[:, 0])
n_min = np.min(mask_inds[:, 1])
n_max = np.max(mask_inds[:, 1])
print(m_min, m_max, n_min, n_max)
return mask, m_min, m_max, n_min, n_max
# this takes awhile, I can do simpler
# max_range = 10
# final_mask = np.zeros(img.shape, dtype=bool)
# for m, n in np.ndindex(mask.shape):
# for i, j in contiguous((m, n), mask.shape, max_range):
# if mask[i, j]:
# final_mask[m, n] = 1
# break
# final_mask = binary_fill_holes(final_mask)
# return final_mask
def mark_image_box(img, m_min, m_max, n_min, n_max):
new_img = np.copy(img)
thick=5
for m in m_min, m_max:
# new_img[m-thick:m+thick, n_min:n_max, :] = (255, 0, 0)
new_img[m-thick:m+thick, n_min:n_max] = 255
for n in n_min, n_max:
# new_img[m_min:m_max, n-thick:n+thick, :] = (255, 0, 0)
new_img[m_min:m_max, n-thick:n+thick] = 255
return new_img
def fragment_slow(img, m_min, m_max, n_min, n_max, frag_size=default_frag_size):
if m_min > m_max - frag_size[0] or n_min > n_max - frag_size[1]:
return None, (0, 0)
# m_start = np.random.randint(m_min, m_max+1-frag_size)
# n_start = np.random.randint(n_min, n_max+1-frag_size)
# use normal dist, stdev range/2/2
norm_scale = 2
m_choices = np.arange(m_min, m_max+1-frag_size[0])
n_choices = np.arange(n_min, n_max+1-frag_size[1])
m_ind = np.random.normal(len(m_choices)/2, len(m_choices)/2/norm_scale)
n_ind = np.random.normal(len(n_choices)/2, len(n_choices)/2/norm_scale)
m_ind = round(m_ind)
n_ind = round(n_ind)
m_ind = 0 if m_ind < 0 else m_ind
m_ind = len(m_choices) - 1 if m_ind >= len(m_choices) else m_ind
n_ind = 0 if n_ind < 0 else n_ind
n_ind = len(n_choices) - 1 if n_ind >= len(n_choices) else n_ind
m_start = m_choices[m_ind]
n_start = n_choices[n_ind]
new_img = np.copy(img[m_start:m_start+frag_size[0], n_start:n_start+frag_size[1]])
n_cuts = np.random.choice([3, 4, 5], p=[.4, .4, .2])
angles = np.linspace(0, 360, n_cuts+1)[:-1]
# perturb each by a little
angles += np.random.uniform(0, 30/n_cuts, angles.shape)
# rotate all angles randomly
angles += np.random.random()*90/n_cuts
cut_start_m = np.random.randint(0, frag_size[0]//2)
cut_start_n = 0
cuts = [[cut_start_m, cut_start_n]]
for ang in angles:
cut_end_m1 = frag_size[0]-1 if np.sign(cos(ang)) > 0 else 0
cut_end_n1 = cut_start_n + (cut_end_m1-cut_start_m) * tan(ang)
cut_end_n2 = frag_size[1]-1 if np.sign(sin(ang)) > 0 else 0
cut_end_m2 = cut_start_m + (cut_end_n2-cut_start_n) / tan(ang)
# print()
# print('start', cut_start_m, cut_start_n)
# print('option 1', cut_end_m1, cut_end_n1)
# print('option 2', cut_end_m2, cut_end_n2)
if cut_end_n1 < 0 or cut_end_n1 >= frag_size[1]:
cut_end_n = cut_end_n2
cut_end_m = cut_end_m2
else:
cut_end_n = cut_end_n1
cut_end_m = cut_end_m1
cuts += [[cut_end_m, cut_end_n]]
cut_start_m = cut_end_m
cut_start_n = cut_end_n
mask = np.ones(new_img.shape, dtype=bool)
# print('angles', angles)
# print('cuts', cuts)
for n, (a, b) in enumerate(zip(cuts[:-1], cuts[1:])):
a, b = np.array(a), np.array(b)
# print(a, b)
ang = angles[n]
# plt.plot((a[0], b[0]), (a[1], b[1]), marker='o')
for t in np.linspace(0, 1, max(frag_size)*2):
p = t * a + (1 - t) * b
            img_ind = np.round(p).astype(int)
img_ind[img_ind < 0] = 0
# img_ind[img_ind > frag_size-1] = frag_size-1
img_ind[0] = min(img_ind[0], frag_size[0]-1)
img_ind[1] = min(img_ind[1], frag_size[1]-1)
end_m = frag_size[0]-1 if np.sign(sin(ang)) > 0 else 0
end_n = frag_size[0]-1 if np.sign(-cos(ang)) > 0 else 0
m_slice = slice(img_ind[0], end_m) \
if end_m > img_ind[0] else \
slice(end_m, img_ind[0])
n_slice = slice(img_ind[1], end_n) \
if end_n > img_ind[1] else \
slice(end_n, img_ind[1])
mask[m_slice, img_ind[1]] = 0
mask[img_ind[0], n_slice] = 0
# new_img[img_ind[0], img_ind[1], :] = (255, 0, 0)
new_img[~mask] = 255
# border is also funky, just trim it
new_img[0, :] = 255
new_img[-1, :] = 255
new_img[:, 0] = 255
new_img[:, -1] = 255
# plt.subplot(121)
# plt.imshow(new_img)
# plt.subplot(122)
# plt.imshow(new_img)
# plt.show()
return new_img, (m_start, n_start)
def fragment(img, m_min, m_max, n_min, n_max, frag_size=default_frag_size):
if m_min > m_max - frag_size[0] or n_min > n_max - frag_size[1]:
return None, (0, 0)
# m_start = np.random.randint(m_min, m_max+1-frag_size)
# n_start = np.random.randint(n_min, n_max+1-frag_size)
# use normal dist, stdev range/2/2
norm_scale = 2
m_choices = np.arange(m_min, m_max+1-frag_size[0])
n_choices = np.arange(n_min, n_max+1-frag_size[1])
m_ind = np.random.normal(len(m_choices)/2, len(m_choices)/2/norm_scale)
n_ind = np.random.normal(len(n_choices)/2, len(n_choices)/2/norm_scale)
m_ind = round(m_ind)
n_ind = round(n_ind)
m_ind = 0 if m_ind < 0 else m_ind
m_ind = len(m_choices) - 1 if m_ind >= len(m_choices) else m_ind
n_ind = 0 if n_ind < 0 else n_ind
n_ind = len(n_choices) - 1 if n_ind >= len(n_choices) else n_ind
m_start = m_choices[m_ind]
n_start = n_choices[n_ind]
new_img = np.copy(img[m_start:m_start+frag_size[0], n_start:n_start+frag_size[1]])
# random shapes method
# shape = np.random.choice(['triangle', 'rectangle', 'circle'])
# from skimage.draw import random_shapes
# result, shapes = random_shapes(new_img.shape, max_shapes=1,
# shape=shape, multichannel=False)
# print(shapes)
# mask = result != 255
# new_img[~mask] = 255
from skimage.draw import polygon
# generate 4 random points along perimeter of new_img
n_points = 4
# perimeter = 2*(frag_size[0]+frag_size[1])
perim_sizes = np.array([
frag_size[0],
frag_size[1],
frag_size[0],
frag_size[1],
])
perim_sum = np.array([
0,
frag_size[0],
frag_size[0]+frag_size[1],
2*frag_size[0]+frag_size[1],
])
offsets = np.random.random((n_points,))*perim_sizes
offsets = offsets + perim_sum + np.random.random()*frag_size[0]
def offset_to_xy(o):
# clean way, way below is equivalent but branchless
# if o < perim_sum[1]:
# return o, 0
# elif o < perim_sum[2]:
# return frag_size[0], o-perim_sum[1]
# elif o < perim_sum[3]:
# return frag_size[0] + (perim_sum[2]-o), frag_size[1]
# else:
# return 0, frag_size[1] + (perim_sum[3]-o)
# nvm this was hard
# return np.array((o, 0)) * (0 < o < perim_sum[1]) * \
# np.array((frag_size[0], o-perim_sum[1])) * (perim_sum[1] < o < perim_sum[2]) * \
# np.array((frag_size[0]+perim_sum[2]-o, frag_size[1])) * (perim_sum[2] < o < perim_sum[3]) * \
# np.array((0, frag_size[1] + perim_sum[3]-o)) * (perim_sum[3] < o)
# let me try splitting x and y
x = o * (o < perim_sum[1]) + \
frag_size[0] * np.logical_and(perim_sum[1] < o, o < perim_sum[2]) + \
(frag_size[0] + perim_sum[2] - o)*np.logical_and(perim_sum[2] < o, o < perim_sum[3])
y = (o-perim_sum[1]) * np.logical_and(perim_sum[1] < o, o < perim_sum[2]) + \
frag_size[1]*np.logical_and(perim_sum[2] < o, o < perim_sum[3]) + \
(frag_size[1] + perim_sum[3]-o)*(perim_sum[3] < o)
return np.stack((x, y), axis=1)
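    # Worked example (illustrative): with frag_size=(128, 128), perim_sum = [0, 128, 256, 384];
    # an offset of 200 lies on the second edge, so x = frag_size[0] = 128 and y = 200 - 128 = 72.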
# poly = np.array([offset_to_xy(o) for o in offsets])
poly = offset_to_xy(offsets) # branchless
mm, nn = polygon(poly[:, 0], poly[:, 1], new_img.shape)
# offsets = perim_sizes / 2 + perim_sum
# poly = np.array([offset_to_xy(o) for o in offsets])
# poly = offset_to_xy(offsets) # branchless
# mm, nn = polygon(poly[:, 0], poly[:, 1], new_img.shape)
# print(offsets)
# print(poly)
mask = np.zeros_like(new_img, dtype=bool)
mask[mm, nn] = 1
new_img[~mask] = 255
# border is also funky, just trim it
new_img[0, :] = 255
new_img[-1, :] = 255
new_img[:, 0] = 255
new_img[:, -1] = 255
# plt.subplot(121)
# plt.imshow(img)
# plt.subplot(122)
# plt.imshow(new_img)
# plt.show()
return new_img, (m_start, n_start)
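# Illustrative usage (assumes `img` is a 2-D grayscale array and a bounding box from space_fill):
#
#     frag, (m0, n0) = fragment(img, m_min, m_max, n_min, n_max, frag_size=(128, 128))
#     if frag is not None:
#         cv2.imwrite(out_frag(img_id, 0), frag)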
def main_biggan():
for f_img in glob.glob(dir_in + '/*'):
img = dip.imread(f_img)
img_id = int(os.path.split(f_img)[-1].split('.')[0])
print(f_img, img.shape)
if len(img.shape) == 3:
gray = np.mean(img, axis=-1)
else:
# gray = img
continue
grad = dip.transforms.edge_detect(gray)
        mask, m_min, m_max, n_min, n_max = space_fill(grad)
import datetime
import time
from typing import Any, Callable, Dict, Iterable, Optional, Tuple
import pendulum
import prefect
from prefect.client import Client
from prefect.core import Edge, Task
from prefect.engine.result import Result
from prefect.engine.runner import ENDRUN, call_state_handlers
from prefect.engine.state import Cached, ClientFailed, Failed, Queued, Retrying, State
from prefect.engine.task_runner import TaskRunner, TaskRunnerInitializeResult
from prefect.utilities.exceptions import VersionLockError
from prefect.utilities.executors import tail_recursive
class CloudTaskRunner(TaskRunner):
"""
TaskRunners handle the execution of Tasks and determine the State of a Task
before, during and after the Task is run.
In particular, through the TaskRunner you can specify the states of any upstream dependencies,
and what state the Task should be initialized with.
Args:
- task (Task): the Task to be run / executed
- state_handlers (Iterable[Callable], optional): A list of state change handlers
that will be called whenever the task changes state, providing an opportunity to
inspect or modify the new state. The handler will be passed the task runner
instance, the old (prior) state, and the new (current) state, with the following
signature: `state_handler(TaskRunner, old_state, new_state) -> State`; If multiple
functions are passed, then the `new_state` argument will be the result of the
previous handler.
- flow_result: the result instance configured for the flow (if any)
"""
def __init__(
self,
task: Task,
state_handlers: Iterable[Callable] = None,
flow_result: Result = None,
) -> None:
self.client = Client()
super().__init__(
task=task, state_handlers=state_handlers, flow_result=flow_result
)
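    # Example (illustrative only) of a state handler with the signature described above;
    # `my_task` is a placeholder Task:
    #
    #     def log_transitions(task_runner, old_state, new_state):
    #         task_runner.logger.info("%s -> %s", old_state, new_state)
    #         return new_state
    #
    #     CloudTaskRunner(task=my_task, state_handlers=[log_transitions])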
def call_runner_target_handlers(self, old_state: State, new_state: State) -> State:
"""
A special state handler that the TaskRunner uses to call its task's state handlers.
This method is called as part of the base Runner's `handle_state_change()` method.
Args:
- old_state (State): the old (previous) state
- new_state (State): the new (current) state
Returns:
- State: the new state
"""
raise_on_exception = prefect.context.get("raise_on_exception", False)
try:
new_state = super().call_runner_target_handlers(
old_state=old_state, new_state=new_state
)
# PrefectStateSignals are trapped and turned into States
except prefect.engine.signals.PrefectStateSignal as exc:
self.logger.info(
"{name} signal raised: {rep}".format(
name=type(exc).__name__, rep=repr(exc)
)
)
if raise_on_exception:
raise exc
new_state = exc.state
except Exception as exc:
msg = "Exception raised while calling state handlers: {}".format(repr(exc))
self.logger.exception(msg)
if raise_on_exception:
raise exc
new_state = Failed(msg, result=exc)
task_run_id = prefect.context.get("task_run_id")
version = prefect.context.get("task_run_version")
try:
cloud_state = new_state
state = self.client.set_task_run_state(
task_run_id=task_run_id,
version=version if cloud_state.is_running() else None,
state=cloud_state,
cache_for=self.task.cache_for,
)
except VersionLockError as exc:
state = self.client.get_task_run_state(task_run_id=task_run_id)
if state.is_running():
self.logger.debug(
"Version lock encountered and task {} is already in a running state.".format(
self.task.name
)
)
raise ENDRUN(state=state) from exc
self.logger.debug(
"Version lock encountered for task {}, proceeding with state {}...".format(
self.task.name, type(state).__name__
)
)
try:
new_state = state.load_result(self.result)
except Exception as exc_inner:
self.logger.debug(
"Error encountered attempting to load result for state of {} task...".format(
self.task.name
)
)
self.logger.error(repr(exc_inner))
raise ENDRUN(state=state) from exc_inner
except Exception as exc:
self.logger.exception(
"Failed to set task state with error: {}".format(repr(exc))
)
raise ENDRUN(state=ClientFailed(state=new_state)) from exc
if state.is_queued():
state.state = old_state # type: ignore
raise ENDRUN(state=state)
prefect.context.update(task_run_version=(version or 0) + 1)
return new_state
def initialize_run( # type: ignore
self, state: Optional[State], context: Dict[str, Any]
) -> TaskRunnerInitializeResult:
"""
Initializes the Task run by initializing state and context appropriately.
Args:
- state (Optional[State]): the initial state of the run
- context (Dict[str, Any]): the context to be updated with relevant information
Returns:
- tuple: a tuple of the updated state, context, and upstream_states objects
"""
# load task run info
try:
task_run_info = self.client.get_task_run_info(
flow_run_id=context.get("flow_run_id", ""),
task_id=context.get("task_id", ""),
map_index=context.get("map_index"),
)
# if state was provided, keep it; otherwise use the one from db
state = state or task_run_info.state # type: ignore
context.update(
task_run_id=task_run_info.id, # type: ignore
task_run_version=task_run_info.version, # type: ignore
)
except Exception as exc:
self.logger.exception(
"Failed to retrieve task state with error: {}".format(repr(exc))
)
if state is None:
state = Failed(
message="Could not retrieve state from Prefect Cloud",
result=exc,
)
raise ENDRUN(state=state) from exc
# we assign this so it can be shared with heartbeat thread
self.task_run_id = context.get("task_run_id", "") # type: str
context.update(checkpointing=True)
return super().initialize_run(state=state, context=context)
@call_state_handlers
def check_task_is_cached(self, state: State, inputs: Dict[str, Result]) -> State:
"""
Checks if task is cached in the DB and whether any of the caches are still valid.
Args:
- state (State): the current state of this task
- inputs (Dict[str, Result]): a dictionary of inputs whose keys correspond
to the task's `run()` arguments.
Returns:
- State: the state of the task after running the check
Raises:
- ENDRUN: if the task is not ready to run
"""
if state.is_cached() is True:
assert isinstance(state, Cached) # mypy assert
sanitized_inputs = {key: res.value for key, res in inputs.items()}
if self.task.cache_validator(
state, sanitized_inputs, prefect.context.get("parameters")
):
state = state.load_result(self.result)
return state
if self.task.cache_for is not None:
oldest_valid_cache = datetime.datetime.utcnow() - self.task.cache_for
cached_states = self.client.get_latest_cached_states(
task_id=prefect.context.get("task_id", ""),
cache_key=self.task.cache_key,
created_after=oldest_valid_cache,
)
if cached_states:
self.logger.debug(
"Task '{name}': {num} candidate cached states were found".format(
name=prefect.context.get("task_full_name", self.task.name),
num=len(cached_states),
)
)
for candidate_state in cached_states:
assert isinstance(candidate_state, Cached) # mypy assert
candidate_state.load_cached_results(inputs)
sanitized_inputs = {key: res.value for key, res in inputs.items()}
if self.task.cache_validator(
candidate_state,
sanitized_inputs,
prefect.context.get("parameters"),
):
try:
return candidate_state.load_result(self.result)
except Exception:
location = getattr(
candidate_state._result, "location", None
)
self.logger.warning(
f"Failed to load cached state data from {location}.",
exc_info=True,
)
self.logger.debug(
"Task '{name}': can't use cache because no candidate Cached states "
"were valid".format(
name=prefect.context.get("task_full_name", self.task.name)
)
)
else:
self.logger.debug(
"Task '{name}': can't use cache because no Cached states were found".format(
name=prefect.context.get("task_full_name", self.task.name)
)
)
return state
def load_results(
self, state: State, upstream_states: Dict[Edge, State]
) -> Tuple[State, Dict[Edge, State]]:
"""
Given the task's current state and upstream states, populates all relevant result
objects for this task run.
Args:
- state (State): the task's current state.
- upstream_states (Dict[Edge, State]): the upstream state_handlers
Returns:
- Tuple[State, dict]: a tuple of (state, upstream_states)
"""
upstream_results = {}
try:
if state.is_mapped():
# ensures mapped children are only loaded once
state = state.load_result(self.result)
for edge, upstream_state in upstream_states.items():
upstream_states[edge] = upstream_state.load_result(
edge.upstream_task.result or self.flow_result
)
if edge.key is not None:
upstream_results[edge.key] = (
edge.upstream_task.result or self.flow_result
)
state.load_cached_results(upstream_results)
return state, upstream_states
except Exception as exc:
new_state = Failed(
message=f"Failed to retrieve task results: {exc}", result=exc
)
final_state = self.handle_state_change(old_state=state, new_state=new_state)
raise ENDRUN(final_state) from exc
def set_task_run_name(self, task_inputs: Dict[str, Result]) -> None:
"""
Sets the name for this task run by calling the `set_task_run_name` mutation.
Args:
- task_inputs (Dict[str, Result]): a dictionary of inputs whose keys correspond
to the task's `run()` arguments.
"""
task_run_name = self.task.task_run_name
if task_run_name:
raw_inputs = {k: r.value for k, r in task_inputs.items()}
formatting_kwargs = {
**prefect.context.get("parameters", {}),
**prefect.context,
**raw_inputs,
}
if not isinstance(task_run_name, str):
task_run_name = task_run_name(**formatting_kwargs)
else:
task_run_name = task_run_name.format(**formatting_kwargs)
self.client.set_task_run_name(
task_run_id=self.task_run_id, name=task_run_name # type: ignore
)
@tail_recursive
def run(
self,
state: State = None,
upstream_states: Dict[Edge, State] = None,
context: Dict[str, Any] = None,
is_mapped_parent: bool = False,
) -> State:
"""
The main endpoint for TaskRunners. Calling this method will conditionally execute
`self.task.run` with any provided inputs, assuming the upstream dependencies are in a
        state which allows this Task to run. Additionally, this method will wait and perform
        Task retries which are scheduled for <= 10 minutes in the future.
Args:
- state (State, optional): initial `State` to begin task run from;
defaults to `Pending()`
- upstream_states (Dict[Edge, State]): a dictionary
representing the states of any tasks upstream of this one. The keys of the
dictionary should correspond to the edges leading to the task.
- context (dict, optional): prefect Context to use for execution
- is_mapped_parent (bool): a boolean indicating whether this task run is the run of
a parent mapped task
Returns:
- `State` object representing the final post-run state of the Task
"""
context = context or {}
with prefect.context(context):
end_state = super().run(
state=state,
upstream_states=upstream_states,
context=context,
is_mapped_parent=is_mapped_parent,
)
while (end_state.is_retrying() or end_state.is_queued()) and (
end_state.start_time <= pendulum.now("utc").add(minutes=10) # type: ignore
):
assert isinstance(end_state, (Retrying, Queued))
naptime = max(
(end_state.start_time - pendulum.now("utc")).total_seconds(), 0
)
for _ in range(int(naptime) // 30):
# send heartbeat every 30 seconds to let API know task run is still alive
self.client.update_task_run_heartbeat(
task_run_id=prefect.context.get("task_run_id")
)
naptime -= 30
# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VAT executor library."""
import json
from os import remove
from paramiko.ssh_exception import SSHException
from robot.api import logger
from resources.libraries.python.ssh import SSH, SSHTimeout
from resources.libraries.python.Constants import Constants
from resources.libraries.python.PapiHistory import PapiHistory
__all__ = ['VatExecutor']
def cleanup_vat_json_output(json_output, vat_name=None):
"""Return VAT JSON output cleaned from VAT clutter.
Clean up VAT JSON output from clutter like vat# prompts and such.
:param json_output: Cluttered JSON output.
:param vat_name: Name of the VAT script.
:type json_output: JSON
:type vat_name: str
:returns: Cleaned up output JSON string.
:rtype: JSON
"""
retval = json_output
clutter = ['vat#', 'dump_interface_table error: Misc']
if vat_name:
remote_file_path = '{0}/{1}/{2}'.format(Constants.REMOTE_FW_DIR,
Constants.RESOURCES_TPL_VAT,
vat_name)
clutter.append("{0}(2):".format(remote_file_path))
for garbage in clutter:
retval = retval.replace(garbage, '')
return retval
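# Example (illustrative): cleanup_vat_json_output('vat# [{"sw_if_index": 0}]\nvat# ')
# returns ' [{"sw_if_index": 0}]\n ', which json.loads() can then parse.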
def get_vpp_pid(node):
"""Get PID of running VPP process.
:param node: DUT node.
:type node: dict
:returns: PID of VPP process / List of PIDs if more VPP processes are
running on the DUT node.
:rtype: int or list
"""
import resources.libraries.python.DUTSetup as PidLib
pid = PidLib.DUTSetup.get_vpp_pid(node)
return pid
class VatExecutor(object):
"""Contains methods for executing VAT commands on DUTs."""
def __init__(self):
self._stdout = None
self._stderr = None
self._ret_code = None
self._script_name = None
def execute_script(self, vat_name, node, timeout=120, json_out=True,
copy_on_execute=False):
"""Execute VAT script on remote node, and store the result. There is an
option to copy script from local host to remote host before execution.
Path is defined automatically.
:param vat_name: Name of the vat script file. Only the file name of
the script is required, the resources path is prepended
automatically.
:param node: Node to execute the VAT script on.
:param timeout: Seconds to allow the script to run.
:param json_out: Require JSON output.
:param copy_on_execute: If true, copy the file from local host to remote
before executing.
:type vat_name: str
:type node: dict
:type timeout: int
:type json_out: bool
:type copy_on_execute: bool
:raises SSHException: If cannot open connection for VAT.
:raises SSHTimeout: If VAT execution is timed out.
:raises RuntimeError: If VAT script execution fails.
"""
ssh = SSH()
try:
ssh.connect(node)
except:
raise SSHException("Cannot open SSH connection to execute VAT "
"command(s) from vat script {name}"
.format(name=vat_name))
if copy_on_execute:
ssh.scp(vat_name, vat_name)
remote_file_path = vat_name
with open(vat_name, 'r') as vat_file:
for line in vat_file:
PapiHistory.add_to_papi_history(node,
line.replace('\n', ''),
papi=False)
else:
remote_file_path = '{0}/{1}/{2}'.format(Constants.REMOTE_FW_DIR,
Constants.RESOURCES_TPL_VAT,
vat_name)
cmd = "{vat_bin} {json} in {vat_path} script".format(
vat_bin=Constants.VAT_BIN_NAME,
json="json" if json_out is True else "",
vat_path=remote_file_path)
try:
ret_code, stdout, stderr = ssh.exec_command_sudo(cmd=cmd,
timeout=timeout)
except SSHTimeout:
logger.error("VAT script execution timeout: {0}".format(cmd))
raise
except:
raise RuntimeError("VAT script execution failed: {0}".format(cmd))
self._ret_code = ret_code
self._stdout = stdout
self._stderr = stderr
self._script_name = vat_name
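    # Illustrative usage (script name and node are placeholders):
    #
    #     executor = VatExecutor()
    #     executor.execute_script("show_version.vat", node, timeout=120, json_out=True)
    #     executor.script_should_have_passed()
    #     output = executor.get_script_stdout()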
def write_and_execute_script(self, node, tmp_fn, commands, timeout=300,
json_out=False):
"""Write VAT commands to the script, copy it to node and execute it.
:param node: VPP node.
:param tmp_fn: Path to temporary file script.
:param commands: VAT command list.
:param timeout: Seconds to allow the script to run.
:param json_out: Require JSON output.
:type node: dict
:type tmp_fn: str
:type commands: list
:type timeout: int
:type json_out: bool
"""
with open(tmp_fn, 'w') as tmp_f:
tmp_f.writelines(commands)
self.execute_script(tmp_fn, node, timeout=timeout, json_out=json_out,
copy_on_execute=True)
remove(tmp_fn)
def execute_script_json_out(self, vat_name, node, timeout=120):
"""Pass all arguments to 'execute_script' method, then cleanup returned
json output.
:param vat_name: Name of the vat script file. Only the file name of
the script is required, the resources path is prepended
automatically.
:param node: Node to execute the VAT script on.
:param timeout: Seconds to allow the script to run.
:type vat_name: str
:type node: dict
:type timeout: int
"""
self.execute_script(vat_name, node, timeout, json_out=True)
self._stdout = cleanup_vat_json_output(self._stdout, vat_name=vat_name)
def script_should_have_failed(self):
"""Read return code from last executed script and raise exception if the
script didn't fail."""
if self._ret_code is None:
raise Exception("First execute the script!")
if self._ret_code == 0:
raise AssertionError(
"VAT Script execution passed, but failure was expected: {cmd}"
.format(cmd=self._script_name))
def script_should_have_passed(self):
"""Read return code from last executed script and raise exception if the
script failed."""
if self._ret_code is None:
raise Exception("First execute the script!")
if self._ret_code != 0:
raise AssertionError(
"VAT Script execution failed, but success was expected: {cmd}"
.format(cmd=self._script_name))
def get_script_stdout(self):
"""Returns value of stdout from last executed script."""
return self._stdout
def get_script_stderr(self):
"""Returns value of stderr from last executed script."""
return self._stderr
@staticmethod
def cmd_from_template(node, vat_template_file, json_param=True, **vat_args):
"""Execute VAT script on specified node. This method supports
script templates with parameters.
        :param node: Node in topology on which the script is executed.
:param vat_template_file: Template file of VAT script.
:param vat_args: Arguments to the template file.
:returns: List of JSON objects returned by VAT.
"""
with VatTerminal(node, json_param=json_param) as vat:
return vat.vat_terminal_exec_cmd_from_template(vat_template_file,
**vat_args)
class VatTerminal(object):
"""VAT interactive terminal.
:param node: Node to open VAT terminal on.
:param json_param: Defines if outputs from VAT are in JSON format.
Default is True.
:type node: dict
:type json_param: bool
"""
__VAT_PROMPT = ("vat# ", )
__LINUX_PROMPT = (":~# ", ":~$ ", "~]$ ", "~]# ")
def __init__(self, node, json_param=True):
json_text = ' json' if json_param else ''
self.json = json_param
self._node = node
self._ssh = SSH()
self._ssh.connect(self._node)
try:
self._tty = self._ssh.interactive_terminal_open()
except Exception:
raise RuntimeError("Cannot open interactive terminal on node {0}".
format(self._node))
for _ in range(3):
try:
self._ssh.interactive_terminal_exec_command(
self._tty,
'sudo -S {0}{1}'.format(Constants.VAT_BIN_NAME, json_text),
self.__VAT_PROMPT)
except Exception:
continue
else:
break
else:
vpp_pid = get_vpp_pid(self._node)
if vpp_pid:
if isinstance(vpp_pid, int):
logger.trace("VPP running on node {0}".
format(self._node['host']))
else:
logger.error("More instances of VPP running on node {0}.".
format(self._node['host']))
else:
logger.error("VPP not running on node {0}.".
format(self._node['host']))
raise RuntimeError("Failed to open VAT console on node {0}".
format(self._node['host']))
self._exec_failure = False
self.vat_stdout = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.vat_terminal_close()
def vat_terminal_exec_cmd(self, cmd):
"""Execute command on the opened VAT terminal.
:param cmd: Command to be executed.
:returns: Command output in python representation of JSON format or
None if not in JSON mode.
"""
PapiHistory.add_to_papi_history(self._node, cmd, papi=False)
logger.debug("Executing command in VAT terminal: {0}".format(cmd))
try:
out = self._ssh.interactive_terminal_exec_command(self._tty, cmd,
self.__VAT_PROMPT)
self.vat_stdout = out
except Exception:
self._exec_failure = True
vpp_pid = get_vpp_pid(self._node)
if vpp_pid:
if isinstance(vpp_pid, int):
raise RuntimeError("VPP running on node {0} but VAT command"
" {1} execution failed.".
format(self._node['host'], cmd))
else:
raise RuntimeError("More instances of VPP running on node "
"{0}. VAT command {1} execution failed.".
format(self._node['host'], cmd))
else:
raise RuntimeError("VPP not running on node {0}. VAT command "
"{1} execution failed.".
format(self._node['host'], cmd))
logger.debug("VAT output: {0}".format(out))
if self.json:
obj_start = out.find('{')
obj_end = out.rfind('}')
array_start = out.find('[')
array_end = out.rfind(']')
if obj_start == -1 and array_start == -1:
raise RuntimeError("VAT command {0}: no JSON data.".format(cmd))
if obj_start < array_start or array_start == -1:
start = obj_start
end = obj_end + 1
else:
start = array_start
end = array_end + 1
out = out[start:end]
json_out = json.loads(out)
return json_out
else:
return None
def vat_terminal_close(self):
"""Close VAT terminal."""
# interactive terminal is dead, we only need to close session
if not self._exec_failure:
try:
self._ssh.interactive_terminal_exec_command(self._tty,
'quit',
self.__LINUX_PROMPT)
except Exception:
vpp_pid = get_vpp_pid(self._node)
if vpp_pid:
if isinstance(vpp_pid, int):
logger.trace("VPP running on node {0}.".
format(self._node['host']))
else:
logger.error("More instances of VPP running on node "
"{0}.".format(self._node['host']))
else:
logger.error("VPP not running on node {0}.".
format(self._node['host']))
raise RuntimeError("Failed to close VAT console on node {0}".
format(self._node['host']))
try:
self._ssh.interactive_terminal_close(self._tty)
except:
raise RuntimeError("Cannot close interactive terminal on node {0}".
format(self._node['host']))
def vat_terminal_exec_cmd_from_template(self, vat_template_file, **args):
"""Execute VAT script from a file.
:param vat_template_file: Template file name of a VAT script.
:param args: Dictionary of parameters for VAT script.
:returns: List of JSON objects returned by VAT.
"""
file_path = '{}/{}'.format(Constants.RESOURCES_TPL_VAT,
vat_template_file)
with open(file_path, 'r') as template_file:
cmd_template = template_file.readlines()
        # execute each rendered template line on the open VAT terminal and collect the outputs
        ret = []
        for line_tmpl in cmd_template:
            vat_cmd = line_tmpl.format(**args)
            ret.append(self.vat_terminal_exec_cmd(vat_cmd.replace('\n', '')))
        return ret
from __future__ import annotations
import collections
import inspect
from dataclasses import dataclass
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
from flytekit.common import constants as _common_constants
from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException
from flytekit.core.base_task import PythonTask
from flytekit.core.class_based_resolver import ClassStorageTaskResolver
from flytekit.core.condition import ConditionalSection
from flytekit.core.context_manager import (
BranchEvalMode,
CompilationState,
ExecutionState,
FlyteContext,
FlyteContextManager,
FlyteEntities,
)
from flytekit.core.interface import (
Interface,
transform_inputs_to_parameters,
transform_interface_to_typed_interface,
transform_signature_to_interface,
)
from flytekit.core.launch_plan import LaunchPlan
from flytekit.core.node import Node
from flytekit.core.promise import (
NodeOutput,
Promise,
VoidPromise,
binding_from_python_std,
create_and_link_node,
create_native_named_tuple,
create_task_output,
translate_inputs_to_literals,
)
from flytekit.core.python_auto_container import PythonAutoContainerTask
from flytekit.core.reference_entity import ReferenceEntity, WorkflowReference
from flytekit.core.type_engine import TypeEngine
from flytekit.loggers import logger
from flytekit.models import interface as _interface_models
from flytekit.models import literals as _literal_models
from flytekit.models.core import workflow as _workflow_model
GLOBAL_START_NODE = Node(
id=_common_constants.GLOBAL_INPUT_NODE_ID,
metadata=None,
bindings=[],
upstream_nodes=[],
flyte_entity=None,
)
class WorkflowFailurePolicy(Enum):
FAIL_IMMEDIATELY = _workflow_model.WorkflowMetadata.OnFailurePolicy.FAIL_IMMEDIATELY
FAIL_AFTER_EXECUTABLE_NODES_COMPLETE = (
_workflow_model.WorkflowMetadata.OnFailurePolicy.FAIL_AFTER_EXECUTABLE_NODES_COMPLETE
)
@dataclass
class WorkflowMetadata(object):
on_failure: WorkflowFailurePolicy
def __post_init__(self):
if (
self.on_failure != WorkflowFailurePolicy.FAIL_IMMEDIATELY
and self.on_failure != WorkflowFailurePolicy.FAIL_AFTER_EXECUTABLE_NODES_COMPLETE
):
raise FlyteValidationException(f"Failure policy {self.on_failure} not acceptable")
def to_flyte_model(self):
if self.on_failure == WorkflowFailurePolicy.FAIL_IMMEDIATELY:
on_failure = 0
else:
on_failure = 1
return _workflow_model.WorkflowMetadata(on_failure=on_failure)
@dataclass
class WorkflowMetadataDefaults(object):
"""
This class is similarly named to the one above. Please see the IDL for more information but essentially, this
WorkflowMetadataDefaults class represents the defaults that are handed down to a workflow's tasks, whereas
WorkflowMetadata represents metadata about the workflow itself.
"""
interruptible: bool
def __post_init__(self):
if self.interruptible is not True and self.interruptible is not False:
raise FlyteValidationException(f"Interruptible must be boolean, {self.interruptible} invalid")
def to_flyte_model(self):
return _workflow_model.WorkflowMetadataDefaults(interruptible=self.interruptible)
def construct_input_promises(inputs: List[str]):
return {
input_name: Promise(var=input_name, val=NodeOutput(node=GLOBAL_START_NODE, var=input_name))
for input_name in inputs
}
def get_promise(binding_data: _literal_models.BindingData, outputs_cache: Dict[Node, Dict[str, Promise]]) -> Promise:
"""
This is a helper function that will turn a binding into a Promise object, using a lookup map. Please see
get_promise_map for the rest of the details.
"""
if binding_data.promise is not None:
if not isinstance(binding_data.promise, NodeOutput):
raise FlyteValidationException(
f"Binding data Promises have to be of the NodeOutput type {type(binding_data.promise)} found"
)
# b.var is the name of the input to the task
# binding_data.promise.var is the name of the upstream node's output we want
return outputs_cache[binding_data.promise.node][binding_data.promise.var]
elif binding_data.scalar is not None:
return Promise(var="placeholder", val=_literal_models.Literal(scalar=binding_data.scalar))
elif binding_data.collection is not None:
literals = []
for bd in binding_data.collection.bindings:
p = get_promise(bd, outputs_cache)
literals.append(p.val)
return Promise(
var="placeholder",
val=_literal_models.Literal(collection=_literal_models.LiteralCollection(literals=literals)),
)
elif binding_data.map is not None:
literals = {}
for k, bd in binding_data.map.bindings.items():
p = get_promise(bd, outputs_cache)
literals[k] = p.val
return Promise(
var="placeholder", val=_literal_models.Literal(map=_literal_models.LiteralMap(literals=literals))
)
raise FlyteValidationException("Binding type unrecognized.")
def get_promise_map(
bindings: List[_literal_models.Binding], outputs_cache: Dict[Node, Dict[str, Promise]]
) -> Dict[str, Promise]:
"""
Local execution of imperatively defined workflows is done node by node. This function will fill in the node's
entity's input arguments, which are specified using the bindings list, and a map of nodes to its outputs.
Basically this takes the place of propeller in resolving bindings, pulling in outputs from previously completed
nodes and filling in the necessary inputs.
"""
entity_kwargs = {}
for b in bindings:
entity_kwargs[b.var] = get_promise(b.binding, outputs_cache)
return entity_kwargs
class WorkflowBase(object):
def __init__(
self,
name: str,
workflow_metadata: WorkflowMetadata,
workflow_metadata_defaults: WorkflowMetadataDefaults,
python_interface: Interface,
**kwargs,
):
self._name = name
self._workflow_metadata = workflow_metadata
self._workflow_metadata_defaults = workflow_metadata_defaults
self._python_interface = python_interface
self._interface = transform_interface_to_typed_interface(python_interface)
self._inputs = {}
self._unbound_inputs = set()
self._nodes = []
self._output_bindings: Optional[List[_literal_models.Binding]] = []
FlyteEntities.entities.append(self)
super().__init__(**kwargs)
@property
def name(self) -> str:
return self._name
@property
def short_name(self) -> str:
return self._name.split(".")[-1]
@property
def workflow_metadata(self) -> Optional[WorkflowMetadata]:
return self._workflow_metadata
@property
def workflow_metadata_defaults(self):
return self._workflow_metadata_defaults
@property
def python_interface(self) -> Interface:
return self._python_interface
@property
def interface(self) -> _interface_models.TypedInterface:
return self._interface
@property
def output_bindings(self) -> List[_literal_models.Binding]:
return self._output_bindings
@property
def nodes(self) -> List[Node]:
return self._nodes
def __repr__(self):
return (
f"WorkflowBase - {self._name} && "
f"Inputs ({len(self._python_interface.inputs)}): {self._python_interface.inputs} && "
f"Outputs ({len(self._python_interface.outputs)}): {self._python_interface.outputs} && "
f"Output bindings: {self._output_bindings} && "
)
def __call__(self, *args, **kwargs):
"""
The call pattern for Workflows is close to, but not exactly, the call pattern for Tasks. For local execution,
it goes
__call__ -> _local_execute -> execute
From execute, different things happen for the two Workflow styles. For PythonFunctionWorkflows, the Python
function is run, for the ImperativeWorkflow, each node is run one at a time.
"""
if len(args) > 0:
raise AssertionError("Only Keyword Arguments are supported for Workflow executions")
ctx = FlyteContextManager.current_context()
# Get default arguments and override with kwargs passed in
input_kwargs = self.python_interface.default_inputs_as_kwargs
input_kwargs.update(kwargs)
# The first condition is compilation.
if ctx.compilation_state is not None:
return create_and_link_node(ctx, entity=self, interface=self.python_interface, **input_kwargs)
# This condition is hit when this workflow (self) is being called as part of a parent's workflow local run.
# The context specifying the local workflow execution has already been set.
elif (
ctx.execution_state is not None and ctx.execution_state.mode == ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION
):
if ctx.execution_state.branch_eval_mode == BranchEvalMode.BRANCH_SKIPPED:
if self.python_interface and self.python_interface.output_tuple_name:
variables = [k for k in self.python_interface.outputs.keys()]
output_tuple = collections.namedtuple(self.python_interface.output_tuple_name, variables)
nones = [None for _ in self.python_interface.outputs.keys()]
return output_tuple(*nones)
else:
return None
# We are already in a local execution, just continue the execution context
return self._local_execute(ctx, **input_kwargs)
# Last is starting a local workflow execution
else:
# Run some sanity checks
# Even though the _local_execute call generally expects inputs to be Promises, we don't have to do the
# conversion here in this loop. This is because we don't prevent users from specifying inputs
# as direct scalars, which means there's another Promise-generating loop inside _local_execute too
for k, v in input_kwargs.items():
if k not in self.interface.inputs:
raise ValueError(f"Received unexpected keyword argument {k}")
if isinstance(v, Promise):
raise ValueError(f"Received a promise for a workflow call, when expecting a native value for {k}")
with FlyteContextManager.with_context(
ctx.with_execution_state(
ctx.new_execution_state().with_params(mode=ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION)
)
) as child_ctx:
result = self._local_execute(child_ctx, **input_kwargs)
expected_outputs = len(self.python_interface.outputs)
if expected_outputs == 0:
if result is None or isinstance(result, VoidPromise):
return None
else:
raise Exception(f"Workflow local execution expected 0 outputs but something received {result}")
if (1 < expected_outputs == len(result)) or (result is not None and expected_outputs == 1):
return create_native_named_tuple(ctx, result, self.python_interface)
raise ValueError("expected outputs and actual outputs do not match")
def execute(self, **kwargs):
raise Exception("Should not be called")
def _local_execute(self, ctx: FlyteContext, **kwargs) -> Union[Tuple[Promise], Promise, VoidPromise]:
# This is done to support the invariant that Workflow local executions always work with Promise objects
# holding Flyte literal values. Even in a wf, a user can call a sub-workflow with a Python native value.
for k, v in kwargs.items():
if not isinstance(v, Promise):
t = self.python_interface.inputs[k]
kwargs[k] = Promise(var=k, val=TypeEngine.to_literal(ctx, v, t, self.interface.inputs[k].type))
# The output of this will always be a combination of Python native values and Promises containing Flyte
# Literals.
function_outputs = self.execute(**kwargs)
# First handle the empty return case.
# A workflow function may return a task that doesn't return anything
# def wf():
# return t1()
# or it may not return at all
# def wf():
# t1()
# In the former case we get the task's VoidPromise, in the latter we get None
if isinstance(function_outputs, VoidPromise) or function_outputs is None:
if len(self.python_interface.outputs) != 0:
raise FlyteValueException(
function_outputs,
f"{function_outputs} received but interface has {len(self.python_interface.outputs)} outputs.",
)
return VoidPromise(self.name)
# Because we should've already returned in the above check, we just raise an error here.
if len(self.python_interface.outputs) == 0:
raise FlyteValueException(
function_outputs, f"{function_outputs} received but should've been VoidPromise or None."
)
expected_output_names = list(self.python_interface.outputs.keys())
if len(expected_output_names) == 1:
# Here we have to handle the fact that the wf could've been declared with a typing.NamedTuple of
# length one. That convention is used for naming outputs - and single-length-NamedTuples are
# particularly troublesome but elegant handling of them is not a high priority
# Again, we're using the output_tuple_name as a proxy.
if self.python_interface.output_tuple_name and isinstance(function_outputs, tuple):
wf_outputs_as_map = {expected_output_names[0]: function_outputs[0]}
else:
wf_outputs_as_map = {expected_output_names[0]: function_outputs}
else:
wf_outputs_as_map = {expected_output_names[i]: function_outputs[i] for i, _ in enumerate(function_outputs)}
# Basically we need to repackage the promises coming from the tasks into Promises that match the workflow's
# interface. We do that by extracting out the literals, and creating new Promises
wf_outputs_as_literal_dict = translate_inputs_to_literals(
ctx,
wf_outputs_as_map,
flyte_interface_types=self.interface.outputs,
native_types=self.python_interface.outputs,
)
# Recreate new Promises, named according to the workflow's output interface, from those literals.
new_promises = [Promise(var, wf_outputs_as_literal_dict[var]) for var in expected_output_names]
return create_task_output(new_promises, self.python_interface)
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from __future__ import unicode_literals
from django.db import models
from django.utils.safestring import mark_safe
from django.core.validators import MinValueValidator
from enumfields import EnumField
from enumfields import Enum # Uses <NAME>'s "enum34" backport
from django.urls import reverse # for get_absolute_url
from django.contrib import admin
import os
class Chirality(Enum):
R = 'right'
S = 'left'
NONE = 'none'
class ComponentType(Enum):
CC = 'CC'
LIGAND = 'Ligand'
MOF = 'Mof'
REACTANT = 'Reactant'
PRODUCT = 'Product'
class LigandCategory(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100, blank=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('ligandcategory.views.details', args=[str(self.id)])
def get_admin_url(self):
return reverse("admin:%s_%s_change" % (self._meta.app_label, self._meta.model_name), args=(self.id,))
class Meta:
verbose_name = "LigandCategory"
verbose_name_plural = "LigandCategories"
class ReactionCategory(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100, blank=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('reactioncategory.views.details', args=[str(self.id)])
def get_admin_url(self):
return reverse("admin:%s_%s_change" % (self._meta.app_label, self._meta.model_name), args=(self.id,))
class Meta:
verbose_name = "ReactionCategory"
verbose_name_plural = "ReactionCategories"
class DataType(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=45, blank=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('datatype.views.details', args=[str(self.id)])
def get_admin_url(self):
return reverse("admin:%s_%s_change" % (self._meta.app_label, self._meta.model_name), args=(self.id,))
class Meta:
db_table = 'DataType'
class FunctionalGroup(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100, blank=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('functionalgroup.views.details', args=[str(self.id)])
def get_admin_url(self):
return reverse("admin:%s_%s_change" % (self._meta.app_label, self._meta.model_name), args=(self.id,))
class Meta:
db_table = 'FunctionalGroup'
class ChemicalCompound(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, blank=True)
nick = models.CharField(max_length=100, blank=True)
formula = models.CharField(max_length=100, blank=True)
synthesis = models.TextField(blank=True)
analysis = models.TextField(blank=True)
mass = models.FloatField(
validators=[MinValueValidator(0.0)],
blank=True, null=True)
functional_group = models.ForeignKey(
FunctionalGroup,
on_delete=models.DO_NOTHING,
related_name='chemicalcompound',
blank=True, null=True)
chirality = EnumField(Chirality, max_length=5, blank=True, null=True)
date_last_modified = models.DateTimeField(auto_now=True)
date_creation = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('chemicalcompound.views.details', args=[str(self.id)])
def get_admin_url(self):
return reverse("admin:%s_%s_change" % (self._meta.app_label, self._meta.model_name), args=(self.id,))
class Meta:
db_table = 'ChemicalCompound'
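# Hedged usage sketch (not part of the generated models): creating a compound with the
# optional chirality enum. All field values below are illustrative assumptions.
def _example_create_compound():
    return ChemicalCompound.objects.create(
        name="Acetic acid (example)",
        nick="ex-acid",
        formula="C2H4O2",
        mass=60.05,
        chirality=Chirality.NONE,
    )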
class BaseLigand(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, blank=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('baseligand.views.details', args=[str(self.id)])
def get_admin_url(self):
return reverse("admin:%s_%s_change" % (self._meta.app_label, self._meta.model_name), args=(self.id,))
class Meta:
db_table = 'BaseLigand'
class Ligand(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, blank=True)
nick = models.CharField(max_length=100, blank=True)
formula = models.CharField(max_length=100, blank=True)
synthesis = models.TextField(blank=True)
analysis = models.TextField(blank=True)
mass = models.FloatField(
validators=[MinValueValidator(0.0)],
blank=True, null=True)
category = models.ForeignKey(
LigandCategory,
on_delete=models.DO_NOTHING,
related_name='ligands',
blank=True, null=True)
functional_group = models.ForeignKey(
FunctionalGroup,
on_delete=models.DO_NOTHING,
related_name='ligands',
blank=True, null=True)
chirality = EnumField(Chirality, max_length=5, blank=True, null=True)
connections = models.PositiveIntegerField(blank=True, null=True)
base_ligand = models.ForeignKey(
BaseLigand,
on_delete=models.DO_NOTHING,
db_column='base_ligand',
related_name='ligands',
blank=True, null=True)
date_last_modified = models.DateTimeField(auto_now=True)
date_creation = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('ligand.views.details', args=[str(self.id)])
def get_admin_url(self):
return reverse("admin:%s_%s_change" % (self._meta.app_label, self._meta.model_name), args=(self.id,))
@property
def category_name(self):
return self.category.__str__
@property
def functional_group_name(self):
return self.functional_group.__str__
@property
def base_ligand_name(self):
return self.base_ligand.__str__
class Meta:
db_table = 'Ligand'
class Mof(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, blank=True)
nick = models.CharField(max_length=100, blank=True)
formula = models.CharField(max_length=100, blank=True)
synthesis = models.TextField(blank=True)
analysis = models.TextField(blank=True)
mass = models.FloatField(
validators=[MinValueValidator(0.0)],
blank=True, null=True)
topology = models.CharField(max_length=10, blank=True)
ligands = models.ManyToManyField(
Ligand,
through='MofLigand',
through_fields=('mof', 'ligand'),
related_name='mofs'
)
date_last_modified = models.DateTimeField(auto_now=True)
date_creation = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('mof.views.details', args=[str(self.id)])
def get_admin_url(self):
return reverse("admin:%s_%s_change" % (self._meta.app_label, self._meta.model_name), args=(self.id,))
@property
def all_ligands(self):
return ', '.join([a.nick for a in self.ligands.all()])
class Meta:
db_table = 'Mof'
verbose_name = "MOF"
verbose_name_plural = "MOFs"
class MofLigand(models.Model):
id = models.AutoField(primary_key=True)
mof = models.ForeignKey(
Mof,
on_delete=models.DO_NOTHING)
ligand = models.ForeignKey(
Ligand,
on_delete=models.DO_NOTHING)
def __str__(self):
return str(self.id)
@property
def mof_name(self):
return self.mof.name
@property
def ligand_name(self):
return self.ligand.name
@property
def ligand_nick(self):
return self.ligand.nick
@property
def ligand_functional_group(self):
return self.ligand.functional_group.name
class Meta:
db_table = 'Mof_Ligand'
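# Hedged usage sketch (not part of the generated models): the Mof.ligands many-to-many
# goes through the explicit MofLigand model above, so a link can be created directly on
# the through table. The mof and ligand instances are assumed to already exist.
def _example_link_mof_ligand(mof, ligand):
    return MofLigand.objects.create(mof=mof, ligand=ligand)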
class Reaction(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, blank=True)
notes = models.TextField(blank=True)
category = models.ForeignKey(
ReactionCategory,
on_delete=models.DO_NOTHING,
related_name='reactions',
blank=True, null=True)
catalysts_cc = models.ManyToManyField(
ChemicalCompound,
through='ReactionCatalystCC',
through_fields=('reaction', 'component'),
related_name='reaction_catalysts',
blank=True,
)
catalysts_ligand = models.ManyToManyField(
Ligand,
through='ReactionCatalystLigand',
through_fields=('reaction', 'component'),
related_name='reaction_catalysts',
blank=True,
)
catalysts_mof = models.ManyToManyField(
Mof,
through='ReactionCatalystMof',
through_fields=('reaction', 'component'),
related_name='reaction_catalysts',
blank=True,
)
reactants = models.ManyToManyField(
ChemicalCompound,
through='ReactionReactant',
through_fields=('reaction', 'component'),
related_name='reaction_reactants',
blank=True,
)
products = models.ManyToManyField(
ChemicalCompound,
through='ReactionProduct',
through_fields=('reaction', 'component'),
related_name='reaction_products',
blank=True,
)
date_last_modified = models.DateTimeField(auto_now=True)
date_creation = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('reaction.views.details', args=[str(self.id)])
def get_admin_url(self):
return reverse("admin:%s_%s_change" % (self._meta.app_label, self._meta.model_name), args=(self.id,))
@property
def all_catalysts_cc(self):
return ', '.join([a.nick for a in self.catalysts_cc.all()])
@property
def all_catalysts_ligand(self):
return ', '.join([a.nick for a in self.catalysts_ligand.all()])
@property
def all_catalysts_mof(self):
return ', '.join([a.nick for a in self.catalysts_mof.all()])
@property
def all_reactants(self):
return ', '.join([a.nick for a in self.reactants.all()])
@property
def all_products(self):
return ', '.join([a.nick for a in self.products.all()])
class Meta:
db_table = 'Reaction'
# Attachment without any classification, accepts more or less everything.
# Optional short_description.
# Dev extra info:
# All models have a one-to-many relationship with attachments.
# Django has many-to-one, but not one-to-many:
# to model this in Django we create a foreign key from the attachment to the model.
# We create classes that inherit from Attachment.
# (In the backend Django creates a one-to-one relationship between Attachment and AttachmentXXX.)
class Attachment(models.Model):
id = models.AutoField(primary_key=True)
file = models.FileField(upload_to="attachment/")
description_short = models.CharField(max_length=200, blank=True,
verbose_name="Short Description")
#position field (used for ordering in grappelli)
position = models.PositiveSmallIntegerField("Position", null=True)
class Meta:
ordering = ['position']
@property
def basename(self):
return os.path.basename(self.file.name)
class AttachmentReaction(Attachment):
reaction = models.ForeignKey('Reaction',
related_name='attachments',
on_delete=models.CASCADE) # Delete the attachment if reaction is deleted.
class AttachmentMof(Attachment):
mof = models.ForeignKey('Mof',
related_name='attachments',
on_delete=models.CASCADE) # Delete the attachment if mof is deleted.
class AttachmentLigand(Attachment):
ligand = models.ForeignKey('Ligand',
related_name='attachments',
on_delete=models.CASCADE) # Delete the attachment if ligand is deleted.
class AttachmentChemicalCompound(Attachment):
chemicalcompound = models.ForeignKey('ChemicalCompound',
related_name='attachments',
on_delete=models.CASCADE) # Delete the attachment if chemicalcompound is deleted.
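# Hedged usage sketch (not part of the generated models): each Attachment subclass points
# back at its owning model, so attaching a file to a reaction is a plain create() on
# AttachmentReaction. The description and position values are illustrative assumptions.
def _example_attach_to_reaction(reaction, uploaded_file):
    return AttachmentReaction.objects.create(
        reaction=reaction,
        file=uploaded_file,
        description_short="Spectrum scan (example)",
        position=1,
    )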
class ReactionData(models.Model):
id = models.AutoField(primary_key=True)
reaction = models.ForeignKey(
Reaction,
on_delete=models.DO_NOTHING,
blank=True, null=True)
data_type = models.ForeignKey(
DataType,
on_delete=models.DO_NOTHING,
blank=True)
data_file = models.FileField(upload_to="reaction_data/")
def __str__(self):
return str(self.id)
class Meta:
db_table = 'ReactionData'
class ReactionCatalystCC(models.Model):
id = models.AutoField(primary_key=True)
reaction = models.ForeignKey(
Reaction,
on_delete=models.DO_NOTHING)
component = models.ForeignKey(
ChemicalCompound,
on_delete=models.DO_NOTHING)
rate_constant = models.FloatField(blank=True, null=True)
conversion = models.FloatField(blank=True, null=True)
ee = models.FloatField(blank=True, null=True)
de = models.FloatField(blank=True, null=True)
# yield renamed because it was a Python reserved word.
yield_field = models.FloatField(db_column='yield', blank=True, null=True)
amount = models.FloatField(blank=True, null=True)
notes = models.TextField(blank=True)
def __str__(self):
return str(self.id)
@property
def component_type(self):
return ComponentType.CC.label
@property
def reaction_name(self):
return self.reaction.name
@property
def reaction_category(self):
return self.reaction.category
@property
def component_name(self):
return self.component.name
@property
def component_nick(self):
return self.component.nick
@property
def component_functional_group(self):
return self.component.functional_group.name
@property
def component_chirality(self):
return self.component.chirality
@property
def component_url(self):
return self.component.get_absolute_url()
class Meta:
db_table = 'Reaction_Catalyst_CC'
class ReactionCatalystLigand(models.Model):
id = models.AutoField(primary_key=True)
reaction = models.ForeignKey(
Reaction,
on_delete=models.DO_NOTHING)
component = models.ForeignKey(
Ligand,
on_delete=models.DO_NOTHING)
rate_constant = models.FloatField(blank=True, null=True)
conversion = models.FloatField(blank=True, null=True)
ee = models.FloatField(blank=True, null=True)
de = models.FloatField(blank=True, null=True)
# yield renamed because it was a Python reserved word.
yield_field = models.FloatField(db_column='yield', blank=True, null=True)
amount = models.FloatField(blank=True, null=True)
notes = models.TextField(blank=True)
def __str__(self):
return str(self.id)
@property
def component_type(self):
return ComponentType.LIGAND.label
@property
def reaction_name(self):
return self.reaction.name
@property
def reaction_category(self):
return self.reaction.category
@property
def component_name(self):
return self.component.name
@property
def component_nick(self):
return self.component.nick
@property
def component_functional_group(self):
return self.component.functional_group.name
@property
def component_chirality(self):
return self.component.chirality
@property
def component_url(self):
return self.component.get_absolute_url()
class Meta:
db_table = 'Reaction_Catalyst_Ligand'
class ReactionCatalystMof(models.Model):
id = models.AutoField(primary_key=True)
reaction = models.ForeignKey(
Reaction,
on_delete=models.DO_NOTHING)
component = models.ForeignKey(
Mof,
on_delete=models.DO_NOTHING)
rate_constant = models.FloatField(blank=True, null=True)
conversion = models.FloatField(blank=True, null=True)
ee = models.FloatField(blank=True, null=True)
de = models.FloatField(blank=True, null=True)
# yield renamed because it was a Python reserved word.
yield_field = models.FloatField(db_column='yield', blank=True, null=True)
amount = models.FloatField(blank=True, null=True)
notes = models.TextField(blank=True)
def __str__(self):
return str(self.id)
@property
def component_type(self):
return ComponentType.MOF.label
@property
def reaction_name(self):
return self.reaction.name
@property
def reaction_category(self):
return self.reaction.category
@property
def component_name(self):
return self.component.name
@property
def component_nick(self):
return self.component.nick
@property
def component_url(self):
return self.component.get_absolute_url()
class Meta:
db_table = 'Reaction_Catalyst_Mof'
class ReactionProduct(models.Model):
id = models.AutoField(primary_key=True)
reaction = models.ForeignKey(
Reaction,
on_delete=models.DO_NOTHING)
component = models.ForeignKey(
ChemicalCompound,
on_delete=models.DO_NOTHING)
def __str__(self):
return str(self.id)
@property
def component_type(self):
return ComponentType.PRODUCT.label
@property
def reaction_name(self):
return self.reaction.name
@property
def reaction_category(self):
return self.reaction.category.__str__
@property
def component_name(self):
return self.component.name
@property
def component_nick(self):
return self.component.nick
@property
def component_functional_group(self):
return self.component.functional_group.name
@property
def component_chirality(self):
return self.component.chirality
@property
def component_url(self):
return self.component.get_absolute_url()
# Source repository: WDZRMPCBIT/SCoRE
from collections import defaultdict
# We use "Number" in a bunch of places throughout to try to generalize ints and floats.
# Unfortunately, mypy doesn't like this very much, so we have to "type: ignore" a bunch of things.
# But it makes for a nicer induced grammar, so it's worth it.
from numbers import Number
from typing import Dict, List, NamedTuple, Set, Tuple
import logging
import re
from allennlp.semparse.domain_languages.domain_language import (DomainLanguage, ExecutionError,
PredicateType, predicate)
from allennlp.semparse.contexts.table_question_knowledge_graph import MONTH_NUMBERS
from allennlp.semparse.contexts import TableQuestionContext
from ..context.table_question_context import Date
from ..evaluator import target_values_map, check_denotation, to_value_list, tsv_unescape_list
logger = logging.getLogger("root") # pylint: disable=invalid-name
class Row(NamedTuple):
# Maps column names to cell values
values: Dict[str, str]
class Column(NamedTuple):
name: str
class StringColumn(Column):
pass
class ComparableColumn(Column):
pass
class DateColumn(ComparableColumn):
pass
class NumberColumn(ComparableColumn):
pass
class WikiTableAbstractLanguage(DomainLanguage):
# pylint: disable=too-many-public-methods,no-self-use
"""
Derived from the WikiTables language, but supports extracting sketches.
"""
def __init__(self, table_context: TableQuestionContext) -> None:
super().__init__(start_types={Number, Date, List[str]})
self.table_context = table_context
self.table_data = [Row(row) for row in table_context.table_data]
# if the first cell of the last row contains "total", drop that summary row
_name = f"string_column:{table_context.column_index_to_name[0]}"
if _name in table_context.table_data[-1] and "total" in table_context.table_data[-1][_name]:
self.table_data.pop()
column_types = table_context.column_types
if "string" in column_types:
self.add_predicate('filter_in', self.filter_in)
self.add_predicate('filter_not_in', self.filter_not_in)
if "date" in column_types:
self.add_predicate('filter_date_greater', self.filter_date_greater)
self.add_predicate('filter_date_greater_equals', self.filter_date_greater_equals)
self.add_predicate('filter_date_lesser', self.filter_date_lesser)
self.add_predicate('filter_date_lesser_equals', self.filter_date_lesser_equals)
self.add_predicate('filter_date_equals', self.filter_date_equals)
self.add_predicate('filter_date_not_equals', self.filter_date_not_equals)
if "number" in column_types or "num2" in column_types:
self.add_predicate('filter_number_greater', self.filter_number_greater)
self.add_predicate('filter_number_greater_equals', self.filter_number_greater_equals)
self.add_predicate('filter_number_lesser', self.filter_number_lesser)
self.add_predicate('filter_number_lesser_equals', self.filter_number_lesser_equals)
self.add_predicate('filter_number_equals', self.filter_number_equals)
self.add_predicate('filter_number_not_equals', self.filter_number_not_equals)
self.add_predicate('max', self.max)
self.add_predicate('min', self.min)
self.add_predicate('average', self.average)
self.add_predicate('sum', self.sum)
self.add_predicate('diff', self.diff)
if "date" in column_types or "number" in column_types or "num2" in column_types:
self.add_predicate('argmax', self.argmax)
self.add_predicate('argmin', self.argmin)
# Adding entities and numbers seen in questions as constants.
for entity in table_context._entity2id:
self.add_constant(entity, entity)
for number in table_context._num2id:
self.add_constant(str(number), float(number), type_=Number)
for date_str in table_context._date2id:
date_obj = Date.make_date(date_str)
self.add_constant(date_str, date_obj, type_=Date)
self.table_graph = table_context.get_table_knowledge_graph()
# Adding column names as constants. Each column gets added once for every
# type in the hierarchy going from its concrete class to the base Column. String columns
# get added as StringColumn and Column, and date and number columns get added as DateColumn
# (or NumberColumn), ComparableColumn, and Column.
for column_name, column_types in table_context.column2types.items():
for column_type in column_types:
typed_column_name = f"{column_type}_column:{column_name}"
column: Column = None
if column_type == 'string':
column = StringColumn(typed_column_name)
elif column_type == 'date':
column = DateColumn(typed_column_name)
self.add_constant(typed_column_name, column, type_=ComparableColumn)
elif column_type in ['number', 'num2']:
column = NumberColumn(typed_column_name)
self.add_constant(typed_column_name, column, type_=ComparableColumn)
self.add_constant(typed_column_name, column, type_=Column)
self.add_constant(typed_column_name, column)
column_type_name = str(PredicateType.get_type(type(column)))
def get_nonterminal_productions(self) -> Dict[str, List[str]]:
"""
Grammar induction may fail when certain non-terminals are not present,
but we still need these non-terminals during search.
TODO: this function includes some hotfixes.
"""
production_dict = super(WikiTableAbstractLanguage, self).get_nonterminal_productions()
# TODO: hotfix add str entry if is missing
if "str" not in production_dict:
production_dict["str"] = []
if "Date" not in production_dict:
production_dict["Date"] = []
if "Number" not in production_dict:
production_dict["Number"] = []
_a = "List[str] -> [<Row,Column:List[str]>, Row, Column]"
if _a not in production_dict["List[str]"]:
production_dict["List[str]"].append(_a)
_b = "<Row,Column:List[str]> -> select"
if "<Row,Column:List[str]>" not in production_dict:
production_dict["<Row,Column:List[str]>"] = [_b]
return production_dict
@staticmethod
def _get_sketch_productions(actions: Dict) -> Dict:
"""
v1: sketch operations do not include filter or date; columns are replaced with a placeholder
v2: sketch only replaces columns and entities such as numbers and date functions
"""
new_action_dic = defaultdict(list)
pruned = []
for non_terminal, productions in actions.items():
if non_terminal in ["Column", "StringColumn", "NumberColumn",
"DateColumn", "ComparableColumn", "Date", "str"]:
place_holder_prod = f"{non_terminal} -> #PH#"
new_action_dic[non_terminal].append(place_holder_prod)
elif non_terminal in ["Number"]:
new_prod_list = []
for prod in actions[non_terminal]:
_, rhs = prod.split(" -> ")
try: # if this is a terminal
float(rhs)
place_holder_prod = f"{non_terminal} -> #PH#"
new_prod_list.append(place_holder_prod)
except:
new_prod_list.append(prod)
new_action_dic[non_terminal] = new_prod_list
elif non_terminal == "List[Row]":
new_prod_list = []
place_holder_prod = f"{non_terminal} -> #PH#"
new_prod_list.append(place_holder_prod)
# same as
# new_prod_list.append("List[Row] -> [<Row,Column:List[Row]>, Row, Column]")
new_action_dic[non_terminal] = new_prod_list
else:
new_action_dic[non_terminal] = actions[non_terminal]
return new_action_dic
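# Illustrative example of the transformation above (not from the original source):
# an induced action dict entry such as
#     {"StringColumn": ["StringColumn -> string_column:year"]}
# becomes
#     {"StringColumn": ["StringColumn -> #PH#"]}
# so the sketch keeps the operator structure while abstracting away the concrete column.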
@staticmethod
def _get_slot_productions(actions: Dict) -> Dict:
"""
filling slots of sketches
"""
new_action_dic = defaultdict(list)
pruned = []
for non_terminal, productions in actions.items():
if non_terminal in ["Column", "StringColumn", "NumberColumn",
"DateColumn", "ComparableColumn", "str", "Date"]:
new_action_dic[non_terminal] = actions[non_terminal]
elif non_terminal in ["Number"]:
new_prod_list = []
for prod in actions[non_terminal]:
_, rhs = prod.split(" -> ")
try: # if this is a terminal
float(rhs)
new_prod_list.append(prod)
except:
pass
new_action_dic[non_terminal] = new_prod_list
elif non_terminal == "List[Row]":
new_action_dic[non_terminal] = actions[non_terminal][:]
# same as
# new_action_dic[non_terminal].remove('List[Row] -> [<Row,Column:List[Row]>, Row, Column]')
for prod in new_action_dic[non_terminal]:
_, rhs = prod.split(" -> ")
_t = rhs[1:-1].split(", ")[0]
if _t in actions:
new_action_dic[_t] = actions[_t][:]
return new_action_dic
@staticmethod
def get_slot_dict(actions: List) -> Dict:
"""
Slot_dict: action_index to its type
"""
slot_dict = dict()
for action_ind, action in enumerate(actions):
lhs, rhs = action.split(" -> ")
if lhs in ["Column", "StringColumn", "NumberColumn", "ComparableColumn",
"DateColumn", "str", "Number", "Date"] and rhs == "#PH#":
slot_dict[action_ind] = lhs
elif lhs == "List[Row]" and rhs == "#PH#":
slot_dict[action_ind] = lhs
return slot_dict
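# For instance (illustrative, not from the original source), given the action sequence
#     ["Column -> #PH#", "Number -> 3", "List[Row] -> #PH#"]
# the returned slot_dict would be {0: "Column", 2: "List[Row]"}.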
def evaluate_logical_form(self,
logical_form: str,
target_value: List[str],
target_canon: List[str]) -> bool:
"""
Taken from Chen's script
"""
target_value_strings = tsv_unescape_list(target_value)
normalized_target_value_strings = [ TableQuestionContext.normalize_string(value)
for value in target_value_strings]
canon_value_strings = tsv_unescape_list(target_canon)
target_value_list = to_value_list(normalized_target_value_strings, canon_value_strings)
try:
denotation = self.execute(logical_form)
except ExecutionError:
logger.warning(f'Failed to execute: {logical_form}')
return False
except Exception as ex:
err_template = "Exception of type {0} occurred. Arguments:\n{1!r}"
message = err_template.format(type(ex).__name__, ex.args)
logger.warning(f'{message}')
return False
if isinstance(denotation, list):
denotation_list = [str(denotation_item) for denotation_item in denotation]
else:
denotation_list = [str(denotation)]
denotation_value_list = to_value_list(denotation_list)
return check_denotation(target_value_list, denotation_value_list)
# Things below here are language predicates, until you get to private methods. We start with
# general predicates that are always included in the language, then move to
# column-type-specific predicates, which only get added if we see columns of particular types
# in the table.
@predicate
def all_rows(self) -> List[Row]:
return self.table_data
@predicate
def select(self, rows: List[Row], column: Column) -> List[str]:
"""
Select function takes a list of rows and a column and returns a list of cell values as
strings.
"""
if isinstance(rows, list):
return [row.values[column.name] for row in rows]
else:
# could also be a row, adhoc added
return rows.values[column.name]
# remove it for now, same as returns a list of rows;
# but most of the time, it's not helping
# @predicate
def same_as(self, src_column: StringColumn, filter_value: str, column: Column) -> List[Row]:
"""
Filters the table to the rows where ``src_column`` equals ``filter_value``, then returns every
row from the full set of rows that has the same value under ``column`` as the first matching row.
"""
rows = self.filter_in(self.all_rows(), src_column, filter_value)
if len(rows) == 0:
raise ExecutionError("same as gets zero rows")
row = rows[0]
cell_value = row.values[column.name]
return_list = []
for table_row in self.table_data:
if table_row.values[column.name] == cell_value:
return_list.append(table_row)
return return_list
# @predicate
# remove it for now, use corenlp date annotations
def date(self, year: Number, month: Number, day: Number) -> Date:
"""
Takes three numbers and returns a ``Date`` object whose year, month, and day are the three
numbers in that order.
"""
return Date(year, month, day) # type: ignore
@predicate
def first(self, rows: List[Row]) -> Row:
"""
Takes an expression that evaluates to a list of rows, and returns the first one in that
list.
"""
if not rows:
# logger.warning("Trying to get first row from an empty list")
raise ExecutionError("first gets no rows")
return rows[0]
@predicate
def last(self, rows: List[Row]) -> Row:
"""
Takes an expression that evaluates to a list of rows, and returns the last one in that
list.
"""
if not rows:
# logger.warning("Trying to get first row from an empty list")
raise ExecutionError("last gets no rows")
elif len(rows) == 1:
raise ExecutionError("use first instead!")
return rows[-1]
@predicate
def previous(self, row: Row) -> Row:
"""
Takes an expression that evaluates to a single row, and returns the row that occurs before
the input row in the original set of rows. If the input row happens to be the top row, we
will return an empty list.
"""
if not row:
raise ExecutionError("previous gets no row")
# The MIT License (MIT)
#
# Copyright (C) 2016 - <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from . import event as core_event
from functools import partial
import babeltrace as bt
import enum
class InvalidPeriodDefinition(Exception):
pass
# period definition registry, owner of the whole tree of periods
class PeriodDefinitionRegistry:
def __init__(self):
self._root_period_defs = set()
self._named_period_defs = {}
# name to hierarchy
self._full_period_path = {}
def period_full_path(self, name):
return self._full_period_path[name]
def has_period_def(self, name):
return name in self._named_period_defs
def add_full_period_path(self, period_name, parent_name):
period_path = [period_name]
period_path_str = ""
if parent_name is None:
self._full_period_path[period_name] = period_name
return
parent = self.get_period_def(parent_name)
while parent is not None:
period_path.append(parent.name)
parent = parent.parent
period_path.reverse()
for i in period_path:
if len(period_path_str) == 0:
period_path_str = i
else:
period_path_str = "%s/%s" % (period_path_str, i)
self._full_period_path[period_name] = period_path_str
def add_period_def(self, parent_name, period_name, begin_expr, end_expr,
begin_captures_exprs, end_captures_exprs):
# validate unique period name (if named)
if self.has_period_def(period_name):
raise InvalidPeriodDefinition('Cannot redefine period "{}"'.format(
period_name))
# validate that parent exists if it's set
if parent_name is not None and not self.has_period_def(parent_name):
fmt = 'Cannot find parent period named "{}" (as parent of ' \
'period "{}")'
msg = fmt.format(parent_name, period_name)
raise InvalidPeriodDefinition(msg)
# create period, and associate parent and children
parent = None
if parent_name is not None:
parent = self.get_period_def(parent_name)
period_def = PeriodDefinition(parent, period_name, begin_expr,
end_expr, begin_captures_exprs,
end_captures_exprs)
if parent is not None:
parent.children.add(period_def)
# validate new period definition
PeriodDefinitionValidator(period_def)
if period_def.parent is None:
self._root_period_defs.add(period_def)
if period_def.name is not None:
self._named_period_defs[period_def.name] = period_def
self.add_full_period_path(period_name, parent_name)
def get_period_def(self, name):
return self._named_period_defs.get(name)
@property
def root_period_defs(self):
for period_def in self._root_period_defs:
yield period_def
@property
def named_period_defs(self):
return self._named_period_defs
@property
def is_empty(self):
return len(self._root_period_defs) == 0 and \
len(self._named_period_defs) == 0
# definition of a period
class PeriodDefinition:
def __init__(self, parent, name, begin_expr, end_expr,
begin_captures_exprs, end_captures_exprs):
self._parent = parent
self._children = set()
self._name = name
self._begin_expr = begin_expr
self._end_expr = end_expr
self._begin_captures_exprs = begin_captures_exprs
self._end_captures_exprs = end_captures_exprs
@property
def name(self):
return self._name
@property
def parent(self):
return self._parent
@property
def begin_expr(self):
return self._begin_expr
@property
def end_expr(self):
return self._end_expr
@property
def begin_captures_exprs(self):
return self._begin_captures_exprs
@property
def end_captures_exprs(self):
return self._end_captures_exprs
@property
def children(self):
return self._children
class _Expression:
pass
class _BinaryExpression(_Expression):
def __init__(self, lh_expr, rh_expr):
self._lh_expr = lh_expr
self._rh_expr = rh_expr
@property
def lh_expr(self):
return self._lh_expr
@property
def rh_expr(self):
return self._rh_expr
class _UnaryExpression(_Expression):
def __init__(self, expr):
self._expr = expr
@property
def expr(self):
return self._expr
class LogicalNot(_UnaryExpression):
def __repr__(self):
return '!({})'.format(self.expr)
class LogicalAnd(_BinaryExpression):
def __repr__(self):
return '({} && {})'.format(self.lh_expr, self.rh_expr)
class LogicalOr(_BinaryExpression):
def __repr__(self):
return '({} || {})'.format(self.lh_expr, self.rh_expr)
class GlobEq(_BinaryExpression):
def __init__(self, lh_expr, rh_expr):
super().__init__(lh_expr, rh_expr)
self._compile()
def _compile(self):
import fnmatch
import re
pattern = self.rh_expr.value
regex = fnmatch.translate(pattern)
self._regex = re.compile(regex)
@property
def regex(self):
return self._regex
def __repr__(self):
return '({} =* {})'.format(self.lh_expr, self.rh_expr)
class Eq(_BinaryExpression):
def __repr__(self):
return '({} == {})'.format(self.lh_expr, self.rh_expr)
class Lt(_BinaryExpression):
def __repr__(self):
return '({} < {})'.format(self.lh_expr, self.rh_expr)
class LtEq(_BinaryExpression):
def __repr__(self):
return '({} <= {})'.format(self.lh_expr, self.rh_expr)
class Gt(_BinaryExpression):
def __repr__(self):
return '({} > {})'.format(self.lh_expr, self.rh_expr)
class GtEq(_BinaryExpression):
def __repr__(self):
return '({} >= {})'.format(self.lh_expr, self.rh_expr)
class Number(_Expression):
def __init__(self, value):
self._value = value
@property
def value(self):
return self._value
def __repr__(self):
return '{}'.format(self.value)
class String(_Expression):
def __init__(self, value):
self._value = value
@property
def value(self):
return self._value
def __repr__(self):
return '"{}"'.format(self.value)
@enum.unique
class DynScope(enum.Enum):
AUTO = 'auto'
TPH = '$pkt_header'
SPC = '$pkt_ctx'
SEH = '$header'
SEC = '$stream_ctx'
EC = '$ctx'
EP = '$payload'
class _SingleChildNode(_Expression):
def __init__(self, child):
self._child = child
@property
def child(self):
return self._child
class ParentScope(_SingleChildNode):
def __repr__(self):
return '$parent.{}'.format(self.child)
class BeginScope(_SingleChildNode):
def __repr__(self):
return '$begin.{}'.format(self.child)
class EventScope(_SingleChildNode):
def __repr__(self):
return '$evt.{}'.format(self.child)
class DynamicScope(_SingleChildNode):
def __init__(self, dyn_scope, child):
super().__init__(child)
self._dyn_scope = dyn_scope
@property
def dyn_scope(self):
return self._dyn_scope
def __repr__(self):
if self._dyn_scope == DynScope.AUTO:
return repr(self.child)
return '{}.{}'.format(self.dyn_scope.value, self.child)
class EventFieldName(_Expression):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
def __repr__(self):
return self._name
class EventName(_Expression):
def __repr__(self):
return '$name'
class IllegalExpression(Exception):
pass
class PeriodDefinitionValidator:
def __init__(self, period_def):
self._period_def = period_def
self._validate_expr_cbs = {
LogicalNot: self._validate_unary_expr,
LogicalAnd: self._validate_binary_expr,
LogicalOr: self._validate_binary_expr,
GlobEq: self._validate_comp,
Eq: self._validate_comp,
Lt: self._validate_comp,
LtEq: self._validate_comp,
Gt: self._validate_comp,
GtEq: self._validate_comp,
ParentScope: self._validate_parent_scope,
}
self._validate_expr(period_def.begin_expr)
self._validate_expr(period_def.end_expr)
def _validate_unary_expr(self, not_expr):
self._validate_expr(not_expr.expr)
def _validate_binary_expr(self, and_expr):
self._validate_expr(and_expr.lh_expr)
self._validate_expr(and_expr.rh_expr)
def _validate_parent_scope(self, scope):
if self._period_def.parent is None:
raise IllegalExpression('Cannot refer to parent context without '
'a named parent period')
if type(scope.child) is not BeginScope:
raise IllegalExpression('Must refer to the begin context in a '
'parent context')
self._validate_expr(scope.child)
def _validate_comp(self, comp_expr):
self._validate_expr(comp_expr.lh_expr)
self._validate_expr(comp_expr.rh_expr)
def _validate_expr(self, expr):
if type(expr) in self._validate_expr_cbs:
self._validate_expr_cbs[type(expr)](expr)
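# Hedged usage sketch (not from the original file): registering a root period whose begin
# and end are driven by event-name comparisons. The event names and the empty capture
# expression containers are assumptions chosen only for illustration.
def _example_register_period(registry):
    begin = Eq(EventScope(EventName()), String("sched_switch"))
    end = Eq(EventScope(EventName()), String("sched_wakeup"))
    registry.add_period_def(parent_name=None, period_name="demo",
                            begin_expr=begin, end_expr=end,
                            begin_captures_exprs=[], end_captures_exprs=[])
    return registry.get_period_def("demo")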
class _MatchContext:
def __init__(self, evt, begin_evt, parent_begin_evt):
self._evt = evt
self._begin_evt = begin_evt
self._parent_begin_evt = parent_begin_evt
@property
def evt(self):
return self._evt
@property
def begin_evt(self):
return self._begin_evt
@property
def parent_begin_evt(self):
return self._parent_begin_evt
_DYN_SCOPE_TO_BT_CTF_SCOPE = {
DynScope.TPH: bt.CTFScope.TRACE_PACKET_HEADER,
DynScope.SPC: bt.CTFScope.STREAM_PACKET_CONTEXT,
DynScope.SEH: bt.CTFScope.STREAM_EVENT_HEADER,
DynScope.SEC: bt.CTFScope.STREAM_EVENT_CONTEXT,
DynScope.EC: bt.CTFScope.EVENT_CONTEXT,
DynScope.EP: bt.CTFScope.EVENT_FIELDS,
}
def _resolve_event_expr(event, expr):
# event not found
if event is None:
return
# event name
if type(expr.child) is EventName:
return event.name
# default, automatic dynamic scope
dyn_scope = DynScope.AUTO
if type(expr.child) is DynamicScope:
# select specific dynamic scope
expr = expr.child
dyn_scope = expr.dyn_scope
if type(expr.child) is EventFieldName:
expr = expr.child
if dyn_scope == DynScope.AUTO:
# automatic dynamic scope
if expr.name in event:
return event[expr.name]
# event field not found
return
# specific dynamic scope
bt_ctf_scope = _DYN_SCOPE_TO_BT_CTF_SCOPE[dyn_scope]
return event.field_with_scope(expr.name, bt_ctf_scope)
assert(False)
# This exquisite function takes an expression and resolves it to
# an actual value (Python's number/string) considering the current
# matching context.
def _resolve_expr(expr, match_context):
if type(expr) is ParentScope:
begin_scope = expr.child
event_scope = begin_scope.child
return _resolve_event_expr(match_context.parent_begin_evt, event_scope)
if type(expr) is BeginScope:
# event in the begin context
event_scope = expr.child
return _resolve_event_expr(match_context.begin_evt, event_scope)
if type(expr) is EventScope:
# current event
return _resolve_event_expr(match_context.evt, expr)
if type(expr) is Number:
return expr.value
if type(expr) is String:
return expr.value
assert(False)
class _Matcher:
def __init__(self, expr, match_context):
self._match_context = match_context
self._expr_matchers = {
LogicalAnd: self._and_expr_matches,
LogicalOr: self._or_expr_matches,
LogicalNot: self._not_expr_matches,
GlobEq: self._glob_eq_expr_matches,
Eq: partial(self._comp_expr_matches, lambda lh, rh: lh == rh),
Lt: partial(self._comp_expr_matches, lambda lh, rh: lh < rh),
LtEq: partial(self._comp_expr_matches, lambda lh, rh: lh <= rh),
Gt: partial(self._comp_expr_matches, lambda lh, rh: lh > rh),
GtEq: partial(self._comp_expr_matches, lambda lh, rh: lh >= rh),
}
self._matches = self._expr_matches(expr)
def _and_expr_matches(self, expr):
return (self._expr_matches(expr.lh_expr) and
self._expr_matches(expr.rh_expr))
def _or_expr_matches(self, expr):
return (self._expr_matches(expr.lh_expr) or
self._expr_matches(expr.rh_expr))
def _not_expr_matches(self, expr):
return not self._expr_matches(expr.expr)
def _glob_eq_expr_matches(self, expr):
def compfn(lh, rh):
return bool(expr.regex.match(lh))
return self._comp_expr_matches(compfn, expr)
def _comp_expr_matches(self, compfn, expr):
lh_value = _resolve_expr(expr.lh_expr, self._match_context)
rh_value = _resolve_expr(expr.rh_expr, self._match_context)
# make sure both sides are found
if lh_value is None or rh_value is None:
return False
# cast RHS to int if LHS is an int
if type(lh_value) is int and type(rh_value) is float:
rh_value = int(rh_value)
# compare types first
if type(lh_value) is not type(rh_value):
return False
# compare field to a literal value
return compfn(lh_value, rh_value)
def _expr_matches(self, expr):
return self._expr_matchers[type(expr)](expr)
@property
def matches(self):
return self._matches
def _expr_matches(expr, match_context):
return _Matcher(expr, match_context).matches
def create_conjunction_from_exprs(exprs):
if len(exprs) == 0:
return
cur_expr = exprs[0]
for expr in exprs[1:]:
cur_expr = LogicalAnd(cur_expr, expr)
return cur_expr
def create_disjunction_from_exprs(exprs):
if len(exprs) == 0:
return
cur_expr = exprs[0]
for expr in exprs[1:]:
cur_expr = LogicalOr(cur_expr, expr)
return cur_expr
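# Minimal illustration (not from the original file): combining two comparisons with the
# helper above; the field and event names are assumptions for demonstration only.
def _example_conjunction():
    exprs = [
        Gt(EventScope(EventFieldName("duration")), Number(1000)),
        Eq(EventScope(EventName()), String("syscall_exit_read")),
    ]
    # Produces LogicalAnd(Gt(...), Eq(...))
    return create_conjunction_from_exprs(exprs)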
@enum.unique
class PeriodEngineCallbackType(enum.Enum):
PERIOD_BEGIN = 1
PERIOD_END = 2
class Period:
def __init__(self, definition, parent, begin_evt, begin_captures):
begin_evt_copy = core_event.Event(begin_evt)
self._begin_evt = begin_evt_copy
| |
isinstance(val, str)) and hasattr(val, "__iter__"):
raise TypeError("{0}: y_test must be a 1d iterable"
"".format(fname_))
else:
raise TypeError("{0}: y_test should be a pandas DataFrame with one "
"column or a 1d iterable (ndarray, etc.)"
"".format(fname_))
# get class labels from the .classes_ property of the classifier and number
# of classes by taking the length of clabs. will be used throughout the
# rest of the function, and are also needed for color map assignment.
clabs = mce.classes_
nclasses = len(clabs)
# check color maps; if "auto", check if nclasses > 2, and assign colors or
# color maps. else check that length == 3 and that elements are str.
if cmaps == "auto":
if nclasses == 2: cmaps = _mcs_cmaps_binary
else: cmaps = _mcs_cmaps_multi
elif hasattr(cmaps, "__iter__") and (not isinstance(cmaps, str)):
pass
# else raise TypeError
else:
raise TypeError("{0}: cmaps must either be \"auto\" or (str, str, str)"
" of valid colors or color maps".format(fname_))
# check values in cc; both must be in [0, 1) and be floats.
_cc_msg = "{0}: cc must be (float, float)".format(fname_)
if (hasattr(cc, "__iter__") == False) or isinstance(cc, str):
raise TypeError(_cc_msg)
if len(cc) != 2: raise ValueError(_cc_msg)
for _c in cc:
if (not isinstance(_c, float)) and (not isinstance(_c, int)):
raise TypeError(_cc_msg)
# if norm_true is True, set to "true", else if False, set to None. if not
# boolean, raise a TypeError to the user
if isinstance(norm_true, bool):
if norm_true == True: norm_true = "true"
else: norm_true = None
else: raise TypeError("{0}: error: norm_true must be bool".format(fname_))
# dictionary of statistics
stats_dict = {}
# predict labels for X_test once; the predictions are reused below for the
# confusion matrix, the precision/recall/AUC statistics, and the ROC/PR curves
y_test_pred = mce.predict(X_test)
# compute confusion matrix and put in stats_dict
cmat = confusion_matrix(y_test, y_test_pred, normalize = norm_true)
stats_dict["confusion_matrix"] = cmat
# compute misclassification rates
mc_rates = [None for _ in range(nclasses)]
for i in range(nclasses):
# misclassification rate is 1 - correct / sum of all
mc_rates[i] = 1 - cmat[i][i] / sum(cmat[i])
# add entry in stats_dict
stats_dict["mc_rates"] = mc_rates
# add number of classes to stats_dict
stats_dict["n_classes"] = nclasses
# get accuracy on X_test and add to stats_dict
stats_dict["accuracy"] = mce.score(X_test, y_test)
# compute precision and add to stats_dict; if nclasses > 2, then the entry
# for precision is an array of nclasses labels (for one vs. rest precision).
# "macro_precision" will be the macro average (average of all individual
# precision statistics) and "micro_precision" will be the micro average
# (total tp / total tp + total fp). in the two-class case, the
# "micro_precision" and "macro_precision" keys will be None, and "precision"
# will only be a single scalar value returned by precision_score.
if nclasses == 2:
stats_dict["precision"] = precision_score(y_test, y_test_pred)
stats_dict["macro_precision"] = None
stats_dict["micro_precision"] = None
else:
stats_dict["precision"] = precision_score(y_test, y_test_pred,
average = None)
stats_dict["macro_precision"] = precision_score(y_test, y_test_pred,
average = "macro")
stats_dict["micro_precision"] = precision_score(y_test, y_test_pred,
average = "micro")
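# For illustration: with three classes whose one-vs-rest precisions are
# 0.5, 0.8 and 0.9, the macro average is (0.5 + 0.8 + 0.9) / 3 ~= 0.733
# (every class counts equally), whereas the micro average pools the raw
# TP/FP counts across classes before dividing, so frequent classes weigh
# more heavily in it.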
# compute ROC AUC and add to stats dict. in the multiclass case, "auc" will
# be a list of nclasses labels (computed in one vs. rest fashion), while
# "macro_auc" will have macro average of all AUC scores. we need to binarize
# our multiclass predictions so we change shape from (N, 1) to (N, nclasses)
y_test_bins, y_test_pred_bins = None, None
if nclasses == 2:
stats_dict["auc"] = roc_auc_score(y_test, y_test_pred)
stats_dict["macro_auc"] = None
else:
# wrap y_test as a DataFrame and call get_dummies to one-hot encode for
# the multiclass case. first need to treat all the entries as a string
# or else get_dummies will not binarize. column names are "class_k" for
# each label k in our multiclass problem.
# if y_test is a DataFrame, use iloc to index
if isinstance(y_test, DataFrame):
y_test_bins = get_dummies(DataFrame(map(str, y_test.iloc[:, 0]),
columns = ["class"]))
# else just apply map and wrap in DataFrame
else:
y_test_bins = get_dummies(DataFrame(map(str, y_test),
columns = ["class"]))
# do the same for y_pred_test, which is a 1d iterable
y_test_pred_bins = get_dummies(DataFrame(map(str, y_test_pred),
columns = ["class"]))
# replace "class_" in each of the columns with empty string
y_test_bins.columns = tuple(map(lambda x: x.replace("class_", ""),
list(y_test_bins.columns)))
y_test_pred_bins.columns = tuple(map(lambda x: x.replace("class_", ""),
list(y_test_pred_bins.columns)))
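# For illustration: after this step y_test_bins and y_test_pred_bins are
# (N, nclasses) indicator DataFrames, e.g. labels [0, 2, 1] become rows
# [1, 0, 0], [0, 0, 1], [0, 1, 0] under columns named "0", "1", "2"
# (assuming every class appears in both the true and the predicted labels,
# so the two frames share the same columns).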
# calculate one vs. rest AUC scores
stats_dict["auc"] = roc_auc_score(y_test_bins, y_test_pred_bins,
multi_class = "ovr", average = None)
# calculate macro average of the AUC scores (roc_auc_score defaults to average = "macro")
stats_dict["macro_auc"] = roc_auc_score(y_test_bins, y_test_pred_bins,
multi_class = "ovr")
# compute recall and add to stats dict. in the multiclass case, the "recall"
# key will hold an array of one vs. rest recall scores, while it will be a
# single float in the binary classification case. "macro_recall" and
# "micro_recall" will give macro and micro recall scores, and will be None
# in the binary classification case.
if nclasses == 2:
stats_dict["recall"] = recall_score(y_test, y_test_pred)
stats_dict["macro_recall"] = None
stats_dict["micro_recall"] = None
else:
stats_dict["recall"] = recall_score(y_test, y_test_pred, average = None)
stats_dict["macro_recall"] = recall_score(y_test, y_test_pred,
average = "macro")
stats_dict["micro_recall"] = recall_score(y_test, y_test_pred,
average = "micro")
# compute true and false positive rates for the ROC curve. if nclasses > 2,
# then fpr and tpr will be 2d arrays, where each row i contains the fpr or
# tpr for the one vs. all ROC curve for class label i.
fpr, tpr = None, None
if nclasses == 2: fpr, tpr, _ = roc_curve(y_test, y_test_pred)
else:
# set up fpr and tpr as being length nclasses
fpr = [None for _ in range(nclasses)]
tpr = [None for _ in range(nclasses)]
# for each of the labels in clabs corresponding to a column in
# y_test_bins and y_test_pred_bins, compute one vs. all fpr and tpr. we
# do not need to specify positive label since y_test_bins and
# y_test_pred_bins are both indicator matrices.
for i in range(nclasses):
fpr[i], tpr[i], _ = roc_curve(y_test_bins.iloc[:, i],
y_test_pred_bins.iloc[:, i])
# compute precision-recall curves. if nclasses > 2, then prr and rcr
# (precision and recall rates respectively) will be 2d arrays, where each
# row i contains the prr or rcr for the ovr precision-recall curve for i.
prr, rcr = None, None
if nclasses == 2: prr, rcr, _ = precision_recall_curve(y_test, y_test_pred)
else:
# set up prr and rcr as being length nclasses
prr = [None for _ in range(nclasses)]
rcr = [None for _ in range(nclasses)]
# for each label in clabs corresponding to a column in y_test_bins and
# y_test_pred_bins, compute one vs. all prr and rcr.
for i in range(nclasses):
prr[i], rcr[i], _ = \
precision_recall_curve(y_test_bins.iloc[:, i],
y_test_pred_bins.iloc[:, i])
### figure setup ###
# if figsize is "auto" (default), determine plot size based on whether the
# problem is a binary classification problem or a multiclass one.
if figsize == "auto":
if nclasses == 2: figsize = _mcs_figsize_binary
else: figsize = _mcs_figsize_multi
# else figsize is user specified; generate subplots with specified style
with axes_style(style = style):
fig, axs = subplots(nrows = 1, ncols = 3, figsize = figsize)
# flatten the axes (for ease of iterating through them)
axs = ravel(axs)
# forces all plot areas to be square (finally!)
for ax in axs: ax.axis("square")
# set best option
best_ = ""
if best_model is True: best_ = "best "
# set model name; if auto, set to object name
if model_name == "auto": model_name = str(mce).split("(")[0]
# set overall figure title so that the size of the plots is not affected by
# length of the model name, as there is much less space above each plot
# compared to the whole figure. we use plural curves when nclasses > 2.
# note that the suptitle method returns the corresponding Text instance.
fst_ = fig.suptitle("Confusion matrix, ROC curve{0}, and PRC{0} for {1}{2}"
"".format("" if nclasses == 2 else "s", best_,
model_name), fontsize = "large")
# note that since tight_layout() does not respect the figure title, we can
# cheat by forcing our subplot titles
# repository: CDAT/libcdms
#! /usr/local/bin/python
# -*- Mode: Python; tab-width: 4 -*-
#
# Author: <NAME> <<EMAIL>>
# Copyright 1996, 1997 by <NAME>
# All Rights Reserved.
#
# This software is provided free for non-commercial use. If you are
# interested in using this software in a commercial context, or in
# purchasing support, please contact the author.
RCS_ID = '$Id$'
# python modules
import os
import regex
import socket
import stat
import string
import sys
import time
# async modules
import asyncore
import asynchat
# medusa modules
import http_date
import producers
import status_handler
import logger
VERSION_STRING = string.split(RCS_ID)[2]
from counter import counter
# ===========================================================================
# Request Object
# ===========================================================================
class http_request:
# default reply code
reply_code = 200
request_counter = counter()
# Whether to automatically use chunked encoding when
#
# HTTP version is 1.1
# Content-Length is not set
# Chunked encoding is not already in effect
#
# If your clients are having trouble, you might want to disable this.
use_chunked = 1
# by default, this request object ignores user data.
collector = None
def __init__ (self, *args):
# unpack information about the request
(self.channel, self.request,
self.command, self.uri, self.version,
self.header) = args
self.outgoing = fifo()
self.reply_headers = {
'Server' : 'Medusa/%s' % VERSION_STRING,
'Date' : http_date.build_http_date (time.time())
}
self.request_number = http_request.request_counter.increment()
self._split_uri = None
self._header_cache = {}
# --------------------------------------------------
# reply header management
# --------------------------------------------------
def __setitem__ (self, key, value):
self.reply_headers[key] = value
def __getitem__ (self, key):
return self.reply_headers[key]
def has_key (self, key):
return self.reply_headers.has_key (key)
def build_reply_header (self):
return string.join (
[self.response(self.reply_code)] + map (
lambda x: '%s: %s' % x,
self.reply_headers.items()
),
'\r\n'
) + '\r\n\r\n'
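# For illustration, the string built above typically looks like
#   'HTTP/1.0 200 OK\r\nServer: Medusa/<version>\r\nDate: <http date>\r\n\r\n'
# i.e. the status line followed by one 'Key: value' line per reply header
# (in no particular order), terminated by a blank line.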
# --------------------------------------------------
# split a uri
# --------------------------------------------------
# <path>;<params>?<query>#<fragment>
path_regex = regex.compile (
# path params query fragment
'\\([^;?#]*\\)\\(;[^?#]*\\)?\\(\\?[^#]*\)?\(#.*\)?'
)
def split_uri (self):
if self._split_uri is None:
if self.path_regex.match (self.uri) != len(self.uri):
raise ValueError, "Broken URI"
else:
self._split_uri = map (lambda i,r=self.path_regex: r.group(i), range(1,5))
return self._split_uri
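# For illustration, a URI such as '/cgi-bin/test.py;lang=en?x=1&y=2#top'
# is split by the pattern above into four pieces:
#   path '/cgi-bin/test.py', params ';lang=en', query '?x=1&y=2', fragment '#top'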
def get_header_with_regex (self, head_reg, group):
for line in self.header:
if head_reg.match (line) == len(line):
return head_reg.group(group)
return ''
def get_header (self, header):
header = string.lower (header)
hc = self._header_cache
if not hc.has_key (header):
h = header + ': '
hl = len(h)
for line in self.header:
if string.lower (line[:hl]) == h:
r = line[hl:]
hc[header] = r
return r
hc[header] = None
return None
else:
return hc[header]
# --------------------------------------------------
# user data
# --------------------------------------------------
def collect_incoming_data (self, data):
if self.collector:
self.collector.collect_incoming_data (data)
else:
self.log_info(
'Dropping %d bytes of incoming request data' % len(data),
'warning'
)
def found_terminator (self):
if self.collector:
self.collector.found_terminator()
else:
self.log_info (
'Unexpected end-of-record for incoming request',
'warning'
)
def push (self, thing):
if type(thing) == type(''):
self.outgoing.push (producers.simple_producer (thing))
else:
self.outgoing.push (thing)
def response (self, code=200):
message = self.responses[code]
self.reply_code = code
return 'HTTP/%s %d %s' % (self.version, code, message)
def error (self, code):
self.reply_code = code
message = self.responses[code]
s = self.DEFAULT_ERROR_MESSAGE % {
'code': code,
'message': message,
}
self['Content-Length'] = len(s)
self['Content-Type'] = 'text/html'
# make an error reply
self.push (s)
self.done()
# can also be used for empty replies
reply_now = error
def done (self):
"finalize this transaction - send output to the http channel"
# ----------------------------------------
# persistent connection management
# ----------------------------------------
# --- BUCKLE UP! ----
connection = string.lower (get_header (CONNECTION, self.header))
close_it = 0
wrap_in_chunking = 0
if self.version == '1.0':
if connection == 'keep-alive':
if not self.has_key ('Content-Length'):
close_it = 1
else:
self['Connection'] = 'Keep-Alive'
else:
close_it = 1
elif self.version == '1.1':
if connection == 'close':
close_it = 1
elif not self.has_key ('Content-Length'):
if self.has_key ('Transfer-Encoding'):
if not self['Transfer-Encoding'] == 'chunked':
close_it = 1
elif self.use_chunked:
self['Transfer-Encoding'] = 'chunked'
wrap_in_chunking = 1
else:
close_it = 1
elif self.version is None:
# Although we don't *really* support http/0.9 (because we'd have to
# use \r\n as a terminator, and it would just yuck up a lot of stuff)
# it's very common for developers to not want to type a version number
# when using telnet to debug a server.
close_it = 1
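# Summary of the decision made above:
#   HTTP/1.0 - keep the connection open only if the client sent
#              'Connection: keep-alive' AND a Content-Length header is set
#              on the reply; otherwise close it.
#   HTTP/1.1 - keep the connection open by default; if the reply has no
#              Content-Length, use chunked transfer-encoding when possible
#              and close the connection when the body cannot be framed at
#              all; 'Connection: close' always closes.
#   no version (HTTP/0.9-style request) - always close.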
outgoing_header = producers.simple_producer (self.build_reply_header())
if close_it:
self['Connection'] = 'close'
if wrap_in_chunking:
outgoing_producer = producers.chunked_producer (
producers.composite_producer (self.outgoing)
)
# prepend the header
outgoing_producer = producers.composite_producer (
fifo([outgoing_header, outgoing_producer])
)
else:
# prepend the header
self.outgoing.push_front (outgoing_header)
outgoing_producer = producers.composite_producer (self.outgoing)
# apply a few final transformations to the output
self.channel.push_with_producer (
# globbing gives us large packets
producers.globbing_producer (
# hooking lets us log the number of bytes sent
producers.hooked_producer (
outgoing_producer,
self.log
)
)
)
self.channel.current_request = None
if close_it:
self.channel.close_when_done()
def log_date_string (self, when):
return time.strftime (
'%d/%b/%Y:%H:%M:%S ',
time.gmtime(when)
) + tz_for_log
def log (self, bytes):
self.channel.server.logger.log (
self.channel.addr[0],
'%d - - [%s] "%s" %d %d\n' % (
self.channel.addr[1],
self.log_date_string (time.time()),
self.request,
self.reply_code,
bytes
)
)
responses = {
100: "Continue",
101: "Switching Protocols",
200: "OK",
201: "Created",
202: "Accepted",
203: "Non-Authoritative Information",
204: "No Content",
205: "Reset Content",
206: "Partial Content",
300: "Multiple Choices",
301: "Moved Permanently",
302: "Moved Temporarily",
303: "See Other",
304: "Not Modified",
305: "Use Proxy",
400: "Bad Request",
401: "Unauthorized",
402: "Payment Required",
403: "Forbidden",
404: "Not Found",
405: "Method Not Allowed",
406: "Not Acceptable",
407: "Proxy Authentication Required",
408: "Request Time-out",
409: "Conflict",
410: "Gone",
411: "Length Required",
412: "Precondition Failed",
413: "Request Entity Too Large",
414: "Request-URI Too Large",
415: "Unsupported Media Type",
500: "Internal Server Error",
501: "Not Implemented",
502: "Bad Gateway",
503: "Service Unavailable",
504: "Gateway Time-out",
505: "HTTP Version not supported"
}
# Default error message
DEFAULT_ERROR_MESSAGE = string.join (
['<head>',
'<title>Error response</title>',
'</head>',
'<body>',
'<h1>Error response</h1>',
'<p>Error code %(code)d.',
'<p>Message: %(message)s.',
'</body>',
''
],
'\r\n'
)
# ===========================================================================
# HTTP Channel Object
# ===========================================================================
class http_channel (asynchat.async_chat):
# use a larger default output buffer
ac_out_buffer_size = 1<<16
current_request = None
channel_counter = counter()
def __init__ (self, server, conn, addr):
self.channel_number = http_channel.channel_counter.increment()
self.request_counter = counter()
asynchat.async_chat.__init__ (self, conn)
self.server = server
self.addr = addr
self.set_terminator ('\r\n\r\n')
self.in_buffer = ''
self.creation_time = int (time.time())
self.check_maintenance()
def __repr__ (self):
ar = asynchat.async_chat.__repr__(self)[1:-1]
return '<%s channel#: %s requests:%s>' % (
ar,
self.channel_number,
self.request_counter
)
# Channel Counter, Maintenance Interval...
maintenance_interval = 500
def check_maintenance (self):
if not self.channel_number % self.maintenance_interval:
self.maintenance()
def maintenance (self):
self.kill_zombies()
# 30-minute zombie timeout. status_handler also knows how to kill zombies.
zombie_timeout = 30 * 60
def kill_zombies (self):
now = int (time.time())
for channel in asyncore.socket_map.values():
if channel.__class__ == self.__class__:
if (now - channel.creation_time) > channel.zombie_timeout:
channel.close()
# --------------------------------------------------
# send/recv overrides, good place for instrumentation.
# --------------------------------------------------
# this information needs to get into the request object,
# so that it may log correctly.
def send (self, data):
result = asynchat.async_chat.send (self, data)
self.server.bytes_out.increment (len(data))
return result
def recv (self, buffer_size):
try:
result = asynchat.async_chat.recv (self, buffer_size)
self.server.bytes_in.increment (len(result))
return result
except MemoryError:
# --- Save a Trip to Your Service Provider ---
# It's possible for a process to eat up all the memory of
# the machine, and put it in an extremely wedged state,
# where medusa keeps running and can't be shut down. This
# is where MemoryError tends to get thrown, though of
# course it could get thrown elsewhere.
sys.exit ("Out of Memory!")
def handle_error (self):
t, v = sys.exc_info()[:2]
if t is SystemExit:
raise t, v
else:
asynchat.async_chat.handle_error (self)
def log (self, *args):
pass
# --------------------------------------------------
# async_chat methods
# --------------------------------------------------
def collect_incoming_data (self, data):
if self.current_request:
# we are receiving data (probably POST data) for a request
self.current_request.collect_incoming_data (data)
else:
# we are receiving header (request) data
self.in_buffer = self.in_buffer + data
def found_terminator (self):
if self.current_request:
self.current_request.found_terminator()
else:
header = self.in_buffer
self.in_buffer = ''
lines = string.split (header, '\r\n')
# --------------------------------------------------
# crack the request header
# --------------------------------------------------
while lines and not lines[0]:
# as per the suggestion of http-1.1 section 4.1, (and
# <NAME> <<EMAIL>>), ignore any leading
# blank lines (buggy browsers tack them onto the end of
# POST requests)
lines = lines[1:]
if not lines:
self.close_when_done()
return
request = lines[0]
command, uri, version = crack_request (request)
header = join_headers (lines[1:])
r = http_request (self, request, command, uri, version, header)
self.request_counter.increment()
self.server.total_requests.increment()
if command is None:
self.log_info ('Bad HTTP request: %s' % repr(request), 'error')
r.error (400)
return
# --------------------------------------------------
# handler selection and dispatch
# --------------------------------------------------
for h in self.server.handlers:
if h.match (r):
try:
self.current_request = r
# This isn't used anywhere.
# r.handler = h # CYCLE
h.handle_request (r)
except:
self.server.exceptions.increment()
(file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
self.log_info(
'Server Error: %s, %s: file: %s line: %s' % (t,v,file,line),
'error')
try:
r.error (500)
except:
pass
return
# no handlers, so complain
r.error (404)
def writable (self):
# this is just the normal async_chat 'writable', here for comparison
return self.ac_out_buffer or len(self.producer_fifo)
def writable_for_proxy (self):
# this version of writable supports the idea of a 'stalled' producer
# [i.e., it's not ready to produce any output yet] This is needed by
# the proxy, which will be waiting for the magic combination of
# 1) hostname resolved
# 2) connection made
# 3) data available.
if self.ac_out_buffer:
return 1
elif len(self.producer_fifo):
p = self.producer_fifo.first()
if hasattr (p, 'stalled'):
return not p.stalled()
else:
return 1
# ===========================================================================
# HTTP Server Object
# ===========================================================================
class http_server (asyncore.dispatcher):
SERVER_IDENT = 'HTTP Server (V%s)' % VERSION_STRING
channel_class = http_channel
def __init__ (self, ip, port, resolver=None, logger_object=None):
self.ip = ip
self.port = port
asyncore.dispatcher.__init__ (self)
self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
self.handlers = []
if not logger_object:
logger_object = logger.file_logger (sys.stdout)
self.set_reuse_addr()
self.bind ((ip, port))
# lower this to 5 if your OS complains
self.listen (1024)
host, port = self.socket.getsockname()
if not ip:
self.log_info('Computing default hostname', 'warning')
ip = socket.gethostbyname (socket.gethostname())
try:
self.server_name = socket.gethostbyaddr (ip)[0]
except socket.error:
self.log_info('Cannot do reverse lookup', 'warning')
self.server_name = ip # use the IP address as the "hostname"
self.server_port = port
self.total_clients = counter()
self.total_requests = counter()
self.exceptions = counter()
self.bytes_out = counter()
self.bytes_in = counter()
if resolver:
self.logger = logger.resolving_logger (resolver, logger_object)
else:
self.logger = logger.unresolving_logger (logger_object)
self.log_info (
'Medusa (V%s) started at %s'
'\n\tHostname: %s'
'\n\tPort:%d'
'\n' % (
VERSION_STRING,
time.ctime(time.time()),
self.server_name,
port,
)
)
def writable (self):
return 0
def handle_read (self):
pass
def readable (self):
return self.accepting
def handle_connect (self):
pass
def handle_accept (self):
self.total_clients.increment()
try:
conn, addr = self.accept()
except socket.error:
# linux: on rare occasions we get a bogus socket back from
# accept. socketmodule.c:makesockaddr complains that the
# address family is unknown. We don't want the whole server
# to shut down because of this.
self.log_info ('warning: server accept() threw an exception', 'warning')
return
except TypeError:
# unpack non-sequence. this can happen when a read event
# fires on a listening socket, but when we call accept()
# we get EWOULDBLOCK, so dispatcher.accept() returns None.
# Seen on FreeBSD3.
self.log_info ('warning: server accept() threw EWOULDBLOCK', 'warning')
return
self.channel_class (self, conn, addr)
def install_handler | |
len(masks) != len(trace_indexes):
raise ValueError("masks and trace_indexes do not have the same size.")
for i,v in zip(trace_indexes, masks):
self.trace_masks[i] = v
else:
self.trace_masks[trace_indexes] = masks
# --------- #
# Shift #
# --------- #
def get_yshifted_traceweight_mask(self, yshift, subpixelization=5):
""" """
return self.get_shifted_tracematch(0, yshift, build_tracemask=False).get_traceweight_mask(subpixelization)
def get_shifted_tracematch(self, xshift, yshift, build_tracemask=False):
""" """
tmap = TraceMatch()
tmap.set_trace_line(self._xys + np.asarray([[xshift,yshift],[xshift,yshift]]),
trace_indexes= self.trace_indexes,
width=self.width, build_tracemask=build_tracemask)
return tmap
# --------- #
# GETTER #
# --------- #
def get_sub_tracematch(self, traces, build_tracemask=False):
""" """
tmap_ = TraceMatch()
flagin = np.in1d(self.trace_indexes, traces)
tmap_.set_trace_line(self._xys[flagin],
trace_indexes = self.trace_indexes[flagin],
width=self.width, build_tracemask=build_tracemask)
return tmap_
# Trace crossing
def get_traces_crossing_x(self, xpixel, ymin=-1, ymax=1e5):
""" traceindexes of the traces crossing the 'xpixel' vertical line
Returns
-------
list of indexes
"""
return self.get_traces_crossing_line([xpixel,ymin],[xpixel,ymax])
def get_traces_crossing_x_ybounds(self, xpixel, ymin=-1, ymax=1e5):
""" traceindexes of the traces crossing the 'xpixel' vertical line
Returns
-------
list of indexes
"""
line = geometry.LineString([[xpixel,ymin],[xpixel,ymax]])
mpoly = geometry.MultiPolygon([self.trace_polygons[i_]
for i_ in self.get_traces_crossing_x(xpixel, ymin=ymin, ymax=ymax) ])
return np.asarray([m.intersection(line).xy[1] for m in mpoly])
def get_traces_crossing_y(self, ypixel, xmin=-1, xmax=1e5):
""" traceindexes of the traces crossing the 'ypixel' horizonthal line
Returns
-------
list of indexes
"""
return self.get_traces_crossing_line([xmin,ypixel],[xmax,ypixel])
def get_traces_crossing_line(self, pointa, pointb):
""" traceindexes of traces crossing the vertival line formed by the [a,b] vector
Parameters
----------
pointa, pointb: [xcoord, ycoord]
coordinates of the 2 points defining the line
Returns
-------
list of indexes
"""
line = geometry.LineString([pointa,pointb])
return [idx for idx in self.trace_indexes if self.trace_polygons[idx].crosses(line)]
# Boundaries
def get_trace_xbounds(self, traceindex):
""" get the extremal x-ccd coordinates covered by the trace """
return np.asarray(np.round(np.percentile(np.asarray(self.trace_vertices[traceindex]).T[0], [0,100])), dtype="int")
def get_trace_ybounds(self, traceindex):
""" get the extremal y-ccd coordinates covered by the trace """
return np.asarray(np.round(np.percentile(np.asarray(self.trace_vertices[traceindex]).T[1], [0,100])), dtype="int")
def get_trace_vertices(self, traceindex):
""" traceindex -> vertices
returns the vertices of the given traceindex
Returns
-------
array (vertices)
"""
return self.trace_vertices[traceindex]
def get_finetuned_trace_vertices(self, traceindex, x, y, width,
polydegree=2, **kwargs):
""" The builds a fine tuned trace of the given traceindex.
The tuning uses x, y position and build a polygon around.
=> You must have run match_tracematch_and_sep()
Parameters
----------
traceindex: [int]
Index of the trace you want to fine tune
x, y: [array, array]
Position around which the polygon will be built.
polydegree: [positive-int] -optional-
Degree of the polynome that will be used to define the trace.
(See 'width' for details on the width of the trace polygon)
=> If polydegree is higher than the number of sep object detected
belonging to this trace, polydegree will be reduced to that number
=> If polydegree ends up being lower or equal to 0, None is returned
width: [float / None] -optional-
Width of the polygon (in pixels)
**kwargs goes to spectralmatching.get_boxing_polygone()
"""
xbounds = self.get_trace_xbounds(traceindex)
prop = { **dict(dy=1), **kwargs }
return np.asarray(get_boxing_polygone(x, y, rangex=np.linspace(xbounds[0], xbounds[1], polydegree+5),
width= width, polydegree=polydegree, get_vertices=True, **prop))
def get_finetuned_trace(self, traceindex, x, y, polydegree=2, **kwargs):
""" Builds the best guess fine tuning of the trace given the x, y position
Parameters
----------
traceindex: [int]
Index of the trace you want to fine tune
x, y: [array, array]
Position around which the polygon will be built.
polydegree: [positive-int] -optional-
Degree of the polynome that will be used to define the trace.
(See 'width' for details on the width of the trace polygon)
=> If polydegree is higher than the number of sep object detected
belonging to this trace, polydegree will be reduced to that number
=> If polydegree ends up being lower or equal to 0, None is returned
**kwargs goes to spectralmatching.get_boxing_polygone() [cannot be width, fixed to 0]
Returns
-------
x, y
"""
xbounds = self.get_trace_xbounds(traceindex)
prop = {**dict(dy=1), **kwargs}
return np.asarray(get_boxing_polygone(x, y, rangex=np.arange(*xbounds),
width= 0, polydegree=polydegree, get_vertices=True, **prop))
# Masking #
def get_trace_mask(self, traceindex, update=True, rebuild=False, updateonly=False):
""" traceindex -> color
get a weight mask for the given trace.
Parameters
----------
Returns
-------
Weighting mask (or None if both update and updateonly)
"""
# - Do you want the one that already exists (if any)?
if traceindex in self.trace_masks and not rebuild:
return self.trace_masks[traceindex].toarray()
# - Let's build the mask
if _HAS_SHAPELY:
mask = self._get_shapely_trace_mask_(traceindex)
else:
mask = self._get_color_trace_mask_(traceindex)
# - Shall we save it?
if update:
self.set_trace_masks(sparse.csr_matrix(mask), traceindex)
if updateonly:
del mask
return
return mask
def _get_color_trace_mask_(self, traceindex):
""" Use the tracebuild colors trick
= Time depends on the subpixelization, 5 takes about 1s =
"""
r, g, b, a = self._tracecolor[traceindex]
mask = ((self._rmap==r)*(self._gmap==g)*(self._bmap==b)).reshape(*self._mapshape)
final_mask = mask if self.subpixelization == 1 else \
measure.block_reduce(mask, (self.subpixelization, self.subpixelization) )/float(self.subpixelization**2)
return final_mask
def _get_shapely_trace_mask_(self, traceindex):
""" Based on Shapely, measure the intersection area between traces and pixels.
= Takes about 1s =
"""
return verts_to_mask(self.trace_vertices[traceindex])
def _load_trace_mask_(self, traceindex):
""" Build and store the weight mask of the given trace (nothing returned). """
_ = self.get_trace_mask(traceindex, updateonly=True)
def get_traceweight_mask(self, subpixelization=5):
""" """
if self._maskimage is None or subpixelization != self.subpixelization:
self.build_tracemask(subpixelization)
mask = (self._rmap > 0 ).reshape(*self._mapshape)
return mask if self.subpixelization == 1 else \
measure.block_reduce(mask, (self.subpixelization, self.subpixelization) )/float(self.subpixelization**2)
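# Note on the weighting above: the mask is first rasterized on a grid
# `subpixelization` times finer than the CCD; block_reduce then sums each
# (subpixelization x subpixelization) block, and the division turns that sum
# into the fraction of the CCD pixel covered by a trace, i.e. a weight
# between 0 (untouched pixel) and 1 (pixel fully inside a trace).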
def get_notrace_mask(self, subpixelization=5, asbool=True):
""" a 2D boolean mask that is True for places in the CCD without trace. """
return ~np.asarray(self.get_traceweight_mask(subpixelization), dtype="bool")
# Trace Location #
def get_trace_source(self, x, y, a=1, b=1, theta=0):
""" ccdpixels -> traceindex
The method get the RGBA values within an ellipe centered in `x` and `y`
with a major and minor axes length `a` and `b` and angle `theta`.
The index of the traces that maximize the color overlap is then returned.
Parameters
----------
x, y: [float, float]
x and y central position of the ellipses
a, b: [float, float] -optional-
major and minor axis lengths
theta: [float] -optional-
rotation angle (in rad) of the ellipse.
Returns
-------
traceindex
"""
try:
from sep import mask_ellipse
except ImportError:
raise ImportError("You need sep (the Python version of SExtractor) to run this method => sudo pip install sep")
masking = np.zeros(self._mapshape, dtype="bool")
mask_ellipse(masking, x*self.subpixelization,
y*self.subpixelization, a*self.subpixelization,
b*self.subpixelization, theta=theta)
if not np.any(masking):
return np.asarray([])
sum_mask = np.sum( np.sum([(self._facecolors.flatten()==v)
for v in np.concatenate(self._maskimage.T.T[masking])],
axis=0).reshape(self._facecolors.shape), axis=1)
return np.asarray(self.trace_indexes)[sum_mask==sum_mask.max()] if sum_mask.max()>0 else np.asarray([])
def get_traces_within_polygon(self, polyverts):
""" Which traces are fully contained within the given polygon (defined by the input vertices)
Parameters
----------
polyverts: [2D-array]
Coordinates of the points defining the polygon.
Returns
-------
list of trace indexes
"""
if _HAS_SHAPELY:
globalpoly = geometry.Polygon(polyverts)
return [idx_ for idx_ in self.trace_indexes if globalpoly.contains(geometry.Polygon(self.trace_vertices[idx_]))]
else:
from matplotlib import patches
globalpoly = patches.Polygon(polyverts)
return [idx_ for idx_ in self.trace_indexes if
np.all([globalpoly.contains_point(vtr) for vtr in self.trace_vertices[idx_]])]
# --------- #
# PLOTTER #
# --------- #
def display_traces(self, ax, traceindex, facecolors="None", edgecolors="k",
autoscale=True,**kwargs):
""" """
from matplotlib import patches
# Several indexes given
if tools.is_arraylike(traceindex):
if not tools.is_arraylike(facecolors):
facecolors = [facecolors]*len(traceindex)
if not tools.is_arraylike(edgecolors):
edgecolors = [edgecolors]*len(traceindex)
ps = [patches.Polygon(self.trace_vertices[idx_],
facecolor=facecolors[i], edgecolor=edgecolors[i], **kwargs)
for i,idx_ in enumerate(traceindex)]
ip = [ax.add_patch(p_) for p_ in ps]
# One index given
else:
p_ = patches.Polygon(self.trace_vertices[traceindex],
facecolor=facecolors, edgecolor=edgecolors, **kwargs)
ip = [ax.add_patch(p_)]
if autoscale:
ax.autoscale(True, tight=True)
return ip
# ------------ #
# Methods #
# ------------ #
def build_tracemask(self, subpixelization=5, width=2048, height=2048):
"""
This will build the internal tools to identify the connections between
traceindex and ccd-pixels
Returns
-------
Void
"""
self._properties["subpixelization"] = subpixelization
# - 1 Trace, 1 Color !
# List because of python3
nonunique_RGBA = np.asarray( list(zip(np.random.randint(5,90, size=self.ntraces*3),
np.random.randint(91,175, size=self.ntraces*3),
np.random.randint(176,254, size=self.ntraces*3),
[255]*self.ntraces*3)) )
b = np.ascontiguousarray(nonunique_RGBA).view(np.dtype((np.void,nonunique_RGBA.dtype.itemsize * nonunique_RGBA.shape[1])))
# This is made for faster identification later on
self._derived_properties['facecolor'] = nonunique_RGBA[np.unique(b, return_index=True)[1]][:self.ntraces]
self._derived_properties['tracecolor'] = {i:c for i,c in zip(self.trace_indexes,self._facecolors)}
verts = [(self.trace_vertices[i]+np.asarray([0.5,0.5]))*self.subpixelization for i in self.trace_indexes]
self._derived_properties['maskimage'] = \
np.asarray(polygon_mask(verts, width*self.subpixelization, height*self.subpixelization,
facecolor=self._facecolors, edgecolor=self._facecolors, get_fullcolor=True))
# - Save detailed information for matching later on
r, g, b, a = self._maskimage.T
self._derived_properties['mapshape'] = (width*self.subpixelization, height*self.subpixelization)
self._derived_properties['rmap'] = r.ravel(order='F')
self._derived_properties['gmap'] = g.ravel(order='F')
self._derived_properties['bmap'] = b.ravel(order='F')
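# Implementation note: the tracemask built above relies on a color trick.
# Every trace is assigned its own RGBA facecolor, all trace polygons are
# rasterized once into a single sub-pixelized image, and the flattened
# R/G/B channels are kept so that pixel -> trace lookups (get_trace_source,
# _get_color_trace_mask_) reduce to simple color comparisons instead of
# repeated polygon/pixel intersection tests.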
| |
from abc import ABCMeta, abstractmethod
import matplotlib.pyplot as plt
import numpy as np
import scipy.linalg as la
from scipy.optimize import minimize
from scipy.special import factorial, factorial2
from ssmtoybox.mtran import SphericalRadialTransform, UnscentedTransform, GaussHermiteTransform, \
FullySymmetricStudentTransform
from ssmtoybox.utils import vandermonde, n_sum_k
from .bqkern import RBFGauss, RQ, RBFStudent
class Model(object, metaclass=ABCMeta):
"""
Base class for all models of the integrated function in the BQ quadrature context. It is intended to be used
by the subclasses of the `BQTransform` (i.e. Gaussian process and t-process quadrature moment transforms). The
Model class ties together the kernel and the point-set used by the underlying quadrature rule. In modelling
terms, the Model is composed of a kernel and point-set, that is, `Model` *has-a* `Kernel` and `points`.
Parameters
----------
dim : int
Dimension of the points (integration domain).
kern_par : ndarray
Kernel parameters in a vector.
kern_str : str
String abbreviation for the kernel.
point_str : str
String abbreviation for the point-set.
point_par : dict
Any parameters for constructing desired point-set.
estimate_par : bool
Set to `True` if you want to re-compute kernel expectations based on changing parameters, like in
`MarginalInference`.
Attributes
----------
Model._supported_points_ : list
Each element of the list is an acronym of a point-set.
Model._supported_kernels_ : list
Each element of the list is an acronym of a kernel.
kernel : Kernel
Kernel used by the Model.
points : ndarray
Quadrature rule point-set.
str_pts : str
String abbreviation of the point-set.
str_pts_par : str
String representation of the point-set parameter values.
emv : float
Expected model variance.
ivar : float
Variance of the integral.
dim_in : int
Dimension of the point-set.
num_pts : int
Number of points.
eye_d : ndarray
eye_n : ndarray
Pre-allocated identity matrices to ease the computations.
Notes
-----
The model of the integrand relies on a Kernel class, that is, it is either a GP or TP regression model.
"""
_supported_points_ = ['sr', 'ut', 'gh', 'fs']
_supported_kernels_ = ['rbf', 'rq', 'rbf-student']
def __init__(self, dim, kern_par, kern_str, point_str, point_par, estimate_par):
# init kernel and sigma-points
self.kernel = Model.get_kernel(dim, kern_str, kern_par)
self.points = Model.get_points(dim, point_str, point_par)
# turn on/off re-computation of kernel expectations
self.estimate_par = estimate_par
# save for printing
self.str_pts = point_str
self.str_pts_par = str(point_par)
# may no longer be necessary now that jitter is in kernel
self.dim_in, self.num_pts = self.points.shape
self.eye_d, self.eye_n = np.eye(self.dim_in), np.eye(self.num_pts)
# init variables for passing kernel expectations and kernel matrix inverse
self.q, self.Q, self.R, self.iK = None, None, None, None
# expected model variance and integral model variance
self.model_var = None
self.integral_var = None
def __str__(self):
"""
Prettier string representation.
Returns
-------
: str
String representation including short name of the point-set, the kernel and its parameter values.
"""
return '{}\n{} {}'.format(self.kernel, self.str_pts, self.str_pts_par)
@abstractmethod
def predict(self, test_data, fcn_obs, par=None):
"""
Model predictions based on test points and the kernel parameters.
Parameters
----------
test_data : ndarray
Test points where to generate data.
fcn_obs : ndarray
Observed function values at the point-set locations.
par : ndarray
Kernel parameters, default `par=None`.
Returns
-------
mean : ndarray
Model predictive mean at the test point locations.
var : ndarray
Model predictive variance at the test point locations.
Notes
-----
This is an abstract method. Implementation needs to be provided by the subclass.
"""
pass
@abstractmethod
def bq_weights(self, par, *args):
"""
Weights of the Bayesian quadrature.
Parameters
----------
par : ndarray
Kernel parameters.
args : tuple
Returns
-------
wm : ndarray
Weights for computation of the transformed mean.
Wc : ndarray
Weights for computation of the transformed covariance.
Wcc : ndarray
Weights for computation of the transformed cross-covariance.
emvar : ndarray
Expected model variance.
ivar : ndarray
Integral variance.
"""
pass
@abstractmethod
def exp_model_variance(self, par, *args):
"""
Expected model variance :math:`\mathbb{E}_x[\mathbb{V}_f[f(x)]]`.
Parameters
----------
par : ndarray
Kernel parameters.
args : tuple
Returns
-------
: float
Expected model variance.
"""
pass
@abstractmethod
def integral_variance(self, par, *args):
"""
Integral variance :math:`\mathbb{V}_f[\mathbb{E}_x[f(x)]]`.
Parameters
----------
par : ndarray
Kernel parameters.
args : tuple
Returns
-------
: float
Variance of the integral.
"""
pass
@abstractmethod
def neg_log_marginal_likelihood(self, log_par, fcn_obs, x_obs, jitter):
"""
Negative logarithm of marginal likelihood of the model given the kernel parameters and the function
observations.
Parameters
----------
log_par : ndarray
Logarithm of the kernel parameters.
fcn_obs : ndarray
Observed function values at the inputs supplied in `x_obs`.
x_obs : ndarray
Function inputs.
jitter : ndarray
Regularization term for kernel matrix inversion.
Returns
-------
float
Negative log marginal likelihood.
Notes
-----
Intends to be used as an objective function passed into the optimizer, thus it needs to subscribe to certain
implementation conventions.
"""
pass
def optimize(self, log_par_0, fcn_obs, x_obs, method='BFGS', **kwargs):
"""Optimize kernel parameters.
Find optimal values of kernel parameters by minimizing chosen criterion given the point-set and the function
observations.
Parameters
----------
log_par_0 : ndarray
Initial guess of the kernel log-parameters.
fcn_obs : ndarray
Observed function values at the point-set locations.
x_obs : ndarray
Function inputs.
method : str
Optimization method for `scipy.optimize.minimize`, default method='BFGS'.
**kwargs
Keyword arguments for the `scipy.optimize.minimize`.
Returns
-------
: scipy.optimize.OptimizeResult
Results of the optimization in a dict-like structure returned by `scipy.optimize.minimize`.
See Also
--------
scipy.optimize.minimize
"""
obj_func = self.neg_log_marginal_likelihood
jac = True
jitter = 1e-8 * np.eye(x_obs.shape[1])
return minimize(obj_func, log_par_0, args=(fcn_obs, x_obs, jitter), method=method, jac=jac, **kwargs)
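# Hypothetical usage sketch (names illustrative, not part of the original
# module): starting from the log of an initial kernel-parameter guess,
#   res = model.optimize(np.log(par_0), fcn_obs, x_obs, method='BFGS')
#   par_ml = np.exp(res.x)   # back to the natural parameter scale
# where `model` is an instance of a concrete subclass and fcn_obs / x_obs
# are the observed function values and the corresponding inputs.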
def plot_model(self, test_data, fcn_obs, par=None, fcn_true=None, in_dim=0):
"""
Plot of predictive mean and variance of the fitted model of the integrand. Since we're plotting a function with
multiple inputs and outputs, we need to specify which is to be plotted.
Parameters
----------
test_data : ndarray
1D array of locations, where the function is to be evaluated for plotting.
fcn_obs : ndarray
Observed function values at the point-set locations.
par : ndarray
Kernel parameters, default `par=None`.
fcn_true :
True function values.
in_dim : int
Index of the input dimension to plot.
Returns
-------
Notes
-----
Not tested very much, likely to misbehave.
"""
assert in_dim <= self.dim_in - 1
fcn_obs = np.squeeze(fcn_obs)
fcn_true = np.squeeze(fcn_true)
# model predictive mean and variance
mean, var = self.predict(test_data, fcn_obs, par=par)
std = np.sqrt(var)
test_data = np.squeeze(test_data[in_dim, :])
# set plot title according to model
fig_title = self.__class__.__name__ + ' model of the integrand'
# plot training data, predictive mean and variance
fig = plt.figure(fig_title)
plt.fill_between(test_data, mean - 2 * std, mean + 2 * std, color='0.1', alpha=0.15)
plt.plot(test_data, mean, color='k', lw=2)
plt.plot(self.points[in_dim, :], fcn_obs, 'ko', ms=8)
# true function values at test points if provided
if fcn_true is not None:
plt.plot(test_data, fcn_true, lw=2, ls='--', color='tomato')
plt.show()
@staticmethod
def get_points(dim, points, point_par):
"""
Construct desired point-set for integration. Calls methods of classical quadrature classes.
Parameters
----------
dim : int
points : string
String abbreviation for the point-set.
point_par : dict
Parameters for constructing desired point-set.
Returns
-------
: ndarray
Point set in (D, N) array, where D is dimension and N number of points.
Notes
-----
List of supported points is kept in ``_supported_points_`` class variable.
"""
points = points.lower()
# make sure points is supported
if points not in Model._supported_points_:
print('Points {} not supported. Supported points are {}.'.format(points, Model._supported_points_))
return None
if point_par is None:
point_par = {}
# create chosen points
if points == 'sr':
return SphericalRadialTransform.unit_sigma_points(dim)
elif points == 'ut':
return UnscentedTransform.unit_sigma_points(dim, **point_par)
elif points == 'gh':
return GaussHermiteTransform.unit_sigma_points(dim, **point_par)
elif points == 'fs':
return FullySymmetricStudentTransform.unit_sigma_points(dim, **point_par)
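# For illustration: Model.get_points(2, 'sr', None) returns the 2*dim = 4
# unit spherical-radial cubature points, laid out as a (D, N) array per the
# convention documented above, while 'ut'/'gh'/'fs' forward any entries of
# `point_par` (e.g. the usual UT scaling parameters) to the corresponding
# classical transform.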
@staticmethod
def get_kernel(dim, kernel, par):
"""
Initializes desired kernel.
Parameters
----------
dim : int
Dimension of input (integration domain).
kernel : str
String abbreviation of the kernel.
par : ndarray
Parameters of the kernel.
Returns
-------
: Kernel
A subclass of Kernel.
Notes
-----
List of supported kernels is kept in ``_supported_kernels_`` class variable.
"""
kernel = kernel.lower()
# make sure kernel is supported
if kernel not in Model._supported_kernels_:
print('Kernel {} not supported. Supported kernels are {}.'.format(kernel, Model._supported_kernels_))
return None
# initialize the chosen kernel
if kernel == 'rbf':
return RBFGauss(dim, par)
elif kernel == 'rbf-student':
return RBFStudent(dim, par)
elif kernel == 'rq':
return RQ(dim, par)
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=UserWarning) # to suppress user warnings
import pandas as pd
import sys
import textstat
import numpy as numpy
import math
import gensim
from string import ascii_lowercase
#import Use_NN as nn
import re
from sklearn.svm import LinearSVC , SVC
from sklearn import preprocessing
from mlxtend.plotting import plot_decision_regions, plot_confusion_matrix
from sklearn.neighbors import KNeighborsClassifier , RadiusNeighborsClassifier
from collections import Counter
import string
import spacy
import string
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
#test22
removeUnWanted = re.compile('[\W_]+') #strip off the damn characters
tagQues = ["isn't she","don't they","aren't we","wasn't it","didn't he","weren't we","haven't they","hasn't she","hadn't he","hadn't we","won't she","won't they","won't she","can't he","mustn't he","are we","does she","is it","was she","did they","were you","has she","has he","had we","had you","will they","will he","will she","will he","can she","must they"]
metaphor = []
def loadMetaphors(filename):
with open(filename) as f:
content = f.readlines()
content = [x.strip() for x in content]
return content
# Re-WRITE THIS FUNCTION
def checkMetaphors(text,list_=metaphor):
if any(word in text for word in list_):
return 1 #if we have found any of the words for it
else: # if we cannot find any
return 0 # if we have not found any of the words
# Pandas Method to read our CSV to make it easier
def read_csv(filepath):
#parseDate = ['review_date']
#dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d')
#colName = ['customer_id','product_category', 'review_id', 'star_rating','helpful_votes','total_votes','vine','verified_purchase','review_body','review_date']
#df_chunk = pd.read_csv(filepath, sep='\t', header=0, chunksize=500000, error_bad_lines=False,parse_dates=parseDate, dtype=column_dtypes, usecols=colName, date_parser=dateparse)
df_chunk = pd.read_csv(filepath, sep=',', header=0)
#df_chuck = df_chuck.fillna(0)
return df_chunk
def countOfWords(text):
text = str(text)
count = len(re.findall(r'\w+', text))
return count
def commentCleaner(df,ColumnName):
df[ColumnName] = df[ColumnName].str.lower()
# df[ColumnName] = df[ColumnName].str.replace("[^abcdefghijklmnopqrstuvwxyz1234567890' ]", "")
def get_good_tokens(sentence):
replaced_punctation = list(map(lambda token: re.sub('[^0-9A-Za-z!?]+', '', token), sentence))
removed_punctation = list(filter(lambda token: token, replaced_punctation))
return removed_punctation
# Converts to POS Tags that can be used for other stuff
# def tag(sent):
# words=nltk.word_tokenize(sent)
# tagged=nltk.pos_tag(words)
# return tagged
def tagQuestions(text,list_=tagQues):
text = str(text)
if any(word in text for word in list_):
return 1
else: # if we cannot find any
return 0
#Checks for Nouns , To Implement the method found in Cindy Chung's Physc Paper (Search for Cindy Chung and <NAME> and cite here)
# def checkForNouns(text,method='None'):
# counter = 0
# counter2 = 0
# if "aa" in text: #Dummy variable to inform that it is outside , so we dont' track them
# return counter
# else:
# wrb = tag(text)
# index = 0
# for row in wrb:
# POSTag = wrb[index][1]
# # print(POSTag)
# if (POSTag in "IN") or (POSTag in "PRP") or (POSTag in "DT") or (POSTag in "CC") or (POSTag in "VB") or (POSTag in "VB") or (POSTag in "PRP$") or (POSTag is "RB"):
# counter = counter+1
# else:
# counter2 = counter2+1
# index = index + 1
# if "function" in method:
# return counter
# elif "ratio" in method:
# return abs(counter2/counter)
# else:
# return counter2
#Calculates the amount of interjections
def getInterjections(blah):
# blah = blah.lower()
blah = str(blah)
doc = nlp(blah)
result = 0
for word in doc:
if word.pos_ == 'INTJ': #INTJ is interjection with spacy
result += 1
return result
#Count the Number of Hyperboles for us to do calculations
def getHyperboles(blah,dataFrameObject):
# blah = blah.lower()
doc = nlp(blah)
flag = False # a flag to check if there is the word is being found or not
result = 0 # number of strong subjective/sentiments
for word in doc:
if word.is_punct is False: #This is not a punctuation anyway , so we can take a look at what to do next
# print(word)
checkIndex = dataFrameObject.loc[dataFrameObject['Word']==word.text] # check the index
if checkIndex.empty:
result += 0 #ignore this as we did not find any positive hyperbole
flag = False
else:
flag = True
if len(checkIndex) == 0:
flag = False
if word.text == "'":
flag = False
if flag is True:
if len(checkIndex) == 1: # if there is only one item for it
t = dataFrameObject[dataFrameObject.Word==word.text].Subjectivity.item()
# print("length is",(t))
# print("This gets executed !")
else:
t = dataFrameObject[dataFrameObject.Word==word.text].iloc[0] #
t = t['Subjectivity']
# print("length is",(t))
# print("The other one gets executed !")
if t == 'strongsubj':
result += 2 #strong sentiment
else:
result += 1 #weak sentiment or neutral
return result
# Our method to count the Punctuation for it
def getPunctuation(text):
text = str(text)
punctuation = []
for char in text:
if char in string.punctuation:
punctuation.append(char)
counter = Counter(punctuation)
if len(punctuation) == 0:
return 0 # if we only have none in it
if len(punctuation) == 1 or len(counter) == 1:
return 1 # if we only have 1
else :
return 2 #we have multiple elements inside , we just get the total number of them
# Made more Pythonic by implementing the suggestion from https://stackoverflow.com/questions/49078267/counting-upper-case-words-in-a-variable-in-python
def countTotalCaps(text):
text = str(text)
count = (sum(map(str.isupper,text.split())))
if count == 0:
return 0
elif count == 1:
return 1
else:
return 2
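# Note: checkForExclamation, getPunctuation and countTotalCaps all share the
# same coarse bucketing convention for their feature value: 0 = none,
# 1 = exactly one occurrence, 2 = more than one.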
def removePunctuation(text):
return (text.translate((str.maketrans('', '', string.punctuation))))
# checks whether a quotation mark is present in the text; returns 1 if found and 0 otherwise
def detectQuotationMarks(text):
text = str(text)
startIndex = text.find('\"')
if startIndex == -1:
return 0 #if we did not get the quotation mark for it
else:
return 1 # if we have found the quitation mark
def checkForExclamation(text):
text = str(text)
#return 1 if there is 1 , and 2 if there are multiple uses of markers , and 0 if there is none
result = 0
for char in text:
if char == '!':
result +=1
if result == 0:
return 0 # if there is nothing at all
elif result == 1:
return 1 # if there is 1
else:
return 2 # if there is more than 1
return result
def cleanSpecialCharacters(text):
return (re.sub( '[^a-z0-9\']', ' ', text))
def scrub_words(text):
"""Basic cleaning of texts."""
"""Taken from https://github.com/kavgan/nlp-in-practice/blob/master/text-pre-processing/Text%20Preprocessing%20Examples.ipynb """
# remove html markup
text=re.sub("(<.*?>)","",text)
#remove non-ascii and digits
text=re.sub("(\\W|\\d)"," ",text)
# remove the extra spaces that we have so that it is easier for our split :) Taken from https://stackoverflow.com/questions/2077897/substitute-multiple-whitespace-with-single-whitespace-in-python
text=re.sub(' +', ' ', text).strip()
return text
def LemenSpacy(text,useNLPObj=False,isFirstTime=False):
# if isFirstTime and useNLPObj:
# nlp = spacy.load("en_core_web_sm")
# print("Load Spacy")
# nlp.tokenizer = Tokenizer(nlp.vocab) #lod our customized tokenizer overwritten method
# isFirstTime = False
text = text.lower()
doc = nlp(text)
tokens = []
for token in doc:
if token.is_punct is False:
if token.orth_ == 've': #special handling case
tokens.append("'ve")
elif token.orth_ == " ":
tokens.append(" ")
else:
if token.lemma_ == '-PRON-':
tokens.append(token.orth_)
else:
tokens.append(token.lemma_)
return (' '.join(tokens))
#return tokens
def sentiment_analyzer_scores(sentence):
sentence = str(sentence)
score = analyser.polarity_scores(sentence)
#print(score["compound"])
return score['compound']
#print("{:-<40} {}".format(sentence, str(score)))
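# Note: VADER's "compound" score is a single normalized value in [-1, 1];
# by the library's usual convention, scores >= 0.05 are read as positive,
# <= -0.05 as negative, and anything in between as neutral.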
#Main Method
if __name__ == '__main__':
nlp = spacy.load("en_core_web_sm") # load the NLP toolkit software
commentChild = 'Original' # name of the field for child
commentParent = 'Parent'
analyser = SentimentIntensityAnalyzer()
df = pd.read_csv('/data/pradeesh/data/test_alta_dataset.csv') # Read the Classifier Software
df2 = pd.read_csv('/data/pradeesh/data/MPQAHyperbole.csv') # add the path to the files , so it can be read properly
#df2.drop(df.filter(regex="Unname"),axis=1, inplace=True) #do some clean ups
# LEMENTIZE the parents comment
# df['lemen_parent'] = df['Parent'].apply(LemenSpacy)
df[commentChild] = df[commentChild].astype(str)
## FOR THE COMMENTS
df['exclamation_comment'] = df[commentChild].apply(checkForExclamation) #detect exclamation
df['tagQuestions_comment'] = df[commentChild].apply(tagQuestions) # detect tag questions
df['interjections_comment'] = df[commentChild].apply(getInterjections) # get any interjections if there are present
df['punch_comment'] = df[commentChild].apply(getPunctuation) # get the no of punctuations to be used as features
df['quotation_comment'] = df[commentChild].apply(detectQuotationMarks) # adding to detect quotation marks
df['totalCaps_comment'] = df[commentChild].apply(countTotalCaps) # adding support to count total number of CAPS
print("processing sentiment comment for child")
df['sentiment_comment'] = df[commentChild].apply(sentiment_analyzer_scores) #adding support to analyze the sentiment of the score
#clean now
df['noOfWords_comment'] = df[commentChild].apply(countOfWords) #count no of words
df[commentChild] = df[commentChild].apply(removePunctuation)
df[commentChild] = df[commentChild].astype(str)
df['hyperbole_comment'] = df[commentChild].apply(getHyperboles,dataFrameObject=df2) # score hyperbole / subjectivity-strength words using the MPQA lexicon
## FOR THE PARENT COMMENTS
df['exclamation_parent'] = df[commentParent].apply(checkForExclamation) #detect exclamation
df['tagQuestions_parent'] = df[commentParent].apply(tagQuestions) # detect tag questions
df['interjections_parent'] = df[commentParent].apply(getInterjections) # get any interjections if there are present
df['punch_parent'] = df[commentParent].apply(getPunctuation) # get the no of punctuations to be used as features
df['quotation_parent'] = df[commentParent].apply(detectQuotationMarks) # adding to detect quotation marks
df['totalCaps_parent'] = df[commentParent].apply(countTotalCaps) # adding support to count total number of CAPS
df['noOfWords_parent'] = df[commentParent].apply(countOfWords) # count the number of words in the parent comment
print("processing sentiment scores for | |
= self.target_feature_list[-1].end_timestamp
self.end_clip_number = self.target_feature_list[-1].end_clip_number
def __str__(self):
print_string = 'SHRP2 NDS Video Event\n\n'
if self.preceding_feature:
print_string += 'Preceding Feature:\n{}\n\n'.format(
self.preceding_feature)
print_string += 'Target Features:\n{}\n\n'.format(self.target_feature_list)
if self.following_feature:
print_string += 'Following Feature:\n{}\n\n'.format(
self.following_feature)
return print_string
class VehicleRightOfWayIncursionEvent:
def __init__(self, event_id, target_feature_list,
preceding_feature=None, following_feature=None):
"""Create a new 'Event' object.
Args:
event_id: int. The position of the event in the source video relative to
other events.
target_feature_list: Feature List. A list of features the event of
interest could contain.
preceding_feature: Feature. An auxiliary feature strictly different in
type from the target feature that should be included in the event if it
occurs just before the target feature in the source video.
following_feature: Feature. An auxiliary feature strictly different in
type from the target feature that should be included in the event if it
occurs just after the target feature in the source video.
"""
self.event_id = event_id
self.target_feature_list = target_feature_list
for target_feature in self.target_feature_list:
target_feature.event_id = self.event_id
self.start_clip_number = self.target_feature_list[0].start_clip_number
self.end_clip_number = self.target_feature_list[-1].end_clip_number
self.length = self.end_clip_number - self.start_clip_number
self._preceding_feature = preceding_feature
self._following_feature = following_feature
# self.contains_stopped_on_crossing_violation = None
self.contains_veh_adv_on_se_corr = None
self.contains_veh_adv_on_ne_corr = None
self.contains_veh_adv_on_sw_corr = None
self.contains_veh_adv_on_nw_corr = None
self.contains_veh_rec_on_se_corr = None
self.contains_veh_rec_on_ne_corr = None
self.contains_veh_rec_on_sw_corr = None
self.contains_veh_rec_on_nw_corr = None
self.contains_veh_std_on_se_corr = None
self.contains_veh_std_on_ne_corr = None
self.contains_veh_std_on_sw_corr = None
self.contains_veh_std_on_nw_corr = None
self.train_is_present = None
def find_violations(self, classifications):
# find vehicle violations
self.contains_veh_adv_on_se_corr = np.any(classifications[:, 30])
self.contains_veh_adv_on_ne_corr = np.any(classifications[:, 33])
self.contains_veh_adv_on_sw_corr = np.any(classifications[:, 36])
self.contains_veh_adv_on_nw_corr = np.any(classifications[:, 39])
self.contains_veh_rec_on_se_corr = np.any(classifications[:, 42])
self.contains_veh_rec_on_ne_corr = np.any(classifications[:, 45])
self.contains_veh_rec_on_sw_corr = np.any(classifications[:, 48])
self.contains_veh_rec_on_nw_corr = np.any(classifications[:, 51])
self.contains_veh_std_on_se_corr = np.any(classifications[:, 54])
self.contains_veh_std_on_ne_corr = np.any(classifications[:, 57])
self.contains_veh_std_on_sw_corr = np.any(classifications[:, 60])
self.contains_veh_std_on_nw_corr = np.any(classifications[:, 63])
self.train_is_present = np.any(
classifications[:,
[6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]])
@property
def preceding_feature(self):
return self._preceding_feature
@preceding_feature.setter
def preceding_feature(self, preceding_feature):
self._preceding_feature = preceding_feature
self.start_clip_number = self.preceding_feature.start_clip_number
@property
def following_feature(self):
return self._following_feature
@following_feature.setter
def following_feature(self, following_feature):
self._following_feature = following_feature
# if this event's following feature is being reassigned to a later event,
# the 'following_feature' argument will be None
if self.following_feature:
self.end_clip_number = self.following_feature.end_clip_number
else:
self.end_clip_number = self.target_feature_list[-1].end_clip_number
def __str__(self):
print_string = 'SHRP2 NDS Video Event\n\n'
if self.preceding_feature:
print_string += 'Preceding Feature:\n{}\n\n'.format(
self.preceding_feature)
print_string += 'Target Features:\n{}\n\n'.format(self.target_feature_list)
if self.following_feature:
print_string += 'Following Feature:\n{}\n\n'.format(
self.following_feature)
return print_string
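# Minimal usage sketch (illustrative only): the Feature objects are normally produced
# elsewhere in this module; here a SimpleNamespace stands in for them, and the column
# indices of the classifications array follow find_violations above. Every name introduced
# below is an assumption made for the example, not part of the pipeline.
if __name__ == '__main__':
    import numpy as np
    from types import SimpleNamespace
    _stub_feature = SimpleNamespace(start_clip_number=10, end_clip_number=25, event_id=None)
    _event = VehicleRightOfWayIncursionEvent(event_id=0, target_feature_list=[_stub_feature])
    _classifications = np.zeros((16, 70), dtype=np.uint8)
    _classifications[:, 30] = 1  # pretend a vehicle advanced on the south-east corridor
    _event.find_violations(_classifications)
    print(_event.contains_veh_adv_on_se_corr, _event.train_is_present)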
class PedestrianRightOfWayIncursionEvent:
def __init__(self, event_id, target_feature_list,
preceding_feature=None, following_feature=None):
"""Create a new 'Event' object.
Args:
event_id: int. The position of the event in the source video relative to
other events.
target_feature_list: Feature List. A list of features the event of
interest could contain.
preceding_feature: Feature. An auxiliary feature strictly different in
type from the target feature that should be included in the event if it
occurs just before the target feature in the source video.
following_feature: Feature. An auxiliary feature strictly different in
type from the target feature that should be included in the event if it
occurs just after the target feature in the source video.
"""
self.event_id = event_id
self.target_feature_list = target_feature_list
for target_feature in self.target_feature_list:
target_feature.event_id = self.event_id
self.start_clip_number = self.target_feature_list[0].start_clip_number
self.end_clip_number = self.target_feature_list[-1].end_clip_number
self.length = self.end_clip_number - self.start_clip_number
self._preceding_feature = preceding_feature
self._following_feature = following_feature
# self.contains_stopped_on_crossing_violation = None
self.contains_ped_on_sth_corr = None
self.contains_ped_on_nth_corr = None
self.train_is_present = None
def find_violations(self, classifications):
# find pedestrian violations
self.contains_ped_on_sth_corr = np.any(classifications[:, 66])
self.contains_ped_on_nth_corr = np.any(classifications[:, 67])
self.train_is_present = np.any(
classifications[:,
[6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]])
@property
def preceding_feature(self):
return self._preceding_feature
@preceding_feature.setter
def preceding_feature(self, preceding_feature):
self._preceding_feature = preceding_feature
self.start_clip_number = self.preceding_feature.start_clip_number
@property
def following_feature(self):
return self._following_feature
@following_feature.setter
def following_feature(self, following_feature):
self._following_feature = following_feature
# if this event's following feature is being reassigned to a later event,
# the 'following_feature' argument will be None
if self.following_feature:
self.end_clip_number = self.following_feature.end_clip_number
else:
self.end_clip_number = self.target_feature_list[-1].end_clip_number
def __str__(self):
print_string = 'SHRP2 NDS Video Event\n\n'
if self.preceding_feature:
print_string += 'Preceding Feature:\n{}\n\n'.format(
self.preceding_feature)
print_string += 'Target Features:\n{}\n\n'.format(self.target_feature_list)
if self.following_feature:
print_string += 'Following Feature:\n{}\n\n'.format(
self.following_feature)
return print_string
class Trip:
def __init__(self, report_clip_numbers,
report_probs, class_name_map, non_event_weight_scale=0.05,
minimum_event_length=1):
self.class_names = class_name_map
self.report_probs = report_probs  # retained for the feature-sequence and violation methods below
self.class_ids = {value: key for key, value in self.class_names.items()}
report_class_ids = np.apply_along_axis(
func1d=self.feature_fn, axis=1, arr=report_probs)
self.feature_sequence = []
feature_id = 0
class_id = report_class_ids[0]
start_clip_number = report_clip_numbers[0]
for i in range(1, len(report_class_ids)):
if report_class_ids[i] != class_id:
end_clip_number = report_clip_numbers[i - 1]
# the beginning of the next feature has been reached.
# create an object for the preceding feature.
self.feature_sequence.append(Feature(
feature_id, class_id, self.class_names[class_id],
start_clip_number, end_clip_number))
feature_id += 1
class_id = report_class_ids[i]
start_clip_number = report_clip_numbers[i]
if i == len(report_class_ids) - 1:
self.feature_sequence.append(Feature(
feature_id, class_id, self.class_names[class_id],
start_clip_number, start_clip_number))
elif i == len(report_class_ids) - 1:
end_clip_number = report_clip_numbers[i]
self.feature_sequence.append(Feature(
feature_id, class_id, self.class_names[class_id],
start_clip_number, end_clip_number))
self.weight_scale = non_event_weight_scale
self.minimum_event_length = minimum_event_length
def feature_fn(self, clip_probs):
gates_ascending = self.class_ids['gates_ascending']
gates_descending = self.class_ids['gates_descending']
if clip_probs[gates_ascending] >= .25 \
and clip_probs[gates_ascending] > clip_probs[gates_descending]:
return gates_ascending
if clip_probs[gates_descending] >= .25 \
and clip_probs[gates_descending] > clip_probs[gates_ascending]:
return gates_descending
gates_down = self.class_ids['gates_down']
gates_up = self.class_ids['gates_up']
if clip_probs[gates_descending] < .25 \
and clip_probs[gates_ascending] < .25 \
and clip_probs[gates_down] >= .6 \
and clip_probs[gates_up] < clip_probs[gates_down] / 3:
return gates_down
else:
return gates_up
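# Worked example of the rule above (illustrative class-id assignment, assuming
# {'gates_ascending': 0, 'gates_descending': 1, 'gates_down': 2, 'gates_up': 3}):
# clip_probs = [0.30, 0.10, 0.40, 0.20] -> gates_ascending (0.30 >= .25 and 0.30 > 0.10);
# clip_probs = [0.05, 0.10, 0.70, 0.15] -> gates_down (both gate-motion probs < .25,
# gates_down >= .6 and gates_up 0.15 < 0.70 / 3); anything else falls through to gates_up.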
def get_stopped_on_crossing_incursion_feature_sequence(
self, report_clip_numbers, smooth_probs,
smoothing_factor):
feature_sequence = []
# extract features for use in finding stopped-on-crossing incursions
incursion_states = self.report_probs[:, [55, 58, 61, 64]]
if smooth_probs:
incursion_states = IO.smooth_probs(
incursion_states, smoothing_factor)
incursion_states = np.round(incursion_states).astype(np.uint8)
feature_id = 0
current_state = np.any(incursion_states[0])
start_clip_number = report_clip_numbers[0]
for i in range(1, len(incursion_states)):
ith_incursion_state = np.any(incursion_states[i])
if ith_incursion_state != current_state:
end_clip_number = report_clip_numbers[i - 1]
# the beginning of the next feature has been reached.
# create an object for the preceding feature.
feature_sequence.append(Feature(
feature_id, current_state,
start_clip_number, end_clip_number))
feature_id += 1
current_state = np.any(incursion_states[i])
start_clip_number = report_clip_numbers[i]
if i == len(incursion_states) - 1:
print('current_state: {}'.format(current_state))
feature_sequence.append(Feature(
feature_id, current_state,
start_clip_number, start_clip_number))
elif i == len(incursion_states) - 1:
end_clip_number = report_clip_numbers[i]
print('current_state: {}'.format(current_state))
feature_sequence.append(Feature(
feature_id, current_state,
start_clip_number, end_clip_number))
return feature_sequence
def find_stopped_on_crossing_incursion_events(self):
events = []
event_id = 0
i = 0
weight = 0.0
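# How the grouping below works: each incursion-positive feature adds its length to a
# running weight, while every negative feature in between drains the weight by
# weight_scale times its length; once the weight is exhausted the event is closed,
# and it is kept only if it spans at least minimum_event_length clips.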
while i < len(self.stopped_on_crossing_incursion_feature_sequence):
current_feature = self.stopped_on_crossing_incursion_feature_sequence[i]
i += 1
if current_feature.state:
target_feature_list = [current_feature]
longest_target_feature_gap = 0
weight += current_feature.length
while i < len(self.stopped_on_crossing_incursion_feature_sequence):
current_feature = self.stopped_on_crossing_incursion_feature_sequence[i]
i += 1
if current_feature.state:
current_feature_gap = current_feature.start_clip_number - \
target_feature_list[-1].end_clip_number
if longest_target_feature_gap < current_feature_gap:
longest_target_feature_gap = current_feature_gap
target_feature_list.append(current_feature)
weight += current_feature.length
else:
weight -= self.weight_scale * current_feature.length
if weight <= 0:
break
current_event = StoppedOnCrossingIncursionEvent(event_id=event_id,
target_feature_list=target_feature_list)
weight = 0
if current_event.length >= self.minimum_event_length:
events.append(current_event)
event_id += 1
for event in events:
print('start_clip_number', event.start_clip_number)
print('end_clip_number', event.end_clip_number)
classifications = np.round(
self.report_probs[event.start_clip_number - 1:event.end_clip_number]
).astype(np.uint8)
print('classifications', classifications)
event.find_violations(classifications)
return events
def get_ped_right_of_way_incursion_feature_sequence(
self, report_clip_numbers, smooth_probs,
smoothing_factor):
feature_sequence = []
# extract features for use in finding pedestrian right-of-way incursions
incursion_states = self.report_probs[:, [66, 67]]
if smooth_probs:
incursion_states = IO.smooth_probs(
incursion_states, smoothing_factor)
incursion_states = np.round(incursion_states).astype(np.uint8)
feature_id = 0
current_state = np.any(incursion_states[0])
start_clip_number = report_clip_numbers[0]
for i in range(1, len(incursion_states)):
ith_incursion_state = np.any(incursion_states[i])
if ith_incursion_state != current_state:
end_clip_number = report_clip_numbers[i - 1]
print('current_state: {}'.format(current_state))
# the beginning of the next feature has been reached.
# create an object for the preceding feature.
feature_sequence.append(Feature(
feature_id, current_state,
start_clip_number, end_clip_number))
feature_id += 1
current_state = np.any(incursion_states[i])
start_clip_number = report_clip_numbers[i]
if i == len(incursion_states) - 1:
print('current_state: {}'.format(current_state))
feature_sequence.append(Feature(
feature_id, current_state,
start_clip_number, start_clip_number))
elif i == len(incursion_states) - 1:
end_clip_number = report_clip_numbers[i]
print('current_state: {}'.format(current_state))
feature_sequence.append(Feature(
feature_id, current_state,
start_clip_number, end_clip_number))
return feature_sequence
def find_ped_right_of_way_incursion_events(self):
events = []
event_id = 0
i = 0
weight = 0.0
while i < len(self.ped_right_of_way_incursion_feature_sequence):
current_feature = self.ped_right_of_way_incursion_feature_sequence[i]
i += 1
if current_feature.state:
target_feature_list = [current_feature]
longest_target_feature_gap = 0
weight += current_feature.length
while i < len(self.ped_right_of_way_incursion_feature_sequence):
current_feature = self.ped_right_of_way_incursion_feature_sequence[i]
i += 1
if current_feature.state:
current_feature_gap = current_feature.start_clip_number - \
target_feature_list[-1].end_clip_number
if longest_target_feature_gap < current_feature_gap:
longest_target_feature_gap = current_feature_gap
target_feature_list.append(current_feature)
weight += current_feature.length
else:
weight -= self.weight_scale * current_feature.length
if weight <= 0:
break
current_event = PedestrianRightOfWayIncursionEvent(event_id=event_id,
target_feature_list=target_feature_list)
"""
Click command to do static annotation searching via Stevedore plugins.
"""
import datetime
import errno
import os
import re
from abc import ABCMeta, abstractmethod
import yaml
from stevedore import named
from code_annotations import annotation_errors
from code_annotations.exceptions import ConfigurationException
from code_annotations.helpers import VerboseEcho
class AnnotationConfig:
"""
Configuration shared among all Code Annotations commands.
"""
def __init__(self, config_file_path, report_path_override=None, verbosity=1, source_path_override=None):
"""
Initialize AnnotationConfig.
Args:
config_file_path: Path to the configuration file
report_path_override: Path to write reports to, if overridden on the command line
verbosity: Verbosity level from the command line
source_path_override: Path to search if we're static code searching, if overridden on the command line
"""
self.groups = {}
self.choices = {}
self.optional_groups = []
self.annotation_tokens = []
self.annotation_regexes = []
self.mgr = None
# Global logger, other objects can hold handles to this
self.echo = VerboseEcho()
with open(config_file_path) as config_file:
raw_config = yaml.safe_load(config_file)
self._check_raw_config_keys(raw_config)
self.safelist_path = raw_config['safelist_path']
self.extensions = raw_config['extensions']
self.verbosity = verbosity
self.echo.set_verbosity(verbosity)
self.report_path = report_path_override if report_path_override else raw_config['report_path']
self.echo(f"Configured for report path: {self.report_path}")
self.source_path = source_path_override if source_path_override else raw_config['source_path']
self.echo(f"Configured for source path: {self.source_path}")
self._configure_coverage(raw_config.get('coverage_target', None))
self.report_template_dir = raw_config.get('report_template_dir')
self.rendered_report_dir = raw_config.get('rendered_report_dir')
self.rendered_report_file_extension = raw_config.get('rendered_report_file_extension')
self.rendered_report_source_link_prefix = raw_config.get('rendered_report_source_link_prefix')
self._configure_annotations(raw_config)
self._configure_extensions()
def _check_raw_config_keys(self, raw_config):
"""
Validate that all required keys exist in the configuration file.
Args:
raw_config: Python representation of the YAML config file
Raises:
ConfigurationException on any missing keys
"""
errors = []
for k in ('report_path', 'source_path', 'safelist_path', 'annotations', 'extensions'):
if k not in raw_config:
errors.append(k)
if errors:
raise ConfigurationException(
'The following required keys are missing from the configuration file: \n{}'.format(
'\n'.join(errors)
)
)
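# Illustrative configuration shape (hypothetical paths and token names; only the key
# names come from the required-key check above and from _configure_annotations below):
#
#   safelist_path: .annotation_safe_list.yml
#   report_path: reports
#   source_path: ./
#   extensions:
#       python:
#           - py
#   annotations:
#       ".. my_annotation:":
#       ".. my_choice_annotation:":
#           choices: [choice_a, choice_b]
#       my_group:
#           - ".. group_start:":
#           - ".. group_end:":
#               optional: true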
def _is_annotation_group(self, token_or_group):
"""
Determine if an annotation is a group or not.
Args:
token_or_group: The annotation being checked
Returns:
True if the type of the annotation is correct for a group, otherwise False
"""
return isinstance(token_or_group, list)
def _is_choice_group(self, token_or_group):
"""
Determine if an annotation is a choice group.
Args:
token_or_group: The annotation being checked
Returns:
True if the type of the annotation is correct for a choice group, otherwise False
"""
return isinstance(token_or_group, dict) and "choices" in token_or_group
def _is_optional_group(self, token_or_group):
"""
Determine if an annotation is an optional group.
Args:
token_or_group: The annotation being checked
Returns:
True if the annotation is optional, otherwise False.
"""
return isinstance(token_or_group, dict) and bool(token_or_group.get("optional"))
def _is_annotation_token(self, token_or_group):
"""
Determine if an annotation has the right format.
Args:
token_or_group: The annotation being checked
Returns:
True if the type of the annotation is correct for a text type, otherwise False
"""
if token_or_group is None:
return True
if isinstance(token_or_group, dict):
# If annotation is a dict, only a few keys are tolerated
return set(token_or_group.keys()).issubset({"choices", "optional"})
return False
def _add_annotation_token(self, token):
if token in self.annotation_tokens:
raise ConfigurationException(f'{token} is configured more than once, tokens must be unique.')
self.annotation_tokens.append(token)
def _configure_coverage(self, coverage_target):
"""
Set coverage_target to the specified value.
Args:
coverage_target:
Returns:
"""
if coverage_target:
try:
self.coverage_target = float(coverage_target)
except (TypeError, ValueError) as error:
raise ConfigurationException(
f'Coverage target must be a number between 0 and 100 not "{coverage_target}".'
) from error
if self.coverage_target < 0.0 or self.coverage_target > 100.0:
raise ConfigurationException(
f'Invalid coverage target. {self.coverage_target} is not between 0 and 100.'
)
else:
self.coverage_target = None
def _configure_group(self, group_name, group):
"""
Perform group configuration and add annotations from the group to global configuration.
Args:
group_name: The name of the group (the key in the configuration dictionary)
group: The list of annotations that comprise the group
Raises:
TypeError if the group is misconfigured
"""
self.groups[group_name] = []
if not group or len(group) == 1:
raise ConfigurationException(f'Group "{group_name}" must have more than one annotation.')
for annotation in group:
for annotation_token in annotation:
annotation_value = annotation[annotation_token]
# Otherwise it should be a text type, if not then error out
if not self._is_annotation_token(annotation_value):
raise ConfigurationException(f'{annotation} is an unknown annotation type.')
# The annotation comment is a choice group
if self._is_choice_group(annotation_value):
self._configure_choices(annotation_token, annotation_value)
# The annotation comment is not mandatory
if self._is_optional_group(annotation_value):
self.optional_groups.append(annotation_token)
self.groups[group_name].append(annotation_token)
self._add_annotation_token(annotation_token)
self.annotation_regexes.append(re.escape(annotation_token))
def _configure_choices(self, annotation_token, annotation):
"""
Configure the choices list for an annotation.
Args:
annotation_token: The annotation token we are setting choices for
annotation: The annotation body (list of choices)
"""
self.choices[annotation_token] = annotation['choices']
def _configure_annotations(self, raw_config):
"""
Transform the configured annotations into more usable pieces and validate.
Args:
raw_config: The dictionary form of our configuration file
Raises:
TypeError if annotations are misconfigured
"""
annotation_tokens = raw_config['annotations']
for annotation_token_or_group_name in annotation_tokens:
annotation = annotation_tokens[annotation_token_or_group_name]
if self._is_annotation_group(annotation):
self._configure_group(annotation_token_or_group_name, annotation)
elif self._is_choice_group(annotation):
self._configure_choices(annotation_token_or_group_name, annotation)
self._add_annotation_token(annotation_token_or_group_name)
self.annotation_regexes.append(re.escape(annotation_token_or_group_name))
elif not self._is_annotation_token(annotation): # pragma: no cover
raise TypeError(
f'{annotation_token_or_group_name} is an unknown type, must be strings or lists.'
)
else:
self._add_annotation_token(annotation_token_or_group_name)
self.annotation_regexes.append(re.escape(annotation_token_or_group_name))
self.echo.echo_v(f"Groups configured: {self.groups}")
self.echo.echo_v(f"Choices configured: {self.choices}")
self.echo.echo_v(f"Annotation tokens configured: {self.annotation_tokens}")
def _plugin_load_failed_handler(self, *args, **kwargs):
"""
Handle failures to load an extension.
Dumps the error and raises an exception. By default these
errors just fail silently.
Args:
*args:
**kwargs:
Raises:
ConfigurationException
"""
self.echo(str(args), fg='red')
self.echo(str(kwargs), fg='red')
raise ConfigurationException('Failed to load a plugin, aborting.')
def _configure_extensions(self):
"""
Configure the Stevedore NamedExtensionManager.
Raises:
ConfigurationException
"""
# These are the names of all of our configured extensions
configured_extension_names = self.extensions.keys()
# Load Stevedore extensions that we are configured for (and only those)
self.mgr = named.NamedExtensionManager(
names=configured_extension_names,
namespace='annotation_finder.searchers',
invoke_on_load=True,
on_load_failure_callback=self._plugin_load_failed_handler,
invoke_args=(self, self.echo),
)
# Output extension names listed in configuration
self.echo.echo_vv("Configured extension names: {}".format(" ".join(configured_extension_names)))
# Output found extension entry points from setup.py|cfg (whether or not they were loaded)
self.echo.echo_vv("Stevedore entry points found: {}".format(str(self.mgr.list_entry_points())))
# Output extensions that were actually able to load
self.echo.echo_v("Loaded extensions: {}".format(" ".join([x.name for x in self.mgr.extensions])))
if len(self.mgr.extensions) != len(configured_extension_names):
raise ConfigurationException('Not all configured extensions could be loaded! Asked for {} got {}.'.format(
configured_extension_names, self.mgr.extensions
))
class BaseSearch(metaclass=ABCMeta):
"""
Base class for searchers.
"""
def __init__(self, config):
"""
Initialize for StaticSearch.
Args:
config: Configuration object
"""
self.config = config
self.echo = self.config.echo
# errors contains formatted error messages
self.errors = []
# annotation_errors contains (annotation, AnnotationErrorType, args) tuples
# This attribute may be parsed by 3rd-parties, such as edx-lint.
self.annotation_errors = []
def format_file_results(self, all_results, results):
"""
Add all extensions' search results for a file to the overall results.
Args:
all_results: Aggregated results to add the results to
results: Results of search() on a single file
Returns:
None, modifies all_results
"""
for annotations in results:
if not annotations:
continue
# TODO: The file_path should be the same for all of these results
# so we should be able to optimize getting file_path and making
# sure it exists in the dict to do this less often.
file_path = annotations[0]['filename']
if file_path not in all_results: # pragma: no cover
all_results[file_path] = []
for annotation in annotations:
# If this is a "choices" type of annotation, split the comment into a list.
# Actually checking the choice validity happens later in _check_results_choices.
if annotation['annotation_token'] in self.config.choices:
annotation['annotation_data'] = re.split(r',\s?|\s', annotation['annotation_data'])
# TODO: De-dupe results? Should only be necessary if more than one
# Stevedore extension is working on the same file type
all_results[file_path].extend(annotations)
def _check_results_choices(self, annotation):
"""
Check that a search result has appropriate choices.
If the following errors are found:
- no choices
- multiple of the same choice
- a choice which is not configured
This function will add the error to self.errors.
Args:
annotation: A single search result dict.
"""
# Not a choice type of annotation, nothing to do
if annotation['annotation_token'] not in self.config.choices:
return None
token = annotation['annotation_token']
found_valid_choices = []
# If the line begins with an annotation token that should have choices, but has no text after the token,
# the first split will be empty.
if annotation['annotation_data'][0] != "":
for choice in annotation['annotation_data']:
if choice not in self.config.choices[token]:
self._add_annotation_error(
annotation,
annotation_errors.InvalidChoice,
(choice, token, self.config.choices[token])
)
elif choice in found_valid_choices:
self._add_annotation_error(annotation, annotation_errors.DuplicateChoiceValue, (choice,))
else:
found_valid_choices.append(choice)
else:
self._add_annotation_error(
annotation,
annotation_errors.MissingChoiceValue,
(token, self.config.choices[token])
)
return None
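# Illustrative outcomes for a hypothetical token configured with choices ['a', 'b']:
#   annotation_data ['a']      -> valid, no error recorded
#   annotation_data ['a', 'a'] -> DuplicateChoiceValue
#   annotation_data ['c']      -> InvalidChoice
#   annotation_data ['']       -> MissingChoiceValue (token present but no choice given)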
def _get_group_children(self):
"""
Create a list of all annotation tokens that are part of a group.
Returns:
List of annotation tokens that are configured to
ranges = []
baseRange = range(1,51)
for p in self.useddims:
staticParams.append(p)
if p == '1':
ranges.append([1])
else:
ranges.append(range(2,51,random.choice(baseRange)))
return (staticParams, itertools.product(*ranges))
def ppDecl(self, decl, pos):
res = ""
for s in decl:
res += ",".join(decl[s])
res += ": " + s[0][0] + "<"
if s[0][0] != 'scalar':
if s[0][0] == 'vector':
res += s[1][0]
elif s[0][0] != 'matrix':
res += s[1][0] + ', ' + s[0][1]
else:
res += s[1][0] + ', ' + s[1][1]
res += ', ' + pos + '>;\n'
else:
res += pos + '>;\n'
return res
def getDimSize(self, n):
dims = []
for _ in range(n):
d = random.choice(self.dims)
dims.append(d)
self.useddims.update(dims)
return dims
def getStructInfo(self, dims):
typ = None
addInfo = None
# if dims[1] == '1':
# if dims[0] == '1':
# typ = "scalar"
# else:
# typ = "vector"
if dims[0] == dims[1] and dims[0] != '1':
typ = random.choice(['matrix', 'triangular', 'symmetric'])
if typ != 'matrix':
addInfo = random.choice(['l', 'u'])
else:
typ = "matrix"
struct = [typ] + ([] if addInfo is None else [addInfo])
return struct
def Assign(self):
ops = [ 'Add', 'T', 'Kro', 'Mul', 'Matrix' ]
dims = self.getDimSize(2)
struct = self.getStructInfo(dims)
dropops = set()
if struct[0] == 'symmetric':
dropops.update(['Mul'])
lop = getattr(self, 'Matrix')(dims, 'l', struct, dropops)
rop = getattr(self, random.choice(ops))(dims, 'r', struct, dropops)
return lop + " = " + rop + ";"
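# Shape of the generated statements: Assign emits "lhs = rhs;" where lhs is always a
# Matrix operand and rhs is a random expression tree over Add, T (transpose), Kro
# (one scalar factor times an operand) and Mul, with Matrix leaves. self.numops acts
# as a budget: every operator consumes one unit, and once it is exhausted only Matrix
# leaves are produced, which bounds the size of the expression.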
def Add(self, dims, pos, struct, dropops=None):
if dropops is None: dropops = set()
possops = [ op for op in [ 'Add', 'T', 'Kro', 'Mul', 'Matrix' ] if op not in dropops ]
if dims[0] == dims[1]:
if struct == ['matrix']:
structs = random.choice([ [['triangular','l'],['triangular','u']], [['triangular','u'],['triangular','l']], [['matrix'],['matrix']], [['symmetric','l'],['symmetric','u']], [['symmetric','u'],['symmetric','l']] ])
else:
structs = [copy(struct),copy(struct)]
else:
structs = [['matrix'],['matrix']]
self.numops -= 1
if self.numops > 0:
ops = possops
else:
ops = [ 'Matrix' ]
dopsl, dopsr = copy(dropops), copy(dropops)
if structs[0][0] == 'symmetric':
dopsl.update(['Mul'])
lop = getattr(self, random.choice(ops))(dims, pos, structs[0], dopsl)
if self.numops > 0:
ops = possops
else:
ops = [ 'Matrix' ]
if structs[1][0] == 'symmetric':
dopsr.update(['Mul'])
rop = getattr(self, random.choice(ops))(dims, pos, structs[1], dopsr)
return "(" + lop + " + " + rop + ")"
def Mul(self, dims, pos, struct, dropops=None):
if dropops is None: dropops = set()
possops = [ op for op in [ 'Add', 'T', 'Kro', 'Matrix' ] if op not in dropops ]
dropops.update(['Mul'])
if struct[0] == 'triangular':
k = [dims[0]]
structs = [copy(struct), copy(struct)]
else:
typl = random.choice(['matrix', 'triangular', 'symmetric'])
if typl != 'matrix':
lAddInfo = random.choice(['l', 'u'])
k = [dims[0]]
if k[0] == dims[1]:
typr = random.choice(['matrix', 'triangular', 'symmetric'])
else:
typr = 'matrix'
if typl == typr:
rAddInfo = [ t for t in ['l', 'u'] if t != lAddInfo][0]
elif typr != 'matrix':
rAddInfo = random.choice(['l', 'u'])
else:
rAddInfo = None
else:
lAddInfo = None
typr = random.choice(['matrix', 'triangular', 'symmetric'])
if typr != 'matrix':
rAddInfo = random.choice(['l', 'u'])
k = [dims[1]]
else:
k = self.getDimSize(1)
rAddInfo = None
structs = [[typl] + ([] if lAddInfo is None else [lAddInfo]),[typr] + ([] if rAddInfo is None else [rAddInfo])]
diml = [dims[0], k[0]]
dimr = [k[0], dims[1]]
self.numops -= 1
if self.numops > 0:
ops = possops
else:
ops = [ 'Matrix' ]
lop = getattr(self, random.choice(ops))(diml, pos, structs[0], dropops)
if self.numops > 0:
ops = possops
else:
ops = [ 'Matrix' ]
rop = getattr(self, random.choice(ops))(dimr, pos, structs[1], dropops)
return "(" + lop + " * " + rop + ")"
def T(self, dims, pos, struct, dropops=None):
if dropops is None: dropops = set()
possops = [ op for op in [ 'Add', 'T', 'Kro', 'Matrix' ] if op not in dropops ]
dropops.update(['Mul'])
self.numops -= 1
if self.numops > 0:
ops = possops
else:
ops = [ 'Matrix' ]
tstruct = copy(struct)
if tstruct[0] != 'matrix':
tstruct[1] = [ t for t in ['l', 'u'] if t != tstruct[1]][0]
op = getattr(self, random.choice(ops))(dims[::-1], pos, tstruct, dropops)
return "trans(" + op + ")"
def Kro(self, dims, pos, struct, dropops=None):
if dropops is None: dropops = set()
possops = [ op for op in [ 'Add', 'T', 'Kro', 'Mul', 'Matrix' ] if op not in dropops ]
ids = [0,1]
l = random.choice(ids)
ids.remove(l)
r = ids[0]
self.numops -= 1
if self.numops > 0:
ops = [['Scalar'], possops][l]
else:
ops = [['Scalar'], [ 'Matrix' ]][l]
lop = getattr(self, random.choice(ops))(dims, pos, struct, dropops)
if self.numops > 0:
ops = [['Scalar'], possops][r]
else:
ops = [['Scalar'], [ 'Matrix' ]][r]
rop = getattr(self, random.choice(ops))(dims, pos, struct, dropops)
return "(" + lop + " * " + rop + ")"
def Matrix(self, dims, pos, struct, dropops=None):
key = None
baseName = 'A'
if dims[0] == dims[1] == '1':
key = (('scalar',),)
baseName = 'b'
elif dims[1] == '1':
key = (('vector',), tuple(dims))
else:
key = (tuple(struct), tuple(dims))
inl = self.declin.get(key,[])
inoutl = self.declinout.get(key,[])
outl = self.declout.get(key,[])
op = random.choice( sum([inl,inoutl,outl], [baseName+str(self.matc)]) )
self.matc += 1
exist = isin = isout = isinout = False
if op in inl:
exist = isin = True
elif op in outl:
exist = isout = True
elif op in inoutl:
exist = isinout = True
if not exist:
decl = self.declout if pos == 'l' else self.declin
if key in decl:
decl[key].append(op)
else:
decl[key] = [op]
elif (isin and pos == 'l') or (isout and pos == 'r'):
decl = self.declin if isin else self.declout
decl[key].remove(op)
if not decl[key]:
del decl[key]
if key in self.declinout:
self.declinout[key].append(op)
else:
self.declinout[key] = [op]
else:
decl = None
if isinout:
decl = self.declinout
elif isin:
decl = self.declin
else:
decl = self.declout
if op not in decl[key]:
decl[key] = [op]
return op
def oldMatrix(self, dims, pos, dropops=None):
op = random.choice(self.mats)
exist = isin = isout = isinout = False
if any(map(lambda s: op in s, self.declin.values())):
exist = isin = True
elif any(map(lambda s: op in s, self.declout.values())):
exist = isout = True
elif any(map(lambda s: op in s, self.declinout.values())):
exist = isinout = True
if not exist:
decl = self.declin if pos == 'r' else self.declout
typ = ""
if dims[1] == '1':
if dims[0] == '1':
typ = "scalar<"
else:
typ = "vector<"+str(dims[0]) + ", "
elif dims[0] == dims[1]:
typ = random.choice(['matrix', 'triangular', 'symmetric'])
if typ != 'matrix':
struct = random.choice(['l', 'u'])
typ += "<"+str(dims[0])+", "+struct + ", "
else:
typ = "matrix<"+str(dims[0])+", "+str(dims[1]) + ", "
else:
typ = "matrix<"+str(dims[0])+", "+str(dims[1]) + ", "
decl[typ] = set([op])
elif (isin and pos == 'l') or (isout and pos == 'r'):
src = self.declin if isin else self.declout
typ = [t for t in src if op in src[t]][0]  # filter(...) is not subscriptable in Python 3
src[typ].remove(op)
if not src[typ]:
del src[typ]
self.declinout[typ] = set([op])
else:
decl = None
if isinout:
decl = self.declinout
elif isin:
decl = self.declin
else:
decl = self.declout
typ = [t for t in decl if op in decl[t]][0]  # filter(...) is not subscriptable in Python 3
decl[typ].update([op])
return op
def Scalar(self, dims, pos, struct, dropops=None):
return self.Matrix(['1','1'], pos, ['matrix'], dropops)
# key = (('scalar',),)
#
# inl = self.declin.get(key,[])
# inoutl = self.declinout.get(key,[])
# outl = self.declout.get(key,[])
#
# op = random.choice(sum([inl,inoutl,outl], ['b'+str(self.matc)]))
# self.matc += 1
# exist = isin = isout = isinout = False
# if op in inl:
# exist = isin = True
# elif op in outl:
# exist = isout = True
# elif op in inoutl:
# exist = isinout = True
#
# if not exist:
# decl = self.declout if pos == 'l' else self.declin
# if key in decl:
# decl[key].append(op)
# else:
# decl[key] = [op]
# elif (isin and pos == 'l') or (isout and pos == 'r'):
# src = self.declin if isin else self.declout
# src[key].remove(op)
# if not src[key]:
# del src[key]
# if key in self.declinout:
# self.declinout[key].append(op)
# else:
# self.declinout[key] = [op]
# else:
# src = None
# if isinout:
# src = self.declinout
# elif isin:
# src = self.declin
# else:
# src = self.declout
# if op not in src[key]:
# src[key] = [op]
#
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_database_backup_destination
short_description: Manage a BackupDestination resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a BackupDestination resource in Oracle Cloud Infrastructure
- For I(state=present), creates a backup destination in an Exadata Cloud@Customer system.
- "This resource has the following action operations in the M(oracle.oci.oci_database_backup_destination_actions) module: change_compartment."
version_added: "2.9.0"
author: Oracle (@oracle)
options:
display_name:
description:
- The user-provided name of the backup destination.
- Required for create using I(state=present).
- Required for update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
aliases: ["name"]
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment.
- Required for create using I(state=present).
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
type:
description:
- Type of the backup destination.
- Required for create using I(state=present).
type: str
choices:
- "NFS"
- "RECOVERY_APPLIANCE"
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
- This parameter is updatable.
type: dict
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- This parameter is updatable.
type: dict
local_mount_point_path:
description:
- "**Deprecated.** The local directory path on each VM cluster node where the NFS server location is mounted. The local directory path and the NFS
server location must each be the same across all of the VM cluster nodes. Ensure that the NFS mount is maintained continuously on all of the VM
cluster nodes.
This field is deprecated. Use the mountTypeDetails field instead to specify the mount type for NFS."
- This parameter is updatable.
- Applicable when type is 'NFS'
type: str
mount_type_details:
description:
- ""
- Applicable when type is 'NFS'
type: dict
suboptions:
mount_type:
description:
- Mount type for backup destination.
type: str
choices:
- "SELF_MOUNT"
- "AUTOMATED_MOUNT"
default: "SELF_MOUNT"
local_mount_point_path:
description:
- The local directory path on each VM cluster node where the NFS server location is mounted. The local directory path and the NFS server
location must each be the same across all of the VM cluster nodes. Ensure that the NFS mount is maintained continuously on all of the VM
cluster nodes.
- Required when mount_type is 'SELF_MOUNT'
type: str
nfs_server:
description:
- IP addresses for NFS Auto mount.
- Required when mount_type is 'AUTOMATED_MOUNT'
type: list
elements: str
nfs_server_export:
description:
- Specifies the directory on which to mount the file system
- Required when mount_type is 'AUTOMATED_MOUNT'
type: str
connection_string:
description:
- The connection string for connecting to the Recovery Appliance.
- This parameter is updatable.
- Required when type is 'RECOVERY_APPLIANCE'
type: str
vpc_users:
description:
- The Virtual Private Catalog (VPC) users that are used to access the Recovery Appliance.
- This parameter is updatable.
- Required when type is 'RECOVERY_APPLIANCE'
type: list
elements: str
backup_destination_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the backup destination.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
nfs_mount_type:
description:
- NFS Mount type for backup destination.
- This parameter is updatable.
type: str
choices:
- "SELF_MOUNT"
- "AUTOMATED_MOUNT"
nfs_server:
description:
- IP addresses for NFS Auto mount.
- This parameter is updatable.
type: list
elements: str
nfs_server_export:
description:
- Specifies the directory on which to mount the file system
- This parameter is updatable.
type: str
state:
description:
- The state of the BackupDestination.
- Use I(state=present) to create or update a BackupDestination.
- Use I(state=absent) to delete a BackupDestination.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create backup_destination with type = NFS
oci_database_backup_destination:
# required
display_name: display_name_example
compartment_id: "ocid.compartment.oc1..unique_ID"
type: NFS
# optional
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
local_mount_point_path: local_mount_point_path_example
mount_type_details:
# required
local_mount_point_path: local_mount_point_path_example
# optional
mount_type: SELF_MOUNT
- name: Create backup_destination with type = RECOVERY_APPLIANCE
oci_database_backup_destination:
# required
display_name: display_name_example
compartment_id: "ocid.compartment.oc1..unique_ID"
type: RECOVERY_APPLIANCE
connection_string: connection_string_example
vpc_users: [ "null" ]
# optional
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update backup_destination
oci_database_backup_destination:
# required
backup_destination_id: "ocid1.backupdestination.oc1..xxxxxxEXAMPLExxxxxx"
# optional
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
local_mount_point_path: local_mount_point_path_example
connection_string: connection_string_example
vpc_users: [ "null" ]
nfs_mount_type: SELF_MOUNT
nfs_server: [ "null" ]
nfs_server_export: nfs_server_export_example
- name: Update backup_destination using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_database_backup_destination:
# required
display_name: display_name_example
compartment_id: "ocid.compartment.oc1..unique_ID"
# optional
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
local_mount_point_path: local_mount_point_path_example
connection_string: connection_string_example
vpc_users: [ "null" ]
nfs_mount_type: SELF_MOUNT
nfs_server: [ "null" ]
nfs_server_export: nfs_server_export_example
- name: Delete backup_destination
oci_database_backup_destination:
# required
backup_destination_id: "ocid1.backupdestination.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
- name: Delete backup_destination using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_database_backup_destination:
# required
display_name: display_name_example
compartment_id: "ocid.compartment.oc1..unique_ID"
state: absent
"""
RETURN = """
backup_destination:
description:
- Details of the BackupDestination resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the backup destination.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- The user-provided name of the backup destination.
returned: on success
type: str
sample: display_name_example
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
type:
description:
- Type of the backup destination.
returned: on success
type: str
sample: NFS
associated_databases:
description:
- List of databases associated with the backup destination.
returned: on success
type: complex
contains:
id:
description:
- The database L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
db_name:
description:
- The display name of the database that is associated with the backup destination.
returned: on success
type: str
sample: db_name_example
connection_string:
description:
- For a RECOVERY_APPLIANCE backup destination, the connection string for connecting to the Recovery Appliance.
returned: on success
type: str
sample: connection_string_example
vpc_users:
description:
- For a RECOVERY_APPLIANCE backup destination, the Virtual Private Catalog (VPC) users that are used to access the Recovery Appliance.
returned: on success
type: list
sample: []
local_mount_point_path:
description:
- The local directory path on each VM cluster node where the NFS server location is mounted. The local directory path and the NFS server
location must each be the same across all of the VM cluster nodes. Ensure that the NFS mount is maintained continuously on all of the VM
cluster nodes.
returned: on success
type: str
sample: local_mount_point_path_example
nfs_mount_type:
description:
- NFS Mount type for backup destination.
returned: on success
type: str
sample: SELF_MOUNT
nfs_server:
description:
- Host names or IP addresses for NFS Auto mount.
returned: on success
type: list
sample: []
nfs_server_export:
description:
- Specifies the directory on which to mount the file system
returned: on success
type: str
sample: nfs_server_export_example
lifecycle_state:
description:
- The current lifecycle state of the backup destination.
returned: on success
type: str
sample: ACTIVE
time_created:
description:
- The date and time the backup destination was created.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
lifecycle_details:
description:
- A descriptive text associated with the lifecycleState.
Typically contains additional displayable text
returned: on success
type: str
sample: lifecycle_details_example
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
import numpy as np
from numpy import *
import matplotlib.pyplot as mpl
import h5py
import sys
import os
from matplotlib.pyplot import *
from plotnine import *
import pandas as p
import myggplottheme
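# Minimal usage sketch (the file name and keyword choices are illustrative, not
# prescriptive; the whole analysis is driven from BSE.__init__ below):
#
#   solver = BSE('dca_tp.hdf5', draw=False, calcRedVertex=True, calcCluster=False, nkfine=100)
#   print(solver.lambdas)              # leading eigenvalues of the BSE kernel
#   print(solver.Pd0, solver.Vd)       # d-wave projections of the pairing bubble and vertex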
class BSE:
def __init__(self,fileG4,draw=False,useG0=False,symmetrize_G4=False,phSymmetry=False,calcRedVertex=False,calcCluster=False,nkfine=100,oldFormat=False,shiftedK=False,newMaster=False,allq=False,iq=0,evenFreqOnly=True,wCutOff=1):
self.fileG4 = fileG4
self.draw = draw
self.useG0 = useG0
self.symmetrize_G4 = symmetrize_G4
self.calcCluster = calcCluster
self.calcRedVertex = calcRedVertex
self.phSymmetry = phSymmetry
self.oldFormat = oldFormat
self.shiftedK = shiftedK
self.newMaster = newMaster
self.allq = allq
self.iq=iq
self.evenFreqOnly = evenFreqOnly
self.wCutOff = wCutOff
self.readData()
self.setupMomentumTables()
self.iK0 = self.K_2_iK(0.0, 0.0)
self.determine_specialK()
print ("Index of (pi,pi): ",self.iKPiPi)
print ("Index of (pi,0): ",self.iKPi0)
if self.symmetrize_G4: self.symmetrizeG4()
if self.vertex_channel in ("PARTICLE_PARTICLE_UP_DOWN", "PARTICLE_PARTICLE_SUPERCONDUCTING", "PARTICLE_PARTICLE_SINGLET"):
self.calcSCClusterSus()
if self.vertex_channel in ("PARTICLE_PARTICLE_SINGLET"): sys.exit("PARTICLE_PARTICLE_SINGLET channel has singular chi0");
self.calcChi0Cluster()
self.calcGammaIrr()
if self.vertex_channel in ("PARTICLE_PARTICLE_SUPERCONDUCTING","PARTICLE_PARTICLE_UP_DOWN"):
self.symmetrizeGamma()
if calcCluster == False: self.buildChi0Lattice(nkfine)
self.buildKernelMatrix()
self.calcKernelEigenValues()
title = "Leading eigensolutions of BSE for U="+str(self.U)+", t'="+str(self.tp)+r", $\langle n\rangle$="+str(round(self.fill,4))+", T="+str(round(self.temp,4))
if self.vertex_channel in ("PARTICLE_HOLE_TRANSVERSE","PARTICLE_HOLE_MAGNETIC"):
self.calcSWaveSus()
# print("Cluster spin susceptibility: ",sum(self.G4)/(float(self.Nc)*self.invT))
if self.draw: self.plotLeadingSolutions(self.Kvecs,self.lambdas,self.evecs[:,:,:],title)
if calcRedVertex: self.calcReducibleLatticeVertex()
if self.vertex_channel in ("PARTICLE_PARTICLE_SUPERCONDUCTING","PARTICLE_PARTICLE_UP_DOWN"):
if calcCluster == False:
self.calcReducibleLatticeVertex()
self.calcSCSus()
self.determineFS(); FSpoints = array(self.FSpoints)
self.calcPd0FS(FSpoints)
self.calcSCClusterSus()
self.calcPd0(wCutOff=1)
self.Pd0 = self.calcProjectionsKwn(self.chi0M,self.dwave,0)/(self.invT*self.Nc)
print("\nPd0(T) = ",self.Pd0)
self.Vd = self.calcProjectionsKwn(self.GammaM,self.dwave,2)
print("Vd(T) = ",self.Vd)
# Pd0 = self.calcProjections(self.chi0M,self.dwave)
prod=self.Vd*self.Pd0
print("Vd(T)*Pd0(T) = ",prod)
print("PMd(T) = ",self.calcProjectionsKwn(self.pm,self.dwave,1))
print("\nPpp0(T) = ",self.calcProjections(self.chi0M,self.pxpwave)/(self.invT*self.Nc))
print("Vpp(T) = ",self.calcProjections(self.GammaM,self.pxpwave))
print("PMpp(T) = ",self.calcProjections(self.pm,self.pxpwave))
self.Vd0 = self.calcProjections(self.GammaM,self.dwave,wCutOff=0)/self.Nc**2
print("\nVd_piT_piT = ", self.Vd0)
print("\n")
if self.found_d: self.calcPdFromEigenFull(self.ind_d)
self.calcReducibleClusterVertex()
if self.calcRedVertex & (self.calcCluster==False):
# FSpoints = array([16,12,9,5,2,29,25,20,23,27,30,6,11,15])
iwG40 = int(self.NwG4/2); nFs = int(FSpoints.shape[0])
GRFS = np.sum(self.GammaRed[iwG40-1:iwG40+1,:,iwG40-1:iwG40+1,:],axis=(0,2))[FSpoints,:][:,FSpoints]/4.
print ("s-wave projection of GammaRed averaged: ", real(np.sum(GRFS)/float(nFs*nFs)))
gkd = self.dwave(self.Kvecs[FSpoints,0],self.Kvecs[FSpoints,1])
r1 = real(np.dot(gkd,np.dot(GRFS,gkd)))
print ("d-wave projection of GammaRed averaged: ", r1/np.dot(gkd, gkd)/float(nFs))
GRFSpp = self.GammaRed[iwG40,:,iwG40,:][FSpoints,:][:,FSpoints]
GRFSpm = self.GammaRed[iwG40,:,iwG40-1,:][FSpoints,:][:,FSpoints]
print ("s-wave projection of GammaRed(piT,piT): ", real(np.sum(GRFSpp)/float(nFs*nFs)))
print ("s-wave projection of GammaRed(piT,-piT): ", real(np.sum(GRFSpm)/float(nFs*nFs)))
r1 = real(np.dot(gkd,np.dot(GRFSpp,gkd)))/np.dot(gkd, gkd)/float(nFs)
r2 = real(np.dot(gkd,np.dot(GRFSpm,gkd)))/np.dot(gkd, gkd)/float(nFs)
print ("d-wave projection of GammaRed(piT,piT): " , r1)
print ("d-wave projection of GammaRed(piT,-piT): ", r2)
GRFSpp = self.GammaRed[iwG40+1,:,iwG40+1,:][FSpoints,:][:,FSpoints]
GRFSpm = self.GammaRed[iwG40+1,:,iwG40-2,:][FSpoints,:][:,FSpoints]
print ("s-wave projection of GammaRed(3piT,3piT): ", real(np.sum(GRFSpp)/float(nFs*nFs)))
print ("s-wave projection of GammaRed(3piT,-3piT): ", real(np.sum(GRFSpm)/float(nFs*nFs)))
r1 = real(np.dot(gkd,np.dot(GRFSpp,gkd)))/np.dot(gkd, gkd)/float(nFs)
r2 = real(np.dot(gkd,np.dot(GRFSpm,gkd)))/np.dot(gkd, gkd)/float(nFs)
print ("d-wave projection of GammaRed(3piT,3piT): " , r1)
print ("d-wave projection of GammaRed(3piT,-3piT): ", r2)
GRFSpp = self.GammaCluster[iwG40,:,iwG40,:][FSpoints,:][:,FSpoints]
GRFSpm = self.GammaCluster[iwG40,:,iwG40-1,:][FSpoints,:][:,FSpoints]
r1 = real(np.dot(gkd,np.dot(GRFSpp,gkd)))/np.dot(gkd, gkd)/float(nFs)
r2 = real(np.dot(gkd,np.dot(GRFSpm,gkd)))/np.dot(gkd, gkd)/float(nFs)
print ("d-wave projection of GammaCluster(piT,piT): " , r1)
print ("d-wave projection of GammaCluster(piT,-piT): ", r2)
self.calcGammaRedEvals()
def readData(self):
f = h5py.File(self.fileG4,'r')
if self.oldFormat == True:
self.cluster = array(f["domains"]["CLUSTER"]["REAL_SPACE"]["super-basis"]["data"])
print("Cluster vectors:",self.cluster)
self.iwm = array(f['parameters']['vertex-channel']['w-channel'])[0] # transferred frequency in units of 2*pi*temp
self.qchannel = array(f['parameters']['vertex-channel']['q-channel'])
a = array(f['parameters']['vertex-channel']['vertex-measurement-type'])[:]
self.vertex_channel = ''.join(chr(i) for i in a)
self.invT = array(f['parameters']['physics-parameters']['beta'])[0]
self.temp = 1.0/self.invT
self.U = array(f['parameters']['2D-Hubbard-model']['U'])[0]
self.tp = array(f['parameters']['2D-Hubbard-model']['t-prime'])[0]
self.fill = array(f['parameters']['physics-parameters']['density'])[0]
self.dens = array(f['DCA-loop-functions']['density']['data'])
# Now read the 4-point Green's function
G4Re = array(f['functions']['G4_k_k_w_w']['data'])[:,:,:,:,0,0,0,0,0]
G4Im = array(f['functions']['G4_k_k_w_w']['data'])[:,:,:,:,0,0,0,0,1]
self.G4 = G4Re+1j*G4Im
# G4[iw1,iw2,ik1,ik2]
# Now read the cluster Green's function
GRe = array(f['functions']['cluster_greens_function_G_k_w']['data'])[:,:,0,0,0,0,0]
GIm = array(f['functions']['cluster_greens_function_G_k_w']['data'])[:,:,0,0,0,0,1]
self.Green = GRe + 1j * GIm
# Now read the self-energy
s = np.array(f['functions']['Self_Energy']['data'])
self.sigma = s[:,:,0,0,0,0,0] + 1j *s[:,:,0,0,0,0,1]
# Now load frequency data
self.wn = np.array(f['domains']['frequency-domain']['elements'])
self.wnSet = np.array(f['domains']['vertex-frequency-domain (COMPACT)']['elements'])
# Now read the K-vectors
self.Kvecs = array(f['domains']['CLUSTER']['MOMENTUM_SPACE']['elements']['data'])
# Now read other Hubbard parameters
self.t = np.array(f['parameters']['2D-Hubbard-model']['t'])[0]
self.mu = np.array(f['DCA-loop-functions']['chemical-potential']['data'])[-1]
self.NwTP = 2*np.array(f['parameters']['function-parameters']['two-particle-functions']['fermionic-frequencies'])[0]
else:
self.cluster = array(f["domains"]["CLUSTER"]["REAL_SPACE"]["super-basis"]["data"])
print("Cluster vectors:",self.cluster)
self.iwm = array(f['parameters']['four-point']['frequency-transfer'])[0] # transferred frequency in units of 2*pi*temp
self.qchannel = array(f['parameters']['four-point']['momentum-transfer'])
a = array(f['parameters']['four-point']['type'])[:]
self.vertex_channel = ''.join(chr(i) for i in a)
self.invT = array(f['parameters']['physics']['beta'])[0]
self.temp = 1.0/self.invT
self.U = array(f['parameters']['single-band-Hubbard-model']['U'])[0]
self.tp = array(f['parameters']['single-band-Hubbard-model']['t-prime'])[0]
self.fill = array(f['parameters']['physics']['density'])[0]
self.dens = array(f['DCA-loop-functions']['density']['data'])
self.nk = array(f['DCA-loop-functions']['n_k']['data'])
if self.newMaster == False:
G4Re = array(f['functions']['G4_k_k_w_w']['data'])[:,:,:,:,0,0,0,0,0]
G4Im = array(f['functions']['G4_k_k_w_w']['data'])[:,:,:,:,0,0,0,0,1]
self.G4 = G4Re+1j*G4Im
else:
if self.allq == False:
# old format: order of indices: w1,w2,K1,K2
# G4Re = array(f['functions']['G4']['data'])[0,:,:,0,:,:,0,0,0,0,0]
# G4Im = array(f['functions']['G4']['data'])[0,:,:,0,:,:,0,0,0,0,1]
# New format: order of indices: w1,K1,w2,K2
G4Re = array(f['functions']['G4']['data'])[0,0,:,:,:,:,0,0,0,0,0]
G4Im = array(f['functions']['G4']['data'])[0,0,:,:,:,:,0,0,0,0,1]
print("G4 shape:",G4Re.shape)
else:
# order of indices: Q,w1,K1,w2,K2
G4Re = array(f['functions']['G4']['data'])[0,self.iq,:,:,:,:,0,0,0,0,0]
G4Im = array(f['functions']['G4']['data'])[0,self.iq,:,:,:,:,0,0,0,0,1]
self.G4 = G4Re+1j*G4Im
# Now reorder G4
self.G4=self.G4.swapaxes(1,2) # Now G4's shape is w1,w2,K1,K2
GRe = array(f['functions']['cluster_greens_function_G_k_w']['data'])[:,:,0,0,0,0,0]
GIm = array(f['functions']['cluster_greens_function_G_k_w']['data'])[:,:,0,0,0,0,1]
self.Green = GRe + 1j * GIm
s = np.array(f['functions']['Self_Energy']['data'])
self.sigma = s[:,:,0,0,0,0,0] + 1j *s[:,:,0,0,0,0,1]
# self.sigma = s[:,:,0,:,0,:,0] + 1j *s[:,:,0,:,0,:,1]
self.wn = np.array(f['domains']['frequency-domain']['elements'])
self.wnSet = np.array(f['domains']['vertex-frequency-domain (COMPACT)']['elements'])
self.Kvecs = array(f['domains']['CLUSTER']['MOMENTUM_SPACE']['elements']['data'])
self.t = np.array(f['parameters']['single-band-Hubbard-model']['t'])[0]
self.mu = np.array(f['DCA-loop-functions']['chemical-potential']['data'])[-1]
self.NwTP = 2*np.array(f['parameters']['domains']['imaginary-frequency']['four-point-fermionic-frequencies'])[0]
self.qmcSign = list(f['DCA-loop-functions']['sign']['data'])
print("QMC sign:",self.qmcSign)
self.NwG4 = self.G4.shape[0]
self.Nc = self.Green.shape[1]
self.NwG = self.Green.shape[0]
self.nt = self.Nc*self.NwG4
if self.allq == False:
self.iQ = self.K_2_iK(self.qchannel[0],self.qchannel[1])
else:
self.iQ = self.iq;self.qchannel = self.Kvecs[self.iq]
self.iwG40 = int(self.NwG4/2)
self.iwG0 = int(self.NwG/2)
print("Transferred frequency iwm = ",self.iwm)
# print("Momentum transfer index, Q:",self.iQ,self.Kvecs[self.iq])
print("Transferred momentum q = ",self.qchannel)
print ("Index of transferred momentum: ", self.iQ)
print("Vertex channel = ",self.vertex_channel)
print("Inverse temperature = ",self.invT)
print("U = ",self.U)
print("t-prime = ",self.tp)
print("target filling = ",self.fill)
print("actual filling = ",self.dens)
print ("K-vectors: ",self.Kvecs)
print ("NwG4: ",self.NwG4)
print ("NwG : ",self.NwG)
print ("Nc : ",self.Nc)
f.close()
# Now remove vacuum part of charge G4
if (self.vertex_channel=="PARTICLE_HOLE_CHARGE"):
if (self.qchannel[0] == 0) & (self.qchannel[1] == 0):
for ik1 in range(self.Nc):
for ik2 in range(self.Nc):
for iw1 in range(self.NwG4):
for iw2 in range(self.NwG4):
iw1Green = iw1 - self.iwG40 + self.iwG0
iw2Green = iw2 - self.iwG40 + self.iwG0
self.G4[iw1,iw2,ik1,ik2] -= 2.0 * self.Green[iw1Green,ik1] * self.Green[iw2Green,ik2]
def determineFS(self):
self.FSpoints=[]
Kset = self.Kvecs.copy()
for iK in range(self.Nc):
if Kset[iK,0] > np.pi: Kset[iK,0] -= 2*np.pi
if Kset[iK,1] > np.pi: Kset[iK,1] -= 2*np.pi
for iK in range(self.Nc):
if abs(abs(Kset[iK,0])+abs(Kset[iK,1]) - np.pi) <= 1.0e-4:
self.FSpoints.append(iK)
def symmetrizeG4(self):
if self.iwm==0:
self.apply_symmetry_in_wn(self.G4)
print("Imposing symmetry in wn")
if self.vertex_channel in ("PARTICLE_PARTICLE_SUPERCONDUCTING","PARTICLE_PARTICLE_UP_DOWN"):
# print("G4.shape:",self.G4.shape)
self.apply_transpose_symmetry(self.G4)
print("Imposing transpose symmetry")
if self.phSymmetry: self.apply_ph_symmetry_pp(self.G4)
# 16A cluster [[4,2],[0,4]]
if (self.cluster[0,0] == 4.0 and self.cluster[0,1] == 2.0 and self.cluster[1,0] == 0.0 and self.cluster[1,1] == 4.0):
import symmetrize_Nc16A; sym=symmetrize_Nc16A.symmetrize()
print("symmetrizing 16A cluster")
sym.apply_point_group_symmetries_Q0(self.G4)
elif (self.cluster[0,0] == 4 and self.cluster[0,1] == 0 and self.cluster[1,0] == 0 and self.cluster[1,1] == 4):
import symmetrize_Nc16B; sym=symmetrize_Nc16B.symmetrize()
print("symmetrizing 16B cluster")
sym.apply_point_group_symmetries_Q0(self.G4)
elif (self.cluster[0,0] == 2 and self.cluster[0,1] == 2 and self.cluster[1,0] == -4 and self.cluster[1,1] == 2):
import symmetrize_Nc12; sym=symmetrize_Nc12.symmetrize()
print("symmetrizing 12A cluster")
sym.apply_point_group_symmetries_Q0(self.G4)
elif (self.cluster[0,0] == 4 and self.cluster[0,1] == 4 and self.cluster[1,0] == 4 and self.cluster[1,1] == -4):
import symmetrize_Nc32A_v2; sym=symmetrize_Nc32A_v2.symmetrize()
print("symmetrizing 32A cluster")
sym.apply_point_group_symmetries_Q0(self.G4)
elif (self.cluster[0,0] == 2 and self.cluster[0,1] == 2 and self.cluster[1,0] == -2 and self.cluster[1,1] == 2):
import symmetrize_Nc8; sym=symmetrize_Nc8.symmetrize()
print("symmetrizing 8-site cluster")
sym.apply_point_group_symmetries_Q0(self.G4)
def calcChi0Cluster(self):
print ("Now calculating chi0 on cluster")
self.chic0 = zeros((self.NwG4,self.Nc),dtype='complex')
Nc=self.Nc; NwG4=self.NwG4; NwG=self.NwG
nt = Nc*NwG4
if self.vertex_channel in ("PARTICLE_PARTICLE_SUPERCONDUCTING","PARTICLE_PARTICLE_UP_DOWN","PARTICLE_PARTICLE_SINGLET"):
for iw in range(0,NwG4):
for ik in range(Nc):
iw1 = int(iw - NwG4/2 + NwG/2) # convert to frequency for single-particle G
ikPlusQ = int(self.iKSum[self.iKDiff[self.iK0,ik],self.iQ]) # -k+Q
minusiwPlusiwm = int(min(max(NwG-iw1-1 + self.iwm,0),NwG-1)) # -iwn + iwm
c1 = self.Green[iw1,ik] * self.Green[minusiwPlusiwm,ikPlusQ]
self.chic0[iw,ik] = c1
else:
for iw in range(NwG4):
for ik in range(Nc):
iw1 = int(iw - NwG4/2 + NwG/2)
ikPlusQ = int(self.iKSum[ik,self.iQ]) # k+Q
iwPlusiwm = int(min(max(iw1 + self.iwm,0),NwG-1)) # iwn+iwm
#print("iw1,ik,iwPlusiwm,ikPlusQ",iw1,ik,iwPlusiwm,ikPlusQ)
c1 = - self.Green[iw1,ik] * self.Green[iwPlusiwm,ikPlusQ]
self.chic0[iw,ik] = c1
self.chic0M = np.diag(self.chic0.reshape(nt))
# for PARTICLE_PARTICLE_SINGLET, chic0 also appears for k2=q-k1
if self.vertex_channel in ("PARTICLE_PARTICLE_SINGLET"):
for iw in range(0,NwG4):
for ik in range(Nc):
i1 = ik + iw * Nc
ikPlusQ = int(self.iKSum[self.iKDiff[self.iK0,ik],self.iQ]) # -k+Q
minusiwPlusiwm = int(min(max(NwG4-iw-1 + self.iwm,0),NwG-1)) # -iwn + iwm
i2 = ikPlusQ + minusiwPlusiwm * Nc # k2 = q-k1
self.chic0M[i1,i2] += self.chic0[iw,ik]
def calcChi0CPPDwave(self):
print ("Now calculating chi0pp(q=0,iwm) on cluster")
Nc=self.Nc; NwG4=self.NwG4; NwG=self.NwG
self.chic0pp = zeros((NwG4),dtype=complex)
for iwm in range(0,NwG4):
for iwn in range(0,NwG):
for iK in range(0,Nc):
gK = cos(self.Kvecs[iK,0]) - cos(self.Kvecs[iK,1])
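# gK above is the d_{x^2-y^2} form factor cos(kx) - cos(ky), used to project the
# pairing bubble onto the d-wave channel.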
minusiwPlusiwm = int(min(max(NwG-iwn-1 + iwm,0),NwG-1)) # -iwn + iwm
miK = self.iKDiff[self.iK0,iK]
self.chic0pp[iwm] += self.Green[iwn,iK] * self.Green[minusiwPlusiwm,miK] * gK**2
self.chic0pp *= self.temp/self.Nc
def calcGammaIrr(self):
# Calculate the irr. GammaIrr
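# Bethe-Salpeter inversion: Gamma = Nc*beta * (chi0^-1 - chi^-1) with chi = G4,
# both reshaped to (nt x nt) matrices in the combined (frequency, momentum) index.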
Nc=self.Nc; NwG4=self.NwG4; NwG=self.NwG; nt = self.nt
self.G4M = np.swapaxes(self.G4,1,2).reshape(nt,nt)
G4M = linalg.inv(self.G4M)
chic0M = linalg.inv(self.chic0M)
self.GammaM = chic0M - G4M
self.GammaM *= float(Nc)*self.invT
self.Gamma = self.GammaM.reshape(NwG4,Nc,NwG4,Nc)
if (self.evenFreqOnly==True) & (self.vertex_channel in ("PARTICLE_PARTICLE_SUPERCONDUCTING","PARTICLE_PARTICLE_UP_DOWN","PARTICLE_PARTICLE_SINGLET")):
print("Solutions restricted to even frequency!!!")
self.Gamma = 0.5*(self.Gamma+self.Gamma[:,:,::-1,:])
self.GammaM = self.Gamma.reshape(nt,nt)
def buildKernelMatrix(self):
# Build kernel matrix Gamma*chi0
| |
= 5350 * sign(self.agent.team)
if self.agent.goalPred.game_seconds - self.agent.gameInfo.seconds_elapsed > 0.1:
if distance2D(self.agent.me.location, penetrationPosition) > 100:
return testMover(self.agent, penetrationPosition, 2300)
else:
if penetrationPosition[2] > 300:
self.activeState = LeapOfFaith(self.agent, -1)
return self.activeState.update()
else:
self.activeState = LeapOfFaith(self.agent, 0)
return self.activeState.update()
def parseCarInfo(carList, index, _max=False):
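# Helper: scan carList and return the entry with the largest value at position
# `index` when _max is True, otherwise the entry with the smallest value.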
val = None
best = None
for each in carList:
if _max:
if val is None or each[index] > val:
best = each
val = each[index]
else:
if val is None or each[index] < val:
best = each
val = each[index]
return best
def aerialStateManager(agent):
center = Vector([0, 5500 * -sign(agent.team), 200])
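# Aerial-practice loop: whenever the ball is on the ground, teleport the car back to
# mid-field and relaunch the ball with a random velocity via set_game_state, then
# search the ball prediction for a slice an aerial (Wings_Of_Justice) can reach.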
if agent.ball.location[2] < 110:
car_state = CarState(
physics=Physics(
velocity=Vector3(z=0, x=0, y=0), location=Vector3(0, 0, 17.1)
)
)
ball_state = BallState(
physics=Physics(
velocity=Vector3(
z=1550,
x=random.randrange(-1500, 1500),
y=random.randrange(-1500, 1500),
),
location=Vector3(0, 0, 350),
)
)
game_state = GameState(cars={agent.index: car_state}, ball=ball_state)
agent.set_game_state(game_state)
agent.activeState = None
if type(agent.activeState) != Wings_Of_Justice or not agent.activeState.active:
tempAerial = None
pred = agent.ballPred.slices[0]
for i in range(0, agent.ballPred.num_slices):
if i > 60 and i % 3 != 0:
continue
pred = agent.ballPred.slices[i]
tth = pred.game_seconds - agent.gameInfo.seconds_elapsed
if tth <= 0:
continue
if agent.onSurface:
if pred.physics.location.z < 300:
continue
pred_vec = convertStructLocationToVector(pred)
if findDistance(agent.me.location, pred_vec) < 2300 * tth:
_direction = direction(center, pred_vec).flatten()
positioningOffset = 90
aim_loc = pred_vec - _direction.scale(90)
tempAerial = Wings_Of_Justice(agent, pred, aim_loc, tth)
if tempAerial.active:
break
if tempAerial is not None and tempAerial.active:
agent.activeState = tempAerial
def demoTest(agent):
targ = findEnemyClosestToLocation(agent, agent.ball.location)[0]
return demoEnemyCar(agent, targ)
def twos_manager(agent):
agentType = type(agent.activeState)
if agentType != PreemptiveStrike:
if not kickOffTest(agent):
myGoalLoc = Vector([0, 5200 * sign(agent.team), 200])
ballDistanceFromGoal = distance2D(myGoalLoc, agent.ball)
carDistanceFromGoal = distance2D(myGoalLoc, agent.me)
if agentType == LeapOfFaith:
if agent.activeState.active != False:
return
if agentType == Divine_Mandate:
if agent.activeState.active != False:
return
if agentType == airLaunch:
if agent.activeState.active != False:
return
if agentType == BlessingOfDexterity:
if agent.activeState.active != False:
return
if agentType == Wings_Of_Justice:
if agent.activeState.active != False:
return
if agentType == DivineGrace:
if agent.activeState.active != False:
return
if agentType == RighteousVolley:
if agent.activeState.active != False:
return
fastesthit = find_soonest_hit(agent)
hit = fastesthit
openNet = openGoalOpportunity(agent)
agent.openGoal = openNet
agent.timid = False
scared = False
tempDelay = hit.prediction_time - agent.gameInfo.seconds_elapsed
if tempDelay >= agent.enemyBallInterceptDelay - agent.contestedTimeLimit:
if agent.enemyAttacking:
agent.contested = True
if (
distance2D(hit.pred_vector, myGoalLoc) <= 2000
or distance2D(agent.enemyTargetVec, myGoalLoc) <= 2000
or ballDistanceFromGoal <= 2000
):
if agent.enemyAttacking:
agent.contested = True
agent.timid = False
scared = False
if hit.hit_type == 5:
if agentType != Wings_Of_Justice:
agent.activeState = hit.aerialState
return
if not agent.onSurface:
if agent.me.location[2] > agent.recovery_height:
if agentType != DivineGrace:
agent.activeState = DivineGrace(agent)
return
else:
agent.activeState = PreemptiveStrike(agent)
def team_synergy(agent):
agentType = type(agent.activeState)
if agentType != PreemptiveStrike:
if not kickOffTest(agent):
if locked_in(agent, agentType):
return
my_goal = Vector([0, 5200 * sign(agent.team), 200])
inclusive_team = agent.allies[:]
inclusive_team.append(agent.me)
inclusive_team = sorted(inclusive_team, key=lambda x: x.index)
ballDistanceFromGoal = distance2D(my_goal, agent.ball.location)
carDistanceFromGoal = distance2D(my_goal, agent.me.location)
current_ball_position = agent.ball.location
offensive = current_ball_position[1] * sign(agent.team) > 0
team_info = []
if agent.dribbling:
if agentType != AngelicEmbrace:
agent.activeState = AngelicEmbrace(agent)
return
for tm in inclusive_team:
# if agent.team == 1:
if tm.location[1] * sign(agent.team) > current_ball_position[1] * sign(
agent.team
):
dist = distance2D(tm.location, current_ball_position)
# if offensive:
# if player_retreat_status(tm,agent.team):
# dist+=1500
# if agent.ball.location[0] > 1000:
# if tm.location[0] >= agent.ball.location[0]:
# dist = clamp(dist, 0, dist - 1000)
# elif agent.ball.location[0] < -1000:
# if tm.location[0] <= agent.ball.location[0]:
# dist = clamp(dist, 0, dist - 1000)
else:
dist = distance2D(tm.location, my_goal) * 2
team_info.append((tm, dist))
rotations = sorted(team_info, key=lambda x: x[1])
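# Rotation order: cars on the goal side of the ball are ranked by 2D distance to the
# ball; cars upfield of the ball are ranked by twice their distance to our own goal.
# The closest entry becomes rotation 1, the next rotation 2, everyone else rotation 3.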
if agent.me.location == rotations[0][0].location:
agent.rotationNumber = 1
elif agent.me.location == rotations[1][0].location:
agent.rotationNumber = 2
else:
agent.rotationNumber = 3
# if agent.hits[4] != None:
# print(f"agent {agent.index} found an aerial target! {agent.time}")
if agent.rotationNumber != 1 and agent.rotationNumber != 2:
if agent.hits[4] != None:
agent.currentHit = agent.hits[4]
agent.ballDelay = agent.currentHit.time_difference()
if agentType != Wings_Of_Justice:
agent.activeState = agent.hits[4].aerialState # .create_copy()
# agent.log.append(f"Going for aerial! {agent.time}")
return
agent.currentHit = find_soonest_hit(agent)
agent.ballDelay = agent.currentHit.time_difference()
if agentType == DivineGrace:
if agent.activeState.active != False:
return
if not agent.onSurface:
if agent.me.location[2] > agent.recovery_height:
if agentType != DivineGrace:
agent.activeState = DivineGrace(agent)
return
if agentType != BlessingOfSafety:
agent.activeState = BlessingOfSafety(agent)
return
boostOpportunity = inCornerWithBoost(agent)
if boostOpportunity != False:
if agent.me.boostLevel <= 50:
getBoost = False
if agent.team == 0:
if boostOpportunity[1] == 0 or boostOpportunity[1] == 1:
getBoost = True
else:
if boostOpportunity[1] == 2 or boostOpportunity[1] == 3:
getBoost = True
if getBoost:
if agentType != HeavenlyReprieve:
agent.activeState = HeavenlyReprieve(
agent, boostOpportunity[0]
)
return
if agent.rotationNumber == 2:
if agent.hits[4] != None:
agent.currentHit = agent.hits[4]
agent.ballDelay = agent.currentHit.time_difference()
if agentType != Wings_Of_Justice:
agent.activeState = agent.hits[4].aerialState # .create_copy()
# agent.log.append(f"Going for aerial! {agent.time}")
return
else:
agent.currentHit = find_soonest_hit(agent)
agent.ballDelay = agent.currentHit.time_difference()
if agentType == DivineGrace:
if agent.activeState.active != False:
return
if not agent.onSurface:
if agent.me.location[2] > agent.recovery_height:
if agentType != DivineGrace:
agent.activeState = DivineGrace(agent)
return
if agentType != BlessingOfSafety:
agent.activeState = BlessingOfSafety(agent)
return
fastesthit = find_soonest_hit(agent)
hit = fastesthit
openNet = openGoalOpportunity(agent)
agent.openGoal = openNet
tempDelay = hit.prediction_time - agent.gameInfo.seconds_elapsed
if tempDelay >= agent.enemyBallInterceptDelay - agent.contestedTimeLimit:
if agent.enemyAttacking:
agent.contested = True
if hit.hit_type == 5:
# print(f"going for aerial {agent.time}")
if agentType != Wings_Of_Justice:
agent.activeState = hit.aerialState # .create_copy()
# agent.log.append(f"Going for aerial! {agent.time}")
return
if agentType == DivineGrace:
if agent.activeState.active != False:
return
if not agent.onSurface:
if agent.me.location[2] > agent.recovery_height:
if agentType != DivineGrace:
agent.activeState = DivineGrace(agent)
return
if butterZone(hit.pred_vector):
agent.contested = True
agent.enemyAttacking = True
if agent.goalPred != None:
agent.contested = True
agent.enemyAttacking = True
goalward = ballHeadedTowardsMyGoal_testing(agent, hit)
agent.goalward = goalward
# if not agent.contested and not goalward:
# if hit.hit_type == 4:
# if agent.hits[1] != None:
# if not butterZone(hit.pred_vector):
# temptime = agent.hits[1].prediction_time - agent.time
# if (
# temptime
# < agent.enemyBallInterceptDelay
# - agent.contestedTimeLimit
# ):
# hit = agent.hits[1]
#
# elif hit.hit_type == 1:
# if agent.hits[0] != None:
# if not butterZone(hit.pred_vector):
# if agent.me.boostLevel > 30:
# temptime = agent.hits[0].prediction_time - agent.time
# if (
# temptime
# < agent.enemyBallInterceptDelay
# - agent.contestedTimeLimit
# ):
# hit = agent.hits[0]
if not agent.contested:
if hit.hit_type == 4:
if agent.hits[1] != None:
if hit.pred_vel[1] * -sign(agent.team) >= 1:
if not butterZone(hit.pred_vector):
temptime = agent.hits[1].prediction_time - agent.time
if (
temptime
< agent.enemyBallInterceptDelay
- agent.contestedTimeLimit
):
hit = agent.hits[1]
if hit.hit_type == 1:
if agent.hits[0] != None:
if agent.hits[0].pred_vel[1] * -sign(agent.team) >= 1:
if not butterZone(hit.pred_vector):
if agent.me.boostLevel > 30:
temptime = (
agent.hits[0].prediction_time - agent.time
)
if (
temptime
< agent.enemyBallInterceptDelay
- agent.contestedTimeLimit
):
hit = agent.hits[0]
if hit.hit_type == 5:
# print(f"going for aerial {agent.time}")
if agentType != Wings_Of_Justice:
agent.activeState = hit.aerialState # .create_copy()
# agent.log.append(f"Going for aerial! {agent.time}")
return
if carDistanceFromGoal > ballDistanceFromGoal:
if agentType != HolyProtector:
agent.activeState = HolyProtector(agent)
return
agent.currentHit = hit
agent.ballDelay = hit.prediction_time - agent.gameInfo.seconds_elapsed
# if agent.team == 1:
# catchViable = ballCatchViable(agent)
# else:
catchViable = False
if goalward:
if hit.hit_type != 2:
if agentType != HolyProtector:
agent.activeState = HolyProtector(agent)
return
else:
if agentType != ScaleTheWalls:
agent.activeState = ScaleTheWalls(agent)
return
else:
if catchViable:
if not agent.dribbling:
agent.currentHit = agent.hits[1]
agent.ballDelay = agent.currentHit.time_difference()
if agent.activeState != Celestial_Arrest:
agent.activeState = Celestial_Arrest(agent)
return
if hit.hit_type == 0: # hit.pred_vector[2] <= agent.groundCutOff:
if agentType != GroundAssault:
agent.activeState = GroundAssault(agent)
return
elif hit.hit_type == 1:
if agentType != HolyGrenade:
agent.activeState = HolyGrenade(agent)
return
elif hit.hit_type == 4:
if agentType != HolyGrenade:
agent.activeState = HolyGrenade(agent)
# print("would have been wallshot before")
return
elif hit.hit_type == 2:
if agentType != ScaleTheWalls:
agent.activeState = ScaleTheWalls(agent)
return
else:
agent.log.append(f"condition leaked through! {hit.hit_type}")
else:
agent.activeState = PreemptiveStrike(agent)
def newTeamStateManager(agent):
agentType = type(agent.activeState)
if (
agentType != PreemptiveStrike
or agentType == PreemptiveStrike
and not agent.activeState.active
):
if not kickOffTest(agent):
myGoalLoc = Vector([0, 5200 * sign(agent.team), 200])
enemyGoalLoc = Vector([0, 5200 * -sign(agent.team), 200])
ballDistanceFromGoal = distance2D(myGoalLoc, agent.ball.location)
carDistanceFromGoal = distance2D(myGoalLoc, agent.me.location)
if locked_in(agent, agentType):
return
fastesthit = agent.sorted_hits[0] # find_soonest_hit(agent)
hit = fastesthit
if (
hit.hit_type == 2
and abs(agent.me.location[1]) > 5100
and len(agent.sorted_hits) > 1
):
fastesthit = agent.sorted_hits[1]
hit = fastesthit
if hit.hit_type == 5:
if agent.enemyBallInterceptDelay + 0.5 < hit.time_difference():
if | |
('st3', ''),
('stp', 'on')],
'hidden': False,
'hide_filters': [],
'layout': 'table',
'linktitle': _('Pending Services'),
'mustsearch': False,
'name': 'pendingsvc',
'num_columns': 5,
'owner': '',
'painters': [('service_description', 'service')],
'play_sounds': False,
'public': True,
'show_filters': [],
'sorters': [],
'title': _('Pending Services'),
'topic': _('Problems')},
'searchhost': {'browser_reload': 60,
'column_headers': 'pergroup',
'datasource': 'hosts',
'description': _('A form for searching hosts using flexible filters'),
'group_painters': [('sitealias', None)],
'hard_filters': [],
'hard_filtervars': [],
'hidden': False,
'hide_filters': [],
'layout': 'table',
'mustsearch': True,
'name': 'searchhost',
'num_columns': 3,
'owner': '',
'painters': host_view_painters,
'play_sounds': False,
'public': True,
'show_filters': [
'host_scheduled_downtime_depth',
'host_in_notification_period',
'host_in_service_period',
'hoststate',
'siteopt',
'hostregex',
'hostgroups',
'opthostgroup',
'opthost_contactgroup',
'host_check_command',
'host_address',
'host_notif_number',
'host_staleness',
'host_tags',
'hostalias',
'host_favorites',
'host_num_services',
],
'sorters': [],
'title': _('Host search'),
'topic': _('Hosts')},
'searchsvc': {'browser_reload': 60,
'column_headers': 'pergroup',
'datasource': 'services',
'description': _('Almost all available filters, used for searching services and maybe doing actions'),
'group_painters': [('sitealias', 'sitehosts'),
('host', 'host')],
'hard_filters': [],
'hard_filtervars': [('is_service_in_notification_period',
'-1'),
('optservicegroup', ''),
('is_service_notifications_enabled',
'-1'),
('is_host_in_notification_period', '-1'),
('is_in_downtime', '-1'),
('is_service_scheduled_downtime_depth',
'-1'),
('is_service_acknowledged', '-1'),
('host', ''),
('is_service_active_checks_enabled',
'-1'),
('service', ''),
('check_command', ''),
('opthostgroup', ''),
('service_output', ''),
('is_service_is_flapping', '-1')],
'hidden': False,
'hide_filters': [],
'layout': 'table',
'mustsearch': True,
'name': 'searchsvc',
'num_columns': 1,
'owner': '',
'painters': service_view_painters,
'play_sounds': False,
'public': True,
'show_filters': ['service_in_notification_period',
'service_in_service_period',
'optservicegroup',
'optservice_contactgroup',
'hostgroups',
'servicegroups',
'service_notifications_enabled',
'host_in_notification_period',
'in_downtime',
'service_scheduled_downtime_depth',
'service_acknowledged',
'hostregex',
'host_address',
'service_active_checks_enabled',
'serviceregex',
'service_display_name',
'check_command',
'hoststate',
'svcstate',
'svchardstate',
'opthostgroup',
'opthost_contactgroup',
'output',
'service_is_flapping',
'svc_last_state_change',
'svc_last_check',
'siteopt',
'aggr_service_used',
'svc_notif_number',
'service_staleness',
'host_tags',
'hostalias',
'host_favorites',
'service_favorites',
],
'sorters': [('site', False),
('site_host', False),
('svcdescr', False)],
'title': _('Service search'),
'topic': _('Services')},
'service': {'browser_reload': 30,
'column_headers': 'pergroup',
'datasource': 'services',
'description': _('Status of a single service, to be used for linking'),
'group_painters': [],
'hard_filters': [],
'hard_filtervars': [],
'hidden': True,
'hide_filters': ['siteopt', 'service', 'host'],
'layout': 'dataset',
'mustsearch': False,
'name': 'service',
'num_columns': 1,
'owner': '',
'painters': [
# 1. Identification and icons
('sitealias', None),
('host', 'hoststatus'),
('service_description', 'servicedesc'),
('service_icons', None),
# 2. State and metrics
('service_state', None),
('svc_plugin_output', None),
('svc_long_plugin_output', None),
('perfometer', None),
('svc_pnpgraph', None),
('svc_metrics', None),
('svc_in_downtime', None),
('svc_in_notifper', None),
# 3. Runtime data, timestamps
('svc_attempt', None),
('svc_notification_number', None),
('svc_state_age', None),
('svc_check_age', None),
('svc_check_cache_info', None),
('svc_next_check', None),
('svc_next_notification', None),
('svc_last_notification', None),
('svc_last_time_ok', None),
('svc_check_latency', None),
('svc_check_duration', None),
# 4. Configuration
('svc_check_interval', None),
('svc_notifper', None),
('svc_contact_groups', None),
('svc_contacts', None),
('svc_group_memberlist', None),
('svc_servicelevel', None),
('svc_check_command', None),
('svc_perf_data', None),
('svc_custom_vars', None),
('check_manpage', None),
('svc_custom_notes', None),
],
'public': True,
'show_filters': [],
'sorters': [],
'linktitle': _('Details'),
'title': _('Service')},
'servicedesc': {'browser_reload': 30,
'column_headers': 'pergroup',
'datasource': 'services',
'description': _('All Services with a certain description'),
'group_painters': [],
'hard_filters': [],
'hard_filtervars': [],
'hidden': True,
'hide_filters': ['service'],
'layout': 'table',
'icon': 'status',
'mustsearch': False,
'name': 'servicedesc',
'num_columns': 2,
'owner': '',
'painters': [('service_state', None),
('service_icons', None),
('host', 'service'),
('svc_plugin_output', None),
('perfometer', None)],
'public': True,
'show_filters': ['hostregex', 'svcstate', 'opthostgroup'],
'sorters': [('site', False),
('site_host', False)],
'user_sortable' : 'on',
'linktitle': _('Service globally'),
'title': _('All Services with this description:')},
'servicedescpnp': {'browser_reload': 90,
'column_headers': 'off',
'datasource': 'services',
'description': _('Graphs for all Services with a certain description'),
'group_painters': [('host', 'hostpnp')],
'hard_filters': [],
'hard_filtervars': [],
'hidden': True,
'hide_filters': ['service'],
'icon' : 'pnp',
'layout': 'boxed',
'linktitle': _('Graphs globally'),
'mustsearch': False,
'name': 'servicedescpnp',
'num_columns': 2,
'owner': 'admin',
'painters': [('svc_pnpgraph', None)],
'play_sounds': False,
'public': True,
'show_filters': ['hostregex', 'svcstate', 'opthostgroup'],
'sorters': [('svcstate', False),
('site', False),
('site_host', False)],
'title': _('Graphs of services with description:'),
'topic': _('Other')},
'servicegroup': {'browser_reload': 30,
'column_headers': 'pergroup',
'datasource': 'services',
'description': _('Services of a service group'),
'group_painters': [('sitealias', 'sitehosts'),
('host', 'host')],
'hard_filters': [],
'hard_filtervars': [],
'hidden': True,
'hide_filters': ['servicegroup'],
'layout': 'table',
'linktitle': _('Service Group'),
'mustsearch': False,
'name': 'servicegroup',
'num_columns': 1,
'owner': '',
'painters': service_view_painters,
'play_sounds': False,
'public': True,
'show_filters': [],
'sorters': [('site', False),
('site_host', False),
('svcdescr', False)],
'title': _('Service Group'),
'topic': _('Other')},
'sitehosts': {'browser_reload': 30,
'column_headers': 'pergroup',
'datasource': 'hosts',
'description': _('Link view showing all hosts of one site'),
'group_painters': [('site_icon', None), ('sitealias', 'sitesvcs')],
'hard_filters': [],
'hard_filtervars': [],
'hidden': True,
'hidebutton': True,
'hide_filters': ['site'],
'layout': 'boxed',
'mustsearch': False,
'name': 'sitehosts',
'num_columns': 2,
'owner': '',
'painters': host_view_painters,
'public': True,
'show_filters': [],
'sorters': [('site', False), ('site_host', False)],
'linktitle': _('Complete site'),
'title': _('All hosts of site')},
'svcbygroups': {'browser_reload': 30,
'column_headers': 'pergroup',
'datasource': 'servicesbygroup',
'description': _('Services grouped by service groups. Services that are not a member of any group are not displayed. Services belonging to more than one group are displayed once for each group'),
'group_painters': [('sg_alias', 'servicegroup')],
'hard_filters': [],
'hard_filtervars': [],
'hidden': False,
'hide_filters': [],
'layout': 'table',
'mustsearch': False,
'name': 'svcbygroups',
'num_columns': 1,
'owner': '',
'painters': [('host', 'host')] + service_view_painters,
'public': True,
'show_filters': [],
'sorters': [('servicegroup', False),
('site_host', False),
('svcdescr', False)],
'title': _('Services by group'),
'topic': _('Service Groups')},
'svcbyhgroups': {'browser_reload': 30,
'column_headers': 'pergroup',
'datasource': 'servicesbyhostgroup',
'description': _('Services grouped by host groups. Services that are not a member of any host group are not displayed. Services belonging to more than one group are displayed once for each group'),
'group_painters': [('hg_alias', 'hostgroup')],
'hard_filters': [],
'hard_filtervars': [],
'hidden': False,
'hide_filters': [],
'layout': 'boxed',
'mustsearch': False,
'name': 'svcbyhgroups',
'num_columns': 2,
'owner': '',
'painters': host_service_view_painters,
'public': True,
'show_filters': [],
'sorters': [('hostgroup', False),
('site_host', False),
('svcdescr', False)],
'title': _('Serv. by host groups'),
'topic': _('Services')},
'svcgroups': {'browser_reload': 30,
'column_headers': 'pergroup',
'datasource': 'servicegroups',
'description': _('A short overview of all service groups, without explicitly listing the actual hosts and services'),
'group_painters': [('sitealias', 'sitehosts')],
'hard_filters': [],
'hard_filtervars': [],
'hidden': False,
'hide_filters': [],
'layout': 'boxed',
'mustsearch': False,
'name': 'svcgroups',
'num_columns': 3,
'owner': '',
'painters': [('sg_name', 'servicegroup'),
('sg_alias', None),
('sg_num_services_ok', None),
('sg_num_services_warn', None),
('sg_num_services_crit', None),
('sg_num_services_unknown', None),
('sg_num_services_pending', None)],
'public': True,
'show_filters': ['servicegroupnameregex'],
'sorters': [],
'title': _('Service Groups (Summary)'),
'topic': _('Service Groups')},
'svcgroups_grid': {'browser_reload': 30,
'column_headers': 'pergroup',
'datasource': 'servicegroups',
'description': _('A short overview of all service groups, without explicitly listing the actual hosts and services'),
'group_painters': [('sitealias', 'sitehosts')],
'hard_filters': [],
'hard_filtervars': [],
'hidden': False,
'hide_filters': [],
'layout': 'boxed',
'mustsearch': False,
'name': 'svcgroups_grid',
'num_columns': 3,
'owner': '',
'painters': [('sg_name', 'servicegroup'),
('sg_alias', None),
('sg_services', None)],
'public': True,
'show_filters': [],
'sorters': [],
'title': _('Service Groups (Grid)'),
'topic': _('Service Groups'),
},
'svcproblems': {
'browser_reload': 30,
'column_headers': 'pergroup',
'datasource': 'services',
'description': _('All problems of services not currently in a downtime.'),
'group_painters': [('service_state', None)],
'hard_filters': ['in_downtime'],
'hard_filtervars': [('is_in_downtime', '0'),
('st0', ''),
('st1', 'on'),
('st2', 'on'),
('st3', 'on'),
('stp', ''),
('hst0', 'on'),
('hst1', ''),
('hst2', ''),
('hstp', 'on'),
],
'hidden': False,
'hide_filters': [],
'layout': 'table',
'mustsearch': False,
'name': 'svcproblems',
'num_columns': 1,
'owner': '',
'painters': host_service_view_painters,
'play_sounds': True,
'public': True,
'show_filters': ['service_in_notification_period',
'service_acknowledged',
'svcstate',
'svchardstate',
'serviceregex',
'host_tags',
'hoststate'],
'sorters': [('svcstate', True),
('stateage', False),
('svcdescr', False)],
'title': _('Service problems'),
'topic': _('Problems')},
'hosttiles': {'browser_reload': 30,
'column_headers': 'off',
'datasource': 'hostsbygroup',
'description': _('Displays hosts in a tiled layout, where each host is a single tile.'),
'group_painters': [('hg_name', 'hostgroup'),
('hg_alias', None)],
'hard_filters': [],
'hard_filtervars': [],
'hidden': False,
'hide_filters': [],
'layout': 'tiled',
'mustsearch': False,
'name': 'hosttiles',
'num_columns': 1,
'owner': '',
'painters': [('host', 'hoststatus'),
('host_address', None),
('host_icons', None),
('num_services', 'host'),
('num_problems', 'problemsofhost'),
('host_state', None)],
'play_sounds': False,
'public': True,
'show_filters': ['host_scheduled_downtime_depth',
'host_in_notification_period',
'hoststate',
'siteopt',
'host_acknowledged',
'hostregex',
'host_notifications_enabled',
'opthostgroup',
'host_check_command',
'opthost_contactgroup'],
'sorters': [],
'title': _('All hosts (tiled)'),
'topic': _('Hosts')},
'searchpnp': {'browser_reload': 90,
'column_headers': 'off',
'datasource': 'services',
'description': _('Search for services and display their graphs'),
'group_painters': [('sitealias', 'sitehosts'),
('host', 'host'),
('service_description', 'service'),
],
'hard_filters': ['service_process_performance_data',
'has_performance_data',
],
'hard_filtervars': [
('is_service_process_performance_data', '1'),
('is_has_performance_data', '1'),
],
'hidden': False,
'hide_filters': [],
'icon' : 'pnp',
'layout': 'boxed',
'mustsearch': True,
'name': 'searchpnp',
'num_columns': 2,
'owner': '',
'painters': [('svc_pnpgraph', None)],
'play_sounds': False,
'public': False,
'show_filters': ['service_in_notification_period',
'optservicegroup',
'service_notifications_enabled',
'host_in_notification_period',
'service_scheduled_downtime_depth',
'service_acknowledged',
'hostregex',
'service_active_checks_enabled',
'serviceregex',
'check_command',
'svcstate',
'opthostgroup',
'in_downtime',
'output',
'service_is_flapping'],
'sorters': [('site', False),
('site_host', False),
('svcdescr', False)
],
'title': _('Search Time Graphs'),
'topic': _('Metrics')},
'hostpnp': {'browser_reload': 90,
'column_headers': 'off',
'datasource': 'services',
'description': _('All graphs for a certain host.'),
'group_painters': [ ('service_description', 'service'),
],
'hard_filters': ['service_process_performance_data',
'has_performance_data',
'svcstate'],
'hard_filtervars': [('is_service_process_performance_data', '1'),
('is_has_performance_data', '1'),
('st0', 'on'),
('st1', 'on'),
('st2', 'on'),
('st3', 'on'),
('stp', '')],
'hidden': True,
'icon' : 'pnp',
'hide_filters': ['siteopt', 'host'],
'layout': 'boxed',
'mustsearch': False,
'name': 'hostpnp',
'num_columns': 2,
'owner': '',
'painters': [('svc_pnpgraph', None)],
'play_sounds': False,
'public': False,
'show_filters': ['serviceregex', 'check_command'],
'sorters': [('site', False),
('site_host', False),
('svcdescr', False)],
'linktitle': _('Service graphs'),
'title': _('Service graphs of host'),
},
'recentsvc': {'browser_reload': 30,
'column_headers': 'pergroup',
'datasource': 'services',
'description': _('Services whose state changed in the last 60 minutes'),
'group_painters': [],
'hard_filters': [],
'hard_filtervars': [('svc_last_state_change_from_range', '3600'),
('svc_last_state_change_from', '1'),
('st0', 'on'),
('st1', | |
from behave import when, then, use_step_matcher
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select, WebDriverWait
from tests import common_helpers
# noinspection PyUnresolvedReferences
from tests.common_steps import lectures # lgtm [py/unused-import]
# noinspection PyUnresolvedReferences
from tests.ui_steps import helpers, login_logout # lgtm [py/unused-import]
def get_lectures(driver):
return driver.find_elements_by_css_selector("[data-qa=lecture]")
def lectures_cnt(driver):
return len(get_lectures(driver))
def open_group_card(context, name):
# open Groups from the menu
helpers.open_groups(context.browser)
# wait for loading to finish
helpers.wait_loading_ends(context.browser)
# find the group with the given name and open its card
found_group = helpers.find_group(context, name, True)
return found_group
def open_client_card(context, full_name):
# open Clients from the menu
helpers.open_clients(context.browser)
# wait for loading to finish
helpers.wait_loading_ends(context.browser)
# find the client with the given name and open their card
found_client = helpers.find_client(context, full_name, True)
return found_client
def get_paid_button(driver):
return driver.find_element_by_css_selector("[data-qa=lecture_attendance_paid]")
def duration_title(duration):
return "TrvΓ‘nΓ: " + duration + " min."
def get_select_attendancestates(driver):
return Select(
driver.find_element_by_css_selector("[data-qa=lecture_select_attendance_attendancestate]")
)
def find_lecture(context, date, time, validate_context=False):
all_courses = context.browser.find_elements_by_css_selector("[data-qa=card_course]")
# search through all courses
for course in all_courses:
found_course = course.find_element_by_css_selector("[data-qa=card_course_name]").text
all_course_lectures = course.find_elements_by_css_selector("[data-qa=lecture]")
# find the lecture with the given start
for lecture in all_course_lectures:
found_start = lecture.find_element_by_css_selector("[data-qa=lecture_start]").text
found_duration = helpers.get_tooltip(
context.browser, lecture.find_element_by_css_selector("[data-qa=lecture_start]")
).text
found_canceled = helpers.check_class_included(
lecture.get_attribute("class"), "lecture-canceled"
)
# compare the identifiers
start = common_helpers.prepare_start(date, time)
start = f"{start.day}. {start.month}. {start.year} β {start.hour}:{start.minute:02}"
# is it a substring (the UI shows the day name before the date)?
if start in found_start:
# search through and validate attendances (only when they are available)
found_attendances_cnt = 0
found_old_attendances = []
if "attendances" in context:
for attendance in context.attendances:
# find the attendance belonging to the given client
found_attendance = find_attendance_in_card(
context, lecture, attendance["client"]
)
found_note = found_attendance.find_element_by_css_selector(
"[data-qa=lecture_attendance_note]"
).text
found_old_attendances.append(
attendance_dict(
attendance["client"],
get_attendancestate_state(found_attendance),
get_paid_state(found_attendance),
found_note,
)
)
if (
found_note == attendance["note"]
and verify_paid(found_attendance, attendance["paid"])
and verify_attendancestate(
found_attendance, attendance["attendancestate"]
)
):
found_attendances_cnt += 1
# the identifiers match, so test any additional data from the context (if it does not match, keep searching)
# or return the found element right away
if validate_context and (
found_attendances_cnt != len(context.attendances)
or duration_title(context.duration) != found_duration
):
continue
if (
# note: we already know that attendances and duration match (otherwise the previous condition triggers continue)
not validate_context
or validate_context
and (
# also verify the correct canceled value
context.canceled == found_canceled
# for single lectures compare the course, for groups ignore it
and (not context.is_group and found_course == context.course)
or context.is_group
)
):
# store the old data in the context for possible correctness verification
context.old_attendances = found_old_attendances
context.old_course = found_course
context.old_date = date
context.old_time = time
context.old_duration = found_duration
context.old_canceled = found_canceled
return lecture
return False
def find_lecture_with_context(context):
return find_lecture(context, context.date, context.time, validate_context=True)
def wait_form_visible(driver):
WebDriverWait(driver, helpers.WAIT_TIME).until(
EC.visibility_of_element_located((By.CSS_SELECTOR, "[data-qa=form_lecture]"))
)
def find_attendance_in_form(context, client):
if not context.is_group:
return context.browser.find_element_by_css_selector("[data-qa=form_lecture_attendance]")
all_attendances = context.browser.find_elements_by_css_selector(
"[data-qa=form_lecture_attendance]"
)
for attendance in all_attendances:
if attendance.find_element_by_css_selector("[data-qa=client_name]").text == client:
return attendance
return None
def find_attendance_in_card(context, lecture, client):
if not context.is_group:
return lecture.find_element_by_css_selector("[data-qa=lecture_attendance]")
all_attendances = lecture.find_elements_by_css_selector("[data-qa=lecture_attendance]")
for attendance in all_attendances:
if attendance.find_element_by_css_selector("[data-qa=client_name]").text == client:
return attendance
return None
def insert_to_form(context, verify_current_data=False):
# wait until the form is visible
wait_form_visible(context.browser)
# prepare the form fields
date_field = context.browser.find_element_by_css_selector("[data-qa=lecture_field_date]")
time_field = context.browser.find_element_by_css_selector("[data-qa=lecture_field_time]")
duration_field = context.browser.find_element_by_css_selector(
"[data-qa=lecture_field_duration]"
)
canceled_checkbox = context.browser.find_element_by_css_selector(
"[data-qa=lecture_checkbox_canceled]"
)
canceled_label = context.browser.find_element_by_css_selector(
"[data-qa=lecture_label_canceled]"
)
course_field = context.browser.find_element_by_id("course")
# verify that the data currently displayed in the form is correct (except attendancestates - see below)
if verify_current_data:
# get the current values from the react-select
course_field_value = context.browser.find_element_by_css_selector(
".course__single-value"
).text
assert (
context.old_course == course_field_value
and context.old_date == date_field.get_attribute("value")
and context.old_time == time_field.get_attribute("value")
and context.old_duration == duration_title(duration_field.get_attribute("value"))
and context.old_canceled == canceled_checkbox.is_selected()
)
# if this is not a group, also fill in the course
if not context.is_group:
course_field.send_keys(Keys.BACK_SPACE)
helpers.react_select_insert(context.browser, course_field, context.course)
# clear all the data
date_field.clear()
time_field.clear()
duration_field.clear()
# enter the new data
date_field.send_keys(context.date)
time_field.send_keys(context.time)
duration_field.send_keys(context.duration)
if (context.canceled and not canceled_checkbox.is_selected()) or (
not context.canceled and canceled_checkbox.is_selected()
):
canceled_label.click()
for attendance in context.attendances:
# find the attendance belonging to the given client
found_attendance = find_attendance_in_form(context, attendance["client"])
paid_checkbox = found_attendance.find_element_by_css_selector(
"[data-qa=lecture_checkbox_attendance_paid]"
)
paid_label = found_attendance.find_element_by_css_selector(
"[data-qa=lecture_label_attendance_paid]"
)
note_field = found_attendance.find_element_by_css_selector(
"[data-qa=lecture_field_attendance_note]"
)
# verify that the currently displayed attendance for the given client is correct
if verify_current_data:
# find the original values of the respective attendance
old_attendance_of_client = next(
old_attendance
for old_attendance in context.old_attendances
if old_attendance["client"] == attendance["client"]
)
# compare the original attendance with the current attendance values
assert old_attendance_of_client == attendance_dict(
attendance["client"],
get_attendancestate_state(found_attendance),
paid_checkbox.is_selected(),
note_field.get_attribute("value"),
)
# clear the existing data
note_field.clear()
# enter the new data
if (attendance["paid"] and not paid_checkbox.is_selected()) or (
not attendance["paid"] and paid_checkbox.is_selected()
):
paid_label.click()
note_field.send_keys(attendance["note"])
choose_attendancestate(found_attendance, attendance["attendancestate"])
def load_data_to_context(context, obj, date, time, duration, canceled, attendances, is_group=False):
load_id_data_to_context(context, date, time)
# for a group we need to store the group, for an individual only the course (the client is in attendances)
if is_group:
context.group = obj
else:
context.course = obj
context.is_group = is_group
context.attendances = attendances
context.duration = duration
context.canceled = common_helpers.to_bool(canceled)
def load_id_data_to_context(context, date, time):
context.date = date
context.time = time
def save_old_lectures_cnt_to_context(context):
context.old_lectures_cnt = lectures_cnt(context.browser)
def attendance_dict(client, attendancestate, paid, note):
return {
"client": client,
"attendancestate": attendancestate,
"paid": paid if isinstance(paid, bool) else common_helpers.to_bool(paid),
"note": note,
}
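# Illustrative example (not from the original source, and it assumes
# common_helpers.to_bool maps the string "true" to True):
#   attendance_dict("Jan Novak", "OK", "true", "late arrival")
#   -> {'client': 'Jan Novak', 'attendancestate': 'OK', 'paid': True, 'note': 'late arrival'}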
def get_paid_state(found_attendance):
return helpers.check_class_included(
get_paid_button(found_attendance).get_attribute("class"), "text-success"
)
def get_attendancestate_state(found_attendance):
# store the found attendance attributes
found_attendancestate_selected_list = get_select_attendancestates(
found_attendance
).all_selected_options
assert len(found_attendancestate_selected_list) == 1
return found_attendancestate_selected_list[0].text
def verify_paid(found_attendance, new_paid):
return get_paid_state(found_attendance) == new_paid
def verify_attendancestate(found_attendance, new_attendancestate):
return new_attendancestate == get_attendancestate_state(found_attendance)
def choose_attendancestate(found_attendance, new_attendancestate):
attendancestate_select = Select(
found_attendance.find_element_by_css_selector(
"[data-qa=lecture_select_attendance_attendancestate"
)
)
attendancestate_select.select_by_visible_text(new_attendancestate)
@then("the lecture is added")
def step_impl(context):
# wait for the lecture to be added
WebDriverWait(context.browser, helpers.WAIT_TIME).until(
lambda driver: lectures_cnt(driver) > context.old_lectures_cnt
)
# has the lecture really been added?
assert find_lecture_with_context(context)
# verify that the modal window is completely closed
assert not helpers.is_modal_class_attr_present(context.browser)
@then("the lecture is updated")
def step_impl(context):
# wait for the lectures to update
helpers.wait_loading_cycle(context.browser)
# does the lecture really have the new data?
assert find_lecture_with_context(context)
assert lectures_cnt(context.browser) == context.old_lectures_cnt
# verify that the modal window is completely closed
assert not helpers.is_modal_class_attr_present(context.browser)
@then("the paid state of the attendance is updated")
def step_impl(context):
# wait for the lectures to update
helpers.wait_loading_cycle(context.browser)
# find the lecture being updated
lecture_to_update = find_lecture(context, context.date, context.time)
assert lecture_to_update
# does the lecture really have the new data?
assert verify_paid(lecture_to_update, context.new_paid)
@then("the attendance state of the attendance is updated")
def step_impl(context):
# wait for the lectures to update
helpers.wait_loading_cycle(context.browser)
# find the lecture being updated
lecture_to_update = find_lecture(context, context.date, context.time)
assert lecture_to_update
# does the lecture really have the new data?
assert verify_attendancestate(lecture_to_update, context.new_attendancestate)
# if the lecture has just changed to excused and was paid, verify that a make-up lecture was added
excused_attendancestate = common_helpers.get_excused_attendancestate()
if (
context.cur_attendancestate != excused_attendancestate
and context.new_attendancestate == excused_attendancestate
and verify_paid(lecture_to_update, True)
):
assert lectures_cnt(context.browser) == context.old_lectures_cnt + 1
else:
assert lectures_cnt(context.browser) == context.old_lectures_cnt
@then("the lecture is deleted")
def step_impl(context):
# wait for the lecture to be deleted
WebDriverWait(context.browser, helpers.WAIT_TIME).until(
lambda driver: lectures_cnt(driver) < context.old_lectures_cnt
)
# has the lecture really been deleted?
assert not find_lecture(context, context.date, context.time)
# verify that the modal window is completely closed
assert not helpers.is_modal_class_attr_present(context.browser)
@when('user deletes the lecture of the client "{client}" at "{date}", "{time}"')
def step_impl(context, client, date, time):
# load the lecture timestamp into the context
load_id_data_to_context(context, date, time)
# open the respective client's card
open_client_card(context, client)
# wait for loading to finish
helpers.wait_loading_ends(context.browser)
# find the lecture and click Edit on it
lecture_to_update = find_lecture(context, date, time)
assert lecture_to_update
button_edit_lecture = lecture_to_update.find_element_by_css_selector(
"[data-qa=button_edit_lecture]"
)
button_edit_lecture.click()
# wait for loading to finish
helpers.wait_loading_ends(context.browser)
# store the original number of lectures
save_old_lectures_cnt_to_context(context)
# wait until the form is visible
wait_form_visible(context.browser)
# click Delete
button_delete_lecture = context.browser.find_element_by_css_selector(
"[data-qa=button_delete_lecture]"
)
button_delete_lecture.click()
# and confirm the deletion
helpers.wait_for_alert_and_accept(context.browser)
@then("the lecture is not added")
def step_impl(context):
# check that the form is still shown and no lecture has been added
try:
WebDriverWait(context.browser, helpers.WAIT_TIME_SHORT).until_not(
EC.presence_of_element_located((By.CSS_SELECTOR, "[data-qa=form_lecture]"))
)
form_group_visible = False
except TimeoutException:
form_group_visible = True
assert form_group_visible
assert lectures_cnt(context.browser) == context.old_lectures_cnt
@when(
'user updates the data of lecture at "{date}", "{time}" to date "{new_date}", time "{new_time}", course "{new_course}", duration "{new_duration}", canceled "{new_canceled}", attendance of the client "{client}" is: "{new_attendancestate}", paid "{new_paid}", note "{new_note}"'
)
def step_impl(
context,
date,
time,
new_date,
new_time,
new_course,
new_duration,
new_canceled,
client,
new_attendancestate,
new_paid,
new_note,
):
new_attendances = [attendance_dict(client, new_attendancestate, new_paid, new_note)]
# load the group data into the context
load_data_to_context(
context, new_course, new_date, new_time, new_duration, new_canceled, new_attendances
)
# open the respective client's card
open_client_card(context, client)
# wait for loading to finish
helpers.wait_loading_ends(context.browser)
# find the lecture and click Edit on it
lecture_to_update = find_lecture(context, date, time)
assert lecture_to_update
button_edit_lecture = lecture_to_update.find_element_by_css_selector(
"[data-qa=button_edit_lecture]"
)
button_edit_lecture.click()
# wait for loading to finish
helpers.wait_loading_ends(context.browser)
# store the original number of groups
save_old_lectures_cnt_to_context(context)
# verify the form displays the correct data and fill in all the data
insert_to_form(context, True)
# submit the form
helpers.submit_form(context, "button_submit_lecture")
@when(
'user updates | |
import os
import time
import warnings
from collections import defaultdict
from pathlib import Path
import flopy
import geopandas as gpd
import numpy as np
import pandas as pd
import pyproj
from packaging import version
fm = flopy.modflow
mf6 = flopy.mf6
import gisutils
import sfrmaker
from gisutils import get_shapefile_crs, get_values_at_points, project
from sfrmaker import Lines
from sfrmaker.utils import assign_layers
from mfsetup.bcs import (
get_bc_package_cells,
setup_basic_stress_data,
setup_flopy_stress_period_data,
)
from mfsetup.config import validate_configuration
from mfsetup.fileio import (
check_source_files,
load,
load_array,
load_cfg,
save_array,
set_cfg_paths_to_absolute,
setup_external_filepaths,
)
from mfsetup.grid import MFsetupGrid, get_ij, rasterize, setup_structured_grid
from mfsetup.interpolate import (
get_source_dest_model_xys,
interp_weights,
interpolate,
regrid,
)
from mfsetup.lakes import make_lakarr2d, setup_lake_fluxes, setup_lake_info
from mfsetup.mf5to6 import (
get_model_length_units,
get_model_time_units,
get_package_name,
)
from mfsetup.model_version import get_versions
from mfsetup.sourcedata import TransientTabularSourceData, setup_array
from mfsetup.tdis import (
get_parent_stress_periods,
parse_perioddata_groups,
setup_perioddata,
setup_perioddata_group,
)
from mfsetup.tmr import Tmr
from mfsetup.units import convert_length_units, lenuni_text, lenuni_values
from mfsetup.utils import flatten, get_input_arguments, get_packages, update
from mfsetup.wells import setup_wel_data
if version.parse(gisutils.__version__) < version.parse('0.2.2'):
warnings.warn('Automatic reprojection functionality requires gis-utils >= 0.2.2'
'\nPlease pip install --upgrade gis-utils')
if version.parse(sfrmaker.__version__) < version.parse('0.6'):
warnings.warn('sfr: sfrmaker_options: add_outlet functionality requires sfrmaker >= 0.6'
'\nPlease pip install --upgrade sfrmaker')
class MFsetupMixin():
"""Mixin class for shared functionality between MF6model and MFnwtModel.
Meant to be inherited by both those classes and not be called directly.
https://stackoverflow.com/questions/533631/what-is-a-mixin-and-why-are-they-useful
"""
source_path = os.path.split(__file__)[0]
""" -1 : well
0 : no lake
1 : lak package lake (lakarr > 0)
2 : high-k lake
3 : ghb
4 : sfr"""
# package variable name: number
bc_numbers = {'wel': -1,
'lak': 1,
'high-k lake': 2,
'ghb': 3,
'sfr': 4,
'riv': 5
}
def __init__(self, parent):
# property attributes
self._cfg = None
self._nper = None
self._perioddata = None
self._sr = None
self._modelgrid = None
self._bbox = None
self._parent = parent
self._parent_layers = None
self._parent_default_source_data = False
self._parent_mask = None
self._lakarr_2d = None
self._isbc_2d = None
self._lakarr = None
self._isbc = None
self._lake_bathymetry = None
self._high_k_lake_recharge = None
self._nodata_value = -9999
self._model_ws = None
self._abs_model_ws = None
self._model_version = None # semantic version of model
self._longname = None # long name for model (short name is self.name)
self._header = None # header for files and repr
self.inset = None # dictionary of inset models attached to LGR parent
self._is_lgr = False # flag for lgr inset models
self.lgr = None # holds flopy Lgr utility object
self.tmr = None # holds TMR class instance for TMR-type perimeter boundaries
self._load = False # whether model is being made or loaded from existing files
self.lake_info = None
self.lake_fluxes = None
# flopy settings
self._mg_resync = False
self._features = {} # dictionary for caching shapefile datasets in memory
# arrays remade during this session
self.updated_arrays = set()
# cache of interpolation weights to speed up regridding
self._interp_weights = None
def __repr__(self):
header = f'{self.header}\n'
txt = ''
if self.parent is not None:
txt += 'Parent model: {}/{}\n'.format(self.parent.model_ws, self.parent.name)
if self._modelgrid is not None:
txt += f'{self._modelgrid.__repr__()}'
txt += 'Packages:'
for pkg in self.get_package_list():
txt += ' {}'.format(pkg.lower())
txt += '\n'
txt += f'{self.nper:d} period(s):\n'
if self._perioddata is not None:
cols = ['per', 'start_datetime', 'end_datetime', 'perlen', 'steady', 'nstp']
txt += self.perioddata[cols].head(3).to_string(index=False)
txt += '\n ...\n'
tail = self.perioddata[cols].tail(1).to_string(index=False)
txt += tail.split('\n')[1]
txt = header + txt
return txt
def __eq__(self, other):
"""Test for equality to another model object."""
if not isinstance(other, self.__class__):
return False
# kludge: skip obs packages for now
# - obs packages aren't read in with same name under which they were created
# - also SFR_OBS package is handled by SFRmaker instead of Flopy;
# a loaded version of a model might have SFR_OBS,
# where a freshly made version may not (even though SFRmaker will write it)
#
all_packages = set(self.get_package_list()).union(other.get_package_list())
exceptions = {p for p in all_packages if p.lower().startswith('obs')
or p.lower().endswith('obs')}
other_packages = [s for s in sorted(other.get_package_list())
if s not in exceptions]
packages = [s for s in sorted(self.get_package_list())
if s not in exceptions]
if other_packages != packages:
return False
if other.modelgrid != self.modelgrid:
return False
if other.nlay != self.nlay:
return False
if not np.array_equal(other.perioddata, self.perioddata):
return False
# TODO: add checks of actual array values and other parameters
for k, v in self.__dict__.items():
if k in ['cfg',
'sfrdata',
'_load',
'_packagelist',
'_package_paths',
'package_key_dict',
'package_type_dict',
'package_name_dict',
'_ftype_num_dict']:
continue
elif k not in other.__dict__:
return False
elif type(v) == bool:
if not v == other.__dict__[k]:
return False
elif k == 'cfg':
continue
elif type(v) in [str, int, float, dict, list]:
if v != other.__dict__[k]:
pass
continue
return True
@property
def nper(self):
if self.perioddata is not None:
return len(self.perioddata)
@property
def nrow(self):
if self.modelgrid.grid_type == 'structured':
return self.modelgrid.nrow
@property
def ncol(self):
if self.modelgrid.grid_type == 'structured':
return self.modelgrid.ncol
@property
def modelgrid(self):
if self._modelgrid is None:
self.setup_grid()
elif self._modelgrid.nlay is None and 'DIS' in self.get_package_list():
self.setup_grid()
return self._modelgrid
@property
def bbox(self):
if self._bbox is None and self.modelgrid is not None:
self._bbox = self.modelgrid.bbox
return self._bbox
@property
def perioddata(self):
"""DataFrame summarizing stress period information.
Columns:
start_date_time : pandas datetimes; start date/time of each stress period
(does not include steady-state periods)
end_date_time : pandas datetimes; end date/time of each stress period
(does not include steady-state periods)
time : float; cumulative MODFLOW time (includes steady-state periods)
per : zero-based stress period
perlen : stress period length in model time units
nstp : number of timesteps in the stress period
tsmult : timestep multiplier for stress period
steady : True=steady-state, False=Transient
oc : MODFLOW-6 output control options
"""
if self._perioddata is None:
self._set_perioddata()
return self._perioddata
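# Illustrative sketch (assumption, not from the original source): downstream setup code
# typically iterates over this table, e.g.
#   for per, perlen, nstp in self.perioddata[['per', 'perlen', 'nstp']].itertuples(index=False):
#       ...  # build the stress-period input for period `per`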
@property
def parent(self):
return self._parent
@property
def parent_layers(self):
"""Mapping between layers in source model and
layers in destination model.
Returns
-------
parent_layers : dict
{inset layer : parent layer}
"""
if self._parent_layers is None:
parent_layers = None
botm_source_data = self.cfg['dis'].get('source_data', {}).get('botm', {})
if self.cfg['parent'].get('inset_layer_mapping') is not None:
parent_layers = self.cfg['parent'].get('inset_layer_mapping')
elif isinstance(botm_source_data, dict) and 'from_parent' in botm_source_data:
parent_layers = botm_source_data.get('from_parent')
else:
#parent_layers = dict(zip(range(self.parent.modelgrid.nlay), range(self.parent.modelgrid.nlay)))
parent_layers = None
self._parent_layers = parent_layers
return self._parent_layers
@property
def parent_stress_periods(self):
"""Mapping between stress periods in source model and
stress periods in destination model.
Returns
-------
parent_stress_periods : dict
{inset stress period : parent stress period}
"""
return dict(zip(self.perioddata['per'], self.perioddata['parent_sp']))
@property
def package_list(self):
"""Definitive list of packages. Get from namefile input first
(as in mf6 input), then look under model input.
"""
packages = self.cfg.get('nam', {}).get('packages', [])
if len(packages) == 0:
packages = self.cfg['model'].get('packages', [])
return [p for p in self._package_setup_order
if p in packages]
@property
def perimeter_bc_type(self):
"""Dictates how perimeter boundaries are set up.
if 'head'; a constant head package is created
from the parent model starting heads
if 'flux'; a specified flux boundary is created
from parent model cell by cell flow output
"""
perimeter_boundary_type = self.cfg['model'].get('perimeter_boundary_type')
if perimeter_boundary_type is not None:
if 'head' in perimeter_boundary_type:
return 'head'
if 'flux' in perimeter_boundary_type:
return 'flux'
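# Illustrative configuration (assumption, not from the source): any value containing
# 'head' or 'flux' selects the boundary type, e.g.
#   cfg['model']['perimeter_boundary_type'] = 'specified head'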
@property
def model_ws(self):
if self._model_ws is None:
self._model_ws = Path(self._get_model_ws())
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
self._abs_model_ws = os.path.normpath(os.path.abspath(model_ws))
@property
def model_version(self):
"""Semantic version of model, using a hacked version of the versioneer.
Version is reported using git tags for the model repository
or a start_version: key specified in the configuration file (default 0).
The start_version or tag is then appended by the remaining information
in a pep440-post style version tag (e.g. most recent git commit hash
for the model repository + "dirty" if the model repository has uncommited changes)
References
----------
https://github.com/warner/python-versioneer
https://github.com/warner/python-versioneer/blob/master/details.md
"""
if self._model_version is None:
self._model_version = get_versions(path=self.model_ws,
start_version=self.cfg['metadata']['start_version'])
return self._model_version
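# Illustrative example (assumption): with start_version '0.1', 12 commits since the
# last tag and uncommitted changes, the reported string might look roughly like
# '0.1.post12+g1a2b3c4.dirty' (pep440-post style: base version + commit count +
# short hash + dirty flag).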
@property
def longname(self):
if self._longname is None:
longname = self.cfg['metadata'].get('longname')
if longname is None:
longname = f'{self.name} model'
self._longname = longname
return self._longname
@property
def header(self):
if self._header is None:
version_str = self.model_version['version']
header = f'{self.longname} version {version_str}'
self._header = header
return self._header
@property
def tmpdir(self):
#abspath = os.path.abspath(
# self.cfg['intermediate_data']['output_folder'])
abspath = self.model_ws / 'original-arrays'
self.cfg['intermediate_data']['output_folder'] = str(abspath)
abspath.mkdir(exist_ok=True)
#if not os.path.isdir(abspath):
# os.makedirs(abspath)
tmpdir = abspath
if self.relative_external_paths:
#tmpdir = os.path.relpath(abspath)
tmpdir = abspath.relative_to(self.model_ws)
#else:
# do we need to normalize with Pathlib??
# tmpdir = os.path.normpath(abspath)
return tmpdir
@property
def external_path(self):
abspath = os.path.abspath(
self.cfg.get('model', {}).get('external_path', 'external'))
if not os.path.isdir(abspath):
os.makedirs(abspath)
if | |
drift.
drift_metrics_min_spikes_per_interval: int
Minimum number of spikes for evaluating drift metrics per interval.
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
method: str
If 'absolute' (default), absolute amplitudes in uV are returned.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes
peak: str
If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or
both ('both' - default)
frames_before: int
Frames before peak to compute amplitude
frames_after: int
Frames after peak to compute amplitude
apply_filter: bool
If True, recording is bandpass-filtered
freq_min: float
High-pass frequency for optional filter (default 300 Hz)
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz)
grouping_property: str
Property to group channels. E.g. if the recording extractor has the 'group' property and
'grouping_property' is 'group', then waveforms are computed group-wise.
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
compute_property_from_recording: bool
If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
property of the recording extractor channel on which the average waveform is the largest
max_channels_per_waveforms: int or None
Maximum channels per waveforms to return. If None, all channels are returned
n_jobs: int
Number of parallel jobs (default 1)
memmap: bool
If True, waveforms are saved as memmap object (recommended for long recordings with many channels)
save_property_or_features: bool
If True, features are saved in the sorting extractor
recompute_info: bool
If True, waveforms are recomputed
max_spikes_per_unit: int
The maximum number of spikes to extract per unit
seed: int
Random seed for reproducibility
verbose: bool
If True, will be verbose in metric computation
Returns
----------
dm_metrics: np.ndarray
The drift metrics of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=recording.get_sampling_frequency(), recording=recording,
apply_filter=params_dict["apply_filter"], freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=None, verbose=params_dict['verbose'])
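# the DriftMetric computation below relies on the PCA scores, so compute them up front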
md.compute_pca_scores(**kwargs)
dm = DriftMetric(metric_data=md)
dm_metrics = dm.compute_metric(drift_metrics_interval_s, drift_metrics_min_spikes_per_interval, **kwargs)
return dm_metrics
def compute_quality_metrics(
sorting,
recording=None,
duration_in_frames=None,
sampling_frequency=None,
metric_names=None,
unit_ids=None,
as_dataframe=False,
isi_threshold=ISIViolation.params['isi_threshold'],
min_isi=ISIViolation.params['min_isi'],
snr_mode=SNR.params['snr_mode'],
snr_noise_duration=SNR.params['snr_noise_duration'],
max_spikes_per_unit_for_snr=SNR.params['max_spikes_per_unit_for_snr'],
template_mode=SNR.params['template_mode'],
max_channel_peak=SNR.params['max_channel_peak'],
max_spikes_per_unit_for_noise_overlap=NoiseOverlap.params['max_spikes_per_unit_for_noise_overlap'],
noise_overlap_num_features=NoiseOverlap.params['num_features'],
noise_overlap_num_knn=NoiseOverlap.params['num_knn'],
drift_metrics_interval_s=DriftMetric.params['drift_metrics_interval_s'],
drift_metrics_min_spikes_per_interval=DriftMetric.params['drift_metrics_min_spikes_per_interval'],
max_spikes_for_silhouette=SilhouetteScore.params['max_spikes_for_silhouette'],
num_channels_to_compare=13,
max_spikes_per_cluster=500,
max_spikes_for_nn=NearestNeighbor.params['max_spikes_for_nn'],
n_neighbors=NearestNeighbor.params['n_neighbors'],
**kwargs
):
"""
Computes and returns all specified metrics for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes
duration_in_frames: int
Length of recording (in frames).
sampling_frequency: float
The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor
metric_names: list
List of metric names to be computed
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
as_dataframe: bool
If True, will return dataframe of metrics. If False, will return dictionary.
isi_threshold: float
The isi threshold for calculating isi violations
min_isi: float
The minimum expected isi value
snr_mode: str
Mode to compute noise SNR ('mad' | 'std' - default 'mad')
snr_noise_duration: float
Number of seconds to compute noise level from (default 10.0)
max_spikes_per_unit_for_snr: int
Maximum number of spikes to compute templates for SNR from (default 1000)
template_mode: str
Use 'mean' or 'median' to compute templates
max_channel_peak: str
Which peaks to use when finding the maximum channel: negative ('neg'), positive ('pos'), or both ('both' - default)
max_spikes_per_unit_for_noise_overlap: int
Maximum number of spikes to compute templates for noise overlap from (default 1000)
noise_overlap_num_features: int
Number of features to use for PCA for noise overlap
noise_overlap_num_knn: int
Number of nearest neighbors for noise overlap
drift_metrics_interval_s: float
Time period for evaluating drift.
drift_metrics_min_spikes_per_interval: int
Minimum number of spikes for evaluating drift metrics per interval
max_spikes_for_silhouette: int
Max spikes to be used for silhouette metric
num_channels_to_compare: int
The number of channels to be used for the PC extraction and comparison
max_spikes_per_cluster: int
Max spikes to be used from each unit
max_spikes_for_nn: int
Max spikes to be used for nearest-neighbors calculation
n_neighbors: int
Number of neighbors to compare
**kwargs: keyword arguments
Keyword arguments among the following:
method: str
If 'absolute' (default), amplitudes are returned as absolute amplitudes in uV.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes
peak: str
Which peaks to use when finding the maximum channel: negative ('neg'), positive ('pos'), or
both ('both' - default)
frames_before: int
Frames before peak to compute amplitude
frames_after: int
Frames after peak to compute amplitude
apply_filter: bool
If True, recording is bandpass-filtered
freq_min: float
High-pass frequency for optional filter (default 300 Hz)
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz)
grouping_property: str
Property to group channels. E.g. if the recording extractor has the 'group' property and
'grouping_property' is 'group', then waveforms are computed group-wise.
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
compute_property_from_recording: bool
If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
property of the recording extractor channel on which the average waveform is the largest
max_channels_per_waveforms: int or None
Maximum channels per waveforms to return. If None, all channels are returned
n_jobs: int
Number of parallel jobs (default 1)
memmap: bool
If True, waveforms are saved as memmap object (recommended for long recordings with many channels)
save_property_or_features: bool
If True, features are saved in the sorting extractor
recompute_info: bool
If True, waveforms are recomputed
max_spikes_per_unit: int
The maximum number of spikes to extract per unit
seed: int
Random seed for reproducibility
verbose: bool
If True, will be verbose in metric computation
Returns
----------
metrics: dictionary OR pandas.dataframe
Dictionary or pandas.dataframe of metrics.
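Example
----------
>>> # illustrative sketch; assumes `recording` and `sorting` are existing
>>> # RecordingExtractor / SortingExtractor objects
>>> metrics = compute_quality_metrics(sorting, recording=recording,
...                                   metric_names=["snr", "firing_rate"],
...                                   as_dataframe=True)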
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
metrics_dict = OrderedDict()
if metric_names is None:
metric_names = all_metrics_list
else:
bad_metrics = []
for m in metric_names:
if m not in all_metrics_list:
bad_metrics.append(m)
if len(bad_metrics) > 0:
raise ValueError(f"Improper feature names: {str(bad_metrics)}. The following features names can be "
f"calculated: {str(all_metrics_list)}")
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=sampling_frequency, recording=recording,
apply_filter=params_dict["apply_filter"], freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=duration_in_frames, verbose=params_dict['verbose'])
if "firing_rate" in metric_names or "presence_ratio" in metric_names or "isi_violation" in metric_names:
if recording is None and duration_in_frames is None:
raise ValueError(
"duration_in_frames and recording cannot both be None when computing firing_rate, "
"presence_ratio, and isi_violation")
if "max_drift" in metric_names or "cumulative_drift" in metric_names or "silhouette_score" in metric_names \
or "isolation_distance" in metric_names or "l_ratio" in metric_names or "d_prime" in metric_names \
or "nn_hit_rate" in metric_names or "nn_miss_rate" in metric_names:
if recording is None:
raise ValueError("The recording cannot be None when computing max_drift, cumulative_drift, "
"silhouette_score isolation_distance, l_ratio, d_prime, nn_hit_rate, or amplitude_cutoff.")
else:
md.compute_pca_scores(**kwargs)
if "amplitude_cutoff" in metric_names:
if recording is None:
raise ValueError("The recording cannot be None when computing amplitude cutoffs.")
else:
md.compute_amplitudes(**kwargs)
if "snr" in metric_names:
if recording is None:
raise ValueError("The recording cannot be None when computing snr.")
if "num_spikes" in metric_names:
ns = NumSpikes(metric_data=md)
num_spikes = ns.compute_metric(**kwargs)
metrics_dict['num_spikes'] = num_spikes
if "firing_rate" in metric_names:
fr = FiringRate(metric_data=md)
firing_rates = fr.compute_metric(**kwargs)
metrics_dict['firing_rate'] = firing_rates
if "presence_ratio" in metric_names:
pr = PresenceRatio(metric_data=md)
presence_ratios = pr.compute_metric(**kwargs)
metrics_dict['presence_ratio'] = presence_ratios
if "isi_violation" in metric_names:
iv = ISIViolation(metric_data=md)
isi_violations = iv.compute_metric(isi_threshold, min_isi, **kwargs)
metrics_dict['isi_violation'] = isi_violations
if "amplitude_cutoff" in metric_names:
ac = AmplitudeCutoff(metric_data=md)
amplitude_cutoffs = ac.compute_metric(**kwargs)
metrics_dict['amplitude_cutoff'] = amplitude_cutoffs
if "snr" in metric_names:
snr = SNR(metric_data=md)
snrs = snr.compute_metric(snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, **kwargs)
metrics_dict['snr'] = snrs
if "max_drift" in metric_names or "cumulative_drift" in metric_names:
dm = DriftMetric(metric_data=md)
max_drifts, cumulative_drifts = dm.compute_metric(drift_metrics_interval_s,
drift_metrics_min_spikes_per_interval, **kwargs)
if "max_drift" in metric_names:
metrics_dict['max_drift'] = max_drifts
if "cumulative_drift" in metric_names:
metrics_dict['cumulative_drift'] = cumulative_drifts
if "silhouette_score" in metric_names:
silhouette_score = SilhouetteScore(metric_data=md)
silhouette_scores = silhouette_score.compute_metric(max_spikes_for_silhouette, **kwargs)
metrics_dict['silhouette_score'] = silhouette_scores
if "isolation_distance" in metric_names:
isolation_distance
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
MagnetCommunication.py
Author: <NAME>
Created on Mon Dec 18 15:35:21 2017
Last Edited: 06.12.2018
Python Version: 3.6.5
4G Magnet Power Supply
Class for sending commands to 4G Magnet Power Supply
IMPORTANT: The magnet has to be in remote mode, which can only
be set locally
________________________________________
LIST OF COMMANDS:
IMAG? : Query Magnet Current
IOUT? : Query Power Supply Output current
LLIM? : Query Low Current sweep limit
ULIM? : Query High Current sweep limit
VLIM? : Query Voltage limit
VMAG? : Query Magnet Voltage
VOUT? : Query Output Voltage
NAME? : Query Magnet Coil Name
RANGE?: Query range limit for sweep rate boundary
RATE? : Query sweep rate for selected sweep range
UNITS?: Query selected Units
________________________________________
Available if magnet is set to remote
LLIM : Set Low Current sweep Limit
ULIM : Set High Current sweep limit
VLIM : Set Voltage limit
NAME : Set magnet coil name
RANGE : Set range limit for sweep rate boundary
RATE : Set sweep rate for selected sweep range
UNITS : Select Units
QRESET : Reset Quench condition
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~ 1) Imports ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
import socket
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~ 2) Network Communication Class ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
class MagnetController:
"""
Class designed for setting up and communicating with a 4G Magnet through
Network connection.
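Example (illustrative sketch; assumes the controller is reachable at the
host/port hard-coded in openConnection and has been switched to remote mode):
    controller = MagnetController()
    controller.openConnection()
    print(controller.getMagnetCurrent())
    controller.closeConnection()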
"""
BUFFER_SIZE = 4094
def __init__(self):
"""
Initialize communication with Network.
"""
self.mysocket = socket.socket()
def openConnection(self):
"""
Open Communication with Magnet Controller. Host and Port can be found
and set in the local controller display.
"""
self.mysocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host = '10.10.1.7'
port = 4444
self.mysocket.connect((host, port))
def sendCommand(self, command):
"""
Send the command with \r as EndOfLine Char.
:param command: can be found in Magnet Manual.
"""
self.mysocket.send((str(command)+" \r").encode())
def receiveAnswer(self):
"""
Read answer from device. Buffer Size is 4094.
:return: answer as string
"""
return self.mysocket.recv(self.BUFFER_SIZE).decode()
def closeConnection(self):
"""
Close Connection with Magnet and Network.
"""
self.mysocket.close()
def resetQuenchCondition(self):
"""
QRESET Reset Quench Condition
Availability: Remote Mode
Command Syntax: QRESET
Description: The QRESET command resets a power supply quench condition
and returns the supply to STANDBY
"""
self.sendCommand("QRESET")
return 0
def setMagnetCurrent(self, value):
"""
IMAG Sets the magnet current (or magnetic field strength).
Availability: Remote Mode
Command Syntax: IMAG [value]
Example: IMAG 47.1123
Default Parameter: 0.0
Parameter Range: ±Maximum Magnet Current
Description: The IMAG command sets the magnet current shown on the
display. The supply must be in standby or a command error will be
returned. The value must be supplied in the selected units - amperes
or field (kG). If Shim Mode is enabled, the persistent mode current
displayed for the named shim is set if the shim parameter is provided.
:param value: ±Maximum Magnet Current
"""
self.sendCommand("IMAG "+str(value))
return self.receiveAnswer()
def getMagnetCurrent(self):
"""
IMAG? Query magnet current (or magnetic field strength)
Availability: Always
Command Syntax: IMAG?
Response: <Magnet Current> <Units>
Response Example: 87.9350 A
Description: The IMAG? query returns the magnet current (or magnetic
field strength) in the present units. If the persistent switch heater is
ON the magnet current returned will be the same as the power supply
output current. If the persistent switch heater is off, the magnet
current will be the value of the power supply output current when the
persistent switch heater was last turned off. The magnet current will
be set to zero if the power supply detects a quench. If in SHIM mode,
the IMAG? query reports the present current of the shim selected by
the SHIM command in Amps. If the optional Shim ID is provided while
in shim mode, the present current of the specified shim will
be reported
"""
self.sendCommand("IMAG?")
return self.receiveAnswer()
def getCurrentOutPowerSupply(self):
"""
IOUT? Query power supply output current
Availability: Always
Command Syntax: IOUT?
:return: <Output Current> <Units>
Response Example: 87.935 A
Description: The IOUT? query returns the power supply output current
(or magnetic field strength) in the present units
"""
self.sendCommand("IOUT?")
return self.receiveAnswer()
def setCurrentSweepLow(self, limit):
"""
LLIM Set current sweep lower limit
Availability: Remote Mode
Command Syntax: LLIM [Limit]
Example: LLIM 20.1250
Default Parameter: 0.0
Description: The LLIM command sets the current limit used when the next
SWEEP DOWN command is issued. The value must be supplied in the
selected units - amperes or field (kG). An error will be returned if
this value is greater than the upper sweep limit
:param limit: ±Maximum Magnet Current
"""
self.sendCommand("LLIM "+str(limit))
return 0
def getCurrentSweepLowLimit(self):
"""
LLIM? Query current sweep lower limit
Availability: Always
Command Syntax: LLIM?
:return: <Limit> <Units>
Response Example: 20.1250 A
Response Range: ±Maximum Magnet Current
Description: The LLIM? query returns the current limit used with the
SWEEP DOWN command. It is issued in the selected units - amperes or
field (kG).
"""
self.sendCommand("LLIM?")
return self.receiveAnswer()
def setRangeLimitForSweep(self, select, limit):
"""
RANGE Set range limit for sweep rate boundary
Availability: Remote
Command Syntax: RANGE <Select> <Limit>
Example: RANGE 0 25.0
Default Parameter: None
Description: The RANGE command sets the upper limit for a charge rate
range in amps. Range 0 starts at zero and ends at the limit provided.
Range 1 starts at the Range 0 limit and ends at the Range 1 limit
provided. Range 2 starts at the Range 1 limit and ends at the Range 2
limit provided. Range 3 starts at the Range 2 limit and ends at the
Range 3 limit provided. Range 4 starts at the Range 3 limit and ends
at the supply output capacity
:param select: 0 to 4
:param limit: 0 to Max Supply Current
"""
self.sendCommand("RANGE "+str(select)+" "+str(limit))
return 0
def getRangeLimitForSweep(self, select):
"""
RANGE? Query range limit for sweep rate boundary
Availability: Always
Command Syntax: RANGE? <Select>
Example: RANGE? 1
Parameter Range: 0 to 4
:return: <Limit>
Response Example: 75.000
Response Range: 0 to Max Magnet Current
Description: The RANGE? query returns the upper limit for a charge rate
range in amps. See RANGE for further details.
"""
self.sendCommand("RANGE? "+str(select))
return self.receiveAnswer()
def setRateForSweep(self, select, rate):
"""
RATE Set sweep rate for selected sweep range
Availability: Remote
Command Syntax: RATE <Range> <Sweep Rate>
Example: RATE 0 0.250
Default Parameter: None
Description: The RATE command sets the charge rate in amps/second for a
selected range. A range parameter of 0, 1, 2, 3, and 4 will select
Range 1, 2, 3, 4, or 5 sweep rates as displayed in the Rates Menu. A
range parameter of 5 selects the Fast mode sweep rate.
:param select: 0 to 5
:param rate: 0 to Max Magnet Current
"""
self.sendCommand("RATE "+str(select)+" "+str(rate))
return 0
def getRateForSweep(self, select):
"""
RATE? Query sweep rate for selected sweep range
Availability: Always
Command Syntax: RATE? <Range>
Example: RATE? 1
Response Example: 0.125
Description: The RATE? command queries the charge rate in amps/second
for a selected range. A range parameter of 0 to 4 will select Range 1
through 5 sweep rates as displayed in the Rates Menu. A range
parameter of 5 queries the Fast mode sweep rate.
:param select: 0 to 4
:return: <Rate>, 0 to Max Magnet Current
"""
self.sendCommand("RATE? "+str(select))
return self.receiveAnswer()
def startSweep(self, mode, speed="SLOW"):
"""
SWEEP Start output current sweep
Availability: Remote Mode
Command Syntax: SWEEP <Sweep Mode> [fast or slow]
Examples: SWEEP UP
SWEEP UP FAST
Default Parameter: None
Description: The SWEEP command causes the power supply to sweep the
output current from the present current to the specified limit at the
applicable charge rate set by the range and rate
commands. If the FAST parameter is given, the fast mode rate will be
used instead of a rate selected from the output current range. SLOW
is required to change from fast sweep. SWEEP UP sweeps to the Upper
limit, SWEEP DOWN sweeps to the Lower limit, and SWEEP ZERO
discharges the supply. If in Shim Mode, SWEEP LIMIT sweeps to the shim
target current.
:param mode: UP, DOWN, PAUSE, or ZERO
:param speed: FAST, SLOW
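Example (illustrative sketch; `controller` stands for a connected
MagnetController instance):
    controller.startSweep("UP")            # sweep to the upper limit
    controller.startSweep("ZERO", "SLOW")  # discharge the supply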
"""
self.sendCommand("SWEEP "+str(mode)+" "+str(speed))
return 0
def getSweepMode(self):
"""
SWEEP? Query sweep mode
Availability:
<filename>src/pyomexmeta/pyomexmeta.py
from __future__ import annotations
import ctypes as ct
import os
import sys
from contextlib import contextmanager
from sys import executable as _python_interpretor
from typing import List
from functools import wraps
try:
# for use from outside the package, as a python package
from .pyomexmeta_api import PyOmexMetaAPI, eUriType, eXmlType, eLogLevel, OmexMetaException
except ImportError:
try: # for internal use
from pyomexmeta_api import PyOmexMetaAPI, eUriType, eXmlType, eLogLevel, OmexMetaException
except ImportError:
# for internal use
from . import pyomexmeta_api, eUriType, eXmlType, eLogLevel, OmexMetaException
_pyom = PyOmexMetaAPI()
# expose get_last_error at top level so it is easily importable by other modules
_get_last_error = _pyom.get_last_error
def propagate_omexmeta_error(func):
"""
If @param func is a callable then this
function behaves like a decorator, checking
the return type for an omexmeta error. This is used in simpler functions
(of which there are many) that only call an omexmeta method.
If @param func is not callable, then we check
to see whether func is nullptr or < 0, indicative
of an omexmeta error. This is used in more complicated
situations.
Args:
func: callable or value.
Returns: a func of @param is callable or the original value if not.
todo split into two functions (violation of SRP).
- check_for_error_value
- check_for_error_return (for decorator)
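Examples (both usage patterns appear later in this module):
as a decorator over a method returning an omexmeta value:
    @propagate_omexmeta_error
    def __len__(self) -> int:
        return _pyom.rdf_size(self._obj)
or called directly on a value returned from the C API:
    propagate_omexmeta_error(rdf_ptr)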
"""
if callable(func):
@wraps(func)
def raise_error_if_necessary(*args, **kwargs):
failed = func(*args, **kwargs)
if failed is None:
err = _pyom.get_last_error()
_pyom.clear_last_error()
raise OmexMetaException(err)
if isinstance(failed, int):
if failed < 0:
err = _pyom.get_last_error()
_pyom.clear_last_error()
raise OmexMetaException(err)
return failed
return raise_error_if_necessary
else:
value = func
if value is None:
err = _pyom.get_last_error()
_pyom.clear_last_error()
raise OmexMetaException(err)
if isinstance(func, int):
if func < 0:
err = _pyom.get_last_error()
_pyom.clear_last_error()
raise OmexMetaException(err)
return func
class RDF:
def __init__(self, storage_type: str = "memory", storage_name: str = "libOmexMetaStore",
storage_options: str = None, model_options: str = None, rdf_ptr: ct.c_int64 = None):
"""Create an :class:`RDF` empty graph.
This is a central object in libOmexMeta and pyomexmeta from which
all annotations are created, stored, read, written and queried. Arguments to :class:`RDF` are passed on
to the redland libraries and affect how the :class:`RDF` graph is stored. The default
is to store the :class:`RDF` graph in memory.
Args:
storage_type: memory (default), hashes, file, uri or sqlite. See https://librdf.org/docs/api/redland-storage-modules.html
storage_name: An arbitrary name for your storage. Not important when storage_type is memory but will be the
name of a `file` or database if `storage_type` is on disk
storage_options: Options forwarded onto the redland libraries backend. Unfortunately, the redland library
docs are not brilliant at describing these options and so neither are we. Please see https://librdf.org/docs/api/redland-storage.html
model_options: Options that are forwarded onto the redland libraries librdf_model* object. Similar to `storage_options` these
are not described in great detail in the redland lib docs. Please see `https://librdf.org/docs/api/redland-model.html#librdf-new-model`
rdf_ptr: A pointer (or memory address), stored as ctypes c_int64, that points to a preexisting instance
of an :class:`RDF` in memory. todo consider whether to remove this option in favour of using only :class:`RDF`._set_rdf_ptr.
.. code-block:: python
:linenos:
# create an empty :class:`RDF` graph in memory
rdf = RDF()
# create a new sqlite database storage called MyFavouriteAnnotations
RDF("sqlite", "MyFavouriteAnnotations", "new='yes'")
"""
# when pointer argument not given by user, create new instance of :class:`RDF`
# argument is only given manually when static methods are used and
# this is hidden from users.
if not rdf_ptr:
self._obj = _pyom.rdf_new(
storage_type.encode(), storage_name.encode(),
None if storage_options is None else storage_options.encode(),
None if model_options is None else model_options.encode(),
)
else:
self._obj = rdf_ptr
@propagate_omexmeta_error
def __len__(self) -> int:
"""Returns the number of individual Triples stored in the rdf model"""
return _pyom.rdf_size(self._obj)
def __str__(self) -> str:
"""Defaults to rdfxml-abbrev syntax"""
return self.to_string("turtle")
def __del__(self) -> None:
"""deletes the :class:`RDF` instance"""
self.delete()
def _set_rdf_ptr(self, ptr: ct.c_int64) -> None:
"""Change the :class:`RDF` pointer to ptr
Args:
ptr: a pointer to the C generated :class:`RDF` object.
Returns: None
"""
# first remove the existing pointer
self.delete()
# then do the switch
self._obj = ptr
@staticmethod
def from_string(rdf_string: str, syntax: str = "guess",
storage_type: str = "hashes", storage_name: str = "pyomexmeta_storage", storage_options: str = None,
model_options: str = None) -> RDF:
"""Read RDF from a string and create an :class:`RDF` object with it.
This is a static method, i.e. it should be used without an instantiated instance of :class:`RDF`.
Args:
rdf_string: The rdf string to parse. It can be in any supported syntax.
syntax: `guess` is default which will try to make an educated guess
as to what syntax is contained in `rdf_string`. However, sometimes
the syntax cannot be identified and this will need to be specified
manually. Other options are rdfxml, ntriples, turtle, nquads, guess.
Note: the easiest way to get a list of available syntax options is to
run your program with deliberately the wrong `syntax` argument
storage_type: See :meth:`RDF.__init__`
storage_name: See :meth:`RDF.__init__`
storage_options: See :meth:`RDF.__init__`
model_options: See :meth:`RDF.__init__`
Returns: An :class:`RDF` object that contains the RDF graph described by rdf_string.
Examples:
>>> rdf_string = "RDF string encoded in turtle syntax"
>>> rdf = RDF.from_string(rdf_string, "turtle")
See Also:
:meth:`RDF.add_from_string`
"""
rdf_ptr = _pyom.rdf_from_string(
rdf_string.encode(), syntax.encode(),
storage_type.encode(), storage_name.encode(),
None if not storage_options else storage_options.encode(),
None if not model_options else model_options.encode()
)
propagate_omexmeta_error(rdf_ptr)
rdf = RDF()
rdf._set_rdf_ptr(rdf_ptr)
return rdf
@propagate_omexmeta_error
def add_from_string(self, rdf_string: str, syntax: str = "guess") -> None:
"""Add annotations to the existing :class:`RDF` graph (self) from `rdf_string`
Args:
rdf_string: The string containing the annotations that you want to add to the current :class:`RDF` graph
syntax: The syntax used by `rdf_string` (default=`guess`).
Other options are rdfxml, ntriples, turtle, nquads, guess.
Returns: None
Examples:
>>> rdf = RDF()
>>> rdf.add_from_string("insert rdf string here")
"""
return _pyom.rdf_add_from_string(self._obj, rdf_string.encode(), syntax.encode())
@staticmethod
def from_uri(uri_string: str, syntax: str = "guess", storage_type: str = "hashes",
storage_name: str = "pyomexmeta_storage",
storage_options: str = None,
model_options: str = None) -> RDF:
""" Create an :class:`RDF` object from content in a URI.
This is a static method i.e. called without an instance of :class:`RDF`.
Args:
uri_string: The uri from which to download an :class:`RDF` graph from.
syntax: The syntax of the graph pointed to by uri_string.
storage_type: See :meth:`RDF.__init__`
storage_name: See :meth:`RDF.__init__`
storage_options:See :meth:`RDF.__init__`
model_options: See :meth:`RDF.__init__`
Returns: :class:`RDF`
Examples:
>>> rdf = RDF.from_uri("http://myrdfgraph.org/rdfspecial")
"""
rdf_ptr = _pyom.rdf_from_uri(
uri_string.encode(), syntax.encode(),
storage_type.encode(), storage_name.encode(),
None if not storage_options else storage_options.encode(),
None if not model_options else model_options.encode()
)
propagate_omexmeta_error(rdf_ptr)
rdf = RDF()
rdf._set_rdf_ptr(rdf_ptr)
return rdf
@propagate_omexmeta_error
def add_from_uri(self, uri_string: str, syntax: str = "guess") -> None:
"""Add RDF from a uri to an existing :class:`RDF` graph
Args:
uri_string: The uri from which to download rdf from
syntax: The syntax of the rdf content pointed to by uri_string
Returns: None
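Examples (mirroring :meth:`RDF.from_uri`):
>>> rdf = RDF()
>>> rdf.add_from_uri("http://myrdfgraph.org/rdfspecial")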
"""
return _pyom.rdf_add_from_uri(self._obj, uri_string.encode(), syntax.encode())
@staticmethod
def from_file(filename: str, syntax: str = "guess", storage_type: str = "hashes",
storage_name: str = "pyomexmeta_storage",
storage_options: str = None, model_options: str = None) -> RDF:
"""Create an :class:`RDF` graph from a file on disk.
This is a static method and is therefore called without an instance of :class:`RDF` (see example)
Args:
filename: The location on disk of the file containing rdf for parsing
syntax: The syntax for the rdf content in `filename`. See :class:`RDF`.
storage_type: See :meth:`RDF.__init__`
storage_name: See :meth:`RDF.__init__`
storage_options:See :meth:`RDF.__init__`
model_options: See :meth:`RDF.__init__`
Returns:
Examples:
>>> rdf = RDF.from_file("/path/to/rdf_file.rdf", "turtle")
See Also:
:meth:`RDF.add_from_file`
"""
rdf_ptr = _pyom.rdf_from_file(
filename.encode(), syntax.encode(),
storage_type.encode(), storage_name.encode(),
None if not storage_options else storage_options.encode(),
None if not model_options else model_options.encode()
)
propagate_omexmeta_error(rdf_ptr)
rdf = RDF()
rdf._set_rdf_ptr(rdf_ptr)
return rdf
@staticmethod
@propagate_omexmeta_error
def equals_rdf_vs_rdf(first_rdf: RDF, second_rdf: RDF, syntax: str = "turtle", verbose: bool = False) -> bool:
"""Equality operator for comparing :class:`RDF` objects.
This is the "default" equality operator used by :meth:`RDF.__eq__`. `first_rdf`
and `second_rdf` are considered equal if all triples contained in each are identical.
The only exception is regarding blank nodes, which are considered equal even if their
label is different. For developers: this condition for blank nodes was necessary, because
they are usually automatically assigned random labels when reading from an rdf graph.
This method is static -
"font": font,
"borderwidth": bd,
"state": state,
"locale": locale,
"date_pattern": date_pattern,
"selectmode": selectmode,
'textvariable': self._textvariable,
'firstweekday': firstweekday,
'weekenddays': weekenddays,
'mindate': mindate,
'maxdate': maxdate,
'showweeknumbers': showweeknumbers,
'showothermonthdays': kw.pop('showothermonthdays', True),
'selectbackground': active_bg,
'selectforeground': 'white',
'disabledselectbackground': dis_active_bg,
'disabledselectforeground': 'white',
'normalbackground': 'white',
'normalforeground': 'black',
'background': 'gray30',
'foreground': 'white',
'disabledbackground': 'gray30',
'disabledforeground': 'gray70',
'bordercolor': 'gray70',
'othermonthforeground': 'gray45',
'othermonthbackground': 'gray93',
'othermonthweforeground': 'gray45',
'othermonthwebackground': 'gray75',
'weekendbackground': 'gray80',
'weekendforeground': 'gray30',
'headersbackground': 'gray70',
'headersforeground': 'black',
'disableddaybackground': dis_bg,
'disableddayforeground': dis_fg,
'tooltipforeground': 'gray90',
'tooltipbackground': 'black',
'tooltipalpha': 0.8,
'tooltipdelay': 2000}
self._properties.update(kw)
# --- calevents
self.calevents = {} # special events displayed in colors and with tooltips to show content
self._calevent_dates = {} # list of event ids for each date
self._tags = {} # tags to format event display
self.tooltip_wrapper = TooltipWrapper(self,
alpha=self._properties['tooltipalpha'],
style=self._style_prefixe + '.tooltip.TLabel',
delay=self._properties['tooltipdelay'])
# --- init calendar
# --- *-- header: month - year
self._header = ttk.Frame(self, style='main.%s.TFrame' % self._style_prefixe)
f_month = ttk.Frame(self._header,
style='main.%s.TFrame' % self._style_prefixe)
self._l_month = ttk.Button(f_month,
style='L.%s.TButton' % self._style_prefixe,
command=self._prev_month)
self._header_month = ttk.Label(f_month, width=10, anchor='center',
style='main.%s.TLabel' % self._style_prefixe, font=self._header_font)
self._r_month = ttk.Button(f_month,
style='R.%s.TButton' % self._style_prefixe,
command=self._next_month)
self._l_month.pack(side='left', fill="y")
self._header_month.pack(side='left', padx=4)
self._r_month.pack(side='left', fill="y")
f_year = ttk.Frame(self._header, style='main.%s.TFrame' % self._style_prefixe)
self._l_year = ttk.Button(f_year, style='L.%s.TButton' % self._style_prefixe,
command=self._prev_year)
self._header_year = ttk.Label(f_year, width=4, anchor='center',
style='main.%s.TLabel' % self._style_prefixe, font=self._header_font)
self._r_year = ttk.Button(f_year, style='R.%s.TButton' % self._style_prefixe,
command=self._next_year)
self._l_year.pack(side='left', fill="y")
self._header_year.pack(side='left', padx=4)
self._r_year.pack(side='left', fill="y")
f_month.pack(side='left', fill='x')
f_year.pack(side='right')
# --- *-- calendar
self._cal_frame = ttk.Frame(self,
style='cal.%s.TFrame' % self._style_prefixe)
ttk.Label(self._cal_frame,
style='headers.%s.TLabel' % self._style_prefixe).grid(row=0,
column=0,
sticky="eswn")
# week day names
self._week_days = []
for i, day in enumerate(self._cal.iterweekdays()):
d = self._day_names[day % 7]
self._cal_frame.columnconfigure(i + 1, weight=1)
self._week_days.append(ttk.Label(self._cal_frame,
font=self._font,
style='headers.%s.TLabel' % self._style_prefixe,
anchor="center",
text=d, width=4))
self._week_days[-1].grid(row=0, column=i + 1, sticky="ew", pady=(0, 1))
self._week_nbs = [] # week numbers
self._calendar = [] # days
for i in range(1, 7):
self._cal_frame.rowconfigure(i, weight=1)
wlabel = ttk.Label(self._cal_frame, style='headers.%s.TLabel' % self._style_prefixe,
font=self._font, padding=2,
anchor="e", width=2)
self._week_nbs.append(wlabel)
wlabel.grid(row=i, column=0, sticky="esnw", padx=(0, 1))
if not showweeknumbers:
wlabel.grid_remove()
self._calendar.append([])
for j in range(1, 8):
label = ttk.Label(self._cal_frame, style='normal.%s.TLabel' % self._style_prefixe,
font=self._font, anchor="center")
self._calendar[-1].append(label)
label.grid(row=i, column=j, padx=(0, 1), pady=(0, 1), sticky="nsew")
if selectmode == "day":
label.bind("<1>", self._on_click)
# --- *-- pack main elements
self._header.pack(fill="x", padx=2, pady=2)
self._cal_frame.pack(fill="both", expand=True, padx=bd, pady=bd)
self.config(state=state)
# --- bindings
self.bind('<<ThemeChanged>>', self._setup_style)
self._setup_style()
self._display_calendar()
self._btns_date_range()
self._check_sel_date()
if self._textvariable is not None:
try:
self._textvariable_trace_id = self._textvariable.trace_add('write', self._textvariable_trace)
except AttributeError:
self._textvariable_trace_id = self._textvariable.trace('w', self._textvariable_trace)
def __getitem__(self, key):
"""Return the resource value for a KEY given as string."""
try:
return self._properties[key]
except KeyError:
raise AttributeError("Calendar object has no attribute %s." % key)
def __setitem__(self, key, value):
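"""Set the resource VALUE for a KEY given as string and update the calendar display accordingly."""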
if key not in self._properties:
raise AttributeError("Calendar object has no attribute %s." % key)
elif key == 'date_pattern':
date_pattern = self._get_date_pattern(value)
self._properties[key] = date_pattern
else:
if key == "selectmode":
if value == "none":
for week in self._calendar:
for day in week:
day.unbind("<1>")
elif value == "day":
for week in self._calendar:
for day in week:
day.bind("<1>", self._on_click)
else:
raise ValueError("'selectmode' option should be 'none' or 'day'.")
elif key == "locale":
self._day_names = get_day_names('abbreviated', locale=value)
self._month_names = get_month_names('wide', locale=value)
self._properties['date_pattern'] = self._get_date_pattern("short", value)
for i, l in enumerate(self._week_days):
l.configure(text=self._day_names[i])
self._header_month.configure(text=self._month_names[self._date.month].title())
elif key == 'textvariable':
try:
if self._textvariable is not None:
self._textvariable.trace_remove('write', self._textvariable_trace_id)
if value is not None:
self._textvariable_trace_id = value.trace_add('write', self._textvariable_trace)
except AttributeError:
if self._textvariable is not None:
self._textvariable.trace_vdelete('w', self._textvariable_trace_id)
if value is not None:
value.trace('w', self._textvariable_trace)
self._textvariable = value
value.set(value.get())
elif key == 'showweeknumbers':
if value:
for wlabel in self._week_nbs:
wlabel.grid()
else:
for wlabel in self._week_nbs:
wlabel.grid_remove()
elif key == 'firstweekday':
if value not in ["monday", "sunday"]:
raise ValueError("'firstweekday' option should be 'monday' or 'sunday'.")
self._cal.firstweekday = (value == 'sunday') * 6
for label, day in zip(self._week_days, self._cal.iterweekdays()):
label.configure(text=self._day_names[day % 7])
elif key == 'weekenddays':
self._check_weekenddays(value)
elif key == 'borderwidth':
try:
bd = int(value)
self._cal_frame.pack_configure(padx=bd, pady=bd)
except ValueError:
raise ValueError('expected integer for the borderwidth option.')
elif key == 'state':
if value not in ['normal', 'disabled']:
raise ValueError("bad state '%s': must be disabled or normal" % value)
else:
state = '!' * (value == 'normal') + 'disabled'
self.state((state,))
self._header.state((state,))
for child in self._header.children.values():
child.state((state,))
self._header_month.state((state,))
self._header_year.state((state,))
self._l_year.state((state,))
self._r_year.state((state,))
self._l_month.state((state,))
self._r_month.state((state,))
for child in self._cal_frame.children.values():
child.state((state,))
elif key == "maxdate":
if value is not None:
if isinstance(value, self.datetime):
value = value.date()
elif not isinstance(value, self.date):
raise TypeError("expected %s for the 'maxdate' option." % self.date)
mindate = self['mindate']
if mindate is not None and mindate > value:
self._properties['mindate'] = value
self._date = self._date.replace(year=value.year, month=value.month)
elif self._date > value:
self._date = self._date.replace(year=value.year, month=value.month)
self._r_month.state(['!disabled'])
self._r_year.state(['!disabled'])
self._l_month.state(['!disabled'])
self._l_year.state(['!disabled'])
elif key == "mindate":
if value is not None:
if isinstance(value, self.datetime):
value = value.date()
elif not isinstance(value, self.date):
raise TypeError("expected %s for the 'mindate' option." % self.date)
maxdate = self['maxdate']
if maxdate is not None and maxdate < value:
self._properties['maxdate'] = value
self._date = self._date.replace(year=value.year, month=value.month)
elif self._date < value:
self._date = self._date.replace(year=value.year, month=value.month)
self._r_month.state(['!disabled'])
self._r_year.state(['!disabled'])
self._l_month.state(['!disabled'])
self._l_year.state(['!disabled'])
elif key == "font":
font = Font(self, value)
prop = font.actual()
self._font.configure(**prop)
prop["size"] += 1
self._header_font.configure(**prop)
size = max(prop["size"], 10)
self.style.configure('R.%s.TButton' % self._style_prefixe, arrowsize=size)
self.style.configure('L.%s.TButton' % self._style_prefixe, arrowsize=size)
elif key == "normalbackground":
self.style.configure('cal.%s.TFrame' % self._style_prefixe, background=value)
self.style.configure('normal.%s.TLabel' % self._style_prefixe, background=value)
self.style.configure('normal_om.%s.TLabel' % self._style_prefixe, background=value)
elif key == "normalforeground":
self.style.configure('normal.%s.TLabel' % self._style_prefixe, foreground=value)
elif key == "bordercolor":
self.style.configure('cal.%s.TFrame' % self._style_prefixe, background=value)
elif key == "othermonthforeground":
self.style.configure('normal_om.%s.TLabel' % self._style_prefixe, foreground=value)
elif key == "othermonthbackground":
self.style.configure('normal_om.%s.TLabel' % self._style_prefixe, background=value)
elif key == "othermonthweforeground":
self.style.configure('we_om.%s.TLabel' % self._style_prefixe, foreground=value)
elif key == "othermonthwebackground":
self.style.configure('we_om.%s.TLabel' % self._style_prefixe, background=value)
elif key == "selectbackground":
self.style.configure('sel.%s.TLabel' % self._style_prefixe, background=value)
elif key == "selectforeground":
self.style.configure('sel.%s.TLabel' % self._style_prefixe, foreground=value)
elif key == "disabledselectbackground":
self.style.map('sel.%s.TLabel' % self._style_prefixe, background=[('disabled', value)])
elif key == "disabledselectforeground":
self.style.map('sel.%s.TLabel' % self._style_prefixe, foreground=[('disabled', value)])
elif key == "disableddaybackground":
self.style.map('%s.TLabel' % self._style_prefixe, background=[('disabled', value)])
elif key == "disableddayforeground":
self.style.map('%s.TLabel' % self._style_prefixe, foreground=[('disabled', value)])
elif key == "weekendbackground":
self.style.configure('we.%s.TLabel' % self._style_prefixe, background=value)
self.style.configure('we_om.%s.TLabel' % self._style_prefixe, background=value)
elif key == "weekendforeground":
self.style.configure('we.%s.TLabel' % self._style_prefixe, foreground=value)
elif key == "headersbackground":
self.style.configure('headers.%s.TLabel' % self._style_prefixe, background=value)
elif key == "headersforeground":
self.style.configure('headers.%s.TLabel' % self._style_prefixe, foreground=value)
elif key == "background":
self.style.configure('main.%s.TFrame' % self._style_prefixe, background=value)
self.style.configure('main.%s.TLabel' % self._style_prefixe, background=value)
self.style.configure('R.%s.TButton' % self._style_prefixe, background=value,
bordercolor=value,
lightcolor=value, darkcolor=value)
self.style.configure('L.%s.TButton' % self._style_prefixe, background=value,
bordercolor=value,
lightcolor=value, darkcolor=value)
elif key == "foreground":
self.style.configure('R.%s.TButton' % self._style_prefixe, arrowcolor=value)
self.style.configure('L.%s.TButton' % self._style_prefixe, arrowcolor=value)
self.style.configure('main.%s.TLabel' % self._style_prefixe, foreground=value)
elif key == "disabledbackground":
self.style.map('%s.TButton' % self._style_prefixe,
background=[('active', '!disabled', self.style.lookup('TEntry', 'selectbackground', ('focus',))),
('disabled', value)],)
self.style.map('main.%s.TFrame' % self._style_prefixe,
background=[('disabled', value)])
self.style.map('main.%s.TLabel' % self._style_prefixe,
background=[('disabled', value)])
elif key == "disabledforeground":
self.style.map('%s.TButton' % self._style_prefixe,
arrowcolor=[('disabled', value)])
self.style.map('main.%s.TLabel' % self._style_prefixe,
foreground=[('disabled', value)])
elif key == "cursor":
ttk.Frame.configure(self, cursor=value)
elif key == "tooltipbackground":
self.style.configure('%s.tooltip.TLabel' % self._style_prefixe,
background=value)
elif key == "tooltipforeground":
self.style.configure('%s.tooltip.TLabel' % self._style_prefixe,
foreground=value)
elif key == "tooltipalpha":
self.tooltip_wrapper.configure(alpha=value)
elif key == "tooltipdelay":
self.tooltip_wrapper.configure(delay=value)
self._properties[key] = value
if key in ['showothermonthdays', 'firstweekday', 'weekenddays',
'maxdate', 'mindate']:
self._display_calendar()
self._check_sel_date()
self._btns_date_range()
@staticmethod
def _check_weekenddays(weekenddays):
try:
if len(weekenddays) != 2:
raise ValueError("weekenddays should be a list of two days.")
else:
for d in weekenddays:
if d not in range(1, 8):
raise ValueError("weekenddays should contain integers between 1 and 7.")
except TypeError:
raise TypeError("weekenddays should be a list of two days.")
def _textvariable_trace(self, *args):
"""Connect StringVar value with selected date."""
if self._properties.get("selectmode") == "day":
date = self._textvariable.get()
if not date:
self._remove_selection()
self._sel_date = None
else:
try:
self._sel_date = self.parse_date(date)
except Exception:
if self._sel_date is None:
self._textvariable.set('')
else:
self._textvariable.set(self.format_date(self._sel_date))
raise ValueError("%r is not a valid date." % date)
else:
self._date = self._sel_date.replace(day=1)
self._display_calendar()
self._display_selection()
def _setup_style(self, event=None):
"""Configure style."""
self.style.layout('L.%s.TButton' % self._style_prefixe,
[('Button.focus',
{'children': [('Button.leftarrow', None)]})])
self.style.layout('R.%s.TButton' % self._style_prefixe,
[('Button.focus',
{'children': [('Button.rightarrow', None)]})])
active_bg = self.style.lookup('TEntry', 'selectbackground', ('focus',))
sel_bg = self._properties.get('selectbackground')
sel_fg = self._properties.get('selectforeground')
dis_sel_bg = self._properties.get('disabledselectbackground')
dis_sel_fg = self._properties.get('disabledselectforeground')
dis_day_bg = self._properties.get('disableddaybackground')
dis_day_fg = self._properties.get('disableddayforeground')
cal_bg = self._properties.get('normalbackground')
cal_fg = self._properties.get('normalforeground')
hd_bg = self._properties.get("headersbackground")
hd_fg = self._properties.get("headersforeground")
bg = self._properties.get('background')
fg = self._properties.get('foreground')
dis_bg = self._properties.get('disabledbackground')
dis_fg = self._properties.get('disabledforeground')
bc = self._properties.get('bordercolor')
om_fg = self._properties.get('othermonthforeground')
om_bg = self._properties.get('othermonthbackground')
omwe_fg = self._properties.get('othermonthweforeground')
omwe_bg = self._properties.get('othermonthwebackground')
we_bg = self._properties.get('weekendbackground')
we_fg = self._properties.get('weekendforeground')
self.style.configure('main.%s.TFrame' % self._style_prefixe, background=bg)
self.style.configure('cal.%s.TFrame' % self._style_prefixe, background=bc)
self.style.configure('main.%s.TLabel' % self._style_prefixe, background=bg, foreground=fg)
self.style.configure('headers.%s.TLabel'
import unittest
from soap.context import context
from soap.datatype import (
auto_type, int_type, float_type, FloatArrayType, ArrayType
)
from soap.expression import (
operators, Variable, Subscript, expression_factory
)
from soap.parser import parse
from soap.semantics.error import IntegerInterval
from soap.semantics.functions.label import label
from soap.semantics.label import Label
from soap.semantics.schedule.distance import (
dependence_vector, dependence_distance, ISLIndependenceException
)
from soap.semantics.schedule.common import schedule_graph
from soap.semantics.schedule.graph import (
LoopScheduleGraph, SequentialScheduleGraph
)
from soap.semantics.schedule.table import LATENCY_TABLE
from soap.semantics.state import flow_to_meta_state
class TestDependenceCheck(unittest.TestCase):
def setUp(self):
self.x = Variable('x', dtype=int_type)
self.y = Variable('y', dtype=int_type)
self.sx = slice(0, 10, 1)
self.sy = slice(1, 11, 1)
def test_simple_subscripts(self):
source = Subscript(
expression_factory(operators.ADD_OP, self.x, IntegerInterval(1)))
sink = Subscript(self.x)
dist_vect = dependence_vector([self.x], [self.sx], source, sink)
self.assertEqual(dist_vect, (1, ))
dist = dependence_distance(dist_vect, [self.sx])
self.assertEqual(dist, 1)
def test_simple_independence(self):
source = Subscript(
expression_factory(operators.ADD_OP, self.x, IntegerInterval(20)))
sink = Subscript(self.x)
self.assertRaises(
ISLIndependenceException, dependence_vector,
[self.x], [self.sx], source, sink)
def test_multi_dim_subscripts(self):
# for (x in 0...9) for (y in 1...10) a[x + 2, y] = ... a[x, y - 1] ...
expr = expression_factory(operators.ADD_OP, self.x, IntegerInterval(2))
source = Subscript(expr, self.y)
expr = expression_factory(
operators.SUBTRACT_OP, self.y, IntegerInterval(1))
sink = Subscript(self.x, expr)
iter_slices = [self.sx, self.sy]
dist_vect = dependence_vector(
[self.x, self.y], iter_slices, source, sink)
self.assertEqual(dist_vect, (2, 1))
dist = dependence_distance(dist_vect, iter_slices)
self.assertEqual(dist, 21)
def test_multi_dim_coupled_subscripts_independence(self):
# for (x in 0...9) { a[x + 1, x + 2] = a[x, x]; }
expr_1 = expression_factory(
operators.ADD_OP, self.x, IntegerInterval(1))
expr_2 = expression_factory(
operators.ADD_OP, self.x, IntegerInterval(2))
source = Subscript(expr_1, expr_2)
sink = Subscript(self.x, self.x)
self.assertRaises(
ISLIndependenceException, dependence_vector,
[self.x], [self.sx], source, sink)
def test_multi_dim_coupled_subscripts_dependence(self):
# for (x in 0...9) { a[x + 1, x + 1] = a[x, x]; }
expr_1 = expression_factory(
operators.ADD_OP, self.x, IntegerInterval(1))
expr_2 = expression_factory(
operators.ADD_OP, self.x, IntegerInterval(1))
source = Subscript(expr_1, expr_2)
sink = Subscript(self.x, self.x)
dist_vect = dependence_vector([self.x], [self.sx], source, sink)
self.assertEqual(dist_vect, (1, ))
class _CommonMixin(unittest.TestCase):
def setUp(self):
context.take_snapshot()
context.ii_precision = 30
context.round_values = False
context.scheduler = 'alap'
self.x = Variable('x', float_type)
self.y = Variable('y', float_type)
self.a = Variable('a', FloatArrayType([30]))
self.b = Variable('b', FloatArrayType([30, 30]))
self.c = Variable('c', FloatArrayType([30]))
self.i = Variable('i', int_type)
def tearDown(self):
context.restore_snapshot()
class TestLoopScheduleGraph(_CommonMixin):
def test_variable_initiation(self):
program = """
#pragma soap input float x
#pragma soap output x
for (int i = 0; i < 9; i = i + 1) {
x = x + 1;
}
"""
fix_expr = flow_to_meta_state(parse(program))[self.x]
graph = LoopScheduleGraph(fix_expr)
ii = graph.initiation_interval()
expect_ii = LATENCY_TABLE[float_type][operators.ADD_OP]
self.assertAlmostEqual(ii, expect_ii)
trip_count = graph.trip_count()
self.assertEqual(trip_count, 9)
latency = graph.latency()
expect_latency = (trip_count - 1) * ii + graph.depth()
self.assertAlmostEqual(latency, expect_latency)
def test_array_independence_initiation(self):
program = """
#pragma soap input float a[30]
#pragma soap output a
for (int i = 0; i < 9; i = i + 1)
a[i] = a[i] + 1;
"""
fix_expr = flow_to_meta_state(parse(program))[self.a]
graph = LoopScheduleGraph(fix_expr)
ii = graph.initiation_interval()
expect_ii = 1
self.assertEqual(ii, expect_ii)
def test_simple_array_initiation(self):
program = """
#pragma soap input float a[30]
#pragma soap output a
for (int i = 0; i < 9; i = i + 1)
a[i] = a[i - 3] + 1;
"""
fix_expr = flow_to_meta_state(parse(program))[self.a]
graph = LoopScheduleGraph(fix_expr)
ii = graph.initiation_interval()
expect_ii = LATENCY_TABLE[float_type][operators.INDEX_ACCESS_OP]
expect_ii += LATENCY_TABLE[float_type][operators.ADD_OP]
expect_ii += LATENCY_TABLE[ArrayType][operators.INDEX_UPDATE_OP]
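# a[i] depends on a[i - 3], so the recurrence-bound II is the cycle latency divided by the dependence distance of 3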
expect_ii /= 3
self.assertAlmostEqual(ii, expect_ii)
def test_simple_array_unroll_initiation(self):
program = """
#pragma soap input float a[30]
#pragma soap output a
for (int i = 0; i < 10; i = i + 1)
a[i] = a[i - 1] + 1.0;
"""
unroll_program = """
#pragma soap input float a[30]
#pragma soap output a
for (int i = 0; i < 9; i = i + 2) {
a[i] = a[i - 1] + 1.0;
a[i + 1] = a[i] + 1.0;
}
"""
fix_expr = flow_to_meta_state(parse(program))[self.a]
graph = LoopScheduleGraph(fix_expr)
ii = graph.initiation_interval()
unroll_fix_expr = flow_to_meta_state(parse(unroll_program))[self.a]
unroll_graph = LoopScheduleGraph(unroll_fix_expr)
unroll_ii = unroll_graph.initiation_interval()
expect_ii = LATENCY_TABLE[float_type][operators.INDEX_ACCESS_OP]
expect_ii += LATENCY_TABLE[float_type][operators.ADD_OP]
expect_ii += LATENCY_TABLE[ArrayType][operators.INDEX_UPDATE_OP]
self.assertAlmostEqual(ii, expect_ii)
self.assertAlmostEqual(unroll_ii, 2 * expect_ii)
def test_transitive_initiation(self):
program = """
#pragma soap output y
float x = 1.0;
float x0 = x;
float y = 1.0;
for (int i = 0; i < 9; i = i + 1) {
x0 = x;
x = y + 1.0;
y = x0 * 2.0;
}
"""
fix_expr = flow_to_meta_state(parse(program))[self.y]
graph = LoopScheduleGraph(fix_expr)
ii = graph.initiation_interval()
expect_ii = LATENCY_TABLE[float_type][operators.ADD_OP]
expect_ii += LATENCY_TABLE[float_type][operators.MULTIPLY_OP]
expect_ii /= 2
self.assertAlmostEqual(ii, expect_ii)
def test_mixed_array_transitive_initiation(self):
program = """
#pragma soap input float a[30], float c[30]
#pragma soap output a
for (int i = 0; i < 9; i = i + 1) {
a[i] = c[i - 1] + 1.0;
c[i] = a[i - 1] * 2.0;
}
"""
fix_expr = flow_to_meta_state(parse(program))[self.a]
graph = LoopScheduleGraph(fix_expr)
ii = graph.initiation_interval()
expect_ii = LATENCY_TABLE[float_type][operators.INDEX_ACCESS_OP]
expect_ii += LATENCY_TABLE[float_type][operators.ADD_OP]
expect_ii += LATENCY_TABLE[ArrayType][operators.INDEX_UPDATE_OP]
expect_ii += LATENCY_TABLE[float_type][operators.INDEX_ACCESS_OP]
expect_ii += LATENCY_TABLE[float_type][operators.MULTIPLY_OP]
expect_ii += LATENCY_TABLE[ArrayType][operators.INDEX_UPDATE_OP]
expect_ii /= 2
self.assertAlmostEqual(ii, expect_ii)
def test_recurrence_info(self):
program = """
#pragma soap input float a[30]
#pragma soap output a
for (int i = 0; i < 30; i = i + 1)
a[i] = a[i - 3] * 2;
"""
flow = parse(program)
meta_state = flow_to_meta_state(flow)
fix_expr = meta_state[flow.outputs[0]]
graph = LoopScheduleGraph(fix_expr)
im1 = expression_factory(
operators.SUBTRACT_OP, self.i, IntegerInterval(3))
access = expression_factory(
operators.INDEX_ACCESS_OP, self.a, Subscript(im1))
update = expression_factory(
operators.INDEX_UPDATE_OP, self.a, Subscript(self.i),
Variable('__dont_care', auto_type))
compare_set = {
(self.i, self.i, 1),
(access, update, 3),
}
self.assertSetEqual(graph.recurrences, compare_set)
class TestSequentialScheduleGraph(_CommonMixin):
def _to_graph(self, program):
flow = parse(program)
meta_state = flow_to_meta_state(flow)
outputs = flow.outputs
lab, env = label(meta_state, None, outputs)
return SequentialScheduleGraph(env, outputs)
def _simple_dag(self):
program = """
#pragma soap input float w, float x, int y, int z
#pragma soap output x
x = (w + x) * (y + z) - (w + z);
"""
return self._to_graph(program)
def test_simple_dag_latency(self):
graph = self._simple_dag()
expect_latency = LATENCY_TABLE[float_type][operators.ADD_OP]
expect_latency += LATENCY_TABLE[float_type][operators.MULTIPLY_OP]
expect_latency += LATENCY_TABLE[float_type][operators.SUBTRACT_OP]
self.assertEqual(graph.latency(), expect_latency)
def test_simple_dag_resource(self):
graph = self._simple_dag()
total_map, min_alloc_map = graph.resource()
compare_total_map = {
(int_type, operators.ADD_OP): 1,
(float_type, operators.ADD_OP): 3,
(float_type, operators.MULTIPLY_OP): 1,
}
compare_min_alloc_map = {
(int_type, operators.ADD_OP): 1,
(float_type, operators.ADD_OP): 1,
(float_type, operators.MULTIPLY_OP): 1,
}
self.assertEqual(total_map, compare_total_map)
self.assertEqual(min_alloc_map, compare_min_alloc_map)
def test_simple_recurrence_aware_latency(self):
program = """
#pragma soap input float x, int y, float z
#pragma soap output x
x = z * z * z * z * z * z * z * z * z * z * z * z * z + (x * y + x);
"""
graph = self._to_graph(program)
graph.recurrences = [(self.x, self.x, 1)]
expect_latency = LATENCY_TABLE[float_type][operators.MULTIPLY_OP]
expect_latency += LATENCY_TABLE[float_type][operators.ADD_OP]
expect_latency += LATENCY_TABLE[float_type][operators.ADD_OP]
self.assertEqual(graph.latency(), expect_latency)
def test_array_recurrence_aware_latency(self):
program = """
#pragma soap input float a[30], int i
#pragma soap output a
a[i] = (a[i - 1] + (a[i - 2] + a[i - 3])) / 3;
"""
graph = self._to_graph(program)
im1 = expression_factory(
operators.SUBTRACT_OP, self.i, IntegerInterval(1))
access = expression_factory(
operators.INDEX_ACCESS_OP, self.a, Subscript(im1))
update = expression_factory(
operators.INDEX_UPDATE_OP, self.a, Subscript(self.i), None)
graph.recurrences = [(access, update, 1)]
expect_latency = LATENCY_TABLE[float_type][operators.INDEX_ACCESS_OP]
expect_latency += LATENCY_TABLE[float_type][operators.ADD_OP]
expect_latency += LATENCY_TABLE[float_type][operators.DIVIDE_OP]
expect_latency += LATENCY_TABLE[ArrayType][operators.INDEX_UPDATE_OP]
self.assertEqual(graph.latency(), expect_latency)
def test_loop_sequentialization(self):
program = """
#pragma soap input float x[30], float y[30]
#pragma soap output z
for (int i = 1; i < 30; i = i + 1)
x[i] = x[i - 1] + 1;
for (int j = 1; j < 20; j = j + 1)
y[j] = y[j - 1] + 1;
float z = x[0] + y[0];
"""
loop_ii = LATENCY_TABLE[float_type][operators.INDEX_ACCESS_OP]
loop_ii += LATENCY_TABLE[float_type][operators.ADD_OP]
loop_ii += LATENCY_TABLE[ArrayType][operators.INDEX_UPDATE_OP]
loop_depth = loop_ii
loop_depth += LATENCY_TABLE[int_type][operators.SUBTRACT_OP]
trip_count_i = 29
trip_count_j = 19
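# sequential estimate: the steady-state iterations of the two loops add up, plus one full loop depth per loop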
seq_loop_latency = (trip_count_i + trip_count_j - 2) * loop_ii
seq_loop_latency += loop_depth * 2
seq_latency = seq_loop_latency
seq_latency += LATENCY_TABLE[float_type][operators.INDEX_ACCESS_OP]
seq_latency += LATENCY_TABLE[float_type][operators.ADD_OP]
graph = self._to_graph(program)
graph.sequentialize_loops = True
self.assertAlmostEqual(graph.latency(), seq_latency, delta=2)
par_loop_latency = (max(trip_count_i, trip_count_j) - 1) * loop_ii
par_loop_latency += loop_depth
par_latency = par_loop_latency
par_latency += LATENCY_TABLE[float_type][operators.INDEX_ACCESS_OP]
par_latency += LATENCY_TABLE[float_type][operators.ADD_OP]
graph = self._to_graph(program)
graph.sequentialize_loops = False
self.assertAlmostEqual(graph.latency(), par_latency, delta=2)
class TestFullSchedule(_CommonMixin):
def assertStatisticsAlmostEqual(
self, graph, expect_latency, expect_resource, delta=1):
latency = graph.latency()
self.assertAlmostEqual(latency, expect_latency, delta=delta)
total_resource, resource = graph.resource()
self.assertTrue(set(expect_resource) <= set(resource))
for dtype_op, expect_count in expect_resource.items():
count = resource[dtype_op]
try:
self.assertAlmostEqual(count, expect_count, delta=0.01)
except AssertionError:
raise AssertionError(
'Resource count mismatch for operator {}: {} != {}'
.format(dtype_op, count, expect_count))
def test_simple_flow(self):
program = """
#pragma
7.01529154326273*m.x187*m.x187 + 29.5414539159021*m.x188*m.x188 +
38.1756318711238*m.x189*m.x189 + 8.58104823499548*m.x190*m.x190 + 7.69711583001143*m.x191*m.x191
+ 42.4371248163469*m.x192*m.x192 + 4.59409541976021*m.x193*m.x193 + 28.1235883297576*m.x194*
m.x194 + 47.6916842358554*m.x195*m.x195 + 27.1839166296889*m.x196*m.x196 + 34.3442197234332*
m.x197*m.x197 + 41.449320254595*m.x198*m.x198 + 19.0074548015991*m.x199*m.x199 + 30.5239256850067
*m.x200*m.x200 + 21.6034519208861*m.x201*m.x201 + 15.0364791559573*m.x202*m.x202 +
10.9122403828508*m.x203*m.x203 + 26.5498628209752*m.x204*m.x204 + 2.85517352456554*m.x205*m.x205
+ 14.3944475974119*m.x206*m.x206 + 17.8303957927801*m.x207*m.x207 + 18.8507158873978*m.x208*
m.x208 + 17.8500424336862*m.x209*m.x209 + 29.662423104137*m.x210*m.x210 + 28.5445975843512*m.x211
*m.x211 + 16.9256148790629*m.x212*m.x212 + 40.3065698346231*m.x213*m.x213 + 15.0916313984205*
m.x214*m.x214 + 45.1764582705759*m.x215*m.x215 + 19.4831745475101*m.x216*m.x216 +
29.3725674554356*m.x217*m.x217 + 44.731112827661*m.x218*m.x218 + 42.5239940840714*m.x219*m.x219
+ 34.7422328142325*m.x220*m.x220 + 9.56259782677507*m.x221*m.x221 + 44.5183379515596*m.x222*
m.x222 + 43.1496210295644*m.x223*m.x223 + 13.4063833026897*m.x224*m.x224 + 17.3538516477183*
m.x225*m.x225 + 28.0201386197756*m.x226*m.x226 + 33.2510480473979*m.x227*m.x227 +
39.1950454740892*m.x228*m.x228 + 19.0749534705101*m.x229*m.x229 + 29.906167363235*m.x230*m.x230
+ 32.6505026879169*m.x231*m.x231 + 16.4969329551662*m.x232*m.x232 + 35.3141598922056*m.x233*
m.x233 + 39.0252650952467*m.x234*m.x234 + 29.938751063908*m.x235*m.x235 + 26.2431654020124*m.x236
*m.x236 + 17.6402775915806*m.x237*m.x237 + 21.1630857812134*m.x238*m.x238 + 32.0599203901578*
m.x239*m.x239 + 11.5212775085687*m.x240*m.x240 + 4.08359203762451*m.x241*m.x241 +
36.7723808801052*m.x242*m.x242 + 14.8331100451586*m.x243*m.x243 + 18.0680240767646*m.x244*m.x244
+ 39.6231855790722*m.x245*m.x245 + 24.2940900497125*m.x246*m.x246 + 36.2696520369664*m.x247*
m.x247 + 34.9806914576888*m.x248*m.x248 + 10.7781714676847*m.x249*m.x249 + 27.1927719936449*
m.x250*m.x250 + 39.2235284102308*m.x251*m.x251 + 46.81871185907*m.x252*m.x252 + 29.7554930096906*
m.x253*m.x253 + 29.8593540779954*m.x254*m.x254 + 39.8469146973509*m.x255*m.x255 +
30.8766835617017*m.x256*m.x256 + 37.5112051189399*m.x257*m.x257 + 42.5777572849887*m.x258*m.x258
+ 35.5929248730112*m.x259*m.x259 + 8.75036836178061*m.x260*m.x260 + 18.3306632685796*m.x261*
m.x261 + 28.8006399800856*m.x262*m.x262 + 10.4104638844038*m.x263*m.x263 + 25.9946415925091*
m.x264*m.x264 + 34.5829351714139*m.x265*m.x265 + 20.2281004690202*m.x266*m.x266 +
34.2013260693052*m.x267*m.x267 + 28.7669136159159*m.x268*m.x268 + 26.3010972908322*m.x269*m.x269
+ 25.2740230311798*m.x270*m.x270 + 39.3978078399888*m.x271*m.x271 + 27.1220636854672*m.x272*
m.x272 + 8.14765975567047*m.x273*m.x273 + 35.6206832261575*m.x274*m.x274 + 30.470898602416*m.x275
*m.x275 + 9.51795565513467*m.x276*m.x276 + 20.0177187196987*m.x277*m.x277 + 3.87298729099393*
m.x278*m.x278 + 38.9289895779823*m.x279*m.x279 + 32.8891639309257*m.x280*m.x280 +
18.8450066441327*m.x281*m.x281 + 22.1338366297348*m.x282*m.x282 + 28.1197040918976*m.x283*m.x283
+ 15.2264081770499*m.x284*m.x284 + 36.2111917393707*m.x285*m.x285 + 36.8746985831279*m.x286*
m.x286 + 49.5023743566618*m.x287*m.x287 + 18.006698113536*m.x288*m.x288 + 5.41098116939701*m.x289
*m.x289 + 35.4825636675857*m.x290*m.x290 + 40.9816938000357*m.x291*m.x291 + 1.34474551367002*
m.x292*m.x292 + 43.8705427790599*m.x293*m.x293 + 25.1002650124917*m.x294*m.x294 +
11.3941518471258*m.x295*m.x295 + 17.7973227487902*m.x296*m.x296 + 28.2407666812435*m.x297*m.x297
+ 3.30733460518593*m.x298*m.x298 + 26.7892427381251*m.x299*m.x299 + 14.7163802750569*m.x300*
m.x300 + 12.9638935112588*m.x301*m.x301 + 26.6549567192985*m.x302*m.x302 + 30.9453562112728*
m.x303*m.x303 + 43.8550202201459*m.x304*m.x304 + 28.9026725022899*m.x305*m.x305 + 14.249060442369
*m.x306*m.x306 + 14.1979664603783*m.x307*m.x307 + 45.3986327406191*m.x308*m.x308 +
12.7365472374608*m.x309*m.x309 + 29.0294211183633*m.x310*m.x310 + 12.1555367221088*m.x311*m.x311
+ 11.9146992055163*m.x312*m.x312 + 39.6061281545836*m.x313*m.x313 + 31.2362268652626*m.x314*
m.x314 + 16.7746373962514*m.x315*m.x315 + 16.2958521172435*m.x316*m.x316 + 3.72751754850412*
m.x317*m.x317 + 18.0527543349882*m.x318*m.x318 + 16.6570712657146*m.x319*m.x319 +
9.03515021301791*m.x320*m.x320 + 37.0616605429735*m.x321*m.x321 + 18.5634179938177*m.x322*m.x322
+ 38.6262548798249*m.x323*m.x323 + 16.5339743835349*m.x324*m.x324 + 11.290126171656*m.x325*
m.x325 + 24.326274349439*m.x326*m.x326 + 12.0078534731682*m.x327*m.x327 + 29.0789990413162*m.x328
*m.x328 + 14.49929333473*m.x329*m.x329 + 2.50619869087026*m.x330*m.x330 + 40.756463008065*m.x331*
m.x331 + 18.2471997262686*m.x332*m.x332 + 7.87689270723246*m.x333*m.x333 + 20.2010107208352*
m.x334*m.x334 + 5.74149313485879*m.x335*m.x335 + 7.65551848087413*m.x336*m.x336 +
28.3073343015669*m.x337*m.x337 + 28.6998613588456*m.x338*m.x338 + 27.3612947829915*m.x339*m.x339
+ 18.0762648540954*m.x340*m.x340 + 29.2790538115466*m.x341*m.x341 + 29.1554111576085*m.x342*
m.x342 + 22.9538016354946*m.x343*m.x343 + 33.4718072626091*m.x344*m.x344 + 39.9817984660988*
m.x345*m.x345 + 14.3334323264211*m.x346*m.x346 + 8.82097755804155*m.x347*m.x347 +
30.5501773132439*m.x348*m.x348 + 24.962726994492*m.x349*m.x349 + 16.3307079736418*m.x350*m.x350
+ 16.9558660846385*m.x351*m.x351 + 30.9314989229319*m.x352*m.x352 + 34.5423416714242*m.x353*
m.x353 + 46.6619312989622*m.x354*m.x354 + 33.228836993836*m.x355*m.x355 + 18.4954712835656*m.x356
*m.x356 + 18.5054882752769*m.x357*m.x357 + 49.2378067596818*m.x358*m.x358 + 17.1044218096962*
m.x359*m.x359 + 30.2034384025674*m.x360*m.x360 + 12.6321367305542*m.x361*m.x361 +
16.0141290858008*m.x362*m.x362 + 40.0723136631917*m.x363*m.x363 + 34.459620698161*m.x364*m.x364
+ 12.3956432712284*m.x365*m.x365 + 19.0188375221311*m.x366*m.x366 + 6.19831548199877*m.x367*
m.x367 + 13.9221119060892*m.x368*m.x368 + 12.8122273796007*m.x369*m.x369 + 6.13370434188828*
m.x370*m.x370 + 41.0968375844964*m.x371*m.x371 + 14.6186931189279*m.x372*m.x372 +
38.4921488282025*m.x373*m.x373 + 20.9302603618723*m.x374*m.x374 + 15.5636246925953*m.x375*m.x375
+ 25.4596194393136*m.x376*m.x376 + 10.6294797845934*m.x377*m.x377 + 28.4688565296775*m.x378*
m.x378 + 18.6954138416337*m.x379*m.x379 + 4.61218640637729*m.x380*m.x380 + 42.5007124541763*
m.x381*m.x381 + 21.3765252652025*m.x382*m.x382 + 4.00129446992529*m.x383*m.x383 +
18.2915426353969*m.x384*m.x384 + 7.77325147746949*m.x385*m.x385 + 11.1092013653379*m.x386*m.x386
+ 32.4897962503184*m.x387*m.x387 + 31.1278624253877*m.x388*m.x388 + 27.9717393018571*m.x389*
m.x389 + 22.4594220475599*m.x390*m.x390 + 33.6318811657835*m.x391*m.x391 + 29.0443297366047*
m.x392*m.x392 + 27.2379488169805*m.x393*m.x393 + 36.4858131458785*m.x394*m.x394 +
40.6018713128095*m.x395*m.x395 + 16.0629269346546*m.x396*m.x396 + 4.81525668497211*m.x397*m.x397
+ 30.9311804481859*m.x398*m.x398 + 28.5318041442341*m.x399*m.x399 + 17.2825839158902*m.x400*
m.x400 + 15.3886356195344*m.x401*m.x401 + 3.639078617158*m.x402*m.x402 + 21.9838884507305*m.x403*
m.x403 + 37.9088485968505*m.x404*m.x404 + 8.87861161563052*m.x405*m.x405 + 14.7868928239645*
m.x406*m.x406 + 12.6377267663112*m.x407*m.x407 + 29.0613478323192*m.x408*m.x408 +
13.9043046834911*m.x409*m.x409 + 37.7787079397065*m.x410*m.x410 + 31.2722818316731*m.x411*m.x411
+ 17.6097587036563*m.x412*m.x412 + 49.1632677838196*m.x413*m.x413 + 25.8124618085875*m.x414*
m.x414 + 43.2140317605441*m.x415*m.x415 + 24.463948149118*m.x416*m.x416 + 25.8715293420462*m.x417
*m.x417 + 44.4982880887476*m.x418*m.x418 + 42.7658214659465*m.x419*m.x419 + 34.7301431733577*
m.x420*m.x420 + 19.9651966195937*m.x421*m.x421 + 44.7559504569515*m.x422*m.x422 +
51.1795876779508*m.x423*m.x423 + 10.5463421565483*m.x424*m.x424 + 16.804561010205*m.x425*m.x425
+ 35.0311288406906*m.x426*m.x426 + 34.9761049599032*m.x427*m.x427 + 45.3802365093719*m.x428*
m.x428 + 12.9442458718436*m.x429*m.x429 + 26.9570625559902*m.x430*m.x430 + 43.0190756152088*
m.x431*m.x431 + 22.3365314922575*m.x432*m.x432 + 34.3665490926145*m.x433*m.x433 +
42.2281265185792*m.x434*m.x434 + 25.6594032641024*m.x435*m.x435 + 21.4357901433689*m.x436*m.x436
+ 6.26267204158932*m.x437*m.x437 + 30.5018588171929*m.x438*m.x438 + 39.257458945887*m.x439*
m.x439 + 9.69057193666559*m.x440*m.x440 + 7.97074757931595*m.x441*m.x441 + 43.5336604621135*
m.x442*m.x442 + 4.90864265710208*m.x443*m.x443 + 28.9113381092315*m.x444*m.x444 +
48.6980466468667*m.x445*m.x445 + 28.3026833848487*m.x446*m.x446 + 35.3388601794511*m.x447*m.x447
+ 42.5231486648828*m.x448*m.x448 + 19.9318185520225*m.x449*m.x449 + 31.6423287145795*m.x450*
m.x450 + 33.282707909069*m.x451*m.x451 + 41.0608639131094*m.x452*m.x452 + 25.7321538738961*m.x453
*m.x453 + 28.8438679424945*m.x454*m.x454 + 34.6761504250219*m.x455*m.x455 + 25.0008354295236*
m.x456*m.x456 + 31.5674311819617*m.x457*m.x457 + 39.5642598961721*m.x458*m.x458 +
29.6465578588287*m.x459*m.x459 + 7.07047231601392*m.x460*m.x460 + 12.7994848939547*m.x461*m.x461
+ 22.8716222353359*m.x462*m.x462 + 14.8382270666204*m.x463*m.x463 + 22.4416007031607*m.x464*
m.x464 + 30.8985670539825*m.x465*m.x465 + 14.427582480897*m.x466*m.x466 + 28.6151196262587*m.x467
*m.x467 + 25.7309181132432*m.x468*m.x468 + 23.1025878913385*m.x469*m.x469 + 20.6140884292046*
m.x470*m.x470 + 35.344002471579*m.x471*m.x471 + 24.278266637956*m.x472*m.x472 + 13.8818804931538*
m.x473*m.x473 + 29.7562355760092*m.x474*m.x474 + 24.5291936186927*m.x475*m.x475 +
4.72430449546098*m.x476*m.x476 + 15.332468275175*m.x477*m.x477 + 6.75609361846499*m.x478*m.x478
+ 32.980605924041*m.x479*m.x479 + 27.3741928229052*m.x480*m.x480 + 19.8022677182581*m.x481*
m.x481 + 16.5635035411547*m.x482*m.x482 + 23.389450475074*m.x483*m.x483 + 12.7332729775241*m.x484
*m.x484 + 30.6128872291588*m.x485*m.x485 + 31.0684274756472*m.x486*m.x486 + 43.7147740452919*
m.x487*m.x487 + 14.7266857444304*m.x488*m.x488 + 3.15313070572545*m.x489*m.x489 + 29.686045042527
*m.x490*m.x490 + 35.7694996274889*m.x491*m.x491 + 4.80666817925016*m.x492*m.x492 +
38.0428267153991*m.x493*m.x493 + 22.1899496554312*m.x494*m.x494 + 15.4122116584851*m.x495*m.x495
+ 11.8549162690246*m.x496*m.x496 + 23.6482319628512*m.x497*m.x497 + 5.6969741987334*m.x498*
m.x498 + 22.0013339847288*m.x499*m.x499 + 8.84520143140503*m.x500*m.x500 + 12.0538814756566*
m.x501*m.x501 + 16.23466497608*m.x502*m.x502 + 16.5974844437991*m.x503*m.x503 + 31.7485567694529*
m.x504*m.x504 + 13.4799029429327*m.x505*m.x505 + 1.6890776482773*m.x506*m.x506 + 8.65473826695797
*m.x507*m.x507 + 30.0182361507271*m.x508*m.x508 + 7.39385125660418*m.x509*m.x509 +
24.8898494472728*m.x510*m.x510 + 17.1458543199508*m.x511*m.x511 + 4.07954905540417*m.x512*m.x512
+ 36.5641453316698*m.x513*m.x513 + 18.3527554958907*m.x514*m.x514 + 32.2990195299471*m.x515*
m.x515 + 10.4355746931838*m.x516*m.x516 + 16.8958928748594*m.x517*m.x517 + 32.1005571299358*
m.x518*m.x518 + 30.0168208743099*m.x519*m.x519 + 22.0878462792558*m.x520*m.x520 +
21.3595249328992*m.x521*m.x521 + 32.0248725946701*m.x522*m.x522 + 37.8479790928498*m.x523*m.x523
+ 5.14439359808491*m.x524*m.x524 + 4.5971129987593*m.x525*m.x525 + 21.4696441079178*m.x526*
m.x526 + 21.2805529434135*m.x527*m.x527 + 31.259300587242*m.x528*m.x528 + 10.3241406903707*m.x529
*m.x529 + 17.2363361732175*m.x530*m.x530 + 32.8392792483173*m.x531*m.x531 + 9.01571121435599*
m.x532*m.x532 + 22.482591196072*m.x533*m.x533 + 28.0848265504106*m.x534*m.x534 + 17.7698968345925
*m.x535*m.x535 + 14.6553136926935*m.x536*m.x536 + 18.8623807692585*m.x537*m.x537 +
19.5554867391833*m.x538*m.x538 + 25.6659911039689*m.x539*m.x539 + 4.83536803895401*m.x540*m.x540
+ 14.0783028808137*m.x541*m.x541 + 29.6359612370616*m.x542*m.x542 + 13.211272683702*m.x543*
m.x543 + 21.2429061872888*m.x544*m.x544 + 36.3386717344397*m.x545*m.x545 + 13.985698109692*m.x546
*m.x546 + 23.4457200696784*m.x547*m.x547 + 29.0516922884994*m.x548*m.x548 + 11.2421477455841*
m.x549*m.x549 + 17.3344690834011*m.x550*m.x550 + 38.8186738836796*m.x551*m.x551 +
47.7727698397582*m.x552*m.x552 + 33.0094151165075*m.x553*m.x553 + 34.5270044628545*m.x554*m.x554
+ 41.951347559798*m.x555*m.x555 + 31.4943201221686*m.x556*m.x556 + 37.5614833814483*m.x557*
m.x557 + 46.508078223137*m.x558*m.x558 + 35.607459570993*m.x559*m.x559 + 12.8629527812987*m.x560*
m.x560 + 16.9874558039294*m.x561*m.x561 + 29.1158999260905*m.x562*m.x562 + 14.8229090731901*
m.x563*m.x563 + 29.5591950252006*m.x564*m.x564 + 30.7602855525051*m.x565*m.x565 +
21.2673891854345*m.x566*m.x566 + 32.5197518524081*m.x567*m.x567 + 24.6273710555267*m.x568*m.x568
+ 22.2958483393986*m.x569*m.x569 + 22.4519075030253*m.x570*m.x570 + 42.6443825268819*m.x571*
m.x571 + 22.9063340919059*m.x572*m.x572 + 11.2236520865135*m.x573*m.x573 + 36.2503134967502*
m.x574*m.x574 + 30.6247770245502*m.x575*m.x575 + 12.0262968291735*m.x576*m.x576 +
17.4189668845633*m.x577*m.x577 + 1.07807177006453*m.x578*m.x578 + 38.8533687540994*m.x579*m.x579
+ 31.0828245157535*m.x580*m.x580 + 23.7802061906022*m.x581*m.x581 + 23.6782788347268*m.x582*
m.x582 + 25.3009835213528*m.x583*m.x583 + 11.2145227907086*m.x584*m.x584 + 34.5286181976978*
m.x585*m.x585 + 35.7406076900691*m.x586*m.x586 + 50.3537267047179*m.x587*m.x587 +
21.6735949371723*m.x588*m.x588 + 8.80483309985958*m.x589*m.x589 + 36.3692854321993*m.x590*m.x590
+ 43.0258732187857*m.x591*m.x591 + 4.35080871179049*m.x592*m.x592 + 44.6026430699056*m.x593*
m.x593 + 29.0245970069113*m.x594*m.x594 + 15.9708973683108*m.x595*m.x595 + 17.8191905714001*
m.x596*m.x596 + 25.2750311040078*m.x597*m.x597 + 8.05272122989117*m.x598*m.x598 +
29.3675855272193*m.x599*m.x599 + 14.4611612667033*m.x600*m.x600 + 27.1349232013116*m.x601*m.x601
+ 36.1963158880999*m.x602*m.x602 + 25.0729344305518*m.x603*m.x603 + 31.841416983906*m.x604*
m.x604 + 31.4903604933699*m.x605*m.x605 + 19.8503252733427*m.x606*m.x606 + 25.7875846488454*
m.x607*m.x607 + 39.8893048433744*m.x608*m.x608 + 23.8342828876696*m.x609*m.x609 +
11.9873416112886*m.x610*m.x610 + 6.08320486919953*m.x611*m.x611 + 17.3810668616685*m.x612*m.x612
+ 21.5038157918647*m.x613*m.x613 + 22.7326429776787*m.x614*m.x614 + 25.3935835758863*m.x615*
m.x615 + 10.2302140600641*m.x616*m.x616 + 21.9039160471843*m.x617*m.x617 + 21.0461583159419*
m.x618*m.x618 + 18.3554229403566*m.x619*m.x619 + 14.4063445842059*m.x620*m.x620 +
34.2473722804359*m.x621*m.x621 + 19.8740811214005*m.x622*m.x622 + 20.4867118785569*m.x623*m.x623
+ 24.581251854727*m.x624*m.x624 + 18.85694105891*m.x625*m.x625 + 7.43400076370344*m.x626*m.x626
+ 9.28941505365061*m.x627*m.x627 + 11.7023373643641*m.x628*m.x628 + 27.0902682630276*m.x629*
m.x629 + 20.654078037651*m.x630*m.x630 + 24.8480048974215*m.x631*m.x631 + 13.1276976307904*m.x632
*m.x632 + 17.0694131651193*m.x633*m.x633 + 10.1207173972924*m.x634*m.x634 + 23.9052161505626*
m.x635*m.x635 + 24.5040649528682*m.x636*m.x636 + 38.7209128355565*m.x637*m.x637 +
16.2355161949925*m.x638*m.x638 + 9.3590011098723*m.x639*m.x639 + 24.8218966662312*m.x640*m.x640
+ 32.4654923837817*m.x641*m.x641 + 11.084901446957*m.x642*m.x642 + 32.9393880967354*m.x643*
m.x643 + 23.3997133416003*m.x644*m.x644 + 21.9929906253402*m.x645*m.x645 + 6.10348843319099*
m.x646*m.x646 + 17.4398598099457*m.x647*m.x647 + 12.3857531665369*m.x648*m.x648 +
20.1626389390212*m.x649*m.x649 + 2.72300341131078*m.x650*m.x650 + 7.18675342864002*m.x651*m.x651
+ 8.61325409319841*m.x652*m.x652 + 23.0024072338296*m.x653*m.x653 + 39.0179799785569*m.x654*
m.x654 + 13.6461820638952*m.x655*m.x655 + 8.59504201092474*m.x656*m.x656 + 4.10019151732122*
m.x657*m.x657 + 33.7543697359988*m.x658*m.x658 + 5.50763660772865*m.x659*m.x659 +
33.9546508091896*m.x660*m.x660 + 24.2476026741446*m.x661*m.x661 + 10.8550405823299*m.x662*m.x662
+ 45.6321642385193*m.x663*m.x663 + 25.8203330146643*m.x664*m.x664 + 34.692191834533*m.x665*
m.x665 + 19.3022929908536*m.x666*m.x666 + 17.3345052586859*m.x667*m.x667 + 36.1838530587943*
m.x668*m.x668 + 34.5672477739713*m.x669*m.x669 + 26.5766432434876*m.x670*m.x670 +
24.4674981228843*m.x671*m.x671 + 36.5372742530649*m.x672*m.x672 + 46.8208721569115*m.x673*m.x673
+ 3.9263580310218*m.x674*m.x674 + 9.49867763545034*m.x675*m.x675 + 30.436888181807*m.x676*m.x676
+ 27.3762785425725*m.x677*m.x677 + 39.7729121636589*m.x678*m.x678 + 4.60574093214427*m.x679*
m.x679 + 18.428173343741*m.x680*m.x680 + 41.5379576085564*m.x681*m.x681 + 18.0849252063193*m.x682
*m.x682 + 26.0121740930698*m.x683*m.x683 + 35.053149685726*m.x684*m.x684 + 17.169964483385*m.x685
*m.x685 + 12.9770435920073*m.x686*m.x686 + 10.7294724624106*m.x687*m.x687 + 28.3239614362299*
m.x688*m.x688 + 34.5999306420356*m.x689*m.x689 + 4.85181277416448*m.x690*m.x690 +
13.4342599191064*m.x691*m.x691 + 38.3854492671269*m.x692*m.x692 + 4.96229763828965*m.x693*m.x693
+ 28.8998131291662*m.x694*m.x694 + 45.4061767525307*m.x695*m.x695 + 22.1479065878021*m.x696*
m.x696 + 26.9768378537689*m.x697*m.x697 + 38.0111413600614*m.x698*m.x698 + 18.8610069897279*
m.x699*m.x699 + 25.5211139512462*m.x700*m.x700 + 38.776585022583*m.x701*m.x701 + 33.1096406144515
*m.x702*m.x702 + 11.3502527294494*m.x703*m.x703 + 11.7400688122056*m.x704*m.x704 +
20.6077027887146*m.x705*m.x705 + 29.282835213349*m.x706*m.x706 + 34.9663636747835*m.x707*m.x707
+ 5.84090124783487*m.x708*m.x708 + 34.4281134804593*m.x709*m.x709 + 28.2365219752888*m.x710*
m.x710 + 37.1105134352139*m.x711*m.x711 + 30.7514685893966*m.x712*m.x712 + 34.7515666674737*
m.x713*m.x713 + 12.3137735512548*m.x714*m.x714 + 57.1601882065354*m.x715*m.x715 +
27.6402367107267*m.x716*m.x716 + 44.1590225694267*m.x717*m.x717 + 54.7838417141612*m.x718*m.x718
+ 52.2038050664828*m.x719*m.x719 + 45.6002942862698*m.x720*m.x720 + 9.93439426481814*m.x721*
m.x721 + 54.0138442825685*m.x722*m.x722 + 39.3771223207845*m.x723*m.x723 + 30.3703842060224*
m.x724*m.x724 + 32.0017728676966*m.x725*m.x725 + 29.6032812874892*m.x726*m.x726 +
42.2442365607667*m.x727*m.x727 + 40.0836582178245*m.x728*m.x728 + 36.4405402405692*m.x729*m.x729
+ 44.1970059331029*m.x730*m.x730 + 23.254815664039*m.x731*m.x731 + 24.7726027650492*m.x732*
m.x732 + 47.119494838472*m.x733*m.x733 + 45.0938626124197*m.x734*m.x734 + 45.2506856078179*m.x735
*m.x735 + 42.2529198821386*m.x736*m.x736 + 35.5214329634158*m.x737*m.x737 + 19.4736293316317*
m.x738*m.x738 + 32.1951095193572*m.x739*m.x739 + 28.5673787476742*m.x740*m.x740 +
21.5312774593735*m.x741*m.x741 + 36.8889265434687*m.x742*m.x742 + 33.1559632643192*m.x743*m.x743
+ 12.0076821609823*m.x744*m.x744 + 33.5416889395295*m.x745*m.x745 + 32.1748487789037*m.x746*
m.x746 + 47.9492673850307*m.x747*m.x747 + 33.7064731313384*m.x748*m.x748 +
# This file contains fairly exhaustive tests of almost all the methods
# supported by the Python `str` type, and tests that the `untrusted.string` type:
# * correctly supports the same methods
# * accepts `str` and/or `untrusted.string` arguments interchangeably
# * never returns `str` or any iterable of `str`, only an
# appropriate `untrusted.*` type.
# Also tests that subclassed instances of untrusted.string work
import untrusted
from sys import stderr
import html
class customstring(untrusted.string):
pass
def same(a, b):
if type(a) != type(b):
return False
if isinstance(a, untrusted.string):
a = a.value
if isinstance(b, untrusted.string):
b = b.value
if a != b:
return False
return True
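# same() is intentionally stricter than ==: untrusted.string compares equal to
# plain str (see the equality tests below), so the helper first requires
# identical types and only then compares the wrapped values.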
# Test the test
assert same("cat", "cat")
assert not same("cat", "dog")
assert same(untrusted.string("cat"), untrusted.string("cat"))
assert not same(untrusted.string("cat"), untrusted.string("dog"))
assert not same(untrusted.string("cat"), "cat")
assert not same("cat", untrusted.string("cat"))
assert not same("cat", None)
assert not same(untrusted.string("cat"), None)
assert not same(untrusted.string("cat"), customstring("cat"))
assert same(None, None)
# Test an untrusted.string is never None!
try:
_ = untrusted.string(None)
raise AssertionError
except TypeError:
pass
# Test an untrusted.string doesn't print!
try:
print(untrusted.string("Hello"))
raise AssertionError
except TypeError:
pass # expected!
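# (str() conversion of an untrusted string raises TypeError by design, which is
# why print() fails here; presumably the value has to be explicitly escaped or
# validated before it can be rendered as an ordinary str.)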
# Test the subclassed string doesn't print!
try:
print(customstring("Hello"))
raise AssertionError
except TypeError:
pass # expected!
# Test equality checks still work
assert "cat" == untrusted.string("cat")
assert untrusted.string("cat") == "cat"
assert "cat" == customstring("cat")
assert customstring("cat") == "cat"
assert customstring("cat") == untrusted.string("cat")
assert untrusted.string("cat") == customstring("cat")
# Test hashable with set membership
assert "cat" in [untrusted.string("cat"), untrusted.string("dog")]
assert untrusted.string("cat") in [untrusted.string("cat"), untrusted.string("dog")]
assert untrusted.string("cat") in ["cat", "dog"]
# Test hashable with sorted
it = iter(sorted(["cat", "dog", "aligator", "zebra", "mouse"]))
assert same("aligator", next(it))
assert same("cat", next(it))
assert same("dog", next(it))
assert same("mouse", next(it))
assert same("zebra", next(it))
it = iter(sorted([untrusted.string("cat"),
untrusted.string("dog"),
untrusted.string("aligator"),
untrusted.string("zebra"),
untrusted.string("mouse")]))
assert same(untrusted.string("aligator"), next(it))
assert same(untrusted.string("cat"), next(it))
assert same(untrusted.string("dog"), next(it))
assert same(untrusted.string("mouse"), next(it))
assert same(untrusted.string("zebra"), next(it))
it = iter(sorted(untrusted.sequence(["cat", "dog", "aligator", "zebra", "mouse"])))
assert same(untrusted.string("aligator"), next(it))
assert same(untrusted.string("cat"), next(it))
assert same(untrusted.string("dog"), next(it))
assert same(untrusted.string("mouse"), next(it))
assert same(untrusted.string("zebra"), next(it))
# container iteration
for i in "cat":
assert i in ("c", "a", "t")
for i in untrusted.string("cat"):
assert i in ("c", "a", "t")
assert same(i, untrusted.string("c")) or same(i, untrusted.string("a")) or same(i, untrusted.string("t"))
# "Strings implement all of the common sequence operations"
# https://docs.python.org/3.4/library/stdtypes.html#typesseq-common
# membership: x in s
assert "a" in "cat"
assert "a" in untrusted.string("cat")
assert untrusted.string("a") in untrusted.string("cat")
assert not ("b" in "cat")
assert not ("b" in untrusted.string("cat"))
assert not (untrusted.string("b") in untrusted.string("cat"))
assert "cat" in "dogcatmouse"
assert "cat" in untrusted.string("dogcatmouse")
assert untrusted.string("cat") in untrusted.string("dogcatmouse")
assert customstring("a") in untrusted.string("cat")
assert untrusted.string("a") in customstring("a")
# membership: x not in s
assert "b" not in "cat"
assert "b" not in untrusted.string("cat")
assert untrusted.string("b") not in untrusted.string("cat")
assert not ("a" not in "cat")
assert not ("a" not in untrusted.string("cat"))
assert not (untrusted.string("a") not in untrusted.string("cat"))
assert customstring("b") not in untrusted.string("cat")
# concatenation: s + t
assert same("cat"+"dog", "catdog")
assert same(untrusted.string("cat") + "dog", untrusted.string("catdog"))
assert same("cat" + untrusted.string("dog"), untrusted.string("catdog"))
assert same(untrusted.string("cat") + untrusted.string("dog"), untrusted.string("catdog"))
# concatenation with subclasses - becomes left-most class
assert same(untrusted.string("a") + customstring("b"), untrusted.string("ab"))
assert same(customstring("a") + untrusted.string("b"), customstring("ab"))
# s * n or n * s - "equivalent to adding s to itself n times"
assert same(3*"cat", "catcatcat")
assert same(3*untrusted.string("cat"), untrusted.string("catcatcat"))
assert same(3*customstring("cat"), customstring("catcatcat"))
assert same("cat"*3, "catcatcat")
assert same(untrusted.string("cat")*3, untrusted.string("catcatcat"))
assert same(customstring("cat")*3, customstring("catcatcat"))
assert same(0*"cat", "")
assert same(0*untrusted.string("cat"), untrusted.string(""))
assert same("cat"*0, "")
assert same(untrusted.string("cat")*0, untrusted.string(""))
# s[i] - item at index i
assert same("cat"[1], "a")
assert same(untrusted.string("cat")[1], untrusted.string("a"))
assert same("cat"[-1], "t")
assert same(untrusted.string("cat")[-1], untrusted.string("t"))
try:
_ = "cat"[4]
raise AssertionError
except IndexError:
pass # expected!
try:
_ = untrusted.string("cat")[4]
raise AssertionError
except IndexError:
pass # expected!
# s[i:j:k] - slice i to j with step k
assert same("dogcatmouse"[3:6], "cat")
assert same(untrusted.string("dogcatmouse")[3:6], untrusted.string("cat"))
assert same(customstring("dogcatmouse")[3:6], customstring("cat"))
assert same("dogcatmouse"[3:6:2], "ct")
assert same(untrusted.string("dogcatmouse")[3:6:2], untrusted.string("ct"))
assert same(customstring("dogcatmouse")[3:6:2], customstring("ct"))
# len(s)
assert len("cat") == 3
assert len(untrusted.string("cat")) == 3
#min(s) smallest item of s
assert same(min("cat"), "a")
assert same(min(untrusted.string("cat")), untrusted.string("a"))
#max(s) largest item of s
assert same(max("cat"), "t")
assert same(max(untrusted.string("cat")), untrusted.string("t"))
# s.index(x[, i[, j]])
# "index of the first occurrence of x in s
# (at or after index i and before index j)"
assert "cat".index("a") == 1
assert untrusted.string("cat").index("a") == 1
assert "dogcatmouse".index("cat") == 3
assert untrusted.string("dogcatmouse").index("cat") == 3
assert untrusted.string("dogcatmouse").index(untrusted.string("cat")) == 3
# s.count(x) - occurrences of x in s
assert "cat".count("a") == 1
assert untrusted.string("cat").count("a") == 1
assert untrusted.string("cat").count(untrusted.string("a")) == 1
assert "cataclasm".count("a") == 3
assert untrusted.string("cataclasm").count("a") == 3
assert untrusted.string("cataclasm").count(untrusted.string("a")) == 3
assert "cat attack".count("at") == 2
assert untrusted.string("cat attack").count("at") == 2
assert untrusted.string("cat attack").count(untrusted.string("at")) == 2
# x.join(y)
assert same(''.join([]), "")
assert same(untrusted.string('').join([]), untrusted.string(""))
assert same(''.join("cat"), "cat")
assert same(untrusted.string('').join("cat"), untrusted.string("cat"))
assert same(untrusted.string('').join(untrusted.string("cat")), untrusted.string("cat"))
assert same(','.join(["cat", "dog", "mouse"]), "cat,dog,mouse")
assert same(untrusted.string(',').join(["cat", "dog", "mouse"]), untrusted.string("cat,dog,mouse"))
assert same(untrusted.string(',').join([untrusted.string("cat"), untrusted.string("dog"), untrusted.string("mouse")]), untrusted.string("cat,dog,mouse"))
# sorry, str('').join(untrusted.string(...)) won't work
# but let's make sure we get an exception
# to be certain that an untrusted.string doesn't ever leak into a normal str
try:
_ = ''.join(untrusted.string("hello"))
raise AssertionError
except TypeError:
pass # expected
try:
_ = ''.join(customstring("hello"))
raise AssertionError
except TypeError:
pass # expected
# x.reversed()
assert same(''.join(reversed("cat")), "tac")
assert same(untrusted.string('').join(reversed(untrusted.string("cat"))), untrusted.string("tac"))
# iteration
for i in "cat":
assert same(i, "c") or same(i, "a") or same(i, "t")
for i in untrusted.string("cat"):
assert same(i, untrusted.string("c")) or same(i, untrusted.string("a")) or same(i, untrusted.string("t"))
# string methods
# https://docs.python.org/3.4/library/stdtypes.html#string-methods
# str.capitalize()
assert same("cAt".capitalize(), "Cat")
assert same(untrusted.string("cAt").capitalize(), untrusted.string("Cat"))
# str.casefold()
assert same("Catß".casefold(), "catss")
assert same(untrusted.string("Catß").casefold(), untrusted.string("catss"))
# str.center(width[, fillchar])
assert same("cat".center(7), "  cat  ")
assert same(untrusted.string("cat").center(7), untrusted.string("  cat  "))
assert same("cat".center(7, "-"), "--cat--")
assert same(untrusted.string("cat").center(7, "-"), untrusted.string("--cat--"))
assert same(untrusted.string("cat").center(7, untrusted.string("-")), untrusted.string("--cat--"))
# str.count(sub[, start[, end]])
assert "dogcatmousecat".count("cat", 0, 3) == 0
assert "dogcatmousecat".count("cat", 3, 6) == 1
assert "dogcatmousecat".count("cat", 3) == 2
assert untrusted.string("dogcatmousecat").count("cat", 0, 3) == 0
assert untrusted.string("dogcatmousecat").count("cat", 3, 6) == 1
assert untrusted.string("dogcatmousecat").count("cat", 3) == 2
assert untrusted.string("dogcatmousecat").count(untrusted.string("cat"), 0, 3) == 0
assert untrusted.string("dogcatmousecat").count(untrusted.string("cat"), 3, 6) == 1
assert untrusted.string("dogcatmousecat").count(untrusted.string("cat"), 3) == 2
# str.encode
# disabled on purpose for untrusted.string!!!
assert same("cat".encode("ascii"), b"cat")
try:
_ = untrusted.string("cat").encode("ascii")
raise AssertionError
except TypeError:
pass # expected!
# str.endswith(suffix[, start[, end]])
assert "catdogmouse".endswith("mouse")
assert untrusted.string("catdogmouse").endswith("mouse")
assert untrusted.string("catdogmouse").endswith(untrusted.string("mouse"))
assert not "catdogmouse".endswith("cat")
assert not untrusted.string("catdogmouse").endswith("cat")
assert not untrusted.string("catdogmouse").endswith(untrusted.string("cat"))
assert "catdogmouse".endswith("dog", 0, 6)
assert untrusted.string("catdogmouse").endswith("dog", 0, 6)
assert untrusted.string("catdogmouse").endswith(untrusted.string("dog"), 0, 6)
assert not "catdogmouse".endswith("dog", 4)
assert not untrusted.string("catdogmouse").endswith("dog", 4)
assert not untrusted.string("catdogmouse").endswith(untrusted.string("dog"), 4)
# str.expandtabs(tabsize=8)
assert same("\tHello\tworld!".expandtabs(), "        Hello   world!")
assert same(untrusted.string("\tHello\tworld!").expandtabs(), untrusted.string("        Hello   world!"))
# str.find(sub[, start[, end]])
assert "dogcatmouse".find("cat") == 3
assert untrusted.string("dogcatmouse").find("cat") == 3
assert untrusted.string("dogcatmouse").find(untrusted.string("cat")) == 3
assert "dogcatmouse".find("cat", 4) == -1
assert untrusted.string("dogcatmouse").find("cat", 4) == -1
assert untrusted.string("dogcatmouse").find(untrusted.string("cat"), 4) == -1
# str.format(*args, **kwargs)
# with numeric argument:
assert same(
"Hello {0}, UserID: {1}".format("Sarah", 123),
"Hello Sarah, UserID: 123"
)
assert same(
untrusted.string("Hello {0}, UserID: {1}").format("Sarah", 123),
untrusted.string("Hello Sarah, UserID: 123")
)
assert same(
untrusted.string("Hello {0}, UserID: {1}").format(untrusted.string("Sarah"), 123),
untrusted.string("Hello Sarah, UserID: 123")
)
# ensure untrusted.string never leaks into a str...
try:
_ = "Hello {0}, UserID: {1}".format(untrusted.string("Sarah"), 123)
raise AssertionError
except TypeError:
pass # expected!
# with named arguments:
assert same(
"Hello {name}, UserID: {uid}".format(name="Sarah", uid=123),
"Hello Sarah, UserID: 123"
)
assert same(
untrusted.string("Hello {name}, UserID: {uid}").format(name="Sarah", uid=123),
untrusted.string("Hello Sarah, UserID: 123")
)
assert same(
untrusted.string("Hello {name}, UserID: {uid}").format(name=untrusted.string("Sarah"), uid=123),
untrusted.string("Hello Sarah, UserID: 123")
)
# str.format_map(mapping)
assert same(
"Hello {name}, UserID: {uid}".format_map({"name": "Sarah", "uid": 123}),
"Hello Sarah, UserID: 123"
)
assert same(
untrusted.string("Hello {name}, UserID: {uid}").format_map({"name": "Sarah", "uid": 123}),
untrusted.string("Hello Sarah, UserID: 123")
)
assert same(
untrusted.string("Hello {name}, UserID: {uid}").format_map({"name": untrusted.string("Sarah"), "uid": "123"}),
untrusted.string("Hello Sarah, UserID: 123")
)
# advanced! format_map with an untrusted.mapping!!
myUntrustedDict = untrusted.mapping({'name': 'Sarah', "uid": "123"})
assert same(
untrusted.string("Hello {name}, UserID: {uid}").format_map(myUntrustedDict),
untrusted.string("Hello Sarah, UserID: 123")
)
# An untrusted mapping with untrusted keys is not allowed to format a string
# This is by design!
myUntrustedDict = untrusted.mappingOf(untrusted.string, untrusted.string)({'name': 'Sarah', "uid": "123"})
try:
assert same(
untrusted.string("Hello {name}, UserID: {uid}").format_map(myUntrustedDict),
untrusted.string("Hello Sarah, UserID: 123")
)
raise AssertionError
except TypeError:
pass # expected
# ensure untrusted.mapping never leaks into a str...
try:
_ = "Hello {name}, UserID: {uid}".format_map(myUntrustedDict)
raise AssertionError
except TypeError:
pass # expected!
# str.index(sub[, start[, end]])
# "Like find(), but raise ValueError when the substring is not found."
assert "dogcatmouse".index("cat") == 3
assert untrusted.string("dogcatmouse").index("cat") == 3
assert untrusted.string("dogcatmouse").index(untrusted.string("cat")) == 3
try:
_ = "dogcatmouse".index("tiger")
raise AssertionError
except ValueError:
pass # expected
try:
_ = untrusted.string("dogcatmouse").index("tiger")
raise AssertionError
except ValueError:
pass # expected
try:
_ = untrusted.string("dogcatmouse").index(untrusted.string("tiger"))
raise AssertionError
except ValueError:
pass # expected
try:
_ = "dogcatmouse".index("cat", 4)
raise AssertionError
except ValueError:
pass # expected
try:
_ = untrusted.string("dogcatmouse").index("cat", 4)
raise AssertionError
except ValueError:
pass # expected
try:
_ = untrusted.string("dogcatmouse").index(untrusted.string("cat"), 4)
raise AssertionError
except ValueError:
pass # expected
# str.isalnum()
assert "cat".isalnum()
assert untrusted.string("cat").isalnum()
assert not "£123".isalnum()
assert not untrusted.string("£123").isalnum()
# str.isalpha()
assert "cat".isalpha()
assert untrusted.string("cat").isalpha()
assert not "123".isalpha()
assert not untrusted.string("123").isalpha()
# str.isdecimal()
assert "123".isdecimal()
assert untrusted.string("123").isdecimal()
assert not "cat".isdecimal()
assert not untrusted.string("cat").isdecimal()
# str.isdigit()
assert "2²".isdigit()
assert untrusted.string("2²").isdigit()
# str.isidentifier()
assert "hello".isidentifier()
assert untrusted.string("hello").isidentifier()
assert not "123".isidentifier()
assert not untrusted.string("123").isidentifier()
# str.islower()
assert "hello".islower()
assert untrusted.string("hello").islower()
assert not "Hello".islower()
assert not untrusted.string("Hello").islower()
# str.isnumeric()
assert "123".isnumeric()
assert untrusted.string("123").isnumeric()
assert not "hello".isnumeric()
assert not untrusted.string("hello").isnumeric()
# str.isprintable()
assert "123".isprintable()
assert untrusted.string("123").isprintable()
assert not "\01".isprintable()
assert not untrusted.string("\01").isprintable()
# str.isspace()
assert " \t\r\n".isspace()
assert untrusted.string(" \t\r\n").isspace()
assert not "cat".isspace()
assert not untrusted.string("cat").isspace()
# str.istitle()
assert "Hello World".istitle()
assert untrusted.string("Hello World").istitle()
assert not "hello world".istitle()
assert not untrusted.string("hello world").istitle()
# str.isupper()
assert "CAT".isupper()
assert untrusted.string("CAT").isupper()
assert not "cat".isupper()
assert not untrusted.string("cat").isupper()
# str.join(iterable) - done
# str.ljust(width[, fillchar])
assert same("CAT".ljust(8, "-"), "CAT-----")
assert same(untrusted.string("CAT").ljust(8, "-"), untrusted.string("CAT-----"))
# str.lower()
assert same("Cat".lower(), "cat")
assert same(untrusted.string("Cat").lower(), untrusted.string("cat"))
# str.lstrip([chars])
assert same(" cat".lstrip(), "cat")
assert same(untrusted.string(" cat".lstrip()), untrusted.string("cat"))
assert same(" cat".lstrip(" ca"), "t")
assert same(untrusted.string(" cat").lstrip(" ca"), untrusted.string("t"))
assert same(untrusted.string(" cat").lstrip(untrusted.string(" ca")), untrusted.string("t"))
assert same(untrusted.string(" cat").lstrip(customstring(" ca")), untrusted.string("t"))
# str.partition(sep)
# no result
parts = "cat,dog,mouse".partition("X")
a, b, c = parts
assert same(a, "cat,dog,mouse")
assert same(b, "")
assert same(c, "")
parts = untrusted.string("cat,dog,mouse").partition("X")
a, b, c = parts
assert same(a, untrusted.string("cat,dog,mouse"))
assert same(b, untrusted.string(""))
assert same(c, untrusted.string(""))
parts = untrusted.string("cat,dog,mouse").partition(untrusted.string("X"))
a, b, c = parts
assert same(a, untrusted.string("cat,dog,mouse"))
assert same(b, untrusted.string(""))
assert same(c, untrusted.string(""))
parts = customstring("cat,dog,mouse").partition(untrusted.string("X"))
a, b, c = parts
0xF0, 0x00, # OOOO OOOO
0x10, 0x80, 0x00, # O O
0x10, 0x80, 0x00, # O O
0x08, 0x80, 0x00, # O O
0x0C, 0xFF, 0x80, # OO OOOOOOOOO
0x06, 0x81, 0x00, # OO O O
0x03, 0x82, 0x00, # OOO O
0x01, 0x82, 0x00, # OO O
0x00, 0xC1, 0x00, # OO O
0x00, 0x3F, 0x80, # OOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @8625 'Γ' (17 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x30, 0x00, # OO
0x00, 0x78, 0x00, # OOOO
0x00, 0xCC, 0x00, # OO OO
0x01, 0x86, 0x00, # OO OO
0x03, 0x03, 0x00, # OO OO
0x07, 0x87, 0x80, # OOOO OOOO
0x00, 0x84, 0x00, # O O
0x00, 0x84, 0x00, # O O
0x00, 0x88, 0x00, # O O
0xFF, 0x98, 0x00, # OOOOOOOOO OO
0x40, 0xB0, 0x00, # O O OO
0x20, 0xE0, 0x00, # O OOO
0x20, 0xC0, 0x00, # O OO
0x41, 0x80, 0x00, # O OO
0xFE, 0x00, 0x00, # OOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @8694 'Γ' (17 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x3F, 0x80, # OOOOOOO
0x00, 0xC1, 0x00, # OO O
0x01, 0x82, 0x00, # OO O
0x03, 0x82, 0x00, # OOO O
0x06, 0x81, 0x00, # OO O O
0x0C, 0xFF, 0x80, # OO OOOOOOOOO
0x08, 0x80, 0x00, # O O
0x10, 0x80, 0x00, # O O
0x10, 0x80, 0x00, # O O
0xF0, 0xF0, 0x00, # OOOO OOOO
0x60, 0x60, 0x00, # OO OO
0x30, 0xC0, 0x00, # OO OO
0x19, 0x80, 0x00, # OO OO
0x0F, 0x00, 0x00, # OOOO
0x06, 0x00, 0x00, # OO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @8763 'Γ' (17 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0xFE, 0x00, 0x00, # OOOOOOO
0x41, 0x80, 0x00, # O OO
0x20, 0xC0, 0x00, # O OO
0x20, 0xE0, 0x00, # O OOO
0x40, 0xB0, 0x00, # O O OO
0xFF, 0x98, 0x00, # OOOOOOOOO OO
0x00, 0x88, 0x00, # O O
0x00, 0x84, 0x00, # O O
0x00, 0x84, 0x00, # O O
0x07, 0x87, 0x80, # OOOO OOOO
0x03, 0x03, 0x00, # OO OO
0x01, 0x86, 0x00, # OO OO
0x00, 0xCC, 0x00, # OO OO
0x00, 0x78, 0x00, # OOOO
0x00, 0x30, 0x00, # OO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @8832 'Γ' (20 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0xD0, 0x00, 0x70, # OO O OOO
0xD8, 0x01, 0xF0, # OO OO OOOOO
0xDE, 0x03, 0xC0, # OO OOOO OOOO
0x67, 0x07, 0x70, # OO OOO OOO OOO
0x63, 0x0E, 0x60, # OO OO OOO OO
0x31, 0x8C, 0xE0, # OO OO OO OOO
0x39, 0x99, 0xC0, # OOO OO OO OOO
0x1E, 0xC7, 0x80, # OOOO OO OOOO
0x06, 0xDF, 0x00, # OO OO OOOOO
0x02, 0xDC, 0x00, # O OO OOO
0x01, 0xDC, 0x00, # OOO OOO
0x07, 0xDB, 0x00, # OOOOO OO OO
0x0E, 0x1B, 0x80, # OOO OO OOO
0x19, 0xCD, 0xC0, # OO OOO OO OOO
0x31, 0x8C, 0xC0, # OO OO OO OO
0x63, 0x06, 0x60, # OO OO OO OO
0x67, 0x07, 0x60, # OO OOO OOO OO
0xDE, 0x03, 0xB0, # OO OOOO OOO OO
0xF8, 0x01, 0xB0, # OOOOO OO OO
0xE0, 0x00, 0x70, # OOO OOO
# @8901 'Γ' (21 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xAF, 0xFF, 0x88, # O O OOOOOOOOOOOOO O
0x93, 0xFE, 0x08, # O O OOOOOOOOO O
0x91, 0xFC, 0x78, # O O OOOOOOO OOOO
0xCC, 0xF9, 0x88, # OO OO OOOOO OO O
0xCE, 0x73, 0x98, # OO OOO OOO OOO OO
0xE6, 0x63, 0x38, # OOO OO OO OO OOO
0xF3, 0x24, 0x78, # OOOO OO O O OOOO
0xF9, 0x38, 0xF8, # OOOOO O OOO OOOOO
0xFD, 0x23, 0xF8, # OOOOOO O O OOOOOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xFF, 0x2B, 0xF8, # OOOOOOOO O O OOOOOOO
0xF8, 0x24, 0xF8, # OOOOO O O OOOOO
0xF1, 0xE4, 0x78, # OOOO OOOO O OOOO
0xE3, 0x32, 0x38, # OOO OO OO O OOO
0xC6, 0x73, 0x18, # OO OO OOO OO OO
0xCC, 0xF9, 0x98, # OO OO OOOOO OO OO
0x99, 0xFC, 0x88, # O OO OOOOOOO O O
0xE3, 0xFE, 0x48, # OOO OOOOOOOOO O O
0x8F, 0xFF, 0x48, # O OOOOOOOOOOOO O O
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
# @8970 'Γ' (20 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x07, 0x80, 0xC0, # OOOO OO
0x1F, 0xE3, 0xF0, # OOOOOOOO OOOOOO
0x3C, 0x77, 0xC0, # OOOO OOO OOOOO
0x30, 0x1F, 0x00, # OO OOOOO
0x70, 0x3C, 0x00, # OOO OOOO
0x60, 0x70, 0x00, # OO OOO
0x60, 0xE0, 0x00, # OO OOO
0x61, 0x8C, 0x00, # OO OO OO
0x63, 0x0E, 0x00, # OO OO OOO
0x66, 0x07, 0x00, # OO OO OOO
0x6C, 0x03, 0x00, # OO OO OO
0x28, 0x03, 0x00, # O O OO
0x30, 0x03, 0x00, # OO OO
0x37, 0x07, 0x00, # OO OOO OOO
0x73, 0xFE, 0x00, # OOO OOOOOOOOO
0x70, 0xFC, 0x00, # OOO OOOOOO
0xF0, 0x00, 0x00, # OOOO
0xF0, 0x00, 0x00, # OOOO
0xF0, 0x00, 0x00, # OOOO
0x60, 0x00, 0x00, # OO
# @9039 'Γ' (20 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x70, 0x00, 0x00, # OOO
0xFC, 0x7F, 0x00, # OOOOOO OOOOOOO
0xFF, 0xFF, 0xC0, # OOOOOOOOOOOOOOOOOO
0x7F, 0x01, 0xE0, # OOOOOOO OOOO
0x00, 0xC0, 0x60, # OO OO
0x02, 0x60, 0x70, # O OO OOO
0x06, 0x30, 0x30, # OO OO OO
0x06, 0x18, 0x30, # OO OO OO
0x0C, 0x0C, 0x30, # OO OO OO
0x0C, 0x06, 0x60, # OO OO OO
0x0C, 0x07, 0x60, # OO OOO OO
0x0C, 0x03, 0xC0, # OO OOOO
0x0C, 0x19, 0x80, # OO OO OO
0x0E, 0x39, 0xC0, # OOO OOO OOO
0x07, 0xF0, 0xE0, # OOOOOOO OOO
0x03, 0xE0, 0xE0, # OOOOO OOO
0x00, 0x00, 0x70, # OOO
0x00, 0x00, 0x70, # OOO
0x00, 0x00, 0x20, # O
0x00, 0x00, 0x20, # O
# @9108 'Γ' (20 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x40, 0x00, 0x00, # O
0x40, 0x00, 0x00, # O
0xE0, 0x00, 0x00, # OOO
0xE0, 0x00, 0x00, # OOO
0x70, 0x7C, 0x00, # OOO OOOOO
0x70, 0xFE, 0x00, # OOO OOOOOOO
0x39, 0xC7, 0x00, # OOO OOO OOO
0x19, 0x83, 0x00,
@Cached(expire=10.)
def get_user_filters(self):
"""
returns a name:filter dictionary
"""
import json
prop = self.get_db_property('PANIC','UserFilters')
prop = [t.split(':',1) for t in prop]
return dict((t[0],unicode2str(json.loads(t[1]))) for t in prop)
def set_user_filters(self,filters,overwrite=True):
"""
filters should be a name:filter dictionary
"""
import json
assert isMapping(filters),'Should be a dictionary!'
if not overwrite:
prevs = self.get_user_filters()
prevs.update(filters)
filters = prevs
value = []
for name,f in filters.items():
value.append('%s:%s'%(name,f if isString(f) else json.dumps(f)))
self.put_db_property('PANIC','UserFilters',value)
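# Illustrative sketch (values are hypothetical): each entry of the
# PANIC.UserFilters property is stored as "name:json", e.g.
#   myfilter:{"tag": "*VC*", "severity": "WARNING"}
# get_user_filters() splits each entry on the first ':' and json-decodes the
# remainder back into a dictionary.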
def get_global_receivers(self,tag='',renew=False):
try:
if (renew or self._global_receivers[-1]<time.time()-3600):
prop = getPanicProperty('GlobalReceivers')
self._global_receivers = (prop,time.time())
else:
prop = self._global_receivers[0]
if not tag:
return prop
else:
prop = [p.split(':',1) for p in prop]
rows = []
for line in prop:
mask = (line[0] if len(line)>1 else '*').split(',')
neg = [m[1:] for m in mask if m.startswith('!')]
if neg and any(matchCl(m,tag) for m in neg):
continue
pos = [m for m in mask if not m.startswith('!')]
if not pos or any(matchCl(m,tag) for m in pos):
rows.append(line[-1])
return ','.join(rows)
except:
print('>>> Exception at get_global_receivers(%s)'%tag)
traceback.print_exc()
return ''
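# Illustrative sketch (assumed property format): each GlobalReceivers line is
# "mask:receivers", where mask is a comma-separated list of tag patterns and a
# leading '!' negates a pattern, e.g.
#   *VC*,!*TEST*:oncall@example.com
# get_global_receivers('SR_VC_ALARM') would then return the receivers whose
# positive masks match the tag and whose negated masks do not.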
GROUP_EXP = fandango.tango.TangoEval.FIND_EXP.replace('FIND','GROUP')
def group_macro(self,match):
"""
For usage details see:
https://github.com/tango-controls/PANIC/
blob/documentation/doc/recipes/AlarmsHierarchy.rst
"""
match,cond = match.split(';',1) if ';' in match else (match,'')
#if '/' not in match and self._eval._locals.get('DEVICE',None):
#match = self._eval._locals['DEVICE']+'/'+match
exps = match.split(',')
attrs = []
for e in exps:
if '/' in e:
attrs.extend(d+'/'+a
for dev,attr in [e.rsplit('/',1)]
for d,dd in self.devices.items()
for a in dd.alarms
if matchCl(dev,d) and matchCl(attr,a))
else:
attrs.extend(self[a].get_attribute(full=True)
for a in self if matchCl(e,a))
if not cond:
attrs = [m+'.delta' for m in attrs]
cond = 'x > 0'
exp = 'any([%s for x in [ %s ]])'%(cond,' , '.join(attrs))
return exp
def split_formula(self,formula,keep_operators=False):
f = self[formula].formula if formula in self else formula
i,count,buff,final = 0,0,'',[]
while i<len(f):
s = f[i]
if s in '([{': count+=1
if s in ')]}': count-=1
if not count and s in ' \t':
if f[i:i+4].strip().lower() == 'or':
nx = 'or'
i+=len(nx)+2
elif f[i:i+5].strip().lower() == 'and':
nx = 'and'
i+=len(nx)+2
else:
nx = ''
if nx:
final.append(buff.strip())
if keep_operators:
final.append(nx)
buff = ''
continue
buff+=s
i+=1
nx=''
return final
def parse_alarms(self, formula):
"""
Searches for alarm tags used in the formula
"""
alnum = '(?:^|[^/a-zA-Z0-9-_])([a-zA-Z0-9-_]+)'#(?:$|[^/a-zA-Z0-9-_])'
#It's redundant to check for the terminal character, re already does it
var = re.findall(alnum,formula)
#print '\tparse_alarms(%s): %s'%(formula,var)
return [a for a in self.keys() if a in var]
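# Example (hypothetical tags): with alarms {'TAG_A', 'TAG_B'} defined,
# parse_alarms('TAG_A or (sys/dev/attr > 10)') returns ['TAG_A']: the regular
# expression extracts bare alphanumeric tokens (skipping those preceded by
# '/'), and the result is then intersected with the known alarm tags.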
def replace_alarms(self, formula):
"""
Replaces alarm tags by its equivalent device/alarm attributes
"""
try:
var = self.parse_alarms(formula)
#print 'replace_alarms(%s): %s'%(formula,var)
if var:
for l,a in reversed([(len(s),s) for s in var]):
x = '[^/a-zA-Z0-9-_\"\']'
x = '(?:^|%s)(%s)(?:$|%s)'%(x,a,x)
attr = self[a].device+'/'+a
m,new_formula = True,''
#print 'replacing %s by %s'%(a,attr)
while m:
m = re.search(x,formula)
if m:
start,end = m.start(),m.end()
if not formula.startswith(a): start+=1
if not formula.endswith(a): end-=1
new_formula += formula[:start]+attr
formula = formula[end:]
formula = new_formula+formula
return formula
except:
print('Exception in replace_alarms():%s'%traceback.format_exc())
return formula
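# Example (hypothetical devices): if alarm TAG_A lives on device sys/panic/01
# and TAG_B on sys/panic/02, replace_alarms('TAG_A or TAG_B') would yield
# 'sys/panic/01/TAG_A or sys/panic/02/TAG_B', i.e. each bare tag is replaced by
# its device/tag attribute so the formula can be evaluated against live values.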
def parse_attributes(self, formula, replace = True):
""" Returns all tango attributes that appear in a formula """
if formula in self.alarms: formula = self.alarms[formula].formula
formula = getattr(formula,'formula',formula)
attributes = self._eval.parse_variables(self.replace_alarms(formula)
if replace else formula)
return sorted('%s/%s'%(t[:2]) for t in attributes)
def evaluate(self, formula, device=None,timeout=1000,_locals=None,
_raise=True):
#Returns the result of evaluation on formula
#Both result and attribute values are kept!,
#be careful to not generate memory leaks
try:
if formula.strip().lower() in ('and','or'):
return None
if device and not check_device_cached(device):
device = None
if device and device in self.devices:
d = self.devices[device].get()
t = d.get_timeout_millis()
d.set_timeout_millis(timeout)
try:
r = d.evaluateFormula(formula)
return r
except Exception,e:
raise e
finally:
d.set_timeout_millis(t)
else:
self._eval.set_timeout(timeout)
self._eval.update_locals({'PANIC':self})
if _locals: self._eval.update_locals(_locals)
formula = self.replace_alarms(formula)
self.debug('AlarmAPI.evaluate(%s,%s)'%(formula,_locals))
return self._eval.eval(formula,_raise=_raise)
except Exception,e:
return e
def get(self,tag='',device='',attribute='',receiver='', severity='',
alarms = None,limit=0,strict=False):
"""
Gets alarms matching the given filters
(tag,device,attribute,receiver,severity)
"""
result=[]
alarms = alarms or self.values()
m = fn.parse_tango_model(tag)
if m:
tag = m.attribute
device = m.device
if limit==1 and tag in self.alarms:
found = [self[tag]]
else:
filters = {'tag':tag,'device':device,'attribute':attribute,
'receivers':receiver,'severity':severity}
if strict:
found = [a for a in alarms if
all([getattr(a,f)==v for f,v in filters.items() if v])]
else:
found = self.filter_alarms(filters,alarms)
if not limit: return found
elif limit==1: return found[0]
else: return found[:limit]
#if tag and not tag.endswith('$'): tag+='$'
#if attribute and not attribute.endswith('$'): attribute+='$'
#if device and not device.endswith('$'): device+='$'
##if receiver and not receiver.startswith('%'): receiver='%'+receiver
#if severity and not severity.endswith('$'): severity+='$'
#for alarm in (alarms or self.alarms.values()):
#if ((not tag or searchCl(tag,alarm.tag)) and
#(not device or searchCl(device,alarm.device)) and
#(not attribute or searchCl(attribute,alarm.formula)) and
#(not receiver or receiver in alarm.receivers) and
#(not severity or searchCl(severity,alarm.severity))):
#result.append(alarm)
#return result
def get_basic_alarms(self):
"""
Children are those alarms that have no alarms below them or that
have a higher-level alarm depending on them.
"""
self.log('Getting Alarm children ...')
result=[]
for a,v in self.items():
children = self.parse_alarms(v.formula)
if children:
result.extend(children)
else:
result.append(a)
result = set(result)
return [v for a,v in self.items() if a in result]
def filter_alarms(self, filters, alarms = None):
"""
filters must be a dictionary: {filter:regexp}
alarms must be a list of alarm objects
regexps accept '!' to exclude a certain match
Tries to apply all default filters:
'tag','name',
'device','active','severity','regexp','receivers'
'formula','attribute','history','failed','hierarchy'
"""
alarms = alarms or self.values()
filters = filters or {}
if isString(filters): filters = {'regexp':filters}
exclude = []
self.log('AlarmAPI.filter_alarms(%s)'%filters)
for f,r in filters.items():
if f in ('name','alarm'): f = 'tag'
if not r: continue
result = []
for a in alarms:
ok = False
if isString(a): a = self[a]
if f == 'regexp':
## Regexp will be used to explicitly reject an alarm
regexp = r.split(',')
for e in regexp:
n,e = '!' in e,e.strip('!')
s = str(map(str,a.to_dict().values()))
m = searchCl(e,s)
if m and n: exclude.append(a.tag)
elif m and not n: ok = True
elif not m and n and len(regexp)==1: ok = True
if a.tag in exclude: continue
if f == 'attribute':
attrs = self.parse_attributes(a.formula)
if any(searchCl(r,t,0,1) for t in attrs): ok = True
elif f == 'hierarchy':
r = r.upper()
is_top = self.parse_alarms(a.formula)
if not xor(is_top,r == 'TOP'): ok = True
elif f == 'severity':
r,s = r.upper().strip(),a.severity.upper().strip()
s = s or DEFAULT_SEVERITY
if SEVERITIES[s]>=SEVERITIES[r]: ok = True
elif f == 'receivers':
v = self.parse_phonebook(a.receivers)
if searchCl(r,v,0,1): ok = True
else:
v = getattr(a,f,'')
if isString(v):
if v and searchCl(r,v,0,1): ok = True
else: ok = not xor(isFalse(r),isFalse(v))
if ok: result.append(a)
alarms = result
return alarms
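# Example (sketch): filter_alarms({'device': 'sr/vc/*', 'severity': 'WARNING'})
# keeps alarms whose device matches the pattern and whose severity is WARNING
# or higher, while filter_alarms('!TEST') uses the 'regexp' shortcut to reject
# any alarm whose fields match TEST.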
def filter_hierarchy(self, rel, alarms = None):
"""
TOP are those alarms whose state is evaluated using other alarms' values.
BOTTOM are those alarms that have no alarms below them or
that have a TOP alarm depending on them.
"""
return self.filter_alarms({'hierarchy':rel})
def filter_severity(self, sev, alarms = None):
return self.filter_alarms({'severity':sev})
def get_states(self,tag='',device=''):
device = device.lower()
if tag:
if not tag in self.alarms: return None
return self.alarms[tag].get_active()
elif device:
if device not in self.devices: return {}
d = self.devices[device]
try:
dp = d.get()
if dp.ping():
als = sorted(self.devices[device].alarms.keys())
ats = [self.alarms[a].get_attribute() for a in als]
vals = [v.value for v in dp.read_attributes(ats)]
return dict((a,t) for a,t in zip(als,vals))
else:
raise Exception('')
except Exception,e:
print 'device %s is not running'%device
traceback.print_exc()
[setattr(self.alarms[a],'active',None) for a in d.alarms]
return dict((a,None) for a in d.alarms)
else:
vals = dict()
[vals.update(self.get_states(device=d)) for d in self.devices]
return vals
def get_configs(self,tag='*'):
result = {}
for alarm in self.get(tag):
reks = self.parse_phonebook(alarm.receivers)
result[alarm.tag] = {
'Device':alarm.device,
'Severity':alarm.severity,
'Snap':'SNAP' in reks,
'Email':'@' in reks,
'Action':'ACTION' in reks,
'SMS':'SMS' in reks,
}
result[alarm.tag].update((k,v)
for k,v in self.devices[alarm.device].get_config().items()
if k in ALARM_CONFIG)
return result
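# Example return value (sketch, hypothetical tag/device names):
#   {'TAG_A': {'Device': 'sys/panic/01', 'Severity': 'ALARM', 'Snap': False,
#              'Email': True, 'Action': False, 'SMS': False,
#              ...plus the device-level keys listed in ALARM_CONFIG}}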
def get_admins_for_alarm(self,alarm=''):
users = filter(bool,
getPanicProperty('PanicAdminUsers'))
if users:
if alarm:
users = users+[r.strip().split('@')[0] for r in
self.parse_phonebook(self[alarm].receivers).split(',')
if '@' in r]
return users
def add(self,tag,device,formula='',description='',receivers='',
severity=DEFAULT_SEVERITY, load=True, config=None,overwrite=False):
folds - 1:
if exog is None:
pred = forecaster.predict_interval(
steps = steps,
last_window = last_window_y,
interval = interval,
n_boot = n_boot,
random_state = random_state,
in_sample_residuals = in_sample_residuals
)
else:
pred = forecaster.predict_interval(
steps = steps,
last_window = last_window_y,
exog = next_window_exog,
interval = interval,
n_boot = n_boot,
random_state = random_state,
in_sample_residuals = in_sample_residuals
)
else:
if remainder == 0:
if exog is None:
pred = forecaster.predict_interval(
steps = steps,
last_window = last_window_y,
interval = interval,
n_boot = n_boot,
random_state = random_state,
in_sample_residuals = in_sample_residuals
)
else:
pred = forecaster.predict_interval(
steps = steps,
last_window = last_window_y,
exog = next_window_exog,
interval = interval,
n_boot = n_boot,
random_state = random_state,
in_sample_residuals = in_sample_residuals
)
else:
# Only the remaining steps need to be predicted
steps = remainder
if exog is None:
pred = forecaster.predict_interval(
steps = steps,
last_window = last_window_y,
interval = interval,
n_boot = n_boot,
random_state = random_state,
in_sample_residuals = in_sample_residuals
)
else:
pred = forecaster.predict_interval(
steps = steps,
last_window = last_window_y,
exog = next_window_exog,
interval = interval,
n_boot = n_boot,
random_state = random_state,
in_sample_residuals = in_sample_residuals
)
backtest_predictions.append(pred)
backtest_predictions = pd.concat(backtest_predictions)
if isinstance(backtest_predictions, pd.Series):
backtest_predictions = pd.DataFrame(backtest_predictions)
metric_value = metric(
y_true = y.iloc[initial_train_size : initial_train_size + len(backtest_predictions)],
y_pred = backtest_predictions['pred']
)
return np.array([metric_value]), backtest_predictions
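# Note: in the no-refit helper above a single fitted model predicts in fixed
# windows of `steps`; the final fold falls back to predicting only `remainder`
# steps, presumably because the number of observations left after the initial
# train set need not be an exact multiple of `steps`.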
def backtesting_forecaster(
forecaster,
y: pd.Series,
steps: int,
metric: str,
initial_train_size: Optional[int],
exog: Optional[Union[pd.Series, pd.DataFrame]]=None,
refit: bool=False,
interval: Optional[list]=None,
n_boot: int=500,
random_state: int=123,
in_sample_residuals: bool=True,
verbose: bool=False,
set_out_sample_residuals: Any='deprecated'
) -> Tuple[np.array, pd.DataFrame]:
'''
Backtesting of forecaster model.
If `refit` is False, the model is trained only once using the `initial_train_size`
first observations. If `refit` is True, the model is trained in each iteration
increasing the training set. A copy of the original forecaster is created so
it is not modified during the process.
Parameters
----------
forecaster : ForecasterAutoreg, ForecasterAutoregCustom, ForecasterAutoregMultiOutput
Forecaster model.
y : pandas Series
Training time series values.
initial_train_size: int, default `None`
Number of samples in the initial train split. If `None` and `forecaster` is already
trained, no initial train is done and all data is used to evaluate the model. However,
the first `len(forecaster.last_window)` observations are needed to create the
initial predictors, so no predictions are calculated for them.
`None` is only allowed when `refit` is False.
steps : int
Number of steps to predict.
metric : {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}
Metric used to quantify the goodness of fit of the model.
exog : pandas Series, pandas DataFrame, default `None`
Exogenous variable/s included as predictor/s. Must have the same
number of observations as `y` and should be aligned so that y[i] is
regressed on exog[i].
refit: bool, default False
Whether to re-fit the forecaster in each iteration.
interval: list, default `None`
Confidence of the prediction interval estimated. Sequence of percentiles
to compute, which must be between 0 and 100 inclusive. If `None`, no
intervals are estimated. Only available for forecaster of type ForecasterAutoreg
and ForecasterAutoregCustom.
n_boot: int, default `500`
Number of bootstrapping iterations used to estimate prediction
intervals.
random_state: int, default 123
Sets a seed to the random generator, so that boot intervals are always
deterministic.
in_sample_residuals: bool, default `True`
If `True`, residuals from the training data are used as proxy of
prediction error to create prediction intervals. If `False`, out_sample_residuals
are used if they are already stored inside the forecaster.
set_out_sample_residuals: 'deprecated'
Deprecated since version 0.4.2, will be removed on version 0.5.0.
verbose : bool, default `False`
Print number of folds and index of training and validation sets used for backtesting.
Returns
-------
metric_value: numpy ndarray shape (1,)
Value of the metric.
backtest_predictions: pandas DataFrame
Value of predictions and their estimated interval if `interval` is not `None`.
column pred = predictions.
column lower_bound = lower bound of the interval.
        column upper_bound = upper bound of the interval.
'''
if initial_train_size is not None and initial_train_size > len(y):
raise Exception(
'If used, `initial_train_size` must be smaller than length of `y`.'
)
if initial_train_size is not None and initial_train_size < forecaster.window_size:
raise Exception(
f"`initial_train_size` must be greater than "
f"forecaster's window_size ({forecaster.window_size})."
)
if initial_train_size is None and not forecaster.fitted:
raise Exception(
'`forecaster` must be already trained if no `initial_train_size` is provided.'
)
if not isinstance(refit, bool):
raise Exception(
f'`refit` must be boolean: True, False.'
)
if initial_train_size is None and refit:
raise Exception(
            '`refit` is only allowed when there is an `initial_train_size`.'
)
if interval is not None and isinstance(forecaster, ForecasterAutoregMultiOutput):
raise Exception(
('Interval prediction is only available when forecaster is of type '
'ForecasterAutoreg or ForecasterAutoregCustom.')
)
    if set_out_sample_residuals != 'deprecated':
warnings.warn(
('`set_out_sample_residuals` is deprecated since version 0.4.2, '
'will be removed on version 0.5.0.')
)
if refit:
metric_value, backtest_predictions = _backtesting_forecaster_refit(
forecaster = forecaster,
y = y,
steps = steps,
metric = metric,
initial_train_size = initial_train_size,
exog = exog,
interval = interval,
n_boot = n_boot,
random_state = random_state,
in_sample_residuals = in_sample_residuals,
verbose = verbose
)
else:
metric_value, backtest_predictions = _backtesting_forecaster_no_refit(
forecaster = forecaster,
y = y,
steps = steps,
metric = metric,
initial_train_size = initial_train_size,
exog = exog,
interval = interval,
n_boot = n_boot,
random_state = random_state,
in_sample_residuals = in_sample_residuals,
verbose = verbose
)
return metric_value, backtest_predictions
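# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how `backtesting_forecaster` might be called,
# assuming a skforecast-style `ForecasterAutoreg` wrapping scikit-learn's Ridge
# regressor; the series, lags, split size and import path below are assumptions
# made only for illustration.
def _demo_backtesting_forecaster():
    import numpy as np
    import pandas as pd
    from sklearn.linear_model import Ridge
    from skforecast.ForecasterAutoreg import ForecasterAutoreg
    # Synthetic series: a noisy sine wave with a default RangeIndex.
    y_demo = pd.Series(np.sin(np.linspace(0, 20, 200)) + np.random.normal(0, 0.1, 200))
    forecaster_demo = ForecasterAutoreg(regressor=Ridge(), lags=12)
    metric_demo, predictions_demo = backtesting_forecaster(
        forecaster         = forecaster_demo,
        y                  = y_demo,
        steps              = 10,
        metric             = 'mean_squared_error',
        initial_train_size = 150,
        refit              = False,
        verbose            = False
    )
    return metric_demo, predictions_demo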
def grid_search_forecaster(
forecaster,
y: pd.Series,
param_grid: dict,
initial_train_size: int,
steps: int,
metric: str,
exog: Optional[Union[pd.Series, pd.DataFrame]]=None,
lags_grid: Optional[list]=None,
refit: bool=False,
return_best: bool=True,
verbose: bool=True
) -> pd.DataFrame:
'''
Exhaustive search over specified parameter values for a Forecaster object.
Validation is done using time series backtesting.
Parameters
----------
forecaster : ForecasterAutoreg, ForecasterAutoregCustom, ForecasterAutoregMultiOutput
        Forecaster model.
y : pandas Series
Training time series values.
param_grid : dict
Dictionary with parameters names (`str`) as keys and lists of parameter
settings to try as values.
initial_train_size: int
Number of samples in the initial train split.
steps : int
Number of steps to predict.
metric : {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}
Metric used to quantify the goodness of fit of the model.
exog : pandas Series, pandas DataFrame, default `None`
Exogenous variable/s included as predictor/s. Must have the same
number of observations as `y` and should be aligned so that y[i] is
regressed on exog[i].
    lags_grid : list of int, lists, numpy ndarray or range, default `None`
Lists of `lags` to try. Only used if forecaster is an instance of
`ForecasterAutoreg`.
refit: bool, default False
Whether to re-fit the forecaster in each iteration of backtesting.
return_best : bool
Refit the `forecaster` using the best found parameters on the whole data.
verbose : bool, default `True`
Print number of folds used for cv or backtesting.
Returns
-------
results: pandas DataFrame
Metric value estimated for each combination of parameters.
'''
if isinstance(forecaster, ForecasterAutoregCustom):
if lags_grid is not None:
warnings.warn(
'`lags_grid` ignored if forecaster is an instance of `ForecasterAutoregCustom`.'
)
lags_grid = ['custom predictors']
elif lags_grid is None:
lags_grid = [forecaster.lags]
lags_list = []
params_list = []
metric_list = []
param_grid = list(ParameterGrid(param_grid))
print(
f"Number of models compared: {len(param_grid)*len(lags_grid)}"
)
for lags in tqdm(lags_grid, desc='loop lags_grid', position=0, ncols=90):
if isinstance(forecaster, (ForecasterAutoreg, ForecasterAutoregMultiOutput)):
forecaster.set_lags(lags)
lags = forecaster.lags.copy()
for params in tqdm(param_grid, desc='loop param_grid', position=1, leave=False, ncols=90):
forecaster.set_params(**params)
metrics = backtesting_forecaster(
forecaster = forecaster,
y = y,
exog = exog,
initial_train_size = initial_train_size,
steps = steps,
metric = metric,
refit = refit,
interval = None,
verbose = verbose
)[0]
lags_list.append(lags)
params_list.append(params)
metric_list.append(metrics.mean())
results = pd.DataFrame({
'lags' : lags_list,
'params': params_list,
'metric': metric_list})
results = results.sort_values(by='metric', ascending=True)
results = pd.concat([results, results['params'].apply(pd.Series)], axis=1)
if return_best:
best_lags = results['lags'].iloc[0]
best_params = results['params'].iloc[0]
best_metric = results['metric'].iloc[0]
if isinstance(forecaster, (ForecasterAutoreg, ForecasterAutoregMultiOutput)):
forecaster.set_lags(best_lags)
forecaster.set_params(**best_params)
forecaster.fit(y=y, exog=exog)
print(
f"`Forecaster` refitted using the best-found lags and parameters, and the whole data set: \n"
f" Lags: {best_lags} \n"
f" Parameters: {best_params}\n"
f" Backtesting | |
value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def thickness(self):
"""field `Thickness`
| Units: m
| IP-Units: in
Args:
value (float): value for IDD Field `Thickness`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `thickness` or None if not set
"""
return self["Thickness"]
@thickness.setter
def thickness(self, value=None):
"""Corresponds to IDD field `Thickness`"""
self["Thickness"] = value
@property
def solar_index_of_refraction(self):
"""field `Solar Index of Refraction`
| value > 1.0
Args:
value (float): value for IDD Field `Solar Index of Refraction`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `solar_index_of_refraction` or None if not set
"""
return self["Solar Index of Refraction"]
@solar_index_of_refraction.setter
def solar_index_of_refraction(self, value=None):
"""Corresponds to IDD field `Solar Index of Refraction`"""
self["Solar Index of Refraction"] = value
@property
def solar_extinction_coefficient(self):
"""field `Solar Extinction Coefficient`
| Units: 1/m
Args:
value (float): value for IDD Field `Solar Extinction Coefficient`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `solar_extinction_coefficient` or None if not set
"""
return self["Solar Extinction Coefficient"]
@solar_extinction_coefficient.setter
def solar_extinction_coefficient(self, value=None):
"""Corresponds to IDD field `Solar Extinction Coefficient`"""
self["Solar Extinction Coefficient"] = value
@property
def visible_index_of_refraction(self):
"""field `Visible Index of Refraction`
| value > 1.0
Args:
value (float): value for IDD Field `Visible Index of Refraction`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `visible_index_of_refraction` or None if not set
"""
return self["Visible Index of Refraction"]
@visible_index_of_refraction.setter
def visible_index_of_refraction(self, value=None):
"""Corresponds to IDD field `Visible Index of Refraction`"""
self["Visible Index of Refraction"] = value
@property
def visible_extinction_coefficient(self):
"""field `Visible Extinction Coefficient`
| Units: 1/m
Args:
value (float): value for IDD Field `Visible Extinction Coefficient`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `visible_extinction_coefficient` or None if not set
"""
return self["Visible Extinction Coefficient"]
@visible_extinction_coefficient.setter
def visible_extinction_coefficient(self, value=None):
"""Corresponds to IDD field `Visible Extinction Coefficient`"""
self["Visible Extinction Coefficient"] = value
@property
def infrared_transmittance_at_normal_incidence(self):
"""field `Infrared Transmittance at Normal Incidence`
| value < 1.0
Args:
value (float): value for IDD Field `Infrared Transmittance at Normal Incidence`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `infrared_transmittance_at_normal_incidence` or None if not set
"""
return self["Infrared Transmittance at Normal Incidence"]
@infrared_transmittance_at_normal_incidence.setter
def infrared_transmittance_at_normal_incidence(self, value=None):
"""Corresponds to IDD field `Infrared Transmittance at Normal
Incidence`"""
self["Infrared Transmittance at Normal Incidence"] = value
@property
def infrared_hemispherical_emissivity(self):
"""field `Infrared Hemispherical Emissivity`
| Emissivity of front and back side assumed equal
| Default value: 0.84
| value < 1.0
Args:
value (float): value for IDD Field `Infrared Hemispherical Emissivity`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `infrared_hemispherical_emissivity` or None if not set
"""
return self["Infrared Hemispherical Emissivity"]
@infrared_hemispherical_emissivity.setter
def infrared_hemispherical_emissivity(self, value=0.84):
"""Corresponds to IDD field `Infrared Hemispherical Emissivity`"""
self["Infrared Hemispherical Emissivity"] = value
@property
def conductivity(self):
"""field `Conductivity`
| Units: W/m-K
| Default value: 0.9
Args:
value (float): value for IDD Field `Conductivity`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `conductivity` or None if not set
"""
return self["Conductivity"]
@conductivity.setter
def conductivity(self, value=0.9):
"""Corresponds to IDD field `Conductivity`"""
self["Conductivity"] = value
@property
def dirt_correction_factor_for_solar_and_visible_transmittance(self):
"""field `Dirt Correction Factor for Solar and Visible Transmittance`
| Default value: 1.0
| value <= 1.0
Args:
value (float): value for IDD Field `Dirt Correction Factor for Solar and Visible Transmittance`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `dirt_correction_factor_for_solar_and_visible_transmittance` or None if not set
"""
return self[
"Dirt Correction Factor for Solar and Visible Transmittance"]
@dirt_correction_factor_for_solar_and_visible_transmittance.setter
def dirt_correction_factor_for_solar_and_visible_transmittance(
self,
value=1.0):
"""Corresponds to IDD field `Dirt Correction Factor for Solar and
Visible Transmittance`"""
self[
"Dirt Correction Factor for Solar and Visible Transmittance"] = value
@property
def solar_diffusing(self):
"""field `Solar Diffusing`
| Default value: No
Args:
value (str): value for IDD Field `Solar Diffusing`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `solar_diffusing` or None if not set
"""
return self["Solar Diffusing"]
@solar_diffusing.setter
def solar_diffusing(self, value="No"):
"""Corresponds to IDD field `Solar Diffusing`"""
self["Solar Diffusing"] = value
class WindowMaterialGas(DataObject):
""" Corresponds to IDD object `WindowMaterial:Gas`
Gas material properties that are used in Windows or Glass Doors
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'gas type',
{'name': u'Gas Type',
'pyname': u'gas_type',
'required-field': True,
'autosizable': False,
'accepted-values': [u'Air',
u'Argon',
u'Krypton',
u'Xenon',
u'Custom'],
'autocalculatable': False,
'type': 'alpha'}),
(u'thickness',
{'name': u'Thickness',
'pyname': u'thickness',
'minimum>': 0.0,
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'm'}),
(u'conductivity coefficient a',
{'name': u'Conductivity Coefficient A',
'pyname': u'conductivity_coefficient_a',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'W/m-K'}),
(u'conductivity coefficient b',
{'name': u'Conductivity Coefficient B',
'pyname': u'conductivity_coefficient_b',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'W/m-K2'}),
(u'conductivity coefficient c',
{'name': u'Conductivity Coefficient C',
'pyname': u'conductivity_coefficient_c',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'W/m-K3'}),
(u'viscosity coefficient a',
{'name': u'Viscosity Coefficient A',
'pyname': u'viscosity_coefficient_a',
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'kg/m-s'}),
(u'viscosity coefficient b',
{'name': u'Viscosity Coefficient B',
'pyname': u'viscosity_coefficient_b',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'kg/m-s-K'}),
(u'viscosity coefficient c',
{'name': u'Viscosity Coefficient C',
'pyname': u'viscosity_coefficient_c',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'kg/m-s-K2'}),
(u'specific heat coefficient a',
{'name': u'Specific Heat Coefficient A',
'pyname': u'specific_heat_coefficient_a',
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'J/kg-K'}),
(u'specific heat coefficient b',
{'name': u'Specific Heat Coefficient B',
'pyname': u'specific_heat_coefficient_b',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'J/kg-K2'}),
(u'specific heat coefficient c',
{'name': u'Specific Heat Coefficient C',
'pyname': u'specific_heat_coefficient_c',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'J/kg-K3'}),
(u'molecular weight',
{'name': u'Molecular Weight',
'pyname': u'molecular_weight',
'maximum': 200.0,
'required-field': False,
'autosizable': False,
'minimum': 20.0,
'autocalculatable': False,
'type': u'real',
'unit': u'g/mol'}),
(u'specific heat ratio',
{'name': u'Specific Heat Ratio',
'pyname': u'specific_heat_ratio',
'minimum>': 1.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'})]),
'format': None,
'group': u'Surface Construction Elements',
'min-fields': 3,
'name': u'WindowMaterial:Gas',
'pyname': u'WindowMaterialGas',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def gas_type(self):
"""field `Gas Type`
Args:
value (str): value for IDD Field `Gas Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `gas_type` or None if not set
"""
return self["Gas Type"]
@gas_type.setter
def gas_type(self, value=None):
"""Corresponds to IDD field `Gas Type`"""
self["Gas Type"] = value
@property
def thickness(self):
"""field `Thickness`
| Units: m
| IP-Units: in
Args:
value (float): value for IDD Field `Thickness`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `thickness` or None if not set
"""
return self["Thickness"]
@thickness.setter
def thickness(self, value=None):
"""Corresponds to IDD field `Thickness`"""
self["Thickness"] = value
@property
def conductivity_coefficient_a(self):
"""field `Conductivity Coefficient A`
| Used only if Gas Type = Custom
| Units: W/m-K
Args:
value (float): value for IDD Field `Conductivity Coefficient A`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `conductivity_coefficient_a` or None if not set
"""
return self["Conductivity Coefficient A"]
@conductivity_coefficient_a.setter
def conductivity_coefficient_a(self, value=None):
"""Corresponds to IDD field `Conductivity Coefficient A`"""
self["Conductivity Coefficient A"] = value
@property
def conductivity_coefficient_b(self):
"""field `Conductivity Coefficient B`
| Used only if Gas Type = Custom
| Units: W/m-K2
Args:
value (float): value for IDD Field `Conductivity Coefficient B`
Raises:
ValueError: if `value` is not a valid value
Returns:
            float: the value of `conductivity_coefficient_b` or None if not set
        """
        return self["Conductivity Coefficient B"]
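# --- Illustrative usage sketch (not part of the generated module) ---
# A minimal, hedged example of how the WindowMaterialGas object defined above
# might be populated through its IDD-backed properties; it assumes the DataObject
# base class supports no-argument construction, and the values are typical but
# made up.
def _demo_window_material_gas():
    gas_gap = WindowMaterialGas()
    gas_gap.name = "Argon 13mm Gap"
    gas_gap.gas_type = "Argon"      # one of the accepted values in the schema
    gas_gap.thickness = 0.013       # metres, per the IDD unit for this field
    return gas_gap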
# gryphon/lib/exchange/itbit_btc_usd.py
"""
Exchange documentation: https://api.itbit.com/docs
"""
# -*- coding: utf-8 -*-
import base64
from collections import OrderedDict, defaultdict
import hashlib
import hmac
import json
import time
import urllib
import cdecimal
from cdecimal import Decimal
from gryphon.lib.exchange import exceptions
from gryphon.lib.exchange import order_types
from gryphon.lib.exchange.exchange_api_wrapper import ExchangeAPIWrapper
from gryphon.lib.logger import get_logger
from gryphon.lib.models.exchange import Balance
from gryphon.lib.money import Money
from gryphon.lib.time_parsing import parse
logger = get_logger(__name__)
class ItbitBTCUSDExchange(ExchangeAPIWrapper):
def __init__(self, session=None, configuration=None):
super(ItbitBTCUSDExchange, self).__init__(session)
self.name = u'ITBIT_BTC_USD'
self.friendly_name = u'Itbit BTC-USD'
self.base_url = 'https://api.itbit.com/v1'
self.currency = 'USD'
self.bid_string = 'buy'
self.ask_string = 'sell'
self.nonce = 1
# Configurables with defaults.
self.market_order_fee = Decimal('0.002')
self.limit_order_fee = Decimal('0')
self.fee = self.market_order_fee
self.fiat_balance_tolerance = Money('0.0001', 'USD')
self.volume_balance_tolerance = Money('0.00000001', 'BTC')
self.max_tick_speed = 1
self.min_order_size = Money('0', 'BTC')
self.use_cached_orderbook = False
if configuration:
self.configure(configuration)
@property
def wallet_id(self):
try:
self._wallet_id
except AttributeError:
self._wallet_id = self._load_env('ITBIT_BTC_USD_WALLET_ID')
return self._wallet_id
def req(self, req_method, url, **kwargs):
# Our auth_request method expects the params in the url.
assert '?' not in url
if 'params' in kwargs:
if kwargs['params']: # Check that it's not empty.
url += '?' + urllib.urlencode(kwargs['params'])
del kwargs['params']
req = super(ItbitBTCUSDExchange, self).req(req_method, url, **kwargs)
return req
def resp(self, req):
response = super(ItbitBTCUSDExchange, self).resp(req)
if 'error' in response and response['error']:
raise exceptions.ExchangeAPIErrorException(self, response['error'])
if 'code' in response:
errors_string = str(response['description'])
error_code = int(response['code'])
if error_code == 81001:
raise exceptions.InsufficientFundsError()
elif error_code == 10002:
raise exceptions.NonceError()
elif error_code == 81002:
raise exceptions.CancelOrderNotFoundError()
else:
raise exceptions.ExchangeAPIErrorException(
self,
'Code %s: %s' % (
error_code, errors_string,
))
return response
def all_trades(self, page=1):
req = self.all_trades_req(page)
return self.all_trades_resp(req)
def all_trades_req(self, page=1):
params = {}
if page:
params['page'] = page
return self.req(
'get',
'/wallets/%s/trades' % self.wallet_id,
params=params,
)
def all_trades_resp(self, req):
response = self.resp(req)
return response['tradingHistory']
def trades_for_orders(self, order_ids):
req = self.trades_for_orders_req()
return self.trades_for_orders_resp(req, order_ids)
def trades_for_orders_req(self):
return self.all_trades_req()
def trades_for_orders_resp(self, req, order_ids):
order_ids = [str(o) for o in order_ids]
trades = self.all_trades_resp(req)
matching_trades = defaultdict(list)
for trade in trades:
oid = str(trade['orderId'])
if oid in order_ids:
matching_trades[oid].append(trade)
return matching_trades
def all_orders(self, status=None, page=1):
req = self.all_orders_req(status, page)
return self.all_orders_resp(req)
def all_orders_req(self, status=None, page=1):
params = {}
if status:
params['status'] = status
if page:
params['page'] = page
return self.req(
'get',
'/wallets/%s/orders' % self.wallet_id,
params=params,
)
def all_orders_resp(self, req):
raw_orders = self.resp(req)
orders = []
for raw_order in raw_orders:
mode = self._order_mode_to_const(raw_order['side'])
volume = Money(raw_order['amount'], 'BTC')
volume_filled = Money(raw_order['amountFilled'], 'BTC')
volume_remaining = volume - volume_filled
order = {
'mode': mode,
'id': str(raw_order['id']),
'price': Money(raw_order['price'], 'USD'),
'volume': volume,
'volume_remaining': volume_remaining,
'status': raw_order['status']
}
orders.append(order)
return orders
# Common Exchange Methods
def auth_request(self, req_method, url, request_args):
"""
This modifies request_args.
"""
try:
self.api_key
self.secret
except AttributeError:
self.api_key = self._load_env('ITBIT_BTC_USD_API_KEY')
self.secret = self._load_env('ITBIT_BTC_USD_API_SECRET').encode('utf-8')
timestamp = int(round(time.time() * 1000))
nonce = self.nonce
body = ''
if 'data' in request_args:
body = json.dumps(request_args['data'])
request_args['data'] = body
message = self._auth_create_message(req_method, url, body, nonce, timestamp)
sig = self._auth_sign_message(message, nonce, url, self.secret)
if 'headers' not in request_args:
request_args['headers'] = {}
headers = request_args['headers']
headers['Authorization'] = self.api_key + ':' + sig
headers['X-Auth-Timestamp'] = str(timestamp)
headers['X-Auth-Nonce'] = str(nonce)
headers['Content-Type'] = 'application/json'
def _auth_create_message(self, verb, url, body, nonce, timestamp):
return json.dumps(
[verb.upper(), url, body, str(nonce), str(timestamp)],
separators=(',', ':'),
)
def _auth_sign_message(self, message, nonce, url, api_secret):
sha256_hash = hashlib.sha256()
nonced_message = str(nonce) + message
sha256_hash.update(nonced_message)
hash_digest = sha256_hash.digest()
msg_to_hmac = url.encode('utf8') + hash_digest
hmac_digest = hmac.new(api_secret, msg_to_hmac, hashlib.sha512).digest()
sig = base64.b64encode(hmac_digest)
return sig
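    # --- Illustrative sketch (hypothetical values, not part of the original) ---
    # Mirrors the two helpers above end-to-end without real credentials: SHA-256
    # over nonce+message, then HMAC-SHA512 over url+hash, base64-encoded, which
    # auth_request() prefixes with the API key to form the Authorization header.
    @staticmethod
    def _demo_signature():
        demo_secret = 'not-a-real-secret'
        url = 'https://api.itbit.com/v1/wallets/demo/orders'
        nonce, timestamp = 42, 1475000000000
        message = json.dumps(
            ['POST', url, '', str(nonce), str(timestamp)],
            separators=(',', ':'),
        )
        sha256_hash = hashlib.sha256()
        sha256_hash.update(str(nonce) + message)
        hmac_digest = hmac.new(
            demo_secret,
            url.encode('utf8') + sha256_hash.digest(),
            hashlib.sha512,
        ).digest()
        return base64.b64encode(hmac_digest)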
def get_balance_req(self):
try:
self.user_id
except AttributeError:
self.user_id = self._load_env('ITBIT_BTC_USD_USER_ID')
return self.req('get', '/wallets/%s' % self.wallet_id)
def get_balance_resp(self, req):
response = self.resp(req)
raw_balances = response['balances']
btc_available = None
usd_available = None
for raw_balance in raw_balances:
if raw_balance['currency'] == 'XBT':
btc_available = Money(raw_balance['availableBalance'], 'BTC')
elif raw_balance['currency'] == 'USD':
usd_available = Money(raw_balance['availableBalance'], 'USD')
if btc_available is None or usd_available is None:
raise exceptions.ExchangeAPIErrorException(
self,
'missing expected balances',
)
balance = Balance()
balance['BTC'] = btc_available
balance['USD'] = usd_available
return balance
def get_ticker_req(self, verify=True):
return self.req(
'get',
'/markets/XBTUSD/ticker',
no_auth=True,
verify=verify,
)
def get_ticker_resp(self, req):
response = self.resp(req)
return {
'high': Money(response['high24h'], 'USD'),
'low': Money(response['low24h'], 'USD'),
'last': Money(response['lastPrice'], 'USD'),
'volume': Money(response['volume24h'], 'BTC')
}
def _get_orderbook_from_api_req(self, verify=True):
return self.req(
'get',
'/markets/XBTUSD/order_book',
no_auth=True,
verify=verify,
)
def place_order_req(self, mode, volume, price=None, order_type=order_types.LIMIT_ORDER):
side = self._order_mode_from_const(mode)
if price.currency != 'USD':
raise ValueError('price must be in USD')
if volume.currency != 'BTC':
raise ValueError('volume must be in BTC')
        # Truncate the volume instead of rounding it because it's better to
        # trade too little than too much.
volume = volume.round_to_decimal_places(8, rounding=cdecimal.ROUND_DOWN)
volume_str = '%.8f' % volume.amount
price_str = '%.2f' % price.amount
payload = {
'type': 'limit',
'currency': 'XBT',
'side': side,
'amount': volume_str,
'price': price_str,
'instrument': 'XBTUSD'
}
return self.req(
'post',
'/wallets/%s/orders/' % self.wallet_id,
data=payload,
)
def place_order_resp(self, req):
response = self.resp(req)
try:
order_id = str(response['id'])
return {'success': True, 'order_id': order_id}
except KeyError:
raise exceptions.ExchangeAPIErrorException(
self,
'response does not contain an order id',
)
def get_open_orders_req(self):
return self.all_orders_req(status='open')
def get_open_orders_resp(self, req):
open_orders = self.all_orders_resp(req)
for o in open_orders:
del o['status']
return open_orders
def get_order_details(self, order_id):
req = self.get_order_details_req()
return self.get_order_details_resp(req, order_id)
def get_order_details_req(self):
return self.get_multi_order_details_req()
def get_order_details_resp(self, req, order_id):
return self.get_multi_order_details_resp(req, [order_id])[order_id]
def get_multi_order_details(self, order_ids):
req = self.get_multi_order_details_req()
return self.get_multi_order_details_resp(req, order_ids)
def get_multi_order_details_req(self):
return self.trades_for_orders_req()
def get_multi_order_details_resp(self, req, order_ids):
# This is modeled after Bitstamp, where we get the order details from the
# trades endpoint directly. The caveat is that order_details will only work
# for the most recent 50 trades. Since we are always accounting trades right
# after they happen, this should be ok (and also affects Bitstamp).
order_ids = [str(o) for o in order_ids]
multi_trades = self.trades_for_orders_resp(req, order_ids)
data = {}
for order_id in order_ids:
total_usd = Money('0', 'USD')
total_btc = Money('0', 'BTC')
our_trades = []
our_type = None
if order_id in multi_trades:
trades = multi_trades[order_id]
for t in trades:
assert(t['currency1'] == 'XBT')
btc_amount = Money(t['currency1Amount'], 'BTC')
assert(t['currency2'] == 'USD')
usd_amount = Money(t['currency2Amount'], 'USD')
# This might also come back as XBT, but since ItBit has 0-fee
# trading right now, I can't tell.
assert(t['commissionCurrency'] == 'USD')
fee = Money(t['commissionPaid'], 'USD')
total_usd += usd_amount
total_btc += btc_amount
our_type = self._order_mode_to_const(t['direction'])
our_trades.append({
'time': parse(t['timestamp']).epoch,
'trade_id': None,
'fee': fee,
'btc': btc_amount,
'fiat': usd_amount,
})
time_created = None
if our_trades:
time_created = min([t['time'] for t in our_trades])
data[order_id] = {
'time_created': time_created,
'type': our_type,
'btc_total': total_btc,
'fiat_total': total_usd,
'trades': our_trades
}
return data
def cancel_order_req(self, order_id):
return self.req(
'delete',
'/wallets/%s/orders/%s' % (self.wallet_id, order_id),
)
def cancel_order_resp(self, req):
# In the success case, no response is given but we need to call resp() so it
# can catch any error cases.
response = self.resp(req) # noqa
return {'success': True}
def withdraw_crypto_req(self, address, volume):
if not isinstance(address, basestring):
raise TypeError('Withdrawal address must be a string')
if not isinstance(volume, Money) or volume.currency != self.volume_currency:
raise TypeError('Withdrawal volume must be in %s' % self.volume_currency)
volume_str = '%.8f' % volume.amount
payload = {
'currency': 'XBT',
'amount': volume_str,
'address': address,
}
return self.req(
'post',
'/wallets/%s/cryptocurrency_withdrawals' % self.wallet_id,
data=payload,
)
def withdraw_crypto_resp(self, req):
response = self.resp(req)
return {'success': True, 'exchange_withdrawal_id': response['withdrawalId']}
def get_order_audit_data(self, skip_recent=0, page=1):
"""
Returns an OrderedDict of order ids mapped to their filled volume (only include
orders that have some trades).
Dropped the skip_recent flag because we don't seem to be using it anywhere.
"""
if skip_recent != 0:
            raise ValueError('skip_recent is deprecated')
orders = OrderedDict()
trades_to_audit = self.all_trades(page=page)
for trade in trades_to_audit:
order_id = str(trade['orderId'])
assert(trade['currency1'] == 'XBT')
trade_amount = abs(Money(trade['currency1Amount'], 'BTC'))
try:
orders[order_id] += trade_amount
except KeyError:
orders[order_id] = trade_amount
# Remove the oldest 2 orders, because its trades might be wrapped around a
        # page gap and this would give us an inaccurate volume_filled number.
# We need to remove 2 because there could be an ask and a bid.
try:
orders.popitem()
orders.popitem()
except KeyError:
pass
return orders
def fiat_deposit_fee(self, deposit_amount):
return Money('5', 'USD')
def fiat_withdrawal_fee(self, withdrawal_amount):
"""
Itbit fee is from their documentation, and an extra $15 is being charged to us
        before it shows up in our bank account (as of September 2016), so I assume
that's an intermediary fee.
        The fee should be a flat $50 on
from typing import Dict
from botocore.paginate import Paginator
class ListApps(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Amplify.Client.list_apps`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListApps>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'apps': [
{
'appId': 'string',
'appArn': 'string',
'name': 'string',
'tags': {
'string': 'string'
},
'description': 'string',
'repository': 'string',
'platform': 'IOS'|'ANDROID'|'WEB'|'REACT_NATIVE',
'createTime': datetime(2015, 1, 1),
'updateTime': datetime(2015, 1, 1),
'iamServiceRoleArn': 'string',
'environmentVariables': {
'string': 'string'
},
'defaultDomain': 'string',
'enableBranchAutoBuild': True|False,
'enableBasicAuth': True|False,
'basicAuthCredentials': 'string',
'customRules': [
{
'source': 'string',
'target': 'string',
'status': 'string',
'condition': 'string'
},
],
'productionBranch': {
'lastDeployTime': datetime(2015, 1, 1),
'status': 'string',
'thumbnailUrl': 'string',
'branchName': 'string'
},
'buildSpec': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Result structure for an Amplify App list request.
- **apps** *(list) --*
List of Amplify Apps.
- *(dict) --*
Amplify App represents different branches of a repository for building, deploying, and hosting.
- **appId** *(string) --*
Unique Id for the Amplify App.
- **appArn** *(string) --*
ARN for the Amplify App.
- **name** *(string) --*
Name for the Amplify App.
- **tags** *(dict) --*
Tag for Amplify App.
- *(string) --*
- *(string) --*
- **description** *(string) --*
Description for the Amplify App.
- **repository** *(string) --*
Repository for the Amplify App.
- **platform** *(string) --*
Platform for the Amplify App.
- **createTime** *(datetime) --*
Create date / time for the Amplify App.
- **updateTime** *(datetime) --*
Update date / time for the Amplify App.
- **iamServiceRoleArn** *(string) --*
IAM service role ARN for the Amplify App.
- **environmentVariables** *(dict) --*
Environment Variables for the Amplify App.
- *(string) --*
- *(string) --*
- **defaultDomain** *(string) --*
Default domain for the Amplify App.
- **enableBranchAutoBuild** *(boolean) --*
Enables auto-building of branches for the Amplify App.
- **enableBasicAuth** *(boolean) --*
Enables Basic Authorization for branches for the Amplify App.
- **basicAuthCredentials** *(string) --*
Basic Authorization credentials for branches for the Amplify App.
- **customRules** *(list) --*
Custom redirect / rewrite rules for the Amplify App.
- *(dict) --*
Custom rewrite / redirect rule.
- **source** *(string) --*
The source pattern for a URL rewrite or redirect rule.
- **target** *(string) --*
The target pattern for a URL rewrite or redirect rule.
- **status** *(string) --*
The status code for a URL rewrite or redirect rule.
- **condition** *(string) --*
The condition for a URL rewrite or redirect rule, e.g. country code.
- **productionBranch** *(dict) --*
Structure with Production Branch information.
- **lastDeployTime** *(datetime) --*
Last Deploy Time of Production Branch.
- **status** *(string) --*
Status of Production Branch.
- **thumbnailUrl** *(string) --*
Thumbnail Url for Production Branch.
- **branchName** *(string) --*
Branch Name for Production Branch.
- **buildSpec** *(string) --*
BuildSpec content for Amplify App.
- **NextToken** *(string) --*
A token to resume pagination.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
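# --- Illustrative usage sketch (not part of the generated stubs) ---
# How the ListApps paginator above is typically obtained at runtime through
# boto3; credentials and region come from whatever the environment provides,
# and the page size is arbitrary.
def _demo_list_apps():
    import boto3
    client = boto3.client('amplify')
    paginator = client.get_paginator('list_apps')
    for page in paginator.paginate(PaginationConfig={'PageSize': 50}):
        for app in page['apps']:
            print(app['appId'], app['name'])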
class ListBranches(Paginator):
def paginate(self, appId: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Amplify.Client.list_branches`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListBranches>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
appId='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'branches': [
{
'branchArn': 'string',
'branchName': 'string',
'description': 'string',
'tags': {
'string': 'string'
},
'stage': 'PRODUCTION'|'BETA'|'DEVELOPMENT'|'EXPERIMENTAL',
'displayName': 'string',
'enableNotification': True|False,
'createTime': datetime(2015, 1, 1),
'updateTime': datetime(2015, 1, 1),
'environmentVariables': {
'string': 'string'
},
'enableAutoBuild': True|False,
'customDomains': [
'string',
],
'framework': 'string',
'activeJobId': 'string',
'totalNumberOfJobs': 'string',
'enableBasicAuth': True|False,
'thumbnailUrl': 'string',
'basicAuthCredentials': 'string',
'buildSpec': 'string',
'ttl': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Result structure for list branches request.
- **branches** *(list) --*
List of branches for an Amplify App.
- *(dict) --*
Branch for an Amplify App, which maps to a 3rd party repository branch.
- **branchArn** *(string) --*
ARN for a branch, part of an Amplify App.
- **branchName** *(string) --*
Name for a branch, part of an Amplify App.
- **description** *(string) --*
Description for a branch, part of an Amplify App.
- **tags** *(dict) --*
Tag for branch for Amplify App.
- *(string) --*
- *(string) --*
- **stage** *(string) --*
Stage for a branch, part of an Amplify App.
- **displayName** *(string) --*
Display name for a branch, part of an Amplify App.
- **enableNotification** *(boolean) --*
Enables notifications for a branch, part of an Amplify App.
- **createTime** *(datetime) --*
Creation date and time for a branch, part of an Amplify App.
- **updateTime** *(datetime) --*
Last updated date and time for a branch, part of an Amplify App.
- **environmentVariables** *(dict) --*
Environment Variables specific to a branch, part of an Amplify App.
- *(string) --*
- *(string) --*
- **enableAutoBuild** *(boolean) --*
Enables auto-building on push for a branch, part of an Amplify App.
- **customDomains** *(list) --*
Custom domains for a branch, part of an Amplify App.
- *(string) --*
- **framework** *(string) --*
Framework for a branch, part of an Amplify App.
- **activeJobId** *(string) --*
Id of the active job for a branch, part of an Amplify App.
- **totalNumberOfJobs** *(string) --*
Total number of Jobs part of an Amplify App.
- **enableBasicAuth** *(boolean) --*
Enables Basic Authorization for a branch, part of an Amplify App.
- **thumbnailUrl** *(string) --*
Thumbnail Url for the branch.
- **basicAuthCredentials** *(string) --*
Basic Authorization credentials for a branch, part of an Amplify App.
- **buildSpec** *(string) --*
BuildSpec content for branch for Amplify App.
- **ttl** *(string) --*
The content TTL for the website in seconds.
- **NextToken** *(string) --*
A token to resume pagination.
:type appId: string
:param appId: **[REQUIRED]**
Unique Id for an Amplify App.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListDomainAssociations(Paginator):
def paginate(self, appId: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Amplify.Client.list_domain_associations`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListDomainAssociations>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
appId='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
#!/usr/bin/python
"""
Copyright (c) 2016 King Mongkut's University of Technology Thonburi
Author: <NAME>
Contact: <EMAIL>
Version: 1.3b 2017-03-01
License: MIT License
The MIT License
Copyright (c) 2016 King Mongkut's University of Technology Thonburi
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
version = "GenomeManagement_v1.3b"
import re
import gzip
import codecs
from operator import itemgetter, attrgetter
utf8 = codecs.getreader('UTF-8')
class Fasta_manager(object):
def __init__(self, fastaFile, show_genome_stat=False):
self.chromosomeLength = {}
self.chromosomeSeq = {}
self.chromosomeStatistics = {} # Length, GC, AT, N
count_bases = {'A': 0, 'T':0, 'C':0, 'G':0,
'R':0, 'S':0, 'W':0, 'K':0,
'M':0, 'B':0, 'D':0, 'H':0,
'V':0, 'Y':0, 'N': 0
}
if(fastaFile.find('.gz') > 0):
filegz = gzip.open(fastaFile, 'rb')
self.file = utf8(filegz)
else:
self.file = open(fastaFile, 'r')
fasta = self.file.read().split('>')
fasta = fasta[1:]
for chromosome in fasta:
if (chromosome[:50].find(' ') < 0):
header = chromosome[:chromosome[:50].find('\n')]
else:
header = chromosome[:chromosome[:50].find(' ')]
sequence = chromosome[chromosome.find('\n'):-1].replace('\n', '')
length = len(sequence)
self.chromosomeSeq[header] = sequence
self.chromosomeLength[header] = length
if show_genome_stat:
for i in sequence:
i = i.upper()
count_bases[i] += 1
if show_genome_stat:
print("Total sequence length:" , "{:0,}".format(sum(count_bases.values())))
print("Total ungapped length:" , "{:0,}".format(sum(count_bases.values()) - count_bases['N']))
print("Total spanned gaps:", "{:0,}".format(count_bases['N']))
print("Number of chromosomes/scaffolds/contigs: ", "{:0,}".format(len(fasta)))
sumGC = count_bases['G'] + count_bases['C'] + count_bases['S'] + count_bases['Y']/2 + count_bases['K']/2 + count_bases['M']/2 + count_bases['B']*2/3 + count_bases['D']/3 + count_bases['H']/3 + count_bases['V']*2/3 + count_bases['N']/2
print("GC content (%):", "{:0,.2f}".format(sumGC * 100 / sum(count_bases.values())))
print("N content (%):", "{:0,.2f}".format(count_bases['N'] * 100 / sum(count_bases.values())))
scaffold_len = sorted(self.chromosomeLength.values(), reverse=True)
half_sum_len = sum(scaffold_len)/2
sum_len = 0
i = 0
while i < len(scaffold_len) and sum_len < half_sum_len:
sum_len += scaffold_len[i]
i += 1
print("N50:", "{:0,}".format(scaffold_len[i-1]))
print("L50:", "{:0,}".format(i))
def checkChromosome(self, chromosome, start=0, end=1):
if(start>end):
print("Error: checkChromosome(chromosome, start, end) of", chromosome, "[" , start, "-", end ,"], the start position should be less than end")
return False
exit()
elif(chromosome in self.chromosomeLength):
if(end<=self.chromosomeLength[chromosome]):
return True
else:
print("Not found "+chromosome+" at "+ str(end) +", please try again. (the first nucleotide is position = 1)")
return False
else:
print("Not found "+chromosome+" ,please check chromosome again!!!")
return False
def getGCcontent(self, sequence):
GC = sequence.count('G') + sequence.count('C') + sequence.count('g') + sequence.count('c')
AT = sequence.count('A') + sequence.count('T') + sequence.count('a') + sequence.count('t')
return float(GC) * 100 / (AT + GC)
def getGC(self, sequence):
return sequence.count('G') + sequence.count('C') + sequence.count('g') + sequence.count('c')
def getStatisticSequence(self, sequence):
GC = sequence.count('G') + sequence.count('C') + sequence.count('g') + sequence.count('c')
AT = sequence.count('A') + sequence.count('T') + sequence.count('a') + sequence.count('t')
N = sequence.count('N') + sequence.count('n')
return [len(sequence), GC, AT, N, float(GC) * 100 / (AT + GC)]
def getStatisticSeqFromGenome(self, chromosome, start, end, strand):
seqLength = self.getChromosomeLength(chromosome)
if (start > 0 and start < seqLength + 1 and end < seqLength + 1):
if(strand == '+'):
return self.getStatisticSequence(self.chromosomeSeq[chromosome][start - 1:end])
else:
reverse = self.chromosomeSeq[chromosome][start - 1:end]
reverse = self.complementary(reverse[::-1])
return self.getStatisticSequence(reverse)
else:
print("Out of length in seq please check again")
print("chromosome", chromosome, "length:", seqLength)
print("gene position:", start, "to", end, "on", strand, "strand")
exit()
def getChromosomeLength(self, chromosome_name):
return self.chromosomeLength[chromosome_name]
def getSequence(self, chromosome, start, end, strand):
if self.checkChromosome(chromosome, start, end):
seqLength = self.getChromosomeLength(chromosome)
if (start > 0 and start < seqLength + 1 and end < seqLength + 1):
if(strand == '+'):
return self.chromosomeSeq[chromosome][start - 1:end]
else:
reverse = self.chromosomeSeq[chromosome][start - 1:end]
reverse = self.complementary(reverse[::-1])
return reverse
else:
                print("\nOut of chromosome length, please check again.")
                print("Chromosome length:", seqLength)
                print("Error command: getSequence(", chromosome, start, end, strand, ")", sep=', ')
                return False
else:
return ""
def getChrSequence(self, chromosome):
return self.chromosomeSeq[chromosome]
def complementary(self, seq):
new = ""
for base in seq:
if(base == 'A'):
new = new + 'T'
elif(base == 'T'):
new = new + 'A'
elif(base == 'G'):
new = new + 'C'
elif(base == 'C'):
new = new + 'G'
elif(base == 'a'):
new = new + 't'
elif(base == 't'):
new = new + 'a'
elif(base == 'g'):
new = new + 'c'
elif(base == 'c'):
new = new + 'g'
else:
new = new + base
return new
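    # --- Illustrative alternative (not part of the original class) ---
    # str.translate with a prebuilt table gives the same base pairing as the
    # loop in complementary() above, preserving case and leaving ambiguity
    # codes such as N untouched; shown only as a sketch.
    _COMPLEMENT_TABLE = str.maketrans('ATGCatgc', 'TACGtacg')
    def complementary_fast(self, seq):
        """Equivalent of complementary(), using a translation table."""
        return seq.translate(self._COMPLEMENT_TABLE)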
def searchSeqInChromosome(self, chromosome_name, pattern):
pattern = pattern.upper()
len_pattern = len(pattern)
index_found = []
# Search pattern in plus strand
index = self.chromosomeSeq[chromosome_name].find(pattern)
while(index > -1):
index_found.append([index + 1, index + len_pattern, '+'])
index = self.chromosomeSeq[chromosome_name].find(pattern, index + 1)
# Search pattern in minus strand
pattern = self.complementary(pattern)[::-1]
index = self.chromosomeSeq[chromosome_name].find(pattern)
while(index > -1):
index_found.append([index + 1, index + len_pattern, '-'])
index = self.chromosomeSeq[chromosome_name].find(pattern, index + 1)
# Return [fistMatch,endMatch,strand]
return index_found
def searchSeqInGenome(self, pattern):
pattern = pattern.upper()
len_pattern = len(pattern)
index_found = []
for chromosome_name, seq in sorted(self.chromosomeSeq.items()):
# Search pattern in plus strand
index = seq.find(pattern)
while(index > -1):
index_found.append([chromosome_name , index + 1, index + len_pattern, '+'])
index = seq.find(pattern, index + 1)
# Search pattern in minus strand
pattern = self.complementary(pattern)[::-1]
index = seq.find(pattern)
while(index > -1):
index_found.append([chromosome_name, index + 1, index + len_pattern, '-'])
index = seq.find(pattern, index + 1)
return index_found
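# --- Illustrative usage sketch (not part of the original module) ---
# Minimal example of the Fasta_manager API defined above; 'genome.fa' and the
# chromosome name 'chr1' are placeholders for whatever data is actually loaded.
def _demo_fasta_manager():
    genome = Fasta_manager('genome.fa', show_genome_stat=True)
    first_100bp = genome.getSequence('chr1', 1, 100, '+')   # 1-based, inclusive
    ecori_sites = genome.searchSeqInGenome('GAATTC')        # hits on both strands
    return first_100bp, ecori_sites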
class Gff_manager(object):
def __init__(self, file_name):
self.data = [] # all line
self.gene_struc = {} # {'gene1': [line1 ,line2, line 3]}
self.chromosome_contain_gene = {} # {[(chr1,'+')]: [..........]}
gene_name = ""
if(file_name.find('.gz') > 0):
filegz = gzip.open(file_name, 'rb')
gff_file = utf8(filegz)
else:
gff_file = open(file_name, 'r')
for line in gff_file:
if(line[0] != '#' and line != ''):
line = line.split()
line[3] = int(line[3])
line[4] = int(line[4])
line[8] = line[8].split(';')
self.data.append(line)
if(line[2] != 'gene'):
gene_annotation.append(line)
else:
if(gene_name != ''):
# gene_annotation = sorted(gene_annotation,key=itemgetter(3,4))
self.gene_struc[gene_name] = gene_annotation
gene_annotation = [line]
gene_name = line[8][1][5:]
# gene_annotation = sorted(gene_annotation,key=itemgetter(3,4))
self.gene_struc[gene_name] = gene_annotation
table = self.getTableSpecificType("gene")
table = sorted(table, key=itemgetter(0,6,3,4))
for line in table:
if (line[0],line[6]) in self.chromosome_contain_gene:
self.chromosome_contain_gene[(line[0],line[6])].append(line)
else:
self.chromosome_contain_gene[(line[0],line[6])]=[line]
for key, value in self.chromosome_contain_gene.items():
if (key[1] == '-'):
self.chromosome_contain_gene[key] = sorted(value, key=itemgetter(4,3), reverse=True)
def getNumgerOfGffLine(self):
return len(self.data)
def getTable(self):
return self.data
def getTableSpecificType(self, gene_struc_type):
table = []
for line in self.data:
if(line[2] == gene_struc_type):
table.append(line)
return table
def getTableSpecificTypeAndStrand(self, gene_struc_type, strand):
table = []
for line in self.data:
if(line[2] == gene_struc_type and line[6]==strand):
table.append(line)
return table
def printdata(self,type="five_prime_UTR"):
countLine = 0
for line in self.data:
if(line[2] == type):
print(line[0] + "\t" + line[2] + "\t" + str(line[3]) + "\t" + str(line[4]) + "\t" + line[6] + "\t" + line[8][0])
countLine += 1
def getTableDataOfGeneAndType(self, geneName,Type):
table = []
for i in self.gene_struc[geneName]:
if(i[2]==Type):
table.append(i)
return table
def getTableDataOfGene(self, geneName):
return self.gene_struc[geneName]
def getTranscripthave5UTR(self):
print("gene", "transcript", "label5UTR", "lengthOf5UTR", "strand", "start", "stop", sep='\t')
for line in self.data:
if(line[2] == 'gene'):
geneName = line[8][0][3:]
elif(line[2] == 'five_prime_UTR' or line[2] == '5-UTR'):
transcriptName = line[8][0][3:26]
label5UTR = line[8][0][-1:]
start5UTR = int(line[3])
stop5UTR = int(line[4])
len5UTR = stop5UTR - start5UTR + 1
strand = line[6]
print(geneName, transcriptName, label5UTR, len5UTR, strand, start5UTR, stop5UTR, sep='\t')
def getGeneList(self):
return sorted(list(self.gene_struc.keys()))
def getDataSpecificType(self,gene_component):
table = []
for line in self.data:
if(line[2] == gene_component):
table.append(line)
return table
def getTranscript(self):
for line in self.data:
if(line[2] == 'mRNA'):
print(line[8][0][3:])
def checkGene(self, gene_name):
if gene_name in list(self.gene_struc.keys()):
return True
else:
return False
def getGeneForward(self,gene_name):
# return end position of forward gene, if don't have forward gene return False
x = self.gene_struc[gene_name]
# for i in self.gene_struc[gene_name]:
# print(i)
chromosome = x[0][0]
strand = x[0][6]
start=x[0][3]
end=x[0][4]
table_gene = self.chromosome_contain_gene[(chromosome, strand)]
if(strand=="+"):
i=0
while(i < len(table_gene) and table_gene[i][3] < start):
i=i+1
i=i-1
if(i==-1):
return False
else:
return table_gene[i][4]
else:
i=0
while(i < len(table_gene) and end < table_gene[i][4]):
i=i+1
i=i-1
if(i==-1):
return False
else:
# print(gene_name, strand, start, end)
# print(table_gene[i])
return table_gene[i][3]
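# --- Illustrative usage sketch (not part of the original module) ---
# Minimal example of the Gff_manager API defined above; 'annotation.gff3' is a
# placeholder file name and the first gene is used only for illustration.
def _demo_gff_manager():
    gff = Gff_manager('annotation.gff3')
    genes = gff.getGeneList()
    if not genes:
        return []
    return gff.getTableDataOfGeneAndType(genes[0], 'CDS')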
class Genome_manager(Fasta_manager, Gff_manager):
def __init__(self, fastaFile, GffFile):
self.fastaFile = fastaFile
Fasta_manager.__init__(self, fastaFile)
Gff_manager.__init__(self, GffFile)
self.list_of_gene_no_promoter = []
def getListOfGeneNoPromoter(self):
return self.list_of_gene_no_promoter
def getGCcontentInTranscript(self, type):
sumGC = 0
sumAT = 0
for line in self.data:
if(line[2] == type):
# print(line[8][0][3:], line[0], line[3], line[4] , line[6], sep='\t',end = '\t')
statistic = Fasta_manager.getStatisticSeqFromGenome(self, line[0], line[3], line[4] , line[6])
# print(statistic[0], statistic[1], statistic[2], sep='\t')
sumGC += statistic[1]
sumAT += statistic[2]
print("Summary GC content in", type, ":", float(sumGC) * 100 / (sumGC + sumAT))
def selectedTSSProtein(self, upstream, downstream):
file_write = open("%s_upstream_-%dto+%d.fa" % (self.fastaFile[:-6], upstream, downstream), 'w')
statistic_of_5_prime_length = []
geneListSelected = []
geneCount = 0
transcriptName = geneName = ''
five_prime_UTR = []
three_prime_UTR = []
CDS = []
count_five_prime_UTR_selected = 0
count_five_prime_UTR_total = 0
count_upstream_out_of_criteria = 0
count_seq = 0
for line in self.data:
if(line[2] == 'gene'):
geneName = line[8][0][3:]
geneCount += 1
elif(line[2] == 'mRNA'):
count_five_prime = len(five_prime_UTR)
if(count_five_prime > 0):
# Gene have five_prime_UTR
count_five_prime_UTR_selected += 1
count_five_prime_UTR_total += count_five_prime
if geneName not in geneListSelected:
geneListSelected.append(geneName)
if(five_prime_UTR[0][6] == '+'):
five_prime_UTR.sort(key=itemgetter (3, 4))
selected_five_prime = five_prime_UTR[count_five_prime - 1]
else:
five_prime_UTR.sort(key=itemgetter (4, 3))
selected_five_prime = five_prime_UTR[0]
sequence = Fasta_manager.getSequence(self, selected_five_prime[0], selected_five_prime[3], selected_five_prime[4], selected_five_prime[6])
statistic_of_5_prime_length.append(len(sequence))
# print(">", transcriptName, sep="")
# print(sequence)
text = self.getPromoterOfGene(upstream, downstream, selected_five_prime)
if(text == False):
count_upstream_out_of_criteria += 1
else:
file_write.writelines(text)
count_seq += 1
else:
# Gene have not five_prime_UTR
pass
transcriptName = line[8][0][3:]
five_prime_UTR = []
three_prime_UTR = []
CDS = []
elif(line[2] == 'five_prime_UTR' or line[2] == '5-UTR'):
five_prime_UTR.append(line)
            elif(line[2] == 'three_prime_UTR' or line[2] == '3-UTR'):
three_prime_UTR.append(line)
elif(line[2] == 'CDS'):
CDS.append(line)
        # Last line: flush the final accumulated transcript
count_five_prime = len(five_prime_UTR)
if(count_five_prime > 0):
count_five_prime_UTR_selected += 1
count_five_prime_UTR_total += count_five_prime
if geneName not in geneListSelected:
geneListSelected.append(geneName)
if(five_prime_UTR[0][6] == '+'):
five_prime_UTR.sort(key=itemgetter (3, 4))
selected_five_prime = five_prime_UTR[count_five_prime - 1]
else:
five_prime_UTR.sort(key=itemgetter (4, 3))
selected_five_prime = five_prime_UTR[0]
sequence = Fasta_manager.getSequence(self, selected_five_prime[0], selected_five_prime[3], selected_five_prime[4], selected_five_prime[6])
statistic_of_5_prime_length.append(len(sequence))
# print(">", transcriptName, sep="")
# print(sequence)
text = self.getPromoterOfGene(upstream, downstream, selected_five_prime)
if(text == False):
count_upstream_out_of_criteria += 1
else:
file_write.writelines(text)
count_seq += 1
# Get statistic
print("Statistic of genome", "%s_upstream_-%dto+%d.fa" % (self.fastaFile[:-6], upstream, downstream))
print("Number of annotated gene:", geneCount)
print("Number of 5'UTR of known gene:", len(geneListSelected))
print("Number of alternative 5'UTR transcript:", count_five_prime_UTR_total)
print("Number of selected 5'UTR transcript (unique):", count_five_prime_UTR_selected)
print("Upstream correct:", count_seq)
print("Upstream out of criteria:", count_upstream_out_of_criteria)
# Number of 5'UTR of selected transcript
# nikmagini/pilot: HPC/EventServer/EventServerJobManager.py
import inspect
import commands
import os
import re
import signal
import sys
import time
import Queue
import multiprocessing
import subprocess
import threading
import json
import traceback
try:
import yampl
except:
print "Failed to import yampl: %s" % traceback.format_exc()
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from pandayoda.yodacore import Logger
from FileHandling import getCPUTimes
from signal_block.signal_block import block_sig, unblock_sig
class EventServerJobManager():
class MessageThread(threading.Thread):
def __init__(self, messageQ, socketname, context, **kwds):
threading.Thread.__init__(self, **kwds)
self.__log = Logger.Logger(filename='EventServiceManager.log')
self.__messageQ = messageQ
self._stop = threading.Event()
try:
self.__messageSrv = yampl.ServerSocket(socketname, context)
except:
self.__log.debug("Exception: failed to start yampl server socket: %s" % traceback.format_exc())
def send(self, message):
try:
self.__messageSrv.send_raw(message)
except:
self.__log.debug("Exception: failed to send yampl message: %s" % traceback.format_exc())
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
def __del__(self):
if self.__messageSrv:
del self.__messageSrv
self.__messageSrv = None
def run(self):
try:
while True:
if self.stopped():
if self.__messageSrv:
del self.__messageSrv
self.__messageSrv = None
break
size, buf = self.__messageSrv.try_recv_raw()
if size == -1:
time.sleep(0.00001)
else:
self.__messageQ.put(buf)
except:
self.__log.debug("Exception: Message Thread failed: %s" % traceback.format_exc())
if self.__messageSrv:
del self.__messageSrv
self.__messageSrv = None
class HelperThread(threading.Thread):
def __init__(self, logger, helperFunc, **kwds):
threading.Thread.__init__(self, **kwds)
self.__log = logger
self.__func = helperFunc
self._stop = threading.Event()
self.__log.debug("HelperThread initialized.")
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
def run(self):
try:
exec_time = None
while True:
if self.stopped():
break
if exec_time is None or exec_time < time.time() - 60:
self.__func()
exec_time = time.time()
time.sleep(1)
except:
self.__log.debug("Exception: HelperThread failed: %s" % traceback.format_exc())
def __init__(self, rank=None, ATHENA_PROC_NUMBER=1, workingDir=None):
self.__rank = rank
self.__name = "EventServerJobManager"
self.__eventRangeChannelName = "EventRangeChannel"
self.__eventRanges = []
self.__eventRangesStatus = {}
self.__outputMessage = []
self.__messageQueue = multiprocessing.Queue()
self.__messageInQueue = multiprocessing.Queue()
self.__messageThread = None
self.__TokenExtractorCmd = None
self.__TokenExtractorProcess = None
self.__athenaMPProcess = None
self.__athenaMP_isReady = False
self.__athenaMP_needEvents = 0
self.__pollTimeout = 5
self.__child_pid = None
self.__child_cpuTime = {}
if workingDir:
self.__log = Logger.Logger(filename=os.path.join(workingDir, 'EventServiceManager.log'))
else:
self.__log = Logger.Logger(filename='EventServiceManager.log')
self.__childProcs = []
self.__isKilled = False
self.__waitTerminate = False
self.__waitTerminateTime = 1800
self.__startTerminateTime = None
self.__noMoreEvents = False
self.__insertedMessages = 0
self.__ATHENA_PROC_NUMBER = int(ATHENA_PROC_NUMBER)
self.__numOutputs = 0
self.initSignalHandler()
self.__childRetStatus = 0
self.__retry = 0
self.__errEvent = False
# accounting
self.__startTime = time.time()
self.__readyForEventTime = None
self.__endTime = None
self.__startOSTimes = os.times()
self.__log.debug("Rank %s: startOSTimes: %s" % (self.__rank, self.__startOSTimes))
self.__endOSTimes = None
self.__totalQueuedEvents = 0
self.__totalProcessedEvents = 0
self.__cpuConsumptionTime = 0
self.__helperThread = None
def handler(self, signal, frame):
self.__log.debug("!!FAILED!!3000!! Signal %s is caught" % signal)
self.terminate()
sys.exit(-1)
def initSignalHandler(self):
#signal.signal(signal.SIGTERM, self.handler)
#signal.signal(signal.SIGQUIT, self.handler)
#signal.signal(signal.SIGSEGV, self.handler)
#signal.signal(signal.SIGINT, self.handler)
pass
def initEventRangeChannel(self):
self.__eventRangeChannelName = "EventService_EventRangeChannel_%s" % os.getpid()
def getEventRangeChannelName(self):
return self.__eventRangeChannelName
def getSetupTime(self):
if self.__readyForEventTime:
ret = self.__readyForEventTime - self.__startTime
else:
ret = time.time() - self.__startTime
return ret
def getTotalTime(self):
if self.__endTime:
ret = self.__endTime - self.__startTime
else:
ret = time.time() - self.__startTime
return ret
def getCPUConsumptionTimeFromProcPid(self, pid):
try:
if not os.path.exists(os.path.join('/proc/', str(pid), 'stat')):
return 0
with open(os.path.join('/proc/', str(pid), 'stat'), 'r') as pidfile:
proctimes = pidfile.readline()
# get utime from /proc/<pid>/stat, 14th item
utime = proctimes.split(' ')[13]
# get stime from /proc/<pid>/stat, 15th item
stime = proctimes.split(' ')[14]
# total CPU time used by the process, in clock ticks
proctotal = int(utime) + int(stime)
return(float(proctotal))
except:
# self.__log.debug("Rank %s: Failed to get cpu consumption time for pid %s: %s" % (self.__rank, pid, traceback.format_exc()))
return 0
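# Worked example (a sketch; the numbers are hypothetical and assume SC_CLK_TCK == 100,
# the usual value on Linux): if /proc/<pid>/stat reports utime=1200 and stime=300
# clock ticks, this helper returns 1500.0 ticks, which getCPUConsumptionTimeFromProc
# below converts to 1500 / 100 = 15.0 CPU-seconds for that child process.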
def getCPUConsumptionTimeFromProc(self):
cpuConsumptionTime = 0L
try:
CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
if self.__child_pid:
self.__childProcs = []
self.getChildren(self.__child_pid)
for process in self.__childProcs:
if process not in self.__child_cpuTime.keys():
self.__child_cpuTime[process] = 0
for process in self.__child_cpuTime.keys():
cpuTime = self.getCPUConsumptionTimeFromProcPid(process) / CLOCK_TICKS
if cpuTime > self.__child_cpuTime[process]:
self.__child_cpuTime[process] = cpuTime
cpuConsumptionTime += self.__child_cpuTime[process]
except:
self.__log.debug("Rank %s: Failed to get cpu consumption time from proc: %s" % (self.__rank, traceback.format_exc()))
return cpuConsumptionTime
def getCPUConsumptionTimeReal(self):
cpuConsumptionUnit, cpuConsumptionTime, cpuConversionFactor = getCPUTimes(os.getcwd())
self.__log.debug("Rank %s: cpuConsumptionTime: %s" % (self.__rank, cpuConsumptionTime))
self.__log.debug("Rank %s: start os.times: %s" % (self.__rank, self.__startOSTimes))
self.__log.debug("Rank %s: os.times: %s" % (self.__rank, os.times()))
if cpuConsumptionTime < 10:
endOSTimes = os.times()
if self.__endOSTimes:
endOSTimes = self.__endOSTimes
cpuConsumptionTime = endOSTimes[2] + endOSTimes[3] - self.__startOSTimes[2] - self.__startOSTimes[3]
if cpuConsumptionTime < 0:
cpuConsumptionTime = 0
procCPUConsumptionTime = self.getCPUConsumptionTimeFromProc()
self.__log.debug("Rank %s: cpuConsumptionTime from proc: %s" % (self.__rank, procCPUConsumptionTime))
if self.__isKilled or cpuConsumptionTime < procCPUConsumptionTime * 0.9:
cpuConsumptionTime = procCPUConsumptionTime
self.__log.debug("Rank %s: cpuConsumptionTime: %s" % (self.__rank, cpuConsumptionTime))
return cpuConsumptionTime
def getCPUConsumptionTime(self):
return self.__cpuConsumptionTime
def helperFunc(self):
self.__cpuConsumptionTime = self.getCPUConsumptionTimeReal()
def getCores(self):
return self.__ATHENA_PROC_NUMBER
def getProcessCPUHour(self):
return (self.getTotalTime() - self.getSetupTime()) * self.getCores()
def getTotalCPUHour(self):
return self.getTotalTime() * self.getCores()
def getTotalQueuedEvents(self):
return self.__totalQueuedEvents
def getTotalProcessedEvents(self):
return self.__totalProcessedEvents
def getAccountingMetrics(self):
return {"startTime": self.__startTime,
"readyTime": self.__readyForEventTime,
"endTime": self.__endTime,
"setupTime": self.getSetupTime(),
"runningTime": self.getTotalTime() - self.getSetupTime(),
"cores": self.getCores(),
#"processCPUHour": self.getProcessCPUHour(),
#"totalCPUHour": self.getTotalCPUHour(),
"cpuConsumptionTime": self.getCPUConsumptionTime(),
"queuedEvents": self.getTotalQueuedEvents(),
"processedEvents": self.getTotalProcessedEvents()}
def preSetup(self, preSetup):
if preSetup:
self.__log.debug("Rank %s: PreSetup: %s" % (self.__rank, preSetup))
status, output = commands.getstatusoutput(preSetup)
self.__log.debug("Rank %s: PreSetup status: %s, output: %s" % (self.__rank, status, output))
return status, output
else:
return 0, None
def postRun(self, postRun):
if postRun:
self.__log.debug("Rank %s: postRun: %s" % (self.__rank, postRun))
status, output = commands.getstatusoutput(postRun)
self.__log.debug("Rank %s: postRun status: %s, output: %s" % (self.__rank, status, output))
def initMessageThread(self, socketname='EventService_EventRanges', context='local'):
self.__log.debug("Rank %s: initMessageThread: socketname: %s, context: %s, workdir: %s" %(self.__rank, socketname, context, os.getcwd()))
try:
self.__messageThread = EventServerJobManager.MessageThread(self.__messageQueue, socketname, context)
self.__messageThread.start()
except:
self.__log.warning("Rank %s: Failed to initMessageThread: %s" % (self.__rank, str(traceback.format_exc())))
self.terminate()
def initTokenExtractorProcess(self, cmd):
self.__log.debug("Rank %s: initTokenExtractorProcess: %s, workdir: %s" % (self.__rank, cmd, os.getcwd()))
try:
self.__TokenExtractorCmd = cmd
if cmd:
self.__TokenExtractorProcess = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stdout, shell=True)
# self.__TokenExtractorProcess = subprocess.Popen(cmd, shell=True)
if self.__TokenExtractorProcess.poll() is not None:
self.__log.warning("Rank %s: Failed to initTokenExtractorProcess, poll is not None: %s" % (self.__rank, self.__TokenExtractorProcess.poll()))
self.terminate()
else:
self.__log.debug("Rank %s: TokenExtractor cmd(%s) is None, will not use it" % (self.__rank, cmd))
self.__TokenExtractorProcess = None
except:
self.__log.warning("Rank %s: Failed to initTokenExtractorProcess: %s" % (self.__rank, str(traceback.format_exc())))
self.terminate()
def initAthenaMPProcess(self, cmd):
self.__log.debug("Rank %s: initAthenaMPProcess: %s, workdir: %s" % (self.__rank, cmd, os.getcwd()))
try:
cmd = cmd.replace('PILOT_EVENTRANGECHANNEL_CHANGE_ME', self.getEventRangeChannelName())
self.__athenaMPProcess = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stdout, shell=True)
# self.__athenaMPProcess = subprocess.Popen(cmd, shell=True)
if self.__athenaMPProcess.poll() is not None:
self.__log.warning("Rank %s: Failed to initAthenaMPProcess, poll is not None: %s" % (self.__rank, self.__athenaMPProcess.poll()))
self.terminate()
except:
self.__log.warning("Rank %s: Failed to initAthenaMPProcess: %s" % (self.__rank, str(traceback.format_exc())))
self.terminate()
def init(self, socketname='EventService_EventRanges', context='local', athenaMPCmd=None, tokenExtractorCmd=None):
self.__childRetStatus = 0
child_pid = os.fork()
if child_pid == 0:
# child process
self.initEventRangeChannel()
self.initMessageThread(socketname=self.getEventRangeChannelName(), context=context)
self.initTokenExtractorProcess(tokenExtractorCmd)
self.initAthenaMPProcess(athenaMPCmd)
self.__log.debug("Rank %s: Child main loop start" % (self.__rank))
while True:
if self.isChildDead():
self.__log.warning("Rank %s: Child One Process in ESJobManager is dead." % self.__rank)
self.terminateChild()
break
try:
message = self.__messageInQueue.get(False)
self.__log.debug("Rank %s: Child get message: %s" % (self.__rank, message))
if "Stop_Message_Process" in message:
self.__log.debug("Rank %s: Child stop" % (self.__rank))
break
else:
self.__messageThread.send(message)
#self.__messageInQueue.task_done()
except Queue.Empty:
pass
except:
self.__log.debug("Rank %s: Child Exception: failed to send yampl message: %s" % (self.__rank, traceback.format_exc()))
self.__log.debug("Rank %s: Child main loop end" % (self.__rank))
self.terminateChild()
self.__log.debug("Rank %s: Child terminated" % (self.__rank))
# sys.exit(0)
os._exit(0)
else:
self.__child_pid = child_pid
self.__log.debug("Rank %s: Initialize helper thread" % (self.__rank))
self.__helperThread = EventServerJobManager.HelperThread(self.__log, self.helperFunc)
self.__helperThread.start()
return 0
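# Example (a sketch; the commands and values below are hypothetical, not taken from this file):
# esjm = EventServerJobManager(rank=0, ATHENA_PROC_NUMBER=8, workingDir=os.getcwd())
# esjm.init(socketname='EventService_EventRanges', context='local',
#           athenaMPCmd='athena.py ... PILOT_EVENTRANGECHANNEL_CHANGE_ME ...',
#           tokenExtractorCmd=None)
# The forked child owns the yampl server socket and the AthenaMP/TokenExtractor
# subprocesses, while the parent keeps feeding it through self.__messageInQueue.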
def insertEventRange(self, message):
self.__log.debug("Rank %s: insertEventRange to ESJobManager: %s" % (self.__rank, message))
self.__eventRanges.append(message)
self.__athenaMP_needEvents -= 1
self.__insertedMessages += 1
if not "No more events" in message:
eventRangeID = message['eventRangeID']
if not eventRangeID in self.__eventRangesStatus:
self.__eventRangesStatus[eventRangeID] = {}
self.__eventRangesStatus[eventRangeID]['status'] = 'new'
#eventRanges= eval(message)
#for eventRange in eventRanges:
# eventRangeID = eventRange['eventRangeID']
# if not eventRangeID in self.__eventRangesStatus:
# self.__eventRangesStatus[eventRangeID] = {}
# self.__eventRangesStatus[eventRangeID]['status'] = 'new'
else:
self.__athenaMP_needEvents = 0
self.__noMoreEvents = True
def insertEventRanges(self, messages):
self.__log.debug("Rank %s: insertEventRanges to ESJobManager: %s" % (self.__rank, messages))
for message in messages:
self.__athenaMP_needEvents -= 1
self.__insertedMessages += 1
self.__eventRanges.append(message)
if not "No more events" in message:
eventRangeID = message['eventRangeID']
if not eventRangeID in self.__eventRangesStatus:
self.__eventRangesStatus[eventRangeID] = {}
self.__eventRangesStatus[eventRangeID]['status'] = 'new'
else:
self.__athenaMP_needEvents = 0
self.__noMoreEvents = True
def getEventRanges(self):
if len(self.__eventRanges) > 0:
eventRanges = self.__eventRanges.pop(0)
self.__log.debug("Rank %s: getEventRanges from ESJobManager(will send to AthenaMP): %s" % (self.__rank, eventRanges))
return eventRanges
return None
def sendEventRangeToAthenaMP(self, eventRanges):
block_sig(signal.SIGTERM)
if "No more events" in eventRanges:
self.__log.debug("Rank %s: sendEventRangeToAthenaMP: %s" % (self.__rank, eventRanges))
self.__messageInQueue.put(eventRanges)
else:
if type(eventRanges) is not list:
eventRanges = [eventRanges]
eventRangeFormat = json.dumps(eventRanges)
self.__log.debug("Rank %s: sendEventRangeToAthenaMP: %s" % (self.__rank, eventRangeFormat))
self.__messageInQueue.put(eventRangeFormat)
self.__totalQueuedEvents += 1
for eventRange in eventRanges:
eventRangeID = eventRange['eventRangeID']
self.__eventRangesStatus[eventRangeID]['status'] = 'processing'
#eventRanges= eval(eventRange)
#for eventRange in eventRanges:
# eventRangeID = eventRange['eventRangeID']
# self.__eventRangesStatus[eventRangeID]['status'] = 'processing'
self.__athenaMP_isReady = False
unblock_sig(signal.SIGTERM)
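# Example (a sketch; the event range fields other than eventRangeID are hypothetical):
# a single range such as {'eventRangeID': '1-2-3', 'startEvent': 1, 'lastEvent': 1}
# is wrapped in a list and serialised with json.dumps before being queued, e.g.
# '[{"eventRangeID": "1-2-3", "startEvent": 1, "lastEvent": 1}]',
# whereas the literal "No more events" message is forwarded to AthenaMP unchanged.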
def getOutput(self):
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Constants for wizards."""
import re
from typing import List, Optional, Pattern, Union
class Sources:
"""Defaults for wizard source argument."""
CSV_STR: str = "csv text string"
CSV_PATH: str = "csv file {path}"
TEXT_STR: str = "text string"
TEXT_PATH: str = "text file {path}"
LOD: str = "list of dictionaries"
JSON_STR: str = "json string"
JSON_PATH: str = "json file {path}"
class Templates:
"""Query builder templates."""
LEFT: str = "({query}"
"""For building a query with a left parentheses"""
RIGHT: str = "{query})"
"""For building a query with a right parentheses"""
NOT: str = "not {query}"
"""For building a query with a NOT operator"""
OR: str = "or {query}"
"""For building a query with an OR operator"""
AND: str = "and {query}"
"""For building a query with an AND operator"""
COMPLEX: str = "({field} == match([{sub_queries}]))"
"""For building a query for complex fields"""
SUBS: str = " and "
"""Joiner for sub fields in a complex field"""
class Fields:
"""Keys and arguments for field schemas."""
NAME: str = "name"
EXPR_TYPE: str = "expr_field_type"
ANAME: str = "adapter_name"
SUBS: str = "sub_fields"
IS_ALL: str = "is_all"
IS_DETAILS: str = "is_details"
IS_COMPLEX: str = "is_complex"
class Results:
"""Keys for results returned from wizards."""
EXPRS: str = "expressions"
QUERY: str = "query"
class Patterns:
"""Regular expression patterns for validation of values."""
FIELD_VALID: Pattern = re.compile(
r"""(?ix) # case insensitive and verbose
([^a-z0-9:._\-]) # contains characters that are not one of: a-z 0-9 : . _ -
""",
)
FIELD_FIRST_ALPHA: Pattern = re.compile(
r"""(?ix) # case insensitive and verbose
(^[^a-zA-Z]) # starts with characters that are not one of: a-z
"""
)
OP_ALPHA: Pattern = re.compile(
r"""(?ix) # case insensitive and verbose
([^a-z_\-]) # contains characters that are not one of: a-z _ -
"""
)
FLAGS: Pattern = re.compile(
r"""(?ix) # case insensitive and verbose
(?P<flags>[^a-z0-9]*)? # capture optional flags at beginning
(?P<value>.*) # capture the rest as the value
"""
)
FIELD: List[Pattern] = [FIELD_VALID, FIELD_FIRST_ALPHA]
OP: List[Pattern] = [OP_ALPHA]
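# Example (a sketch): Patterns.FLAGS.match("! hostname contains test") captures
# flags == "! " and value == "hostname contains test". Patterns.FIELD_VALID.search("os.type")
# returns None because every character is allowed, while "os.type!" matches on the "!"
# (i.e. it contains a disallowed character).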
class Flags:
"""Flag values that can be used in entries."""
NOT: str = "!"
AND: str = "&"
OR: str = "|"
LEFTB: str = "("
RIGHTB: str = ")"
FLAGS: dict = {
AND: "Use and instead of or (default)",
OR: f"Use or instead of and (overrides {AND})",
NOT: "Use not",
LEFTB: "Open a parentheses",
RIGHTB: "Close a parentheses (can also be at end of entry)",
}
LFMT: str = "[" + " ".join(list(FLAGS)) + "]"
RFMT: str = f"[{RIGHTB}]"
FMT_TEXT: str = "\n# " + "\n# ".join([f"{k} {v}" for k, v in FLAGS.items()])
FMT_CSV: str = ", ".join([f"{k} {v}" for k, v in FLAGS.items()])
class Entry:
"""Entry keys and split values."""
SRC: str = "source"
WEIGHT: str = "bracket_weight"
FLAGS: str = "flags"
VALUE: str = "value"
TYPE: str = "type"
REQ: List[str] = [VALUE, TYPE]
"""Required keys for entries"""
SPLIT: str = " "
"""String to split on for expressions"""
CSPLIT: str = " // "
"""String to split on for complex expressions"""
class EntrySq:
"""Entry keys for saved query types."""
NAME: str = "name"
DESC: str = "description"
TAGS: str = "tags"
FIELDS: str = "fields"
DEFAULT: str = "default"
FDEF: str = "fields_default"
FMAN: str = "fields_manual"
REQ: List[str] = [*Entry.REQ]
"""Required keys for saved query types"""
OPT: dict = {DESC: "", TAGS: "", FIELDS: DEFAULT}
"""Optional keys and their defaults for saved query types"""
class Types:
"""Types of entries."""
SIMPLE: str = "simple"
COMPLEX: str = "complex"
SAVED_QUERY: str = "saved_query"
FILE: str = "file"
DICT: List[str] = [SIMPLE, COMPLEX]
"""required keys for the base Wizard class."""
TEXT: List[str] = [*DICT]
"""required keys for the WizardText class."""
SQ: List[str] = [*DICT, SAVED_QUERY]
"""Required keys for the WizardCsv class."""
CLI: List[str] = [*DICT]
"""Required keys for the WizardCsv class."""
class Docs:
"""Documentation strings for wizards."""
SUB_OPT: str = f"[{Entry.CSPLIT} ...]"
OPVAL: str = "FIELD OPERATOR VALUE"
FMT_SIMPLE: str = f"{Flags.LFMT} {OPVAL} {Flags.RFMT}"
FMT_COMPLEX: str = f"{Flags.LFMT} COMPLEX-FIELD{Entry.CSPLIT}SUB-{OPVAL}{SUB_OPT} {Flags.RFMT}"
DESC_SIMPLE: str = "Filter entry for simple fields"
DESC_COMPLEX: str = "Filter entry for complex fields and their sub-fields"
EX_SIMPLE1: str = f"{Flags.LEFTB} hostname contains test"
EX_SIMPLE2: str = f"{Flags.NOT} hostname contains internal {Flags.RIGHTB}"
EX_SIMPLE3: str = f"{Flags.LEFTB} os.type equals windows"
EX_SIMPLE4: str = f"{Flags.OR} os.type equals os x {Flags.RIGHTB}"
EX_COMPLEX1: str = (
f"installed_software{Entry.CSPLIT}name contains chrome"
f"{Entry.CSPLIT}version earlier_than 82"
)
EX_TEXT: str = f"""{Types.SIMPLE:<8} {EX_SIMPLE1}
{Types.SIMPLE:<8} {EX_SIMPLE2}
{Types.SIMPLE:<8} {EX_SIMPLE3}
{Types.SIMPLE:<8} {EX_SIMPLE4}
{Types.COMPLEX:<8} {EX_COMPLEX1}
"""
EX_DICT: str = f"""[
{{
"{Entry.TYPE}": "{Types.SIMPLE}",
"{Entry.VALUE}": "{EX_SIMPLE1}"
}},
{{
"{Entry.TYPE}": "{Types.SIMPLE}",
"{Entry.VALUE}": "{EX_SIMPLE2}"
}},
{{
"{Entry.TYPE}": "{Types.SIMPLE}",
"{Entry.VALUE}": "{EX_SIMPLE3}"
}},
{{
"{Entry.TYPE}": "{Types.SIMPLE}",
"{Entry.VALUE}": "{EX_SIMPLE4}"
}},
{{
"{Entry.TYPE}": "{Types.COMPLEX}",
"{Entry.VALUE}": "{EX_COMPLEX1}"
}}
]
"""
EX_FIELDS: str = "os.distribution,os.os_str,aws:aws_device_type"
EX_CSV: str = f"""
{Entry.TYPE},{Entry.VALUE},{EntrySq.DESC},{EntrySq.TAGS},{EntrySq.FIELDS}
"# If {Entry.TYPE} column is empty or begins with # it is ignored",,,,
"# {Entry.TYPE} of {Types.SIMPLE} or {Types.COMPLEX} will belong to the {Types.SAVED_QUERY} they are under",,,,
"# Column descriptions for {Entry.TYPE} of {Types.SAVED_QUERY}","Name of Saved Query","Description of Saved Query","Tags to apply to Saved Query","Columns to display in Saved Query"
"# Column descriptions for {Entry.TYPE} of {Types.SIMPLE}","Format -- [] represents optional items: {FMT_SIMPLE}","Description: {DESC_SIMPLE}","Only uses columns {Entry.TYPE} and {Entry.VALUE}",
"# Column descriptions for {Entry.TYPE} of {Types.COMPLEX}","Format -- [] represents optional items: {FMT_COMPLEX}","Description: {DESC_COMPLEX}","Only uses columns {Entry.TYPE} and {Entry.VALUE}",
"# Value Flags for {Entry.TYPE} of {Types.SIMPLE} or {Types.COMPLEX}","{Flags.FMT_CSV}",,,
"{Types.SAVED_QUERY}","example 1","Filters, default fields, custom fields","example,tag1,tag2","{EX_FIELDS},{EntrySq.DEFAULT},os.build"
"{Types.SIMPLE}","{EX_SIMPLE1}",,,
"{Types.SIMPLE}","{EX_SIMPLE2}",,,
"{Types.SIMPLE}","{EX_SIMPLE3}",,,
"{Types.SIMPLE}","{EX_SIMPLE4}",,,
"{Types.SAVED_QUERY}","example 2","No filters, no default fields, custom fields","example,tag3,tag4","{EX_FIELDS}"
"{Types.SAVED_QUERY}","example 3","No filters, default fields, no custom fields","example,tag5,tag6",
""" # noqa: E501
TEXT: str = f"""
# Example:
{EX_TEXT}
# Format -- [] represents optional items:
{Types.SIMPLE:<8} {FMT_SIMPLE}
# Description: {DESC_SIMPLE}
{Types.COMPLEX:<8} {FMT_COMPLEX}
# Description: {DESC_COMPLEX}
# Flags:{Flags.FMT_TEXT}
"""
DICT: str = f"""
# Example:
{EX_DICT}
# Format -- [] represents optional items:
# "{Entry.TYPE}": "{Types.SIMPLE}, "{Entry.VALUE}": "{FMT_SIMPLE}"
# Description: "{DESC_SIMPLE}"
# "{Entry.TYPE}": "{Types.COMPLEX}", "{Entry.VALUE}": "{FMT_COMPLEX}"
# Description: "{DESC_COMPLEX}"
# Flags:{Flags.FMT_TEXT}
"""
CSV: str = f"Example:\n{EX_CSV}"
class Expr:
"""Keys for GUI expressions."""
BRACKET_LEFT: str = "leftBracket"
BRACKET_RIGHT: str = "rightBracket"
BRACKET_WEIGHT: str = "bracketWeight"
CHILDREN: str = "children"
CONDITION: str = "condition"
CONTEXT: str = "context"
EXPR: str = "expression"
FIELD: str = "field"
FIELD_TYPE: str = "fieldType"
FILTER: str = "filter"
FILTER_ADAPTERS: str = "filteredAdapters"
IDX: str = "i"
NOT: str = "not"
OP_COMP: str = "compOp"
OP_LOGIC: str = "logicOp"
VALUE: str = "value"
CONTEXT_OBJ: str = "OBJ"
OP_AND: str = "and"
OP_OR: str = "or"
OP_IDX0: str = ""
@classmethod
def get_query(cls, exprs: List[dict]) -> str:
"""Get the query for a list of GUI expressions.
Args:
exprs: list of expressions to build query from
"""
return " ".join([x[cls.FILTER] for x in exprs])
@classmethod
def get_subs_query(cls, sub_exprs: List[dict]) -> str:
"""Get the complex query for a list of GUI child expressions.
Args:
sub_exprs: list of children of a complex expression to build query from
"""
return Templates.SUBS.join([x[cls.CONDITION] for x in sub_exprs])
@classmethod
def build(
cls,
entry: dict,
query: str,
field: dict,
idx: int,
op_comp: str,
value: Optional[Union[int, str, bool]] = None,
is_complex: bool = False,
children: Optional[List[dict]] = None,
) -> dict:
"""Build an expression for the GUI to understand the query.
Args:
entry: entry to build expression from
query: AQL string
field: schema of field
idx: index of this expression
value: raw expression value
op_comp: comparison operator
is_complex: build an expression for a complex filter
children: children of a complex filter
"""
flags = entry.get(Entry.FLAGS, []) or []
weight = entry.get(Entry.WEIGHT, 0)
is_right = Flags.RIGHTB in flags
is_left = Flags.LEFTB in flags
is_not = Flags.NOT in flags
is_or = Flags.OR in flags
if is_not:
query = Templates.NOT.format(query=query)
if is_right:
query = Templates.RIGHT.format(query=query)
if is_left:
query = Templates.LEFT.format(query=query)
if idx:
if is_or:
query = Templates.OR.format(query=query)
op_logic = cls.OP_OR
else:
query = Templates.AND.format(query=query)
op_logic = cls.OP_AND
else:
op_logic = cls.OP_IDX0
expression = {}
expression[cls.BRACKET_WEIGHT] = weight
expression[cls.CHILDREN] = children or [cls.build_child()]
expression[cls.OP_COMP] = op_comp
expression[cls.FIELD] = field[Fields.NAME]
expression[cls.FIELD_TYPE] = field[Fields.EXPR_TYPE]
expression[cls.FILTER] = query
expression[cls.FILTER_ADAPTERS] = None
expression[cls.BRACKET_LEFT] = is_left
expression[cls.OP_LOGIC] = op_logic
expression[cls.NOT] = is_not
expression[cls.BRACKET_RIGHT] = is_right
expression[cls.VALUE] = value
if is_complex:
expression[cls.CONTEXT] = cls.CONTEXT_OBJ
return expression
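# Example (a sketch; the field schema and AQL below are hypothetical): for idx=1 and an
# entry whose flags are ["!"], calling
# Expr.build(entry=entry, query='(hostname == "test")',
#            field={"name": "hostname", "expr_field_type": "axonius"},
#            idx=1, op_comp="equals", value="test")
# returns a dict whose "filter" is 'and not (hostname == "test")', with
# "logicOp" == "and", "not" == True, "compOp" == "equals" and "field" == "hostname".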
@classmethod
def build_child(
cls,
query: str = "",
op_comp: str = "",
field: str = "",
value: Optional[Union[int, str, bool]] = None,
idx: int = 0,
| |
= ""
try:
rule_description = rule_params["Description"]
except KeyError:
rule_description = rule_name
my_params = [
{
'ParameterKey': 'RuleName',
'ParameterValue': rule_name,
},
{
'ParameterKey': 'RuleLambdaName',
'ParameterValue': self.__get_lambda_name(rule_name, rule_params),
},
{
'ParameterKey': 'Description',
'ParameterValue': rule_description,
},
{
'ParameterKey': 'LambdaRoleArn',
'ParameterValue': lambdaRoleArn,
},
{
'ParameterKey': 'BoundaryPolicyArn',
'ParameterValue': boundaryPolicyArn,
},
{
'ParameterKey': 'SourceBucket',
'ParameterValue': code_bucket_name,
},
{
'ParameterKey': 'SourcePath',
'ParameterValue': s3_dst,
},
{
'ParameterKey': 'SourceRuntime',
'ParameterValue': self.__get_runtime_string(rule_params),
},
{
'ParameterKey': 'SourceEvents',
'ParameterValue': source_events,
},
{
'ParameterKey': 'SourcePeriodic',
'ParameterValue': source_periodic,
},
{
'ParameterKey': 'SourceInputParameters',
'ParameterValue': json.dumps(combined_input_parameters),
},
{
'ParameterKey': 'SourceHandler',
'ParameterValue': self.__get_handler(rule_name, rule_params)
},
{
'ParameterKey': 'Timeout',
'ParameterValue': str(self.args.lambda_timeout)
}]
layers = self.__get_lambda_layers(session,self.args,params)
if self.args.lambda_layers:
additional_layers = self.args.lambda_layers.split(',')
layers.extend(additional_layers)
if layers:
my_params.append({
'ParameterKey': 'Layers',
'ParameterValue': ",".join(layers)
})
if self.args.lambda_security_groups and self.args.lambda_subnets:
my_params.append({
'ParameterKey': 'SecurityGroupIds',
'ParameterValue': self.args.lambda_security_groups
})
my_params.append({
'ParameterKey': 'SubnetIds',
'ParameterValue': self.args.lambda_subnets
})
#create json of CFN template
cfn_body = os.path.join(path.dirname(__file__), 'template', "configRuleOrganization.json")
template_body = open(cfn_body, "r").read()
json_body = json.loads(template_body)
#debugging
#print(json.dumps(json_body, indent=2))
#deploy config rule
my_cfn = my_session.client('cloudformation')
try:
my_stack_name = self.__get_stack_name_from_rule_name(rule_name)
my_stack = my_cfn.describe_stacks(StackName=my_stack_name)
#If we've gotten here, stack exists and we should update it.
print ("Updating CloudFormation Stack for " + rule_name)
try:
cfn_args = {
'StackName': my_stack_name,
'TemplateBody': json.dumps(json_body),
'Parameters': my_params,
'Capabilities': ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']
}
# If no tags key is specified, or if the tags dict is empty
if cfn_tags is not None:
cfn_args['Tags'] = cfn_tags
response = my_cfn.update_stack(**cfn_args)
except ClientError as e:
if e.response['Error']['Code'] == 'ValidationError':
if 'No updates are to be performed.' in str(e):
#No changes made to Config rule definition, so CloudFormation won't do anything.
print("No changes to Config Rule.")
else:
#Something unexpected has gone wrong. Emit an error and bail.
print('Validation Error on CFN')
print(json.dumps(cfn_args))
print(e)
return 1
else:
raise
my_lambda_arn = self.__get_lambda_arn_for_stack(my_stack_name)
print("Publishing Lambda code...")
my_lambda_client = my_session.client('lambda')
my_lambda_client.update_function_code(
FunctionName=my_lambda_arn,
S3Bucket=code_bucket_name,
S3Key=s3_dst,
Publish=True
)
print("Lambda code updated.")
except ClientError as e:
#If we're in the exception, the stack does not exist and we should create it.
print ("Creating CloudFormation Stack for " + rule_name)
cfn_args = {
'StackName': my_stack_name,
'TemplateBody': json.dumps(json_body),
'Parameters': my_params,
'Capabilities': ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']
}
if cfn_tags is not None:
cfn_args['Tags'] = cfn_tags
response = my_cfn.create_stack(**cfn_args)
#wait for changes to propagate.
self.__wait_for_cfn_stack(my_cfn, my_stack_name)
#CloudFormation does not currently support tagging organization Config rules.
if cfn_tags is not None and len(cfn_tags) > 0:
print("WARNING: Tagging is not supported for organization config rules. Only the cloudformation template will be tagged.")
print('Config deploy complete.')
return 0
def export(self):
self.__parse_export_args()
# get the rule names
rule_names = self.__get_rule_list_for_command("export")
# run the export code
print("Running export")
for rule_name in rule_names:
rule_params, cfn_tags = self.__get_rule_parameters(rule_name)
if 'SourceIdentifier' in rule_params:
print("Found Managed Rule, Ignored.")
print("Export support only Custom Rules.")
continue
source_events = []
if 'SourceEvents' in rule_params:
source_events = [rule_params['SourceEvents']]
source_periodic = "NONE"
if 'SourcePeriodic' in rule_params:
source_periodic = rule_params['SourcePeriodic']
combined_input_parameters = {}
if 'InputParameters' in rule_params:
combined_input_parameters.update(json.loads(rule_params['InputParameters']))
if 'OptionalParameters' in rule_params:
# Remove empty parameters
keys_to_delete = []
optional_parameters_json = json.loads(rule_params['OptionalParameters'])
for key, value in optional_parameters_json.items():
if not value:
keys_to_delete.append(key)
for key in keys_to_delete:
del optional_parameters_json[key]
combined_input_parameters.update(optional_parameters_json)
print("Found Custom Rule.")
s3_src = ""
s3_dst = self.__package_function_code(rule_name, rule_params)
layers = []
rdk_lib_version = "0"
my_session = self.__get_boto_session()
layers = self.__get_lambda_layers(my_session, self.args, rule_params)
if self.args.lambda_layers:
additional_layers = self.args.lambda_layers.split(',')
layers.extend(additional_layers)
subnet_ids = []
security_group_ids = []
if self.args.lambda_security_groups:
security_group_ids = self.args.lambda_security_groups.split(",")
if self.args.lambda_subnets:
subnet_ids = self.args.lambda_subnets.split(",")
lambda_role_arn = "NONE"
if self.args.lambda_role_arn:
print("Existing IAM Role provided: " + self.args.lambda_role_arn)
lambda_role_arn = self.args.lambda_role_arn
my_params = {
"rule_name": rule_name,
"rule_lambda_name": self.__get_lambda_name(rule_name, rule_params),
"source_runtime": self.__get_runtime_string(rule_params),
"source_events": source_events,
"source_periodic": source_periodic,
"source_input_parameters": json.dumps(combined_input_parameters),
"source_handler": self.__get_handler(rule_name, rule_params),
"subnet_ids": subnet_ids,
"security_group_ids": security_group_ids,
"lambda_layers": layers,
"lambda_role_arn": lambda_role_arn,
"lambda_timeout": str(self.args.lambda_timeout)
}
params_file_path = os.path.join(os.getcwd(), rules_dir, rule_name, rule_name.lower() + ".tfvars.json")
parameters_file = open(params_file_path, 'w')
json.dump(my_params, parameters_file, indent=4)
parameters_file.close()
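# Example (a sketch; rule name, resource type and values are hypothetical, and rules_dir
# is assumed to be "rules"): for a rule "MyRule" the block above writes
# rules/MyRule/myrule.tfvars.json containing keys such as
# {"rule_name": "MyRule", "source_events": ["AWS::EC2::Instance"],
#  "source_periodic": "NONE", "lambda_role_arn": "NONE", "lambda_timeout": "60", ...}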
# copy the Terraform rule and variables templates for the requested format/version
print(self.args.format + " version: "+ self.args.version)
tf_file_body = os.path.join(path.dirname(__file__), 'template', self.args.format, self.args.version,
"config_rule.tf")
tf_file_path = os.path.join(os.getcwd(), rules_dir, rule_name, rule_name.lower() + "_rule.tf")
shutil.copy(tf_file_body, tf_file_path)
variables_file_body = os.path.join(path.dirname(__file__), 'template', self.args.format, self.args.version,
"variables.tf")
variables_file_path = os.path.join(os.getcwd(), rules_dir, rule_name, rule_name.lower() + "_variables.tf")
shutil.copy(variables_file_body, variables_file_path)
print("Export completed.This will generate three .tf files.")
def test_local(self):
print ("Running local test!")
tests_successful = True
args = self.__parse_test_args()
#Construct our list of rules to test.
rule_names = self.__get_rule_list_for_command()
for rule_name in rule_names:
rule_params, rule_tags = self.__get_rule_parameters(rule_name)
if rule_params['SourceRuntime'] not in ('python3.6', 'python3.6-lib', 'python3.7', 'python3.7-lib', 'python3.8', 'python3.8-lib', 'python3.9', 'python3.9-lib'):
print ("Skipping " + rule_name + " - Runtime not supported for local testing.")
continue
print("Testing "+rule_name)
test_dir = os.path.join(os.getcwd(), rules_dir, rule_name)
print("Looking for tests in " + test_dir)
if args.verbose == True:
results = unittest.TextTestRunner(buffer=False, verbosity=2).run(self.__create_test_suite(test_dir))
else:
results = unittest.TextTestRunner(buffer=True, verbosity=2).run(self.__create_test_suite(test_dir))
print (results)
tests_successful = tests_successful and results.wasSuccessful()
return int(not tests_successful)
def test_remote(self):
print ("Running test_remote!")
self.__parse_test_args()
#Construct our list of rules to test.
rule_names = self.__get_rule_list_for_command()
#Create our Lambda client.
my_session = self.__get_boto_session()
my_lambda_client = my_session.client('lambda')
for rule_name in rule_names:
print("Testing "+rule_name)
#Get CI JSON from either the CLI or one of the stored templates.
my_cis = self.__get_test_CIs(rule_name)
my_parameters = {}
if self.args.test_parameters:
my_parameters = json.loads(self.args.test_parameters)
for my_ci in my_cis:
print ("\t\tTesting CI " + my_ci['resourceType'])
#Generate test event from templates
test_event = json.load(open(os.path.join(path.dirname(__file__), 'template', event_template_filename), 'r'), strict=False)
my_invoking_event = json.loads(test_event['invokingEvent'])
my_invoking_event['configurationItem'] = my_ci
my_invoking_event['notificationCreationTime'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z')
test_event['invokingEvent'] = json.dumps(my_invoking_event)
test_event['ruleParameters'] = json.dumps(my_parameters)
#Get the Lambda function associated with the Rule
stack_name = self.__get_stack_name_from_rule_name(rule_name)
my_lambda_arn = self.__get_lambda_arn_for_stack(stack_name)
#Call Lambda function with test event.
result = my_lambda_client.invoke(
FunctionName=my_lambda_arn,
InvocationType='RequestResponse',
LogType='Tail',
Payload=json.dumps(test_event)
)
#If there's an error, dump execution logs to the terminal; otherwise print the value returned by the Lambda function.
if 'FunctionError' in result:
print(base64.b64decode(str(result['LogResult'])))
else:
print("\t\t\t" + result['Payload'].read())
if self.args.verbose:
print(base64.b64decode(str(result['LogResult'])))
return 0
def status(self):
print ("Running status!")
return 0
def sample_ci(self):
self.args = get_sample_ci_parser().parse_args(self.args.command_args, self.args)
my_test_ci = TestCI(self.args.ci_type)
print(json.dumps(my_test_ci.get_json(), indent=4))
def logs(self):
self.args = get_logs_parser().parse_args(self.args.command_args, self.args)
self.args.rulename = self.__clean_rule_name(self.args.rulename)
my_session = self.__get_boto_session()
cw_logs = my_session.client('logs')
log_group_name = self.__get_log_group_name()
#Retrieve the last number of log events as specified by the user.
try:
log_streams = cw_logs.describe_log_streams(
logGroupName = log_group_name,
orderBy = 'LastEventTime',
descending = True,
limit = int(self.args.number) #This is the worst-case scenario if there is only one event per stream
)
#Sadly we can't just use filter_log_events, since we don't know the timestamps yet and filter_log_events doesn't appear to support ordering.
my_events = self.__get_log_events(cw_logs, log_streams, int(self.args.number))
latest_timestamp = 0
if (my_events is None):
print("No Events to display.")
return(0)
for event in my_events:
if event['timestamp'] > latest_timestamp:
latest_timestamp = event['timestamp']
self.__print_log_event(event)
if self.args.follow:
try:
while True:
#Wait 2 seconds
time.sleep(2)
#Get all events between now and the timestamp of the most recent event.
my_new_events = cw_logs.filter_log_events(
logGroupName = log_group_name,
startTime = latest_timestamp+1,
endTime = int(time.time())*1000,
interleaved = True)
for event in my_new_events['events']:
if 'timestamp' in event:
#Get the timestamp on the most recent event.
if event['timestamp'] > latest_timestamp:
latest_timestamp = event['timestamp']
#Print the event.
self.__print_log_event(event)
except KeyboardInterrupt as k:
sys.exit(0)
except cw_logs.exceptions.ResourceNotFoundException as e:
print(e.response['Error']['Message'])
def rulesets(self):
self.args = get_rulesets_parser().parse_args(self.args.command_args, self.args)
if self.args.subcommand in ['add','remove'] and (not self.args.ruleset or not self.args.rulename):
print("You must specify a ruleset name and a rule for the `add` and `remove` commands.")
return 1
if self.args.subcommand == "list":
self.__list_rulesets()
elif self.args.subcommand == "add":
self.__add_ruleset_rule(self.args.ruleset, self.args.rulename)
elif self.args.subcommand == "remove":
self.__remove_ruleset_rule(self.args.ruleset, self.args.rulename)
else :
print("Unknown subcommand.")
def create_terraform_template(self):
self.args = get_create_rule_template_parser().parse_args(self.args.command_args, self.args)
if self.args.rulesets:
self.args.rulesets = self.args.rulesets.split(',')
print ("Generating Terraform template!")
template = self.__generate_terraform_shell(self.args)
rule_names = self.__get_rule_list_for_command()
for rule_name in rule_names:
rule_input_params = self.__generate_rule_terraform_params(rule_name)
rule_def = self.__generate_rule_terraform(rule_name)
template.append(rule_input_params)
template.append(rule_def)
output_file = open(self.args.output_file, 'w')
output_file.write(json.dumps(template, indent=2))
print("CloudFormation template written to " + self.args.output_file)
def create_rule_template(self):
self.args = get_create_rule_template_parser().parse_args(self.args.command_args, self.args)
if self.args.rulesets:
self.args.rulesets = self.args.rulesets.split(',')
script_for_tag=""
print ("Generating CloudFormation template!")
#First add the common elements - description, parameters, and resource section header
template = {}
template["AWSTemplateFormatVersion"] = "2010-09-09"
template["Description"] = "AWS CloudFormation template to create custom AWS Config rules. You will be billed for the AWS resources used if you create a stack from this template."
optional_parameter_group = {
"Label": { "default": "Optional" },
"Parameters": []
}
required_parameter_group = {
"Label": { "default": "Required" },
"Parameters": []
}
# Used at inference time to load the model definition along with its weights.
# Use it in the following manner:
"""
from UNET_model_def import *
model_wghts = "{}/<my_model_weights_{}>.h5".format(data_dir, HOLDOUT)
model = <my_model_name>((None,None,None,1)) #giving the model definition
model.load_weights(model_wghts)
"""
import keras
data_format = "channels_last"
concat_axis = -1
def unet3D_modelB(input_img, use_upsampling=True, n_out=1, dropout=0.2,
print_summary = False):
"""
3D U-Net model
"""
print("3D U-Net Segmentation")
# Set keras learning phase to train
keras.backend.set_learning_phase(True)
# Don"t initialize variables on the fly
keras.backend.manual_variable_initialization(False)
inputs = keras.layers.Input(shape=input_img, name="Input_Image")
# Use the params below to apply batch normalization and ReLU activation as separate layers
params = dict(kernel_size=(3, 3, 3), activation=None,
padding="same", data_format=data_format,
kernel_initializer="he_uniform")
# params = dict(kernel_size=(3, 3, 3), activation="relu",
# padding="same", data_format=data_format,
# kernel_initializer="he_uniform")
conv1 = keras.layers.Conv3D(name="conv1a", filters=32, **params)(inputs)
conv1 = keras.layers.BatchNormalization(axis =-1)(conv1)
conv1 = keras.layers.Activation('relu')(conv1)
conv1 = keras.layers.Conv3D(name="conv1b", filters=64, **params)(conv1)
conv1 = keras.layers.BatchNormalization(axis =-1)(conv1)
conv1 = keras.layers.Activation('relu')(conv1)
pool1 = keras.layers.MaxPooling3D(name="pool1", pool_size=(2, 2, 2))(conv1)
conv2 = keras.layers.Conv3D(name="conv2a", filters=64, **params)(pool1)
conv2 = keras.layers.BatchNormalization(axis =-1)(conv2)
conv2 = keras.layers.Activation('relu')(conv2)
conv2 = keras.layers.Conv3D(name="conv2b", filters=128, **params)(conv2)
conv2 = keras.layers.BatchNormalization(axis =-1)(conv2)
conv2 = keras.layers.Activation('relu')(conv2)
pool2 = keras.layers.MaxPooling3D(name="pool2", pool_size=(2, 2, 2))(conv2)
conv3 = keras.layers.Conv3D(name="conv3a", filters=128, **params)(pool2)
conv3 = keras.layers.BatchNormalization(axis =-1)(conv3)
conv3 = keras.layers.Activation('relu')(conv3)
conv3 = keras.layers.Dropout(dropout)(conv3) ### Trying dropout layers earlier on, as indicated in the paper
conv3 = keras.layers.Conv3D(name="conv3b", filters=256, **params)(conv3)
conv3 = keras.layers.BatchNormalization(axis =-1)(conv3)
conv3 = keras.layers.Activation('relu')(conv3)
pool3 = keras.layers.MaxPooling3D(name="pool3", pool_size=(2, 2, 2))(conv3)
conv4 = keras.layers.Conv3D(name="conv4a", filters=256, **params)(pool3)
conv4 = keras.layers.BatchNormalization(axis =-1)(conv4)
conv4 = keras.layers.Activation('relu')(conv4)
conv4 = keras.layers.Dropout(dropout)(conv4) ### Trying dropout layers earlier on, as indicated in the paper
conv4 = keras.layers.Conv3D(name="conv4b", filters=512, **params)(conv4)
conv4 = keras.layers.BatchNormalization(axis =-1)(conv4)
conv4 = keras.layers.Activation('relu')(conv4)
if use_upsampling:
up4 = keras.layers.concatenate([keras.layers.UpSampling3D(name="up4", size=(2, 2, 2))(conv4), conv3], axis=concat_axis)
else:
up4 = keras.layers.concatenate([keras.layers.Conv3DTranspose(name="transConv4", filters=512, data_format=data_format,
kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv4), conv3], axis=concat_axis)
conv5 = keras.layers.Conv3D(name="conv5a", filters=256, **params)(up4)
conv5 = keras.layers.BatchNormalization(axis =-1)(conv5)
conv5 = keras.layers.Activation('relu')(conv5)
conv5 = keras.layers.Conv3D(name="conv5b", filters=256, **params)(conv5)
conv5 = keras.layers.BatchNormalization(axis =-1)(conv5)
conv5 = keras.layers.Activation('relu')(conv5)
if use_upsampling:
up5 = keras.layers.concatenate([keras.layers.UpSampling3D(name="up5", size=(2, 2, 2))(conv5), conv2], axis=concat_axis)
else:
up5 = keras.layers.concatenate([keras.layers.Conv3DTranspose(name="transConv5", filters=256, data_format=data_format,
kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv5), conv2], axis=concat_axis)
conv6 = keras.layers.Conv3D(name="conv6a", filters=128, **params)(up5)
conv6 = keras.layers.BatchNormalization(axis =-1)(conv6)
conv6 = keras.layers.Activation('relu')(conv6)
conv6 = keras.layers.Conv3D(name="conv6b", filters=128, **params)(conv6)
conv6 = keras.layers.BatchNormalization(axis =-1)(conv6)
conv6 = keras.layers.Activation('relu')(conv6)
if use_upsampling:
up6 = keras.layers.concatenate([keras.layers.UpSampling3D(name="up6", size=(2, 2, 2))(conv6), conv1], axis=concat_axis)
else:
up6 = keras.layers.concatenate([keras.layers.Conv3DTranspose(name="transConv6", filters=128, data_format=data_format,
kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv6), conv1], axis=concat_axis)
conv7 = keras.layers.Conv3D(name="conv7a", filters=64, **params)(up6)
conv7 = keras.layers.BatchNormalization(axis =-1)(conv7)
conv7 = keras.layers.Activation('relu')(conv7)
conv7 = keras.layers.Conv3D(name="conv7b", filters=32, **params)(conv7)
conv7 = keras.layers.BatchNormalization(axis =-1)(conv7)
conv7 = keras.layers.Activation('relu')(conv7)
pred_msk = keras.layers.Conv3D(name="PredictionMask", filters=n_out, kernel_size=(1, 1, 1),
data_format=data_format, activation="sigmoid")(conv7)
# The classification branch was originally taken from the conv7 feature maps,
# but global average pooling on raw feature maps did not help, so it is taken from pred_msk instead.
class_pred = keras.layers.GlobalAveragePooling3D(name='PredictionClass')(pred_msk)
model = keras.models.Model(inputs=[inputs], outputs=[pred_msk,class_pred])
if print_summary:
#model = keras.models.Model(inputs=[inputs], outputs=[class_pred])
model.summary()
# return pred
return model
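# Example usage (a sketch; the input size, optimizer and losses below are assumptions,
# not taken from this file, and the spatial dims just need to be divisible by 8 for the
# three pooling/upsampling stages):
# model = unet3D_modelB(input_img=(144, 144, 144, 1), use_upsampling=True, n_out=1)
# model.compile(optimizer="adam",
#               loss={"PredictionMask": "binary_crossentropy",
#                     "PredictionClass": "binary_crossentropy"})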
def unet3D_modelC(input_img, use_upsampling=True, n_out=1, dropout=0.2,
print_summary = False):
"""
3D U-Net model
"""
print("3D U-Net Segmentation")
# Set keras learning phase to train
keras.backend.set_learning_phase(True)
# Don"t initialize variables on the fly
keras.backend.manual_variable_initialization(False)
inputs = keras.layers.Input(shape=input_img, name="Input_Image")
# Use the params below to apply batch normalization and ReLU activation as separate layers
params = dict(kernel_size=(3, 3, 3), activation=None,
padding="same", data_format=data_format,
kernel_initializer="he_uniform")
# params = dict(kernel_size=(3, 3, 3), activation="relu",
# padding="same", data_format=data_format,
# kernel_initializer="he_uniform")
conv1 = keras.layers.Conv3D(name="conv1a", filters=32, **params)(inputs)
conv1 = keras.layers.BatchNormalization(axis =-1)(conv1)
conv1 = keras.layers.Activation('relu')(conv1)
conv1 = keras.layers.Conv3D(name="conv1b", filters=64, **params)(conv1)
conv1 = keras.layers.BatchNormalization(axis =-1)(conv1)
conv1 = keras.layers.Activation('relu')(conv1)
pool1 = keras.layers.MaxPooling3D(name="pool1", pool_size=(2, 2, 2))(conv1)
conv2 = keras.layers.Conv3D(name="conv2a", filters=64, **params)(pool1)
conv2 = keras.layers.BatchNormalization(axis =-1)(conv2)
conv2 = keras.layers.Activation('relu')(conv2)
conv2 = keras.layers.Conv3D(name="conv2b", filters=128, **params)(conv2)
conv2 = keras.layers.BatchNormalization(axis =-1)(conv2)
conv2 = keras.layers.Activation('relu')(conv2)
pool2 = keras.layers.MaxPooling3D(name="pool2", pool_size=(2, 2, 2))(conv2)
conv3 = keras.layers.Conv3D(name="conv3a", filters=128, **params)(pool2)
conv3 = keras.layers.BatchNormalization(axis =-1)(conv3)
conv3 = keras.layers.Activation('relu')(conv3)
conv3 = keras.layers.Dropout(dropout)(conv3) ### Trying dropout layers earlier on, as indicated in the paper
conv3 = keras.layers.Conv3D(name="conv3b", filters=256, **params)(conv3)
conv3 = keras.layers.BatchNormalization(axis =-1)(conv3)
conv3 = keras.layers.Activation('relu')(conv3)
pool3 = keras.layers.MaxPooling3D(name="pool3", pool_size=(2, 2, 2))(conv3)
conv4 = keras.layers.Conv3D(name="conv4a", filters=256, **params)(pool3)
conv4 = keras.layers.BatchNormalization(axis =-1)(conv4)
conv4 = keras.layers.Activation('relu')(conv4)
conv4 = keras.layers.Dropout(dropout)(conv4) ### Trying dropout layers earlier on, as indicated in the paper
conv4 = keras.layers.Conv3D(name="conv4b", filters=512, **params)(conv4)
conv4 = keras.layers.BatchNormalization(axis =-1)(conv4)
conv4 = keras.layers.Activation('relu')(conv4)
if use_upsampling:
up4 = keras.layers.concatenate([keras.layers.UpSampling3D(name="up4", size=(2, 2, 2))(conv4), conv3], axis=concat_axis)
else:
up4 = keras.layers.concatenate([keras.layers.Conv3DTranspose(name="transConv4", filters=512, data_format=data_format,
kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv4), conv3], axis=concat_axis)
conv5 = keras.layers.Conv3D(name="conv5a", filters=256, **params)(up4)
conv5 = keras.layers.BatchNormalization(axis =-1)(conv5)
conv5 = keras.layers.Activation('relu')(conv5)
conv5 = keras.layers.Conv3D(name="conv5b", filters=256, **params)(conv5)
conv5 = keras.layers.BatchNormalization(axis =-1)(conv5)
conv5 = keras.layers.Activation('relu')(conv5)
if use_upsampling:
up5 = keras.layers.concatenate([keras.layers.UpSampling3D(name="up5", size=(2, 2, 2))(conv5), conv2], axis=concat_axis)
else:
up5 = keras.layers.concatenate([keras.layers.Conv3DTranspose(name="transConv5", filters=256, data_format=data_format,
kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv5), conv2], axis=concat_axis)
conv6 = keras.layers.Conv3D(name="conv6a", filters=128, **params)(up5)
conv6 = keras.layers.BatchNormalization(axis =-1)(conv6)
conv6 = keras.layers.Activation('relu')(conv6)
conv6 = keras.layers.Conv3D(name="conv6b", filters=128, **params)(conv6)
conv6 = keras.layers.BatchNormalization(axis =-1)(conv6)
conv6 = keras.layers.Activation('relu')(conv6)
if use_upsampling:
up6 = keras.layers.concatenate([keras.layers.UpSampling3D(name="up6", size=(2, 2, 2))(conv6), conv1], axis=concat_axis)
else:
up6 = keras.layers.concatenate([keras.layers.Conv3DTranspose(name="transConv6", filters=128, data_format=data_format,
kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv6), conv1], axis=concat_axis)
conv7 = keras.layers.Conv3D(name="conv7a", filters=64, **params)(up6)
conv7 = keras.layers.BatchNormalization(axis =-1)(conv7)
conv7 = keras.layers.Activation('relu')(conv7)
conv7 = keras.layers.Conv3D(name="conv7b", filters=32, **params)(conv7)
conv7 = keras.layers.BatchNormalization(axis =-1)(conv7)
conv7 = keras.layers.Activation('relu')(conv7)
pred_msk = keras.layers.Conv3D(name="PredictionMask", filters=n_out, kernel_size=(1, 1, 1),
data_format=data_format, activation="sigmoid")(conv7)
# The classification branch was originally taken from the conv7 feature maps,
# but global average pooling on raw feature maps did not help, so it is taken from pred_msk instead.
class_pred = keras.layers.GlobalAveragePooling3D(name='PredictionClass')(pred_msk)
model = keras.models.Model(inputs=[inputs], outputs=[pred_msk,class_pred])
if print_summary:
#model = keras.models.Model(inputs=[inputs], outputs=[class_pred])
model.summary()
# return pred
return model
def unet3D_ModelB_exp1(input_img, use_upsampling=True, n_out=1, dropout=0.2,
print_summary = False):
"""
3D U-Net model
"""
print("3D U-Net Segmentation")
# Set keras learning phase to train
keras.backend.set_learning_phase(True)
# Don"t initialize variables on the fly
keras.backend.manual_variable_initialization(False)
inputs = keras.layers.Input(shape=input_img, name="Input_Image")
# Use the params below to apply batch normalization and ReLU activation as separate layers
params = dict(kernel_size=(3, 3, 3), activation=None,
padding="same", data_format=data_format,
kernel_initializer="he_uniform")
# params = dict(kernel_size=(3, 3, 3), activation="relu",
# padding="same", data_format=data_format,
# kernel_initializer="he_uniform")
conv1 = keras.layers.Conv3D(name="conv1a", filters=32, **params)(inputs)
conv1 = keras.layers.BatchNormalization(axis =-1)(conv1)
conv1 = keras.layers.Activation('relu')(conv1)
conv1 = keras.layers.Conv3D(name="conv1b", filters=64, **params)(conv1)
conv1 = keras.layers.BatchNormalization(axis =-1)(conv1)
conv1 = keras.layers.Activation('relu')(conv1)
pool1 = keras.layers.MaxPooling3D(name="pool1", pool_size=(2, 2, 2))(conv1)
conv2 = keras.layers.Conv3D(name="conv2a", filters=64, **params)(pool1)
conv2 = keras.layers.BatchNormalization(axis =-1)(conv2)
conv2 = keras.layers.Activation('relu')(conv2)
conv2 = keras.layers.Conv3D(name="conv2b", filters=128, **params)(conv2)
conv2 = keras.layers.BatchNormalization(axis =-1)(conv2)
conv2 = keras.layers.Activation('relu')(conv2)
pool2 = keras.layers.MaxPooling3D(name="pool2", pool_size=(2, 2, 2))(conv2)
conv3 = keras.layers.Conv3D(name="conv3a", filters=128, **params)(pool2)
conv3 = keras.layers.BatchNormalization(axis =-1)(conv3)
conv3 = keras.layers.Activation('relu')(conv3)
conv3 = keras.layers.Dropout(dropout)(conv3) ### Trying dropout layers earlier on, as indicated in the paper
conv3 = keras.layers.Conv3D(name="conv3b", filters=256, **params)(conv3)
conv3 = keras.layers.BatchNormalization(axis =-1)(conv3)
conv3 = keras.layers.Activation('relu')(conv3)
pool3 = keras.layers.MaxPooling3D(name="pool3", pool_size=(2, 2, 2))(conv3)
conv4 = keras.layers.Conv3D(name="conv4a", filters=256, **params)(pool3)
conv4 = keras.layers.BatchNormalization(axis =-1)(conv4)
conv4 = keras.layers.Activation('relu')(conv4)
conv4 = keras.layers.Dropout(dropout)(conv4) ### Trying dropout layers earlier on, as indicated in the paper
conv4 = keras.layers.Conv3D(name="conv4b", filters=512, **params)(conv4)
conv4 = keras.layers.BatchNormalization(axis =-1)(conv4)
conv4 = keras.layers.Activation('relu')(conv4)
if use_upsampling:
up4 = keras.layers.concatenate([keras.layers.UpSampling3D(name="up4", size=(2, 2, 2))(conv4), conv3], axis=concat_axis)
else:
up4 = keras.layers.concatenate([keras.layers.Conv3DTranspose(name="transConv4", filters=512, data_format=data_format,
kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv4), conv3], axis=concat_axis)
conv5 = keras.layers.Conv3D(name="conv5a", filters=256, **params)(up4)
conv5 = keras.layers.BatchNormalization(axis =-1)(conv5)
conv5 = keras.layers.Activation('relu')(conv5)
conv5 = keras.layers.Conv3D(name="conv5b", filters=256, **params)(conv5)
conv5 = keras.layers.BatchNormalization(axis =-1)(conv5)
conv5 = keras.layers.Activation('relu')(conv5)
if use_upsampling:
up5 = keras.layers.concatenate([keras.layers.UpSampling3D(name="up5", size=(2, 2, 2))(conv5), conv2], axis=concat_axis)
else:
up5 = keras.layers.concatenate([keras.layers.Conv3DTranspose(name="transConv5", filters=256, data_format=data_format,
kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv5), conv2], axis=concat_axis)
conv6 = keras.layers.Conv3D(name="conv6a", filters=128, **params)(up5)
conv6 = keras.layers.BatchNormalization(axis =-1)(conv6)
conv6 = keras.layers.Activation('relu')(conv6)
conv6 = keras.layers.Conv3D(name="conv6b", filters=128, **params)(conv6)
conv6 = keras.layers.BatchNormalization(axis =-1)(conv6)
conv6 = keras.layers.Activation('relu')(conv6)
if use_upsampling:
up6 = keras.layers.concatenate([keras.layers.UpSampling3D(name="up6", size=(2, 2, 2))(conv6), conv1], axis=concat_axis)
else:
up6 = keras.layers.concatenate([keras.layers.Conv3DTranspose(name="transConv6", filters=128, data_format=data_format,
kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv6), conv1], axis=concat_axis)
conv7 = keras.layers.Conv3D(name="conv7a", filters=64, **params)(up6)
conv7 = keras.layers.BatchNormalization(axis =-1)(conv7)
conv7 = keras.layers.Activation('relu')(conv7)
conv7 = keras.layers.Conv3D(name="conv7b", filters=32, **params)(conv7)
conv7 = keras.layers.BatchNormalization(axis =-1)(conv7)
conv7 = keras.layers.Activation('relu')(conv7)
pred_msk = keras.layers.Conv3D(name="PredictionMask", filters=n_out, kernel_size=(1, 1, 1),
data_format=data_format, activation="sigmoid")(conv7)
# The classification branch was originally taken from the conv7 feature maps,
# but global average pooling on raw feature maps did not help; in this variant the branch is disabled entirely.
# class_pred = keras.layers.GlobalAveragePooling3D(name='PredictionClass')(pred_msk)
model = keras.models.Model(inputs=[inputs], outputs=[pred_msk])
# model = keras.models.Model(inputs=[inputs], outputs=[pred_msk,class_pred])
if print_summary:
#model = keras.models.Model(inputs=[inputs], outputs=[class_pred])
model.summary()
# return pred
return model
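# Example usage (a sketch; optimizer and loss are assumptions): this variant returns only
# the segmentation mask, so a single loss is enough, e.g.
# model = unet3D_ModelB_exp1((144, 144, 144, 1))
# model.compile(optimizer="adam", loss="binary_crossentropy")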
def unet3D_sizeagnostic_Model16(use_upsampling=True, n_out=1, dropout=0.2,
print_summary = False):
"""
3D U-Net model
"""
print("3D U-Net Segmentation")
# Set keras learning phase to train
keras.backend.set_learning_phase(True)
# Don"t initialize variables on the fly
keras.backend.manual_variable_initialization(False)
input_img = (None, None, None,1)
inputs = keras.layers.Input(shape=input_img, name="Input_Image")
# Use the params below to apply batch normalization and ReLU activation as separate layers
params = dict(kernel_size=(3, 3, 3), activation=None,
padding="same", data_format=data_format,
kernel_initializer="he_uniform")
# params = dict(kernel_size=(3, 3, 3), activation="relu",
# padding="same", data_format=data_format,
# kernel_initializer="he_uniform")
conv1 = keras.layers.Conv3D(name="conv1a", filters=32, **params)(inputs)
conv1 = keras.layers.BatchNormalization(axis =-1)(conv1)
conv1 = keras.layers.Activation('relu')(conv1)
conv1 = keras.layers.Conv3D(name="conv1b", filters=64, **params)(conv1)
conv1 = keras.layers.BatchNormalization(axis =-1)(conv1)
conv1 = keras.layers.Activation('relu')(conv1)
pool1 = keras.layers.MaxPooling3D(name="pool1", pool_size=(2, 2, 2))(conv1)
conv2 = keras.layers.Conv3D(name="conv2a", filters=64, **params)(pool1)
conv2 = keras.layers.BatchNormalization(axis =-1)(conv2)
conv2 = keras.layers.Activation('relu')(conv2)
conv2 = keras.layers.Conv3D(name="conv2b", filters=128, **params)(conv2)
conv2 = keras.layers.BatchNormalization(axis =-1)(conv2)
conv2 = keras.layers.Activation('relu')(conv2)
pool2 = keras.layers.MaxPooling3D(name="pool2", pool_size=(2, 2, 2))(conv2)
conv3 = keras.layers.Conv3D(name="conv3a", filters=128, **params)(pool2)
conv3 = keras.layers.BatchNormalization(axis =-1)(conv3)
conv3 = keras.layers.Activation('relu')(conv3)
conv3 = keras.layers.Dropout(dropout)(conv3) ### Trying dropout layers earlier on, as indicated in the paper
conv3 = keras.layers.Conv3D(name="conv3b", filters=256, **params)(conv3)
conv3 = keras.layers.BatchNormalization(axis =-1)(conv3)
conv3 = keras.layers.Activation('relu')(conv3)
pool3 = keras.layers.MaxPooling3D(name="pool3", pool_size=(2, 2, 2))(conv3)
conv4 = keras.layers.Conv3D(name="conv4a", filters=256, **params)(pool3)
conv4 = keras.layers.BatchNormalization(axis =-1)(conv4)
conv4 = keras.layers.Activation('relu')(conv4)
conv4 = keras.layers.Dropout(dropout)(conv4) ### Trying dropout layers earlier on, as indicated in the paper
conv4 = keras.layers.Conv3D(name="conv4b", filters=512, **params)(conv4)
conv4 = keras.layers.BatchNormalization(axis =-1)(conv4)
conv4 = keras.layers.Activation('relu')(conv4)
if use_upsampling:
up4 = keras.layers.concatenate([keras.layers.UpSampling3D(name="up4", size=(2, 2, 2))(conv4), conv3], axis=concat_axis)
else:
up4 = keras.layers.concatenate([keras.layers.Conv3DTranspose(name="transConv4", filters=512, data_format=data_format,
kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv4), conv3], axis=concat_axis)
conv5 = keras.layers.Conv3D(name="conv5a", filters=256, **params)(up4)
conv5 = keras.layers.BatchNormalization(axis =-1)(conv5)
conv5 = keras.layers.Activation('relu')(conv5)
conv5 = keras.layers.Conv3D(name="conv5b", filters=256, **params)(conv5)
conv5 = keras.layers.BatchNormalization(axis =-1)(conv5)
conv5 = keras.layers.Activation('relu')(conv5)
if use_upsampling:
up5 = keras.layers.concatenate([keras.layers.UpSampling3D(name="up5", size=(2, 2, 2))(conv5), conv2], axis=concat_axis)
else:
up5 = keras.layers.concatenate([keras.layers.Conv3DTranspose(name="transConv5", filters=256, data_format=data_format,
kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv5), conv2], axis=concat_axis)
conv6 = keras.layers.Conv3D(name="conv6a", filters=128, **params)(up5)
conv6 = keras.layers.BatchNormalization(axis =-1)(conv6)
conv6 = keras.layers.Activation('relu')(conv6)
conv6 = keras.layers.Conv3D(name="conv6b", filters=128, **params)(conv6)
conv6 = keras.layers.BatchNormalization(axis =-1)(conv6)
conv6 = keras.layers.Activation('relu')(conv6)
if use_upsampling:
up6 = keras.layers.concatenate([keras.layers.UpSampling3D(name="up6", | |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.000177602,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202828,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0020468,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.172292,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.298347,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.17111,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.641749,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.169989,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.19799,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.000386684,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00624571,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0451867,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0461909,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0455733,
'Execution Unit/Register Files/Runtime Dynamic': 0.0524366,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.109238,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.286646,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 1.58904,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00192353,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00192353,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0016745,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000647738,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000663535,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00618509,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0184745,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0444045,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.8245,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.173462,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.150817,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.1822,
'Instruction Fetch Unit/Runtime Dynamic': 0.393343,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0704156,
'L2/Runtime Dynamic': 0.0201378,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.81578,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.78804,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0510735,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0510736,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.05795,
'Load Store Unit/Runtime Dynamic': 1.09099,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.125939,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.251878,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.044696,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.045715,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.175617,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.028551,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.411387,
'Memory Management Unit/Runtime Dynamic': 0.0742661,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 18.4816,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.00134852,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.00882627,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0891093,
'Renaming Unit/Int Front End RAT/Subthreshold
# Copyright 2013 OpenStack Foundation
# Copyright (C) 2013 Yahoo! Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
import os
import tempfile
import testtools
from glanceclient.common import utils
from glanceclient.v2 import shell as test_shell
class ShellV2Test(testtools.TestCase):
def setUp(self):
super(ShellV2Test, self).setUp()
self._mock_utils()
self.gc = self._mock_glance_client()
def _make_args(self, args):
#NOTE(venkatesh): this conversion from a dict to an object
# is required because the test_shell.do_xxx(gc, args) methods
# expects the args to be attributes of an object. If passed as
# dict directly, it throws an AttributeError.
class Args():
def __init__(self, entries):
self.__dict__.update(entries)
return Args(args)
def _mock_glance_client(self):
my_mocked_gc = mock.Mock()
my_mocked_gc.schemas.return_value = 'test'
my_mocked_gc.get.return_value = {}
return my_mocked_gc
def _mock_utils(self):
utils.print_list = mock.Mock()
utils.print_dict = mock.Mock()
utils.save_image = mock.Mock()
def assert_exits_with_msg(self, func, func_args, err_msg):
with mock.patch.object(utils, 'exit') as mocked_utils_exit:
mocked_utils_exit.return_value = '%s' % err_msg
func(self.gc, func_args)
mocked_utils_exit.assert_called_once_with(err_msg)
def test_do_image_list(self):
input = {
'limit': None,
'page_size': 18,
'visibility': True,
'member_status': 'Fake',
'owner': 'test',
'checksum': 'fake_checksum',
'tag': 'fake tag',
'properties': [],
'sort_key': ['name', 'id'],
'sort_dir': ['desc', 'asc'],
'sort': None
}
args = self._make_args(input)
with mock.patch.object(self.gc.images, 'list') as mocked_list:
mocked_list.return_value = {}
test_shell.do_image_list(self.gc, args)
exp_img_filters = {
'owner': 'test',
'member_status': 'Fake',
'visibility': True,
'checksum': 'fake_checksum',
'tag': 'fake tag'
}
mocked_list.assert_called_once_with(page_size=18,
sort_key=['name', 'id'],
sort_dir=['desc', 'asc'],
filters=exp_img_filters)
utils.print_list.assert_called_once_with({}, ['ID', 'Name'])
def test_do_image_list_with_single_sort_key(self):
input = {
'limit': None,
'page_size': 18,
'visibility': True,
'member_status': 'Fake',
'owner': 'test',
'checksum': 'fake_checksum',
'tag': 'fake tag',
'properties': [],
'sort_key': ['name'],
'sort_dir': ['desc'],
'sort': None
}
args = self._make_args(input)
with mock.patch.object(self.gc.images, 'list') as mocked_list:
mocked_list.return_value = {}
test_shell.do_image_list(self.gc, args)
exp_img_filters = {
'owner': 'test',
'member_status': 'Fake',
'visibility': True,
'checksum': 'fake_checksum',
'tag': 'fake tag'
}
mocked_list.assert_called_once_with(page_size=18,
sort_key=['name'],
sort_dir=['desc'],
filters=exp_img_filters)
utils.print_list.assert_called_once_with({}, ['ID', 'Name'])
def test_do_image_list_new_sorting_syntax(self):
input = {
'limit': None,
'page_size': 18,
'visibility': True,
'member_status': 'Fake',
'owner': 'test',
'checksum': 'fake_checksum',
'tag': 'fake tag',
'properties': [],
'sort': 'name:desc,size:asc',
'sort_key': [],
'sort_dir': []
}
args = self._make_args(input)
with mock.patch.object(self.gc.images, 'list') as mocked_list:
mocked_list.return_value = {}
test_shell.do_image_list(self.gc, args)
exp_img_filters = {
'owner': 'test',
'member_status': 'Fake',
'visibility': True,
'checksum': 'fake_checksum',
'tag': 'fake tag'
}
mocked_list.assert_called_once_with(
page_size=18,
sort='name:desc,size:asc',
filters=exp_img_filters)
utils.print_list.assert_called_once_with({}, ['ID', 'Name'])
def test_do_image_list_with_property_filter(self):
input = {
'limit': None,
'page_size': 1,
'visibility': True,
'member_status': 'Fake',
'owner': 'test',
'checksum': 'fake_checksum',
'tag': 'fake tag',
'properties': ['os_distro=NixOS', 'architecture=x86_64'],
'sort_key': ['name'],
'sort_dir': ['desc'],
'sort': None
}
args = self._make_args(input)
with mock.patch.object(self.gc.images, 'list') as mocked_list:
mocked_list.return_value = {}
test_shell.do_image_list(self.gc, args)
exp_img_filters = {
'owner': 'test',
'member_status': 'Fake',
'visibility': True,
'checksum': 'fake_checksum',
'tag': 'fake tag',
'os_distro': 'NixOS',
'architecture': 'x86_64'
}
mocked_list.assert_called_once_with(page_size=1,
sort_key=['name'],
sort_dir=['desc'],
filters=exp_img_filters)
utils.print_list.assert_called_once_with({}, ['ID', 'Name'])
def test_do_image_show(self):
args = self._make_args({'id': 'pass', 'page_size': 18,
'max_column_width': 120})
with mock.patch.object(self.gc.images, 'get') as mocked_list:
ignore_fields = ['self', 'access', 'file', 'schema']
expect_image = dict([(field, field) for field in ignore_fields])
expect_image['id'] = 'pass'
mocked_list.return_value = expect_image
test_shell.do_image_show(self.gc, args)
mocked_list.assert_called_once_with('pass')
utils.print_dict.assert_called_once_with({'id': 'pass'},
max_column_width=120)
@mock.patch('sys.stdin', autospec=True)
def test_do_image_create_no_user_props(self, mock_stdin):
args = self._make_args({'name': 'IMG-01', 'disk_format': 'vhd',
'container_format': 'bare',
'file': None})
with mock.patch.object(self.gc.images, 'create') as mocked_create:
ignore_fields = ['self', 'access', 'file', 'schema']
expect_image = dict([(field, field) for field in ignore_fields])
expect_image['id'] = 'pass'
expect_image['name'] = 'IMG-01'
expect_image['disk_format'] = 'vhd'
expect_image['container_format'] = 'bare'
mocked_create.return_value = expect_image
# Ensure that the test stdin is not considered
# to be supplying image data
mock_stdin.isatty = lambda: True
test_shell.do_image_create(self.gc, args)
mocked_create.assert_called_once_with(name='IMG-01',
disk_format='vhd',
container_format='bare')
utils.print_dict.assert_called_once_with({
'id': 'pass', 'name': 'IMG-01', 'disk_format': 'vhd',
'container_format': 'bare'})
def test_do_image_create_with_file(self):
try:
file_name = None
with open(tempfile.mktemp(), 'w+') as f:
f.write('Some data here')
f.flush()
f.seek(0)
file_name = f.name
temp_args = {'name': 'IMG-01',
'disk_format': 'vhd',
'container_format': 'bare',
'file': file_name,
'progress': False}
args = self._make_args(temp_args)
with mock.patch.object(self.gc.images, 'create') as mocked_create:
with mock.patch.object(self.gc.images, 'get') as mocked_get:
ignore_fields = ['self', 'access', 'schema']
expect_image = dict([(field, field) for field in
ignore_fields])
expect_image['id'] = 'pass'
expect_image['name'] = 'IMG-01'
expect_image['disk_format'] = 'vhd'
expect_image['container_format'] = 'bare'
mocked_create.return_value = expect_image
mocked_get.return_value = expect_image
test_shell.do_image_create(self.gc, args)
temp_args.pop('file', None)
mocked_create.assert_called_once_with(**temp_args)
mocked_get.assert_called_once_with('pass')
utils.print_dict.assert_called_once_with({
'id': 'pass', 'name': 'IMG-01', 'disk_format': 'vhd',
'container_format': 'bare'})
finally:
try:
os.remove(f.name)
except Exception:
pass
@mock.patch('sys.stdin', autospec=True)
def test_do_image_create_with_user_props(self, mock_stdin):
args = self._make_args({'name': 'IMG-01',
'property': ['myprop=myval'],
'file': None})
with mock.patch.object(self.gc.images, 'create') as mocked_create:
ignore_fields = ['self', 'access', 'file', 'schema']
expect_image = dict([(field, field) for field in ignore_fields])
expect_image['id'] = 'pass'
expect_image['name'] = 'IMG-01'
expect_image['myprop'] = 'myval'
mocked_create.return_value = expect_image
# Ensure that the test stdin is not considered
# to be supplying image data
mock_stdin.isatty = lambda: True
test_shell.do_image_create(self.gc, args)
mocked_create.assert_called_once_with(name='IMG-01',
myprop='myval')
utils.print_dict.assert_called_once_with({
'id': 'pass', 'name': 'IMG-01', 'myprop': 'myval'})
def test_do_image_update_no_user_props(self):
args = self._make_args({'id': 'pass', 'name': 'IMG-01',
'disk_format': 'vhd',
'container_format': 'bare'})
with mock.patch.object(self.gc.images, 'update') as mocked_update:
ignore_fields = ['self', 'access', 'file', 'schema']
expect_image = dict([(field, field) for field in ignore_fields])
expect_image['id'] = 'pass'
expect_image['name'] = 'IMG-01'
expect_image['disk_format'] = 'vhd'
expect_image['container_format'] = 'bare'
mocked_update.return_value = expect_image
test_shell.do_image_update(self.gc, args)
mocked_update.assert_called_once_with('pass',
None,
name='IMG-01',
disk_format='vhd',
container_format='bare')
utils.print_dict.assert_called_once_with({
'id': 'pass', 'name': 'IMG-01', 'disk_format': 'vhd',
'container_format': 'bare'})
def test_do_image_update_with_user_props(self):
args = self._make_args({'id': 'pass', 'name': 'IMG-01',
'property': ['myprop=myval']})
with mock.patch.object(self.gc.images, 'update') as mocked_update:
ignore_fields = ['self', 'access', 'file', 'schema']
expect_image = dict([(field, field) for field in ignore_fields])
expect_image['id'] = 'pass'
expect_image['name'] = 'IMG-01'
expect_image['myprop'] = 'myval'
mocked_update.return_value = expect_image
test_shell.do_image_update(self.gc, args)
mocked_update.assert_called_once_with('pass',
None,
name='IMG-01',
myprop='myval')
utils.print_dict.assert_called_once_with({
'id': 'pass', 'name': 'IMG-01', 'myprop': 'myval'})
def test_do_image_update_with_remove_props(self):
args = self._make_args({'id': 'pass', 'name': 'IMG-01',
'disk_format': 'vhd',
'remove-property': ['container_format']})
with mock.patch.object(self.gc.images, 'update') as mocked_update:
ignore_fields = ['self', 'access', 'file', 'schema']
expect_image = dict([(field, field) for field in ignore_fields])
expect_image['id'] = 'pass'
expect_image['name'] = 'IMG-01'
expect_image['disk_format'] = 'vhd'
mocked_update.return_value = expect_image
test_shell.do_image_update(self.gc, args)
mocked_update.assert_called_once_with('pass',
['container_format'],
name='IMG-01',
disk_format='vhd')
utils.print_dict.assert_called_once_with({
'id': 'pass', 'name': 'IMG-01', 'disk_format': 'vhd'})
def test_do_explain(self):
input = {
'page_size': 18,
'id': 'pass',
'schemas': 'test',
'model': 'test',
}
args = self._make_args(input)
with mock.patch.object(utils, 'print_list'):
test_shell.do_explain(self.gc, args)
self.gc.schemas.get.assert_called_once_with('test')
def test_do_location_add(self):
gc = self.gc
loc = {'url': 'http://foo.com/', 'metadata': {'foo': 'bar'}}
args = self._make_args({'id': 'pass',
'url': loc['url'],
'metadata': json.dumps(loc['metadata'])})
with mock.patch.object(gc.images, 'add_location') as mocked_addloc:
expect_image = {'id': 'pass', 'locations': [loc]}
mocked_addloc.return_value = expect_image
test_shell.do_location_add(self.gc, args)
mocked_addloc.assert_called_once_with('pass',
loc['url'],
loc['metadata'])
utils.print_dict.assert_called_once_with(expect_image)
def test_do_location_delete(self):
gc = self.gc
loc_set = set(['http://foo/bar', 'http://spam/ham'])
args = self._make_args({'id': 'pass', 'url': loc_set})
with mock.patch.object(gc.images, 'delete_locations') as mocked_rmloc:
test_shell.do_location_delete(self.gc, args)
mocked_rmloc.assert_called_once_with('pass', loc_set)
def test_do_location_update(self):
gc = self.gc
loc = {'url': 'http://foo.com/', 'metadata': {'foo': 'bar'}}
args = self._make_args({'id': 'pass',
'url': loc['url'],
'metadata': json.dumps(loc['metadata'])})
with mock.patch.object(gc.images, 'update_location') as mocked_modloc:
expect_image = {'id': 'pass', 'locations': [loc]}
mocked_modloc.return_value = expect_image
test_shell.do_location_update(self.gc, args)
mocked_modloc.assert_called_once_with('pass',
loc['url'],
loc['metadata'])
utils.print_dict.assert_called_once_with(expect_image)
def test_image_upload(self):
args = self._make_args(
{'id': 'IMG-01', 'file': 'test', 'size': 1024, 'progress': False})
with mock.patch.object(self.gc.images, 'upload') as mocked_upload:
utils.get_data_file = mock.Mock(return_value='testfile')
mocked_upload.return_value = None
test_shell.do_image_upload(self.gc, args)
mocked_upload.assert_called_once_with('IMG-01', 'testfile', 1024)
def test_image_download(self):
args = self._make_args(
{'id': 'IMG-01', 'file': 'test', 'progress': True})
with mock.patch.object(self.gc.images, 'data') as mocked_data:
def _data():
for c in 'abcedf':
yield c
mocked_data.return_value = utils.IterableWithLength(_data(), 5)
test_shell.do_image_download(self.gc, args)
mocked_data.assert_called_once_with('IMG-01')
def test_do_image_delete(self):
args = self._make_args({'id': 'pass', 'file': 'test'})
with mock.patch.object(self.gc.images, 'delete') as mocked_delete:
mocked_delete.return_value = 0
test_shell.do_image_delete(self.gc, args)
mocked_delete.assert_called_once_with('pass')
def test_do_image_delete_deleted(self):
image_id = 'deleted-img'
args = self._make_args({'id': image_id})
with mock.patch.object(self.gc.images, 'get') as mocked_get:
mocked_get.return_value = self._make_args({'id': image_id,
'status': 'deleted'})
msg = "No image with an ID of '%s' exists." % image_id
self.assert_exits_with_msg(func=test_shell.do_image_delete,
func_args=args,
err_msg=msg)
def test_do_member_list(self):
args = self._make_args({'image_id': 'IMG-01'})
with mock.patch.object(self.gc.image_members, 'list') as mocked_list:
mocked_list.return_value = {}
test_shell.do_member_list(self.gc, args)
mocked_list.assert_called_once_with('IMG-01')
columns = ['Image ID', 'Member ID', 'Status']
utils.print_list.assert_called_once_with({}, columns)
def test_do_member_create(self):
args = self._make_args({'image_id': 'IMG-01', 'member_id': 'MEM-01'})
with mock.patch.object(self.gc.image_members, 'create') as mock_create:
mock_create.return_value = {}
test_shell.do_member_create(self.gc, args)
mock_create.assert_called_once_with('IMG-01', 'MEM-01')
columns = ['Image ID', 'Member ID', 'Status']
utils.print_list.assert_called_once_with([{}], columns)
def test_do_member_create_with_few_arguments(self):
args = self._make_args({'image_id': None, 'member_id': 'MEM-01'})
msg = 'Unable to create member. Specify image_id and member_id'
self.assert_exits_with_msg(func=test_shell.do_member_create,
func_args=args,
err_msg=msg)
def test_do_member_update(self):
input = {
'image_id': 'IMG-01',
'member_id': 'MEM-01',
'member_status': 'status',
}
args = self._make_args(input)
with mock.patch.object(self.gc.image_members, 'update') as mock_update:
mock_update.return_value = {}
test_shell.do_member_update(self.gc, args)
mock_update.assert_called_once_with('IMG-01', 'MEM-01', 'status')
columns = ['Image ID', 'Member ID', 'Status']
import pickle
from typing import Callable, Dict, Iterable, Optional, Sequence, Union
from tqdm.auto import tqdm
from lhotse import CutSet
from lhotse.serialization import LazyIteratorChain
from lhotse.utils import Pathlike, is_module_available
def export_to_webdataset(
cuts: CutSet,
output_path: Pathlike,
shard_size: Optional[int] = None,
verbose: bool = True,
audio_format: str = "flac",
load_audio: bool = True,
load_features: bool = True,
load_custom: bool = True,
) -> int:
"""
Saves the CutSet metadata along with audio/features data into a WebDataset archive.
The audio and feature data is read, decoded, and encoded into ``audio_format`` for audio,
lilcom for features and arrays with floating point type, and pickle for all other dtypes.
The intended use of this function is to speed up the I/O in training data pipelines by
converting random access reads to sequential access reads.
Supported values for ``audio_format`` are the same as for the ``format`` argument in
``torchaudio.save`` function with ``sox_io`` backend.
If ``shard_size`` is specified, we will leverage WebDataset's ``ShardWriter`` to
create multiple tarballs with ``shard_size`` items per shard. In that mode, we expect
that ``output_path`` contains a pattern like "/path/to/shard-%06d.tar", which will
be internally expanded with the shard index.
Returns number of written shards if sharding is enabled, otherwise 0.
**Examples**
Export cuts with audio, features, and all custom data to a single tarball,
converting audio to FLACs::
>>> cuts = CutSet.from_jsonl_lazy("data/cuts-train.jsonl")
>>> n_shards = export_to_webdataset(
... cuts=cuts,
... output_path="data/cuts-train.tar",
... audio_format="flac",
... )
Export cuts with audio, features, and all custom data to a directory with shards
counting 10000 cuts each, converting audio to SPHERE (sph)::
>>> cuts = CutSet.from_jsonl_lazy("data/cuts-train.jsonl")
>>> n_shards = export_to_webdataset(
... cuts=cuts,
... output_path="data/cuts-train-wds/shard-%06d.tar",
... shard_size=10000,
... audio_format="sph",
... )
The same, but export cuts with only the features being read into memory
(recording and custom data still refers to external storage)::
>>> cuts = CutSet.from_jsonl_lazy("data/cuts-train.jsonl")
>>> n_shards = export_to_webdataset(
... cuts=cuts,
... output_path="data/cuts-train-wds/shard-%06d.tar",
... shard_size=10000,
... load_audio=False,
... load_custom=False,
... )
Export cuts to sharded tarballs stored in the cloud
(in this example AWS S3, using AWS CLI)::
>>> cuts = CutSet.from_jsonl_lazy("data/cuts-train.jsonl")
>>> n_shards = export_to_webdataset(
... cuts=cuts,
... output_path="pipe:aws s3 cp - s3://my-bucket/data/shard-%06d.tar",
... shard_size=10000,
... )
"""
if not is_module_available("webdataset"):
raise ImportError("Please 'pip install webdataset' first.")
from webdataset import TarWriter
if shard_size is not None:
assert shard_size > 0
# Note: this ShardWriter is not from webdataset, but defined below in this file.
sink = ShardWriter(output_path, maxcount=shard_size)
else:
sink = TarWriter(output_path)
num_shards_written = 0
with sink:
for idx, cut in tqdm(
enumerate(cuts), desc="Creating WebDataset tarball(s)", disable=not verbose
):
cut = cut.move_to_memory(
audio_format=audio_format,
load_audio=load_audio,
load_features=load_features,
load_custom=load_custom,
)
data = pickle.dumps(cut.to_dict())
sink.write({"__key__": cut.id, "data": data})
if isinstance(sink, ShardWriter):
num_shards_written = sink.shard
return num_shards_written
class LazyWebdatasetIterator:
"""
LazyWebdatasetIterator provides the ability to read Lhotse objects from a
WebDataset tarball on-the-fly, without reading its full contents into memory.
This class is designed to be a partial "drop-in" replacement for ordinary dicts
to support lazy loading of RecordingSet, SupervisionSet and CutSet.
Since it does not support random access reads, some methods of these classes
might not work properly.
The behaviour of the underlying ``WebDataset`` instance can be customized by
providing its kwargs directly to the constructor of this class.
"""
def __init__(
self, source: Union[Pathlike, Sequence[Pathlike]], **wds_kwargs
) -> None:
if not is_module_available("webdataset"):
raise ImportError("Please 'pip install webdataset' first.")
self.source = source
self.wds_kwargs = wds_kwargs
def set_epoch(self, epoch: int) -> None:
self.wds_kwargs["epoch"] = epoch
def _reset(self) -> None:
if not is_module_available("webdataset"):
raise ImportError("Please 'pip install webdataset' first.")
self._ds = mini_webdataset(self.source, **self.wds_kwargs)
self._ds_iter = iter(self._ds)
def __getstate__(self) -> dict:
"""
Store the state for pickling -- we'll only store the path + kwargs, and re-initialize
this iterator when unpickled. This is necessary to transfer this object across processes
for PyTorch's DataLoader workers.
"""
state = {"source": self.source, "wds_kwargs": self.wds_kwargs}
return state
def __setstate__(self, state: Dict) -> None:
"""Restore the state when unpickled."""
self.__dict__.update(state)
def __iter__(self) -> "LazyWebdatasetIterator":
self._reset()
return self
def __next__(self):
from lhotse.serialization import deserialize_item
data_dict = next(self._ds_iter)
data = pickle.loads(data_dict["data"])
item = deserialize_item(data)
return item
def values(self):
yield from self
def keys(self) -> Iterable[str]:
return (item.id for item in self)
def items(self):
return ((item.id, item) for item in self)
def __add__(self, other) -> LazyIteratorChain:
return LazyIteratorChain(self, other)
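# Hypothetical usage sketch (not part of the original module; the shard path below is
# a placeholder). The iterator defined above can be consumed directly and yields
# deserialized Lhotse objects one by one, reading the tarball sequentially instead of
# loading it into memory:
#
#   for cut in LazyWebdatasetIterator("data/cuts-train-wds/shard-000000.tar"):
#       print(cut.id)
#       break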
def mini_webdataset(
urls: Union[Pathlike, Sequence[Pathlike]],
epoch: int = 0,
repeat: bool = False,
shuffle_shards: bool = False,
shuffle: bool = False,
split_by_worker: bool = False,
split_by_node: bool = False,
shuffle_bufsize: int = 1000,
ignore_error_shards: bool = True,
):
"""
Return a pipeline for WebDataset-style data files.
This is a convenience function for constructing a partial pipeline
that reads from a set of sharded tar files, extracts the individual
files, and groups them together into samples (dictionaries).
You can use all the methods from `Composable` (`then`, `compose`) and
from `Shorthands` (`batched`, `unbatched`, `decode`, `shuffle`, etc.)
on the result.
.. note:: This is a reduced version of ``webdataset.WebDataset`` function,
that only uses the functionalities relevant to Lhotse, and makes it
possible to disable the node/worker splitting.
:param urls: the source URLs: a string or a list.
:param epoch: epoch number (used only when shuffling is enabled).
:param repeat: repeat infinitely if True.
:param shuffle: shuffle the items if True (after shuffling the shards, if enabled).
Note: ``shuffle`` is seeded with PID and time, making it non-reproducible across processes.
:param shuffle_shards: shuffle the shards if True.
Only takes effect when ``urls`` is a list of shard paths/urls.
:param split_by_worker: if True, shards are split per DataLoader worker subprocesses,
otherwise each dataloader worker will yield the same data.
Only takes effect when ``urls`` is a list of shard paths/urls.
:param split_by_node: if True, shards are split per node in DDP training,
otherwise on each node we'll yield the same data.
Only takes effect when ``urls`` is a list of shard paths/urls.
:param shuffle_bufsize: Buffer size for the ``shuffle`` argument.
Larger bufsize means more memory usage but potentially improved randomness.
:param ignore_error_shards: when ``True``, we tell WebDataset to ignore shards that
failed during loading and emit a warning. When ``False``, we won't catch the exceptions.
"""
if not is_module_available("webdataset"):
raise ImportError("Please 'pip install webdataset' first.")
from webdataset import PytorchShardList, reraise_exception, warn_and_continue
from webdataset import tariterators
handler = warn_and_continue if ignore_error_shards else reraise_exception
result = PytorchShardList(
urls,
shuffle=shuffle_shards,
split_by_worker=split_by_worker,
split_by_node=split_by_node,
)
result.set_epoch(epoch)
result = result.then(tariterators.url_opener, handler=handler)
result = result.then(tariterators.tar_file_expander, handler=handler)
result = result.then(tariterators.group_by_keys, handler=handler)
if repeat:
result = result.repeat()
if shuffle:
result = result.shuffle(shuffle_bufsize)
return result
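# Hypothetical example (not part of the original module; the shard names are
# placeholders). Build a reading pipeline over two shards, shuffling the shard order
# for a given epoch while keeping items within each shard in sequence. Each yielded
# sample is a dict with a "__key__" entry and a "data" entry holding the pickled cut
# written by export_to_webdataset above:
#
#   ds = mini_webdataset(
#       ["shard-000000.tar", "shard-000001.tar"],
#       epoch=3,
#       shuffle_shards=True,
#   )
#   for sample in ds:
#       ...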
class ShardWriter:
"""
Like ``webdataset.TarWriter`` but splits into multiple shards.
Note: this implementation is copied from webdataset and adapted to
allow shard writing using the "pipe:" notation. E.g., this is possible::
>>> writer = ShardWriter("pipe:gzip -c > data/shard-%06d.tar.gz")
Source:
https://github.com/webdataset/webdataset/blob/ccfe88086cdb21a0dc23a6454ce3e3723b6b8033/webdataset/writer.py#L359
"""
def __init__(
self,
pattern: str,
maxcount: int = 100000,
maxsize: float = 3e9,
post: Optional[Callable] = None,
start_shard: int = 0,
**kw,
):
"""Create a ShardWriter.
:param pattern: output file pattern
:param maxcount: maximum number of records per shard (Default value = 100000)
:param maxsize: maximum size of each shard (Default value = 3e9)
:param kw: other options passed to TarWriter
"""
if not is_module_available("webdataset"):
raise ImportError("Please 'pip install webdataset' first.")
self.verbose = 1
self.kw = kw
self.maxcount = maxcount
self.maxsize = maxsize
self.post = post
self.tarstream = None
self.shard = start_shard
self.pattern = pattern
self.total = 0
self.count = 0
self.size = 0
self.fname = None
self.next_stream()
def next_stream(self):
"""Close the current stream and move to the next."""
from webdataset.writer import TarWriter
self.finish()
self.fname = self.pattern % self.shard
if self.verbose:
print(
"# writing",
self.fname,
self.count,
"%.1f GB" % (self.size / 1e9),
self.total,
)
self.shard += 1
self.tarstream = TarWriter(self.fname, **self.kw)
self.count = 0
self.size = 0
def write(self, obj):
"""Write a sample.
:param obj: sample to be written
"""
if (
self.tarstream is None
or self.count >= self.maxcount
or self.size >= self.maxsize
):
self.next_stream()
size = self.tarstream.write(obj)
self.count += 1
self.total += 1
self.size += size
def finish(self):
"""Finish all writing (use close | |
:param pulumi.Input[bool] enable_default_internet_access: Enables or disables default internet access for the fleet.
:param pulumi.Input[str] fleet_type: Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
:param pulumi.Input[str] iam_role_arn: ARN of the IAM role to apply to the fleet.
:param pulumi.Input[int] idle_disconnect_timeout_in_seconds: Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
:param pulumi.Input[str] image_arn: ARN of the public, private, or shared image to use.
:param pulumi.Input[str] image_name: Name of the image used to create the fleet.
:param pulumi.Input[str] instance_type: Instance type to use when launching fleet instances.
:param pulumi.Input[int] max_user_duration_in_seconds: Maximum amount of time that a streaming session can remain active, in seconds.
:param pulumi.Input[str] name: Unique name for the fleet.
:param pulumi.Input[str] state: State of the fleet. Can be `STARTING`, `RUNNING`, `STOPPING` or `STOPPED`
:param pulumi.Input[str] stream_view: AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to attach to AppStream instances.
:param pulumi.Input['FleetVpcConfigArgs'] vpc_config: Configuration block for the VPC configuration for the image builder. See below.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if compute_capacity is not None:
pulumi.set(__self__, "compute_capacity", compute_capacity)
if created_time is not None:
pulumi.set(__self__, "created_time", created_time)
if description is not None:
pulumi.set(__self__, "description", description)
if disconnect_timeout_in_seconds is not None:
pulumi.set(__self__, "disconnect_timeout_in_seconds", disconnect_timeout_in_seconds)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if domain_join_info is not None:
pulumi.set(__self__, "domain_join_info", domain_join_info)
if enable_default_internet_access is not None:
pulumi.set(__self__, "enable_default_internet_access", enable_default_internet_access)
if fleet_type is not None:
pulumi.set(__self__, "fleet_type", fleet_type)
if iam_role_arn is not None:
pulumi.set(__self__, "iam_role_arn", iam_role_arn)
if idle_disconnect_timeout_in_seconds is not None:
pulumi.set(__self__, "idle_disconnect_timeout_in_seconds", idle_disconnect_timeout_in_seconds)
if image_arn is not None:
pulumi.set(__self__, "image_arn", image_arn)
if image_name is not None:
pulumi.set(__self__, "image_name", image_name)
if instance_type is not None:
pulumi.set(__self__, "instance_type", instance_type)
if max_user_duration_in_seconds is not None:
pulumi.set(__self__, "max_user_duration_in_seconds", max_user_duration_in_seconds)
if name is not None:
pulumi.set(__self__, "name", name)
if state is not None:
pulumi.set(__self__, "state", state)
if stream_view is not None:
pulumi.set(__self__, "stream_view", stream_view)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
if vpc_config is not None:
pulumi.set(__self__, "vpc_config", vpc_config)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the appstream fleet.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="computeCapacity")
def compute_capacity(self) -> Optional[pulumi.Input['FleetComputeCapacityArgs']]:
"""
Configuration block for the desired capacity of the fleet. See below.
"""
return pulumi.get(self, "compute_capacity")
@compute_capacity.setter
def compute_capacity(self, value: Optional[pulumi.Input['FleetComputeCapacityArgs']]):
pulumi.set(self, "compute_capacity", value)
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> Optional[pulumi.Input[str]]:
"""
Date and time, in UTC and extended RFC 3339 format, when the fleet was created.
"""
return pulumi.get(self, "created_time")
@created_time.setter
def created_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_time", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description to display.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="disconnectTimeoutInSeconds")
def disconnect_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Amount of time that a streaming session remains active after users disconnect.
"""
return pulumi.get(self, "disconnect_timeout_in_seconds")
@disconnect_timeout_in_seconds.setter
def disconnect_timeout_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "disconnect_timeout_in_seconds", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
Human-readable friendly name for the AppStream fleet.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="domainJoinInfo")
def domain_join_info(self) -> Optional[pulumi.Input['FleetDomainJoinInfoArgs']]:
"""
Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
"""
return pulumi.get(self, "domain_join_info")
@domain_join_info.setter
def domain_join_info(self, value: Optional[pulumi.Input['FleetDomainJoinInfoArgs']]):
pulumi.set(self, "domain_join_info", value)
@property
@pulumi.getter(name="enableDefaultInternetAccess")
def enable_default_internet_access(self) -> Optional[pulumi.Input[bool]]:
"""
Enables or disables default internet access for the fleet.
"""
return pulumi.get(self, "enable_default_internet_access")
@enable_default_internet_access.setter
def enable_default_internet_access(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_default_internet_access", value)
@property
@pulumi.getter(name="fleetType")
def fleet_type(self) -> Optional[pulumi.Input[str]]:
"""
Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
"""
return pulumi.get(self, "fleet_type")
@fleet_type.setter
def fleet_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fleet_type", value)
@property
@pulumi.getter(name="iamRoleArn")
def iam_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the IAM role to apply to the fleet.
"""
return pulumi.get(self, "iam_role_arn")
@iam_role_arn.setter
def iam_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "iam_role_arn", value)
@property
@pulumi.getter(name="idleDisconnectTimeoutInSeconds")
def idle_disconnect_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
"""
return pulumi.get(self, "idle_disconnect_timeout_in_seconds")
@idle_disconnect_timeout_in_seconds.setter
def idle_disconnect_timeout_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "idle_disconnect_timeout_in_seconds", value)
@property
@pulumi.getter(name="imageArn")
def image_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the public, private, or shared image to use.
"""
return pulumi.get(self, "image_arn")
@image_arn.setter
def image_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_arn", value)
@property
@pulumi.getter(name="imageName")
def image_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the image used to create the fleet.
"""
return pulumi.get(self, "image_name")
@image_name.setter
def image_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_name", value)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> Optional[pulumi.Input[str]]:
"""
Instance type to use when launching fleet instances.
"""
return pulumi.get(self, "instance_type")
@instance_type.setter
def instance_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_type", value)
@property
@pulumi.getter(name="maxUserDurationInSeconds")
def max_user_duration_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Maximum amount of time that a streaming session can remain active, in seconds.
"""
return pulumi.get(self, "max_user_duration_in_seconds")
@max_user_duration_in_seconds.setter
def max_user_duration_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_user_duration_in_seconds", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Unique name for the fleet.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
State of the fleet. Can be `STARTING`, `RUNNING`, `STOPPING` or `STOPPED`
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="streamView")
def stream_view(self) -> Optional[pulumi.Input[str]]:
"""
AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
"""
return pulumi.get(self, "stream_view")
@stream_view.setter
def stream_view(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stream_view", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of tags to attach to AppStream instances.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@property
@pulumi.getter(name="vpcConfig")
def vpc_config(self) -> Optional[pulumi.Input['FleetVpcConfigArgs']]:
"""
Configuration block for the VPC configuration for the image builder. See below.
"""
return pulumi.get(self, "vpc_config")
@vpc_config.setter
def vpc_config(self, value: Optional[pulumi.Input['FleetVpcConfigArgs']]):
pulumi.set(self, "vpc_config", value)
class Fleet(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_capacity: Optional[pulumi.Input[pulumi.InputType['FleetComputeCapacityArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
display_name: Optional[pulumi.Input[str]] = None,
domain_join_info: Optional[pulumi.Input[pulumi.InputType['FleetDomainJoinInfoArgs']]] = None,
enable_default_internet_access: Optional[pulumi.Input[bool]] = None,
fleet_type: Optional[pulumi.Input[str]] = None,
iam_role_arn: Optional[pulumi.Input[str]] = None,
idle_disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
image_arn: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
max_user_duration_in_seconds: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
stream_view: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_config: Optional[pulumi.Input[pulumi.InputType['FleetVpcConfigArgs']]] = None,
__props__=None):
"""
Provides an AppStream fleet.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
test_fleet = aws.appstream.Fleet("testFleet",
compute_capacity=aws.appstream.FleetComputeCapacityArgs(
desired_instances=1,
),
description="test fleet",
display_name="test-fleet",
enable_default_internet_access=False,
fleet_type="ON_DEMAND",
idle_disconnect_timeout_in_seconds=60,
image_name="Amazon-AppStream2-Sample-Image-02-04-2019",
instance_type="stream.standard.large",
max_user_duration_in_seconds=600,
tags={
"TagName": "tag-value",
},
vpc_config=aws.appstream.FleetVpcConfigArgs(
subnet_ids=["subnet-06e9b13400c225127"],
))
```
## Import
`aws_appstream_fleet` can be imported using the id, e.g.,
```sh
$ pulumi import aws:appstream/fleet:Fleet example fleetNameExample
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['FleetComputeCapacityArgs']] compute_capacity: Configuration block for the desired capacity of the fleet. See below.
:param pulumi.Input[str] description: Description to display.
:param pulumi.Input[int] disconnect_timeout_in_seconds: Amount of time that a streaming session remains active after users disconnect.
:param pulumi.Input[str] display_name: Human-readable friendly name for the AppStream fleet.
:param pulumi.Input[pulumi.InputType['FleetDomainJoinInfoArgs']] domain_join_info: Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
:param pulumi.Input[bool] enable_default_internet_access: Enables or disables default internet access for the fleet.
:param pulumi.Input[str] fleet_type: Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
:param pulumi.Input[str] iam_role_arn: ARN of the IAM role to apply to the fleet.
:param pulumi.Input[int] idle_disconnect_timeout_in_seconds: Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3192
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Bond(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'start_date': 'datetime',
'maturity_date': 'datetime',
'dom_ccy': 'str',
'flow_conventions': 'FlowConventions',
'principal': 'float',
'coupon_rate': 'float',
'identifiers': 'dict(str, str)',
'instrument_type': 'str'
}
attribute_map = {
'start_date': 'startDate',
'maturity_date': 'maturityDate',
'dom_ccy': 'domCcy',
'flow_conventions': 'flowConventions',
'principal': 'principal',
'coupon_rate': 'couponRate',
'identifiers': 'identifiers',
'instrument_type': 'instrumentType'
}
required_map = {
'start_date': 'required',
'maturity_date': 'required',
'dom_ccy': 'required',
'flow_conventions': 'required',
'principal': 'required',
'coupon_rate': 'required',
'identifiers': 'optional',
'instrument_type': 'required'
}
def __init__(self, start_date=None, maturity_date=None, dom_ccy=None, flow_conventions=None, principal=None, coupon_rate=None, identifiers=None, instrument_type=None): # noqa: E501
"""
Bond - a model defined in OpenAPI
:param start_date: The start date of the instrument. This is normally synonymous with the trade-date. (required)
:type start_date: datetime
:param maturity_date: The final maturity date of the instrument. This means the last date on which the instruments makes a payment of any amount. For the avoidance of doubt, that is not necessarily prior to its last sensitivity date for the purposes of risk; e.g. instruments such as Constant Maturity Swaps (CMS) often have sensitivities to rates beyond their last payment date (required)
:type maturity_date: datetime
:param dom_ccy: The domestic currency of the instrument. (required)
:type dom_ccy: str
:param flow_conventions: (required)
:type flow_conventions: lusid.FlowConventions
:param principal: The face-value or principal for the bond at outset. This might be reduced through its lifetime in the event of amortization or similar. (required)
:type principal: float
:param coupon_rate: simple coupon rate. (required)
:type coupon_rate: float
:param identifiers: external market codes and identifiers for the bond, e.g. ISIN.
:type identifiers: dict(str, str)
:param instrument_type: The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashFlowsLeg, Unknown, TermDeposit, ContractForDifference, EquitySwap, CashPerpetual, CashSettled, CdsIndex, Basket, FundingLeg, CrossCurrencySwap, FxSwap (required)
:type instrument_type: str
""" # noqa: E501
self._start_date = None
self._maturity_date = None
self._dom_ccy = None
self._flow_conventions = None
self._principal = None
self._coupon_rate = None
self._identifiers = None
self._instrument_type = None
self.discriminator = None
self.start_date = start_date
self.maturity_date = maturity_date
self.dom_ccy = dom_ccy
self.flow_conventions = flow_conventions
self.principal = principal
self.coupon_rate = coupon_rate
self.identifiers = identifiers
self.instrument_type = instrument_type
@property
def start_date(self):
"""Gets the start_date of this Bond. # noqa: E501
The start date of the instrument. This is normally synonymous with the trade-date. # noqa: E501
:return: The start_date of this Bond. # noqa: E501
:rtype: datetime
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this Bond.
The start date of the instrument. This is normally synonymous with the trade-date. # noqa: E501
:param start_date: The start_date of this Bond. # noqa: E501
:type: datetime
"""
if start_date is None:
raise ValueError("Invalid value for `start_date`, must not be `None`") # noqa: E501
self._start_date = start_date
@property
def maturity_date(self):
"""Gets the maturity_date of this Bond. # noqa: E501
The final maturity date of the instrument. This means the last date on which the instruments makes a payment of any amount. For the avoidance of doubt, that is not necessarily prior to its last sensitivity date for the purposes of risk; e.g. instruments such as Constant Maturity Swaps (CMS) often have sensitivities to rates beyond their last payment date # noqa: E501
:return: The maturity_date of this Bond. # noqa: E501
:rtype: datetime
"""
return self._maturity_date
@maturity_date.setter
def maturity_date(self, maturity_date):
"""Sets the maturity_date of this Bond.
The final maturity date of the instrument. This means the last date on which the instrument makes a payment of any amount. For the avoidance of doubt, that is not necessarily prior to its last sensitivity date for the purposes of risk; e.g. instruments such as Constant Maturity Swaps (CMS) often have sensitivities to rates beyond their last payment date # noqa: E501
:param maturity_date: The maturity_date of this Bond. # noqa: E501
:type: datetime
"""
if maturity_date is None:
raise ValueError("Invalid value for `maturity_date`, must not be `None`") # noqa: E501
self._maturity_date = maturity_date
@property
def dom_ccy(self):
"""Gets the dom_ccy of this Bond. # noqa: E501
The domestic currency of the instrument. # noqa: E501
:return: The dom_ccy of this Bond. # noqa: E501
:rtype: str
"""
return self._dom_ccy
@dom_ccy.setter
def dom_ccy(self, dom_ccy):
"""Sets the dom_ccy of this Bond.
The domestic currency of the instrument. # noqa: E501
:param dom_ccy: The dom_ccy of this Bond. # noqa: E501
:type: str
"""
if dom_ccy is None:
raise ValueError("Invalid value for `dom_ccy`, must not be `None`") # noqa: E501
self._dom_ccy = dom_ccy
@property
def flow_conventions(self):
"""Gets the flow_conventions of this Bond. # noqa: E501
:return: The flow_conventions of this Bond. # noqa: E501
:rtype: FlowConventions
"""
return self._flow_conventions
@flow_conventions.setter
def flow_conventions(self, flow_conventions):
"""Sets the flow_conventions of this Bond.
:param flow_conventions: The flow_conventions of this Bond. # noqa: E501
:type: FlowConventions
"""
if flow_conventions is None:
raise ValueError("Invalid value for `flow_conventions`, must not be `None`") # noqa: E501
self._flow_conventions = flow_conventions
@property
def principal(self):
"""Gets the principal of this Bond. # noqa: E501
The face-value or principal for the bond at outset. This might be reduced through its lifetime in the event of amortization or similar. # noqa: E501
:return: The principal of this Bond. # noqa: E501
:rtype: float
"""
return self._principal
@principal.setter
def principal(self, principal):
"""Sets the principal of this Bond.
The face-value or principal for the bond at outset. This might be reduced through its lifetime in the event of amortization or similar. # noqa: E501
:param principal: The principal of this Bond. # noqa: E501
:type: float
"""
if principal is None:
raise ValueError("Invalid value for `principal`, must not be `None`") # noqa: E501
self._principal = principal
@property
def coupon_rate(self):
"""Gets the coupon_rate of this Bond. # noqa: E501
simple coupon rate. # noqa: E501
:return: The coupon_rate of this Bond. # noqa: E501
:rtype: float
"""
return self._coupon_rate
@coupon_rate.setter
def coupon_rate(self, coupon_rate):
"""Sets the coupon_rate of this Bond.
simple coupon rate. # noqa: E501
:param coupon_rate: The coupon_rate of this Bond. # noqa: E501
:type: float
"""
if coupon_rate is None:
raise ValueError("Invalid value for `coupon_rate`, must not be `None`") # noqa: E501
self._coupon_rate = coupon_rate
@property
def identifiers(self):
"""Gets the identifiers of this Bond. # noqa: E501
external market codes and identifiers for the bond, e.g. ISIN. # noqa: E501
:return: The identifiers of this Bond. # noqa: E501
:rtype: dict(str, str)
"""
return self._identifiers
@identifiers.setter
def identifiers(self, identifiers):
"""Sets the identifiers of this Bond.
external market codes and identifiers for the bond, e.g. ISIN. # noqa: E501
:param identifiers: The identifiers of this Bond. # noqa: E501
:type: dict(str, str)
"""
self._identifiers = identifiers
@property
def instrument_type(self):
"""Gets the instrument_type of this Bond. # noqa: E501
The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashFlowsLeg, Unknown, TermDeposit, ContractForDifference, EquitySwap, CashPerpetual, CashSettled, CdsIndex, Basket, FundingLeg, CrossCurrencySwap, FxSwap # noqa: E501
:return: The instrument_type of this Bond. # noqa: E501
:rtype: str
"""
return self._instrument_type
@instrument_type.setter
def instrument_type(self, instrument_type):
"""Sets the instrument_type of this Bond.
The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashFlowsLeg, Unknown, TermDeposit, ContractForDifference, EquitySwap, CashPerpetual, CashSettled, CdsIndex, Basket, FundingLeg, CrossCurrencySwap, FxSwap # noqa: E501
:param instrument_type: The instrument_type of this Bond. # noqa: E501
:type: str
"""
if instrument_type is None:
raise ValueError("Invalid value for `instrument_type`, must not be `None`") # noqa: E501
allowed_values = ["QuotedSecurity", "InterestRateSwap", "FxForward", "Future", "ExoticInstrument", "FxOption", "CreditDefaultSwap", "InterestRateSwaption", "Bond", "EquityOption", "FixedLeg", "FloatingLeg", "BespokeCashFlowsLeg", "Unknown", "TermDeposit", "ContractForDifference", "EquitySwap", "CashPerpetual", "CashSettled", "CdsIndex", "Basket", "FundingLeg", "CrossCurrencySwap", "FxSwap"]  # noqa: E501
if instrument_type not in allowed_values:
    raise ValueError(
        "Invalid value for `instrument_type` ({0}), must be one of {1}"  # noqa: E501
        .format(instrument_type, allowed_values)
    )
self._instrument_type = instrument_type
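# Example (a minimal sketch only; the field values below are illustrative, the identifier is
# hypothetical, and the FlowConventions object is assumed to be built elsewhere in the SDK):
#
#   import datetime
#   bond = Bond(start_date=datetime.datetime(2020, 1, 1),
#               maturity_date=datetime.datetime(2030, 1, 1),
#               dom_ccy="GBP",
#               flow_conventions=flow_conventions,  # a lusid.FlowConventions instance
#               principal=1000000.0,
#               coupon_rate=0.05,
#               identifiers={"Isin": "GB00B0000000"},  # hypothetical identifier
#               instrument_type="Bond")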
self.kind = None
self.new_col = None
self.add_to = None
self.add_chr_prefix = True
self.id_field = None
self.temp = None
self.include_bed = None
self.include_if = "both"
self.load_genotype = True
self.gt_scores = None
# Track defaults for persistence
self.default_params = {k: v for k, v in self.__dict__.items() if k in {"bedpe",
"weight_field",
"no_translocations",
"allowed_svtypes",
"keep",
"stratify",
"stratify_range",
"allowed_chroms",
"min_size",
"max_size",
"soft_size_filter",
"other_cols",
"slop",
"break_cols",
"sep",
"drop_first_row",
"svtype_field"}}
self.meta_data = {"svbench": pkg_resources.get_distribution("svbench").version,
"date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
# Update any provided kwargs
if kwargs:
self.set_properties(kwargs)
def __add__(self, other):
if not isinstance(other, CallSet):
raise ValueError("Calling add on {}, should be instance of CallSet".format(type(other)))
n = self.copy()
n.breaks_df["source"] = [n.caller] * len(n.breaks_df)
other_df = other.breaks_df.copy(deep=True)
other_df["source"] = [other.caller] * len(other_df)
n.breaks_df = pd.concat([n.breaks_df, other_df])
n.breaks_df.reset_index(inplace=True)
n.tree = None
return n
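# Note: __sub__ below behaves like a set difference on calls. It keeps only the records in
# this CallSet that are NOT matched in `other` (matching is delegated to quantify), which is
# why add_intervals must have been called on both CallSets so their interval trees exist.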
def __sub__(self, other):
if not isinstance(other, CallSet):
raise ValueError("Calling add on {}, should be instance of CallSet".format(type(other)))
if self.tree is None or other.tree is None:
raise ValueError("Call add_intervals on both CallSet's before subtracting")
matching_indexes = quantify(other, self, good_indexes_only=True)
n = self.copy()
n.breaks_df = n.breaks_df[~np.array(matching_indexes)]
n.tree = None
return n
def __len__(self):
return len(self.breaks_df) if self.breaks_df is not None else 0
def intersection(self, other):
return self.__iand__(other)
def __iand__(self, other):
if not isinstance(other, CallSet):
raise ValueError("Calling add on {}, should be instance of CallSet".format(type(other)))
if self.tree is None or other.tree is None:
raise ValueError("Call add_intervals on both CallSet's before subtracting")
matching_indexes = quantify(other, self, good_indexes_only=True)
n = self.copy()
n.breaks_df = n.breaks_df[np.array(matching_indexes)]
n.tree = None
return n
# This didn't work as expected
# print("Matching", matching_indexes)
# quit()
# # starts, ends , indexes are all defaultdict, keys=chromosomes
# starts, ends, indexes = get_interval_arrays(list(zip(this_df["chrom"], this_df["start"], this_df["chrom2"],
# this_df["end"], this_df.index)), self.slop)
#
# # chr2:192559403-192559731
# other_tree = other.tree
# #print([i for i in other.breaks_df["end"] if i == 24403011])
# bad_indexes = set([])
# for chrom in starts.keys():
# if chrom in other_tree:
# l_idxs, r_idxs = other_tree[chrom].ncls.all_overlaps_both(starts[chrom], ends[chrom], indexes[chrom])
# bad_indexes |= set(l_idxs)
# print(f"Droppping {len(bad_indexes)} out of {len(this_df)}", file=stderr)
# self.breaks_df = this_df.drop(list(bad_indexes))
# self.add_intervals(self.slop)
# return self
# Needs fixing, as above
# def __iand__(self, other):
# if not isinstance(other, CallSet):
# raise ValueError("Calling add on {}, should be instance of CallSet".format(type(other)))
# if self.tree is None or other.tree is None:
# raise ValueError("Call add_intervals on both CallSet's before subtracting")
# this_df = self.breaks_df
# good_indexes = set([])
# starts, ends, indexes = get_interval_arrays(list(zip(this_df["chrom"], this_df["start"], this_df["chrom2"],
# this_df["end"], this_df.index)), self.slop)
#
# other_tree = other.tree
# for chrom in starts.keys():
# if chrom in other_tree:
# l_idxs, r_idxs = other_tree[chrom].ncls.all_overlaps_both(starts[chrom], ends[chrom], indexes[chrom])
# good_indexes |= set(l_idxs)
#
# self.breaks_df = this_df.loc[sorted(good_indexes)]
# self.add_intervals(self.slop)
# return self
def add_intervals(self, slop=250, interval_type="breakpoint"):
"""Adds intervals to loaded data defined in self.breaks_df.
:param slop: The distance to add and subtract from each variant position.
:type slop: int
:param interval_type: the interval type to build, one of 'breakpoint', 'bed' or 'bedpe'
:type interval_type: str
:returns: self
:rtype: svbench.CallSet"""
if interval_type not in ("breakpoint", "bed", "bedpe"):
raise ValueError("interval_type must be one of: 'breakpoint', 'bed', 'bedpe'")
self.slop = slop
df = self.breaks_df
if df is None or len(df) == 0:
raise ValueError("breaks_df not found")
self.tree = make_tree(list(zip(df["chrom"], df["start"], df["chrom2"], df["end"], df.index)), slop=slop,
interval_type=interval_type)
return self
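# A typical comparison workflow might look like the following sketch. The constructor keyword
# names (dataset, caller, kind) are assumptions inferred from the attributes used in this class
# and may differ from the real API:
#
#   truth = CallSet(dataset="truth", kind="vcf").load("truth.vcf").add_intervals(slop=250)
#   calls = CallSet(caller="callerA", kind="vcf").load("calls.vcf").add_intervals(slop=250)
#   shared = calls.intersection(truth)   # calls that match the truth set
#   novel = calls - truth                # calls absent from the truth set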
def reset_arguments(self):
for k, v in self.default_params.items():
self.__setattr__(k, v)
def set_args(self, args):
# Update existing class attributes with any new ones
for k, v in args.items():
if k != "self":
a = self.__getattribute__(k)
if a is None and v is not None:
self.__setattr__(k, v)
elif v is not None:
if k in self.default_params and v != self.default_params[k]:
self.__setattr__(k, v)
def properties(self):
return self.kwargs
def required(self):
return self.required
def set_properties(self, d):
unset = set(self.required)
new_args = set([])
for k, v in d.items():
if k in self.__dict__:
self.__setattr__(k, v)
else:
if k in unset:
unset.remove(k)
else:
new_args.add(k)
self.kwargs[k] = v
if len(unset) > 0:
raise KeyError(f"All preset key word arguments must be set when calling new. Keys missing: {list(unset)}")
if len(new_args) > 0:
print(f"Warning: unexpected arguments {list(new_args)}", file=stderr)
return self
def copy(self):
"""Create a new deep-copy of this object
:returns: CallSet instance
:rtype: svbench.CallSet"""
return copy.deepcopy(self)
def new(self, **kwargs):
n = self.copy()
n.set_properties(kwargs)
return n
def query(self, query_expression, inplace=False, engine="python"):
if not inplace:
cs = self.copy()
else:
cs = self
l_before = len(cs.breaks_df)
cs.breaks_df = cs.breaks_df.query(query_expression, engine=engine)
print("Filtered by expression, caller={}, dataset={} rows before {}, after {}".format(self.caller, self.dataset,
l_before,
len(cs.breaks_df)), file=stderr)
return cs
def filter_by_size(self, min_size=None, max_size=None, inplace=False, soft=False, keep_translocations=False):
"""Filter the loaded data defined in self.breaks_df by size (base-pairs).
:param min_size: the minimum size threshold; variants with size < min_size are filtered out
:type min_size: int
:param max_size: the maximum size threshold; variants with size >= max_size are filtered out
:type max_size: int
:param inplace: if set to True then filtering is performed inplace
:type inplace: bool
:param soft: if True, failing rows are only flagged in a 'size_filter_pass' column rather than dropped
:type soft: bool
:param keep_translocations: if False, translocation calls (breakends on different chromosomes) are dropped
:type keep_translocations: bool
:return: CallSet instance
:rtype: svbench.CallSet
"""
if inplace:
cs = self
else:
cs = self.copy()
done = True
if cs.min_size != min_size:
cs.min_size = min_size
done = False
if cs.max_size != max_size:
cs.max_size = max_size
done = False
if done or (min_size is None and max_size is None): # Nothing to be done
return cs
if min_size is None:
min_s = -1
else:
min_s = min_size
if max_size is None:
max_s = 1e12
else:
max_s = max_size
l_before = len(cs.breaks_df)
df = cs.breaks_df
if keep_translocations:
size_filter = ((df["svlen"] >= min_s) & (df["svlen"] < max_s)) | (df["chrom1"] != df["chrom2"])
else:
size_filter = (df["svlen"] >= min_s) & (df["svlen"] < max_s)
df["size_filter_pass"] = size_filter
if not soft:
df = df[size_filter]
print("Filtered by min_size={}, max_size={}, caller={}, dataset={} rows before {}, after {}".format(
min_size, max_size, self.caller, self.dataset,l_before, size_filter.sum()),
file=stderr)
cs.breaks_df = df
return cs
def filter_by_svtype(self, svtype_set, inplace=False):
l_before = len(self.breaks_df)
bad_i = set([])
if isinstance(svtype_set, str):
svtype_set = set(svtype_set.split(","))
for idx, svtype in zip(self.breaks_df.index, self.breaks_df.svtype):
if svtype not in svtype_set:
bad_i.add(idx)
print("Filtered by svtype, caller={}, dataset={} rows before {}, after {}".format(self.caller, self.dataset,
l_before,
l_before - len(bad_i)),
file=stderr)
if not inplace:
s = self.copy()
s.breaks_df = s.breaks_df.drop(bad_i)
return s
self.breaks_df = self.breaks_df.drop(bad_i)
return self
def filter_include_bed(self, include_bed_path, inplace=False):
if not inplace:
v = self.copy()
else:
v = self
l_before = len(v.breaks_df)
bad_i = set([])
if include_bed_path:
include_bed = CallSet().load_bed(path=include_bed_path, bedpe=False).add_intervals(interval_type="bed")
ol_tree = include_bed.tree
df = v.breaks_df
if df is not None and ol_tree is not None:
for index, chrom, start, chrom2, end in zip(df.index, df.chrom, df.start, df.chrom2, df.end):
if ol_tree:
if chrom not in ol_tree:
bad_i.add(index)
else:
if not any(ol_tree[chrom].ncls.find_overlap(start, start + 1)) or not any(ol_tree[chrom2].ncls.find_overlap(end, end + 1)):
bad_i.add(index)
v.breaks_df = v.breaks_df.drop(bad_i)
print("Filtered by include_bed, caller={}, dataset={} rows before {}, after {}".format(v.caller, v.dataset,
l_before,
l_before - len(bad_i)),
file=stderr)
return v
def score_table(self):
if self.scores is not None:
print(f"Score table caller={self.caller} against dataset={self.dataset}", file=stderr)
print(pd.DataFrame.from_records([self.scores], index=None).to_string(), file=stderr)
def set_strata(self, stratify_col, stratify_range):
if not hasattr(stratify_range, '__iter__'):
raise ValueError("stratify_range must be an iterable")
self.breaks_df["strata"] = self.breaks_df[stratify_col]
self.stratify_range = stratify_range
return self
def load(self, path):
"""Load variants from the file path. For this function to work the CallSet instance must have the self.kind \
attribute set.
:param path: File path for input data
:type path: str
:return: CallSet instance
:rtype: svbench.CallSet
"""
if self.kind is None:
raise ValueError("The file kind must be provided with the argument 'kind'")
elif self.kind == "vcf":
return self.load_vcf(path)
elif self.kind == "csv":
return self.load_csv(path)
elif self.kind == "bed" or self.kind == "bedpe":
return self.load_bed(path)
else:
raise ValueError("Unknown file kind {}".format(self.kind))
def parse_str_cols(self, cols):
if type(cols) == list and all(type(i) == str for i in cols):
c = []
for k in cols:
if ":" in k:
c.append(Col(*k.split(":")))
else:
c.append(Col(k))
return c
else:
return cols
def load_vcf(self, path, weight_field=None,
no_translocations=True, allowed_svtypes=None, keep=None, stratify=None, allowed_chroms=None,
min_size=None, max_size=None, soft_size_filter=False,
other_cols=None, include_bed=None, include_if="both", load_genotype=True):
"""Load variants from the vcf file path.
:param path: The path to the vcf input file
:type path: str
:param weight_field: The field used to weight a variant, useful for breaking ties between similar variants when \
benchmarking
:type weight_field: svbench.Col
:param no_translocations: Ignore translocations when loading file
:type no_translocations: | |
#############################################################
#collatrix_functions.py
#this script contains the functions used in all collatrix code
#created by <NAME> (<EMAIL>), October 2020
#updated by <NAME>, June 2021
##############################################################
#import modules
import pandas as pd
import numpy as np
import os, sys
####### ALL VERSION FUNCTIONS ##########
#duplicate check function
def anydup(l): #we'll use this function later to check for duplicates
seen = set()
for x in l:
if x in seen: return True
seen.add(x)
return False
# function that reads in csv as one column, then splits
def readfile(f):
temp=pd.read_csv(f,sep='^',header=None,prefix='X',engine = 'python',quoting=3, na_values = ['""','"'], encoding_errors = "ignore") #read in csv as one column
df00=temp.X0.str.split(',',expand=True) #split rows into columns by delimiter
df00 = df00.replace("",np.nan)
df0 = df00.dropna(how='all',axis = 'rows').reset_index(drop=True)
df0 = df0.fillna('') #replace nans by blank space
return df0
# function that resets header
def fheader(df):
head = df.iloc[0] #make list out of names in first row
df = df[1:] #take the data less the header row
df.columns = head #set the header row as the df header
return(df)
#function to make list of length measurements
def lmeas(df, object, constants):
#make list of Length measurements
l = df[object].tolist() #make list of Object columns aka. names of measurements made
l = [x for x in l if pd.isna(x) == False] #eliminate all empty cells
l = [x for x in l if x not in constants and x != object] #eliminate all other instances of Object
l = [x for x in l if x] #make sure no blank elements in list
return(l)
#function that uses anydup and makes list of width measurements
def wmeas(df,l,widths,f):
wlist = []
if anydup(l) == True: #check for any duplicate measurement names, if exists, exit code, print error msg
print("please check file {0} for duplicate Object Names and remove duplicates".format(f))
sys.exit("remove duplicate and run script again")
elif anydup(l) == False:
for i in l: #loop through list of measurement types
if i in df.index:
for w in (w for w in widths if w[0].isdigit()): #loop through the widths
x = df.loc[i,w] #extract cell value of width of measurement type
if pd.isna(x) == False: #if the cell isn't empty
ww = i + "-" + w #combine the names
wlist += [ww] #add this combined name to the master list
else: pass
return(wlist)
#function to set up empty dictionaries
def setup(measurements):
#now we're going to set up a dictionary to fill in with all the measurements
#that we will eventually turn into a dataframe where the keys are the columns
measurements += ['Image','Animal_ID','Altitude','Focal Length','PixD','Notes']
names = ['Image','Animal_ID','Altitude','Focal Length','PixD','Notes']
mDict = dict.fromkeys(measurements)
keys = list(mDict.keys())
#now make list and dictionary for pixel count dataframe
measurements_pixc = ["{0}.PixCount".format(x) if x not in names else x for x in measurements]
mDict_pixc = dict.fromkeys(measurements_pixc)
keys_pixc = list(mDict_pixc.keys())
#make an empty dataframe with the headers being the measurement types/info to pull
df_all = pd.DataFrame(columns = keys)
df_all_pixc = pd.DataFrame(columns = keys_pixc)
return(measurements,names,mDict,mDict_pixc,keys,keys_pixc,df_all,df_all_pixc)
def pull_data(f, df, mDict, mDict_pixc,anFold):
if anFold == 'yes':
aID = os.path.split(os.path.split(f)[0])[1] #extract animal ID
elif anFold == 'no':
aID = df[df[0] == 'Image ID'].loc[:,[1]].values[0] #pull animal id
aID = aID[0]
mDict['Animal_ID'] = aID; mDict_pixc['Animal_ID'] = aID
image = os.path.split(df[df[0] == 'Image Path'].loc[:,1].values[0])[1] #extract image
print(image)
mDict['Image'] = image; mDict_pixc['Image'] = image
alt = float((df[df[0] == 'Altitude'].loc[:,[1]].values[0])[0]) #extract entered altitude
mDict['Altitude'] = alt; mDict_pixc['Altitude'] = alt
focl = float((df[df[0] == 'Focal Length'].loc[:,[1]].values[0])[0]) #extract entered focal length
mDict['Focal Length'] = focl; mDict_pixc['Focal Length'] = focl
pixd = float((df[df[0] == 'Pixel Dimension'].loc[:,[1]].values[0])[0]) #extract entered pixel dimension
mDict['PixD'] = pixd; mDict_pixc['PixD'] = pixd
notes = df[df[0] == 'Notes'].loc[:,[1]].values[0] #extract entered notes
mDict['Notes'] = notes[0]; mDict_pixc['Notes'] = notes
return(mDict, mDict_pixc, image, alt, focl, pixd)
def safe_data(df_L, image):
#get the true values of focal length and altitude to use when recalculating
alt_act = float(df_L[df_L.Image == image].loc[:,'Altitude'].values[0]) #this says: find row where image = image and pull altitude
foc_act = float(df_L[df_L.Image == image].loc[:,'Focal_Length'].values[0])
pixd_act = float(df_L[df_L.Image == image].loc[:,'Pixel_Dimension'].values[0])
return(alt_act,foc_act,pixd_act)
def end_concat(mDict, df_all):
df_dict = pd.DataFrame(data = mDict,index=[1]) #make dictionary into dataframe
df_all = pd.concat([df_all,df_dict],sort = True)
return(df_all)
def df_formatting(df_all):
df_allx = df_all.drop(columns = ['Altitude','Focal Length','PixD']).replace(np.nan,0) #drop non-measurement cols
df_all_cols = df_allx.columns.tolist() #make list of column names
gby = ['Animal_ID','Image','Notes'] #list of non-numeric columns
togroup = [x for x in df_all_cols if x not in gby] #setting up list of columns to be grouped
df_all = df_allx.groupby(['Animal_ID','Image'])[togroup].apply(lambda x: x.astype(float).sum()).reset_index()
df_notes = df_allx.groupby(['Animal_ID','Image'])['Notes'].first().reset_index()
df_all =df_all.merge(df_notes,on=['Animal_ID','Image'])
#sort cols
cols = list(df_all)
a = "AaIiTtEeJjRrBbFfWwCcDdGgHhKkLlMmNnOoPpQqSsUuVvXxYyZz" #make your own ordered alphabet
col = sorted(cols, key=lambda word:[a.index(c) for c in word[0]]) #sort headers based on this alphabet
df_all1 = df_all.loc[:,col] #sort columns based on sorted list of column header
df_all1 = df_all1.replace(0,np.nan) #replace the 0s with nans
return df_all1
######### V4 AND V5 FUNCTION ############
def collate_v4and5(csvs, object, length, constants,safety,df_L,measurements, nonPercMeas, anFold):
for f in csvs:
print(f)
df0 = readfile(f)
idx = df0.loc[df0[0] == object].index #find index (row) values of 'Object'
df = df0.truncate(before=idx[0]) #take subset of df starting at first row containing Object
df = fheader(df)
widths = df.columns.values.tolist()
l = lmeas(df,object, constants)
measurements = measurements + l #add the measurement names to the master list
nonPercMeas = nonPercMeas + l #copy of the master list that does not include widths
df = df.set_index(object)
df = df.replace('nan',np.nan,regex=True)
wlist = wmeas(df,l,widths,f)
measurements = measurements + wlist
measurements,names,mDict,mDict_pixc,keys,keys_pixc,df_all,df_all_pixc = setup(measurements)
for f in csvs:
print(f)
df0 = readfile(f)
idx = df0.loc[df0[0]== object].index #set object column to index
df = df0.truncate(after=idx[0]) #subset df to be only top info section
mDict, mDict_pixc, image, alt, focl, pixd = pull_data(f, df, mDict, mDict_pixc,anFold)
if safety == 'yes': #pull the altitude, focal length, and pix d from the safety csv by image name
alt_act,foc_act,pixd_act = safe_data(df_L, image)
else: pass
#go into the cvs to look for the values
dfGUI = df0.truncate(before=idx[0]) #take subset of df starting at first row containing Object
headG = dfGUI.iloc[0]; dfGUI = dfGUI[1:]; dfGUI.columns = headG
dfGUI = dfGUI.set_index(object)
for key in keys: #loop through the keys aka future column headers
if key in nonPercMeas: #if that key is in the list of measurement types (not widths)
if key in dfGUI.index: #if that key (measurement) is in this csv
x = float(dfGUI.loc[key,length]) #extract the measurement value using location
# now is the time to do the back calculations
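# The ground sampling distance (GSD, metres per pixel) is (altitude / focal length) * pixel dimension,
# so dividing the measured length by the pixel dimension and multiplying by focal length / altitude
# recovers the pixel count; multiplying that count by a corrected GSD (from the safety file)
# re-scales the measurement using the true altitude and focal length.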
pixc = (x/pixd)*(focl/alt) #back calculate the pixel count
if safety == 'yes':
xx = ((alt_act/foc_act)*pixd_act)*pixc #recalculate using the accurate focal length and altitude
elif safety == 'no':
xx = x
else: #if this key is not in the csv
xx = np.nan
mDict[key] = xx #add the value to the respective key
mDict_pixc["{0}.PixCount".format(key)] = pixc #add pixel count to respecitive key in pixel count dataframe
elif "%" in key and key.split("-")[0] in dfGUI.index: #if the key is a width
row = key.split("-")[0] #split the name of the measurement
col = key.split("-")[1] #to get the row and column indices
y = float(dfGUI.loc[row,col]) #to extract the measurement value
#recalculate using accurate focal length and altitude
pixc = (y/pixd)*(focl/alt) #back calculate the pixel count
if safety == 'yes':
yy = ((alt_act/foc_act)*pixd_act)*pixc #recalculate using the accurate focal length and altitude
elif safety == 'no':
yy = y
mDict[key] = yy
mDict_pixc["{0}.PixCount".format(key)] = pixc #add pixel count to respecitive key in pixel count dataframe
elif key not in dfGUI.index and key not in names:
mDict[key] = np.nan
df_all = end_concat(mDict, df_all)
df_all_pixc = end_concat(mDict_pixc, df_all_pixc)
return(df_all,df_all_pixc)
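# Example call (a sketch only; the `object` and `length` labels depend on the headers of the
# MorphoMetriX output files, and the constants list shown here is illustrative):
#
#   import glob
#   csvs = glob.glob("measurements/**/*.csv", recursive=True)
#   df_all, df_all_pixc = collate_v4and5(csvs, object="Object", length="Length (m)",
#                                        constants=["Image ID", "Image Path", "Altitude",
#                                                   "Focal Length", "Pixel Dimension", "Notes"],
#                                        safety="no", df_L=None, measurements=[],
#                                        nonPercMeas=[], anFold="no")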
######### V6 FUNCTION #####################
def collate_v6(csvs, object, length, constants,safety,df_L,measurements, nonPercMeas, anFold):
for f in csvs:
print(f)
df0 = readfile(f)
idx = df0.loc[df0[0] == object].index #find index (row) values of 'Object'
df = df0.truncate(before=idx[0]) #take subset of df starting at first row containing Object
df = fheader(df)
l = lmeas(df,object, constants)
measurements = measurements + l #add the measurement names to the master list
nonPercMeas = nonPercMeas + l #copy of the master list that does not include widths
#make list of Width measurements
iwx = df.loc[df['Widths (%)'].str.contains("Width")].index.tolist()
for ix,iw in enumerate(iwx):
if ix +1 < len(iwx): #if there's more than one row containing Width headers
iw1 = iwx[ix+1]-1 | |
import csv, json, argparse, copy, re, os, urllib2
import numpy as np
from scipy.spatial import distance
from sklearn import manifold, metrics, decomposition, preprocessing
import igraph
import jsmin
import rdkit
from rdkit import Chem, DataStructs, Geometry
from rdkit.DataStructs import cDataStructs
from rdkit.Chem import Draw, AllChem, Scaffolds, Lipinski, Crippen, rdMolDescriptors, TemplateAlign
from rdkit.Chem.Scaffolds import MurckoScaffold
PROPS_ORDER = ["mw", "hba", "hbd", "rb", "rc", "arc", "logp", "tpsa"]
PROP2FNC = {
"mw": rdMolDescriptors.CalcExactMolWt,
"hba": Lipinski.NumHAcceptors,
"hbd": Lipinski.NumHDonors,
"rb": Lipinski.NumRotatableBonds,
"rc": Lipinski.RingCount,
"arc": Lipinski.NumAromaticRings,
"logp": Crippen.MolLogP,
"tpsa": rdMolDescriptors.CalcTPSA,
}
PROP2LABEL = {
"mw": "Molecular weight",
"hba": "H-bond acceptors",
"hbd": "H-bond donors",
"rb": "Rotatable bonds",
"rc": "Rings",
"arc": "Aromatic rings",
"logp": "cLogP",
"tpsa": "TPSA"
}
FP2FNC = {
"ecfp4": lambda rdmol: AllChem.GetMorganFingerprintAsBitVect(rdmol, radius=2, nBits=1024),
"ecfp6": lambda rdmol: AllChem.GetMorganFingerprintAsBitVect(rdmol, radius=3, nBits=1024),
"apfp": lambda rdmol: AllChem.GetHashedAtomPairFingerprintAsBitVect(rdmol, nBits=1024),
"ttfp": lambda rdmol: AllChem.GetHashedTopologicalTorsionFingerprintAsBitVect(rdmol, nBits=1024),
"maccs": lambda rdmol: AllChem.GetMACCSKeysFingerprint(rdmol),
}
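# Example (sketch): PROP2FNC and FP2FNC map names to RDKit callables, e.g. for a single SMILES string:
#   rdmol = Chem.MolFromSmiles("CCO")
#   fp = FP2FNC["ecfp4"](rdmol)            # 1024-bit Morgan fingerprint, radius 2
#   mw = PROP2FNC["mw"](rdmol)             # exact molecular weight
#   sim = DataStructs.TanimotoSimilarity(fp, FP2FNC["ecfp4"](Chem.MolFromSmiles("CCN")))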
AVAILABLE_METRICS = ["Tanimoto", "Dice", "Cosine", "Sokal", "Russel", "RogotGoldberg", "AllBit", "Kulczynski", "McConnaughey", "Asymmetric", "BraunBlanquet"]
class ChemSpace():
def __init__(self):
self.category_field = False
self.category_field_delimiter = False
self.label_field = False
self.compound_structure_field = False
self.sdf = False
self.write_structures = True
self.fp = "ecfp4"
self.fingerprint_field = False
self.metric = "Tanimoto"
if self.metric not in AVAILABLE_METRICS:
raise Exception("Metric '{}' not found in available similarity metrics: {}".format(self.metric, AVAILABLE_METRICS))
self.index2rdmol = {}
self.index2fpobj = {}
def read_csv(self, filename, delimiter=",", header=False, missing_value=False, remove_columns=False):
"""Reads data from the CSV file"""
print("Reading file: {}".format(filename))
self.filename = filename
with open(self.filename, "r") as input_file:
reader = csv.reader(input_file, delimiter=delimiter)
rows = [row for row in reader]
self.read_data(rows, header, missing_value, remove_columns)
def read_sdf(self, filename):
"""Reads data from a sdf file"""
print("Reading file: {}".format(filename))
self.sdf = True
self.header = False
self.data = []
self.filename = filename
self.index2rdmol = {}
self.index2fpobj = {}
self.index2props = {}
self.index2category = {}
self.index2label = {}
self.index2id = {}
molsupplier = Chem.SDMolSupplier(str(filename))
not_parsed = []
for index, m in enumerate(molsupplier):
try:
Chem.SanitizeMol(m)
self.index2rdmol[index] = m
self.index2fpobj[index] = FP2FNC[self.fp](m)
self.index2props[index] = m.GetPropsAsDict()
self.index2id[index] = index
except Exception as e:
print(e)
not_parsed.append(index)
self.index_order = sorted(self.index2rdmol.keys())
self.index2row = {i: [] for i in self.index_order}
self.data = self.index2row.values()
if self.label_field is not False and self.label_field in self.index2props[self.index_order[0]]:
self.index2label = {i: self.index2props[i].get(self.label_field) for i in self.index_order}
if self.category_field is not False and self.category_field in self.index2props[self.index_order[0]]:
self.index2category = {i: self.index2props[i].get(self.category_field) for i in self.index_order}
self.__create_chemspace_format__()
def add_compounds_from_file(self, filename, delimiter=","):
print("Reading compounds: {}".format(filename))
self.filename = filename
with open(self.filename, "r") as input_file:
reader = csv.reader(input_file, delimiter=delimiter)
rows = [row for row in reader]
self.add_compounds(rows)
def add_category(self, category):
if not "categories" in self.chemical_space:
self.chemical_space["categories"] = []
self.chemical_space["categories"].append(category)
def add_compounds(self, rows):
"""Reads data in a form of list of lists (tuples)"""
self.compounds = {r[0]: r[1] for r in rows}
self.chemical_space["compounds"] = {}
self.__parse_compounds__()
for key in self.chemical_space["points"]:
if key in self.id2rdmol:
self.chemical_space["compounds"][key] = {"structure": self.__get_compound__(key)}
def read_data(self, rows, header=False, missing_value=False, remove_columns=False):
"""Reads data in a form of list of lists (tuples)"""
self.header = header
self.missing_value = missing_value
data_start = 0
self.data = rows
self.index2id = {}
self.index2row = {}
self.index2compound = {}
self.index2label = {}
self.index2category = {}
if self.header:
self.header = self.data[0]
self.data = self.data[1:]
if self.header:
if remove_columns is not False and len(remove_columns) > 0:
for col in remove_columns:
self.__remove_field__(col)
if self.compound_structure_field and self.compound_structure_field in self.header:
self.index2compound = self.__extract_field__(self.compound_structure_field)
self.__read_compounds__()
if self.label_field and self.label_field in self.header:
self.index2label = self.__extract_field__(self.label_field)
if self.category_field and self.category_field in self.header:
self.index2category = self.__extract_field__(self.category_field)
if self.fingerprint_field and self.fingerprint_field in self.header:
self.index2fp = self.__extract_field__(self.fingerprint_field)
self.index2fpobj = {}
for index, fp in self.index2fp.items():
self.index2fpobj[index] = self.__get_bitvect_for_fp__(fp)
# remove ID field
self.header.pop(0)
self.index2id = {i: row[0] for i, row in enumerate(self.data)}
self.index2row = {i: [round(float(v), 2) if v not in ["", None, "None", self.missing_value] else None for v in row[1:]] for i, row in enumerate(self.data)}
self.index_order = [i for i, row in enumerate(self.data)]
self.data = [self.index2row[i] for i in self.index_order]
if self.missing_value is not False:
self.data, self.missing_values_indexes = self.__impute_missing_values__(self.data)
# self.original_data = self.__return_missing_values__(copy.deepcopy(self.data), self.missing_values_indexes)
# self.original_data = copy.deepcopy(self.index2row)
self.__create_chemspace_format__()
def __read_compounds__(self):
for i, smi in self.index2compound.items():
try:
self.index2rdmol[i] = Chem.MolFromSmiles(smi)
self.index2fpobj[i] = FP2FNC[self.fp](self.index2rdmol[i])
except Exception as e:
print(e)
self.index2rdmol[i] = None
self.index2fpobj[i] = None
def __remove_field__(self, field):
if field in self.header:
index = self.header.index(field)
if index is not False:
self.header.pop(index)
for i, row in enumerate(self.data):
self.data[i].pop(index)
def __extract_field__(self, field):
index2value = {}
if field in self.header:
index = self.header.index(field)
if index is not False:
self.header.pop(index)
for i, row in enumerate(self.data):
index2value[i] = row[index]
self.data[i].pop(index)
return index2value
def __impute_missing_values__(self, data):
datatype2impute = {"numeric": {"strategy":"mean",
"value": lambda x: round(float(value), 3)},
"binary": {"strategy":"most_frequent",
"value": lambda x: int(value)}
}
missing_values_indexes = []
for i, row in enumerate(self.data):
missing_values_indexes.append([j for j, v in enumerate(row) if v == self.missing_value])
for j, value in enumerate(row):
if value == self.missing_value:
data[i][j] = np.nan
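# Note: preprocessing.Imputer comes from older scikit-learn releases; in modern versions the
# equivalent class is sklearn.impute.SimpleImputer with the same strategy argument.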
imputer = preprocessing.Imputer(missing_values="NaN", strategy=datatype2impute["numeric"]["strategy"])
#error when using median strategy - minus one dimension in imputed data... omg
imputed_data = [list(row) for row in imputer.fit_transform(self.data)]
imputed_data = [[datatype2impute["numeric"]["value"](value) for value in row] for row in imputed_data]
return imputed_data, missing_values_indexes
def __return_missing_values__(self, data, missing_values_indexes):
for i, indexes in enumerate(missing_values_indexes):
if indexes:
for index in indexes:
data[i][index] = None
return data
def __create_chemspace_format__(self):
self.chemical_space = {"points": {}}
for index in self.index_order:
self.chemical_space["points"][index] = {"object_ids": [self.index2id[index]]}
if len(self.index2category):
self.__parse_categories__()
if len(self.index2label):
for index, label in self.index2label.items():
self.chemical_space["points"][index]["label"] = label
for index, row in self.index2row.items():
self.chemical_space["points"][index]["features"] = copy.copy(row)
if self.header:
current_header = self.chemical_space.get("feature_names", [])
current_header.extend(self.header)
self.chemical_space["feature_names"] = current_header
if len(self.index2rdmol) and self.write_structures:
self.chemical_space["compounds"] = {}
for index, rdmol in self.index2rdmol.items():
# self.chemical_space["compounds"][index] = {"structure": self.__get_compound__(rdmol), "smiles": Chem.MolToSmiles(rdmol, True)}
self.chemical_space["compounds"][index] = {"smiles": Chem.MolToSmiles(rdmol, True)}
def __parse_categories__(self):
category2ids = {}
for index, category in self.index2category.items():
categories = [category] if self.category_field_delimiter is False else [c.strip() for c in category.split(self.category_field_delimiter)]
for c in categories:
if c in category2ids:
category2ids[c].add(index)
else:
category2ids[c] = {index}
if not "categories" in self.chemical_space:
self.chemical_space["categories"] = []
for c, ids in category2ids.items():
self.chemical_space["categories"].append({"label": c, "points": list(ids)})
def add_paths(self, paths):
if not self.chemical_space.get("paths", False):
self.chemical_space["paths"] = []
self.chemical_space["paths"].extend(paths)
def add_paths_from_file(self):
pass
def add_physico_chemical_properties(self):
print("Calculating physico-chemical properties: {} compounds".format(len(self.index2rdmol)))
self.pcp = True
if len(self.index2rdmol):
count = len(self.index2rdmol)
i = 0
id2pcp = {}
for index, rdmol in self.index2rdmol.items():
if i%100 == 0 or i == count:
print("{}/{}".format(i, count))
id2pcp[index] = self.__get_pcp_for_rdmol__(rdmol)
i+=1
empty = [None for x in PROP2LABEL]
for i, index in enumerate(self.index_order):
if id2pcp.get(index, False):
pcps = id2pcp[index]
else:
pcps = empty
self.chemical_space["points"][index]["features"].extend(pcps)
self.data[i].extend(pcps)
current_header = self.chemical_space.get("feature_names", [])
current_header.extend([PROP2LABEL[prop] for prop in PROPS_ORDER])
self.chemical_space["feature_names"] = current_header
self.original_data = copy.deepcopy(self.data)
def __get_pcp_for_rdmol__(self, rdmol):
return [round(PROP2FNC[prop](rdmol), 2) for prop in PROPS_ORDER]
def __get_compound__(self, rdmol):
if rdmol is not None:
Chem.Kekulize(rdmol)
AllChem.Compute2DCoords(rdmol)
compound = {"atoms": {}}
atoms = [a for a in rdmol.GetAtoms()]
bond_types = []
for i, a in enumerate(atoms, 1):
number = a.GetIdx()
position = rdmol.GetConformer().GetAtomPosition(number)
compound["atoms"][number] = {
"bonds": {b.GetEndAtomIdx():b.GetBondTypeAsDouble() for b in a.GetBonds() if b.GetEndAtomIdx() != number},
"symbol": a.GetSymbol(),
"charge": a.GetFormalCharge(),
"coordinates": [round(position.x, 3), round(position.y, 3)]
}
bond_types.extend(compound["atoms"][number]["bonds"].values())
else:
compound = None
return compound
def normalize_data(self, feature_range=(0,1)):
"""Normalizes data to a scale from 0 to 1."""
print("Data normalization (scale): {}".format(feature_range))
min_max_scaler = preprocessing.MinMaxScaler(feature_range)
self.data = min_max_scaler.fit_transform(self.data)
self.data = [[round(v, 3) for v in row] for row in self.data]
def __calculate_distance_matrix__(self, similarity_threshold):
print("\nCalculating distance matrix: {} compounds".format(len(self.index2fpobj)))
self.dist_matrix = {x:[] for x in self.index_order}
self.edges = []
self.index2edges = {}
fps_count = len(self.index_order)
for i, index_1 in enumerate(self.index_order):
self.index2edges[index_1] = []
if i%100 == 0 or i == fps_count:
print("{}/{}".format(i, fps_count))
for j, index_2 in enumerate(self.index_order[i:], i):
sim = DataStructs.FingerprintSimilarity(self.index2fpobj[index_1], self.index2fpobj[index_2], metric=getattr(DataStructs, "{}Similarity".format(self.metric)))
self.dist_matrix[index_1].append(1-sim)
if index_1 != index_2:
self.dist_matrix[index_2].append(1-sim)
if sim >= similarity_threshold:
self.edges.append((index_1, index_2))
self.index2edges[index_1].append([index_2])
def __get_edges__(self, similarity_threshold=0.7, k=2):
print("\nCalculating edges [similarity threshold={}]: {} compounds".format(similarity_threshold, len(self.index2fpobj)))
self.edges = []
self.index2edges = {}
count = len(self.index_order)
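# For each compound the remaining compounds are sorted by increasing distance, and edges are
# added to the closest neighbours whose similarity (1 - distance) meets the threshold, capped
# at k edges per compound.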
for i, index in enumerate(self.index_order):
if (i+1)%100 == 0:
print("{}/{}".format(i, count))
values = [[idx, v] for idx, v in zip(self.index_order, self.dist_matrix[index]) if idx != index]
values.sort(key=lambda x: x[1])
if 1-values[1][1] >= similarity_threshold:
self.index2edges[index] = []
for v in values:
if 1-v[1] >= similarity_threshold:
self.edges.append((index, v[0]))
self.index2edges[index].append([v[0]])
if len(self.index2edges[index]) == k:
break
else:
break
print("EDGES: {}".format(len(self.edges)))
| |
# pygna/block_model.py
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import logging
from pygna import output
from pygna.utils import YamlConfig
import pandas as pd
import random
import string
import seaborn as sns
import pygna.output as output
class BlockModel(object):
def __init__(self, block_model_matrix, n_nodes: int = 10, nodes_percentage: list = None):
"""
This class implements a block model reading and elaboration methods
:param block_model_matrix: the matrix to be used as block model
:param n_nodes: the number of nodes
:param nodes_percentage: the percentage of nodes to use for the calculations, passed through a list for example [0.5, 0.5]
"""
self.n_nodes = n_nodes
self.nodes = ["N" + str(i) for i in range(n_nodes)]
self.n_clusters = block_model_matrix.shape[0]
self.graph = nx.Graph()
self.bm = block_model_matrix
self.nodes_in_block = False
self.nodes_percentage = nodes_percentage
self.cluster_dict = {}
def set_nodes(self, nodes_names: list) -> None:
"""
Set the nodes name of the block model
:param nodes_names: the names list
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> nodes = list("A", "B", "C")
>>> bm.set_nodes(nodes)
"""
self.nodes = nodes_names
self.n_nodes = len(nodes_names)
def set_bm(self, block_model_matrix: pd.DataFrame) -> None:
"""
Change block model matrix used in the class
:param block_model_matrix: the block model matrix
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bmm = pd.DataFrame(mydata_matrix)
>>> bm.set_bm(bmm)
"""
if block_model_matrix.shape[0] == self.n_clusters:
self.bm = block_model_matrix
else:
logging.error("the block model is supposed to have %d clusters" % (self.n_clusters))
def set_nodes_in_block_percentage(self, nodes_percentage: list) -> None:
"""
Pass the percentage of nodes in each block as a list, for example [0.5, 0.5]
:param nodes_percentage: percentage of the nodes
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bm.set_nodes_in_block_percentage([0.5, 0.5])
"""
self.nodes_percentage = nodes_percentage
def set_nodes_in_block(self, nodes_in_block: int) -> None:
"""
Set the nodes number in the block model
:param nodes_in_block: the number of nodes in the block list
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bm.set_nodes_in_block(1000)
"""
self.nodes_in_block = nodes_in_block
def create_graph(self) -> None:
"""
Create a graph from the parameters passed in the constructor of the class
Example
_______
>>> bm = BlockModel(np.array(config["BlockModel"]["matrix"]), n_nodes=config["BlockModel"]["n_nodes"], nodes_percentage=config["BlockModel"]["nodes_percentage"])
>>> bm.create_graph()
"""
reject = True
logging.info('Reject=' + str(reject))
while reject:
graph = generate_graph_from_sm(self.n_nodes, self.bm, self.nodes_in_block, self.nodes,
self.nodes_percentage)
LCC = max(nx.connected_components(graph), key=len)
reject = (len(LCC) != self.n_nodes)
logging.info('Reject=' + str(reject))
logging.info('Nodes: %d, in LCC: %d' % (self.n_nodes, len(LCC)))
self.graph = graph
def plot_graph(self, output_folder: str) -> None:
"""
Plot the block model graph
:param output_folder: the folder where to save the result
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bm.plot_graph("block_model_path.pdf")
"""
plot_bm_graph(self.graph, self.bm, output_folder=output_folder)
def write_network(self, output_file: str) -> None:
"""
Save the network on a given file
:param output_file: the output path where to save the results
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bm.write_network("network.tsv")
"""
self.network_file = output_file
logging.info("Network written on %s" % (output_file))
if output_file.endswith(".tsv"):
nx.write_edgelist(self.graph, output_file, data=False, delimiter="\t")
else:
logging.error("output file format unknown")
def write_cluster_genelist(self, output_file: str) -> None:
"""
Save the gene list to a GMT file
:param output_file: the output path where to save the results
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bm.write_cluster_genelist("genes.gmt")
"""
self.genelist_file = output_file
clusters = nx.get_node_attributes(self.graph, "cluster")
for i in set(clusters.values()):
c = "cluster_" + str(i)
self.cluster_dict[c] = {}
self.cluster_dict[c]["descriptor"] = "cluster"
self.cluster_dict[c]["genes"] = [str(j) for j in clusters.keys() if clusters[j] == i]
if output_file.endswith(".gmt"):
output.print_GMT(self.cluster_dict, self.genelist_file)
else:
logging.error("output file format unknown")
def generate_graph_from_sm(n_nodes: int, block_model: pd.DataFrame, nodes_in_block: list = False,
node_names: list = None, nodes_percentage: list = None) -> nx.Graph:
"""
This function creates a graph with n_nodes vertices and a matrix block_model that describes the intra- and inter-block connectivity.
The nodes_in_block parameter is a list that controls the number of nodes in each cluster
:param n_nodes: the number of nodes in the block model
:param block_model: the block model to elaborate
:param nodes_in_block: the list of nodes in the block model
:param node_names: the list of names in the block model
:param nodes_percentage: the percentage of nodes to use for the calculations, passed through a list for example [0.5, 0.5]
Example
_______
>>> bm = pd.DataFrame(mydata_matrix)
>>> nodes = list("A","B","C")
>>> graph = generate_graph_from_sm(n_nodes, bm, nodes_in_block, nodes, nodes_percentage)
"""
if not node_names:
node_names = range(n_nodes)
edges = []
G = nx.Graph()
if nodes_percentage:
cluster = np.random.choice(block_model.shape[0], size=n_nodes, p=nodes_percentage)
np.random.shuffle(cluster)
elif nodes_in_block:
list_temp = [nodes_in_block[i] * [i] for i in range(len(nodes_in_block))]
cluster = np.array([val for sublist in list_temp for val in sublist])
np.random.shuffle(cluster)
else:
# cluster is an array of random numbers corresponding to the cluster of each node
cluster = np.random.randint(block_model.shape[0], size=n_nodes)
for i in range(n_nodes):
G.add_node(node_names[i], cluster=cluster[i])
for i in range(n_nodes):
for j in range(i + 1, n_nodes):
if np.random.rand() < block_model[cluster[i], cluster[j]]:
edges.append((node_names[i], node_names[j]))
G.add_edges_from(edges)
return G
def plot_bm_graph(graph: nx.Graph, block_model: pd.DataFrame, output_folder: str = None) -> None:
"""
Save the graph on a file
:param graph: the graph with name of the nodes
:param block_model: the block model
:param output_folder: the folder where to save the file
Example
_______
>>> bm = pd.DataFrame(mydata_matrix)
>>> graph = nx.complete_graph(100)
>>> plot_bm_graph(graph, bm, output_folder="./results/")
"""
nodes = graph.nodes()
colors = ['#b15928', '#1f78b4', '#6a3d9a', '#33a02c', '#ff7f00']
cluster = nx.get_node_attributes(graph, 'cluster')
labels = [colors[cluster[n]] for n in nodes]
layout = nx.spring_layout(graph)
plt.figure(figsize=(13.5, 5))
plt.subplot(1, 3, 1)
nx.draw(graph, nodelist=nodes, pos=layout, node_color='#636363', node_size=50, edge_color='#bdbdbd')
plt.title("Observed network")
plt.subplot(1, 3, 2)
plt.imshow(block_model, cmap='OrRd', interpolation='nearest')
plt.title("Stochastic block matrix")
plt.subplot(1, 3, 3)
legend = []
for ix, c in enumerate(colors):
legend.append(mpatches.Patch(color=c, label='C%d' % ix))
nx.draw(graph, nodelist=nodes, pos=layout, node_color=labels, node_size=50, edge_color='#bdbdbd')
plt.legend(handles=legend, ncol=len(colors), mode="expand", borderaxespad=0)
plt.title("SB clustering")
plt.savefig(output_folder + 'block_model.pdf', bbox_inches='tight')
def generate_sbm_network(input_file: "yaml configuration file") -> None:
"""
This function generates a simulated network, using the block model matrix given as input and saves both the network and the cluster nodes.
All parameters must be specified in a yaml file.
This function allows the creation of a network and geneset for any type of SBM
"""
ym = YamlConfig()
config = ym.load_config(input_file)
print(config)
bm = BlockModel(np.array(config["BlockModel"]["matrix"]), n_nodes=config["BlockModel"]["n_nodes"],
nodes_percentage=config["BlockModel"]["nodes_percentage"])
outpath = config["Simulations"]["output_folder"]
suffix = config["Simulations"]["suffix"]
for i in range(config["Simulations"]["n_simulated"]):
bm.create_graph()
bm.write_network(outpath + suffix + "_s_" + str(i) + "_network.tsv")
bm.write_cluster_genelist(outpath + suffix + "_s_" + str(i) + "_genes.gmt")
# bm.plot_graph(outpath+suffix+"_s_"+str(i))
def generate_sbm2_network(output_folder: 'folder where the simulations are saved',
prefix: 'prefix for the simulations' = 'sbm',
n_nodes: 'nodes in the network' = 1000,
theta0: 'probability of connection in the cluster' = '0.9,0.7,0.5,0.2',
percentage: 'percentage of nodes in cluster 0, use ratio 0.1 = 10 percent' = '0.1',
density: 'multiplicative parameter used to define network density' = '0.06,0.1,0.2',
n_simulations: 'number of simulated networks for each configuration' = 3
):
"""
This function generates the simulated networks and genesets using the stochastic block model with 2 BLOCKS as described in the paper. The output names are going to be prefix_t_<theta0>_p_<percentage>_d_<density>_s_<n_simulation>_network.tsv or _genes.gmt
One strongly connected cluster, while the rest of the network shares a single lower connection probability: SBM = d * [[theta0, 1-theta0], [1-theta0, 1-theta0]]
The simulator checks for connectedness of the generated network, if the generated net is not connected, a new simulation is generated.
"""
teta_ii = [float(i) for i in theta0.replace(' ', '').split(',')]
percentages = [float(i) for i in percentage.replace(' ', '').split(',')]
density = [float(i) for i in density.replace(' ', '').split(',')]
n_simulated = int(n_simulations)
| |
# ifwe/digsby
import logging
from traceback import print_exc
from util.introspect import memoize
log = logging.getLogger('pg_accounts2')
import wx
from gui.uberwidgets.PrefPanel import PrefPanel
from gui.pref.prefcontrols import HSizer, VSizer, mark_pref, SText
from wx import EXPAND
import protocols
from gui.textutil import default_font
from gui.accountslist import AccountRow
from gui.accountslist import AccountList
from util.primitives.funcs import find
from operator import attrgetter
import hooks
from contacts.sort_model import ChoiceModel
from pg_contact_list import yield_choices, release_syncs
from common import pref, profile
from peak.util.addons import AddOn
import common
from peak.events import trellis
import util.observe as observe
from util.lego.lattice.blocks import IValueListener
from util.lego.lattice.frame import ObservableAttrBindable
class IServiceProviderGUIMetaData(protocols.Interface):
provider_id = unicode()
icon = property() #wx.Bitmap (skin.get....)
name = unicode()
popularity = int() #needed for comparison
service_components = list() #IServiceComponentGUIData
class IServiceProviderInstanceGUIMetaData(protocols.Interface):
account_name = unicode()
#service_components = list()
class IServiceComponentGUIMetaData(protocols.Interface):
icon = property() #wx.Bitmap (skin.get....)
service_name = unicode() #'Yahoo Messenger'
type = str() #twitter is a 'social', aim is an 'im'
component_id = str() # 'msn'
from plugin_manager.plugin_registry import ServiceProviderPlugin, \
ServiceComponentPlugin
import services.service_provider as sp
class ServiceProviderPluginGUIMetaData(protocols.Adapter):
protocols.advise(asAdapterForTypes = [ServiceProviderPlugin],
instancesProvide = [IServiceProviderGUIMetaData])
@property
def provider_id(self):
return self.subject.provider_id
@property
def icon(self):
from gui import skin
return skin.get('serviceprovidericons.%s' % self.subject.provider_id)
@property
def provider_name(self):
return self.subject.name
@property
def popularity(self):
return getattr(getattr(getattr(self.subject, 'info', None), 'provider_info', None), 'popularity', 0)
@property
def service_components(self):
return sp.get_meta_components_for_provider(self.subject.provider_id)
class ServiceComponentPluginGUIMetaData(protocols.Adapter):
protocols.advise(asAdapterForTypes = [ServiceComponentPlugin],
instancesProvide = [IServiceComponentGUIMetaData])
@property
def icon(self):
from gui import skin
return skin.get('serviceicons.%s' % self.subject.shortname)
@property
def service_name(self):
return self.subject.info.get('service_name') or self.subject.info.get('name')
@property
def component_id(self):
return self.subject.shortname
@property
def type(self):
return self.subject.component_type
class StringServiceComponentGUIData(object):
protocols.advise(asAdapterForTypes=[basestring],
instancesProvide=[IServiceComponentGUIMetaData])
def __init__(self, subject):
self.subject = subject
@property
def service_name(self):
from common import protocolmeta
p = protocolmeta.protocols.get(self.subject)
if p:
name = p.get('service_name') or p.get('name')
else:
name = self.subject.capitalize()
return name
@property
def icon(self):
from gui import skin
return skin.get('serviceicons.%s' % self.subject, skin.get('serviceprovidericons.%s' % self.subject, None))
@property
def type(self):
from common import protocolmeta
p = protocolmeta.protocols.get(self.subject)
return p.get('component_type', p.get('type')) if p else 'unknown'
@property
def component_id(self):
return self.subject
def get_meta_for_provider(provider_instance):
return IServiceProviderGUIMetaData(sp.get_meta_service_provider(provider_instance.provider_id))
protocols.declareAdapterForType(IServiceProviderGUIMetaData, get_meta_for_provider, sp.ServiceProvider)
class IPaintable(protocols.Interface):
def on_paint(e):
pass
class wxPaintingMixin(object):
def __init__(self, *a, **k):
try:
IPaintable(self)
except protocols.AdaptationFailure:
raise
super(wxPaintingMixin, self).__init__(*a, **k)
self.Bind(wx.EVT_PAINT, self.on_paint)
class IDrawable(protocols.Interface):
def draw(dc):
pass
class IDrawingContext(protocols.Interface):
def DrawText(text, x, y): pass
def DrawBitmap(bmp, x, y): pass
def DrawRoundedRectangleRect(rect, radius): pass
def wxPaintEventDrawingContextAdapter(event):
if event.EventType == wx.EVT_PAINT:
return wx.AutoBufferedPaintDC(event.GetEventObject())
protocols.declareImplementation(wx.DC, instancesProvide=[IDrawingContext])
protocols.declareAdapterForType(IDrawingContext, wxPaintEventDrawingContextAdapter, wx.Event)
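# With the declarations above, IDrawingContext(obj) accepts either a wx.DC (declared to already
# provide the interface) or a wx paint event (adapted to a wx.AutoBufferedPaintDC for the window
# that raised it); ServiceMetaProviderPanel.draw below relies on this adaptation.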
class IRectagleFactory(protocols.Interface):
def get_rect(x,y,w,h):
pass
class wxRectangleFactory(protocols.Adapter):
protocols.advise(instancesProvide=[IRectagleFactory],
asAdapterForTypes=[wx.DC])
@staticmethod
def get_rect(x,y,w,h):
return wx.Rect(x,y,w,h)
class ServiceProviderGUIMetaIconUser(object):
icon_cache = {}
def get_icon(self, meta, size):
meta = IServiceProviderGUIMetaData(meta)
try:
ret = self.icon_cache[(meta.provider_id, size)]
except KeyError:
ret = self.icon_cache[(meta.provider_id, size)] = meta.icon.PIL.Resized(size).WXB
return ret
class ServiceComponentGUIMetaIconUser(object):
icon_cache = {}
def get_icon(self, meta, size):
meta = IServiceComponentGUIMetaData(meta)
try:
ret = self.icon_cache[(meta.component_id, size)]
except KeyError:
ret = self.icon_cache[(meta.component_id, size)] = meta.icon.PIL.Resized(size).WXB
return ret
class ServiceMetaProviderPanel(wxPaintingMixin, wx.Panel, ServiceProviderGUIMetaIconUser):
protocols.advise(instancesProvide=[IPaintable, IDrawable])
def __init__(self, service_providers, height, spacing, *a, **k):
self.service_providers = [s[0] for s in
sorted(((sp,IServiceProviderGUIMetaData(sp))
for sp in service_providers),
key = (lambda o: o[1].popularity),
reverse = True)]
self.height = height
self.spacing = spacing
super(ServiceMetaProviderPanel, self).__init__(*a, **k)
#wx object operations must wait until after the wx constructor
self.MinSize = (len(service_providers) - 1)*(height + spacing) + height, (height + spacing)
self.w = None
self.Bind(wx.EVT_MOTION, self.on_mouseover, self)
self.Bind(wx.EVT_LEFT_UP, self.on_mouseclick, self)
self.Bind(wx.EVT_ENTER_WINDOW, self.on_mousein, self)
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.Bind(wx.EVT_LEAVE_WINDOW, self.on_mouseout, self)
self.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self.on_capture_lost, self)
self.sx = self.sy = None
def draw(self, obj):
dc = IDrawingContext(obj)
dc.Clear()
size = self.height
offset = self.height+self.spacing
dc.SetPen(wx.TRANSPARENT_PEN)
dc.DrawRectangle(0,0,self.Size.width, self.Size.height)
for i, s in enumerate(self.service_providers):
dc.DrawBitmap(self.get_icon(s, size), i*offset, 0)
on_paint = draw
def service_provider_from_evt(self, e):
if e.GetEventObject() is not self:
return
if not self.HasCapture():
return
lx = e.X
r = self.ClientRect
if not wx.Rect(r.x, r.y, r.width + 1, r.height + 1).Contains(e.Position):
return
offset = self.height+self.spacing
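# Icons are laid out left to right every (height + spacing) pixels: posi is the index of the
# icon under the cursor, and posr checks that the cursor sits over the icon itself rather than
# the gap that follows it.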
pos = lx / float(offset)
posi = int(pos)
posr = pos - posi
if posr * offset <= (offset - self.spacing):
if posi < len(self.service_providers):
return self.service_providers[posi]
def on_mouseover(self, e):
e.Skip()
if e.GetEventObject() is not self:
return
if not self.HasCapture():
return
lx = e.X
r = self.ClientRect
if not wx.Rect(r.y,r.x,r.width + 1, r.height + 1).Contains(e.Position):
return self.on_mouseout(e)
offset = self.height+self.spacing
pos = lx / float(offset)
posi = int(pos)
posr = pos - posi
if posr * offset <= (offset - self.spacing):
sx = posi * offset + self.height/2
sy = self.height
if posi >= len(self.service_providers):
return self.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))
self.SetCursor(wx.StockCursor(wx.CURSOR_HAND))
if self.w is None:
self.w = ServiceProviderBubble(self.service_providers[0], 0, parent=self)
if self.w.Shown and sx == self.sx and sy == self.sy:
return
self.sx = sx
self.sy = sy
with self.w.Frozen():
self.w.service_provider = IServiceProviderGUIMetaData(self.service_providers[posi])
self.w.show_point_to(self.ClientToScreen((sx,sy)))
else:
self.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))
def on_mouseclick(self, e):
e.Skip()
sp = self.service_provider_from_evt(e)
if sp is None:
return
diag = hooks.first('digsby.services.create', parent = self.Top, sp_info = sp, impl="digsby_service_editor")
diag.CenterOnParent()
return_code = diag.ShowModal()
if return_code != wx.ID_SAVE:
log.info("Account creation cancelled. Return code = %r", return_code)
return
info = diag.extract()
sp = hooks.first('digsby.service_provider',
impl = diag.sp_info.provider_id,
**info)
log.info("Created %r", sp)
components = []
types_ = sp.get_component_types()
if 'im' in types_:
sp.autologin = True
for type_ in types_:
comp = sp.get_component(type_)
components.append((comp, type_[:2]))
log.info("\thas component %r: %r", type_, comp)
import services.service_provider as sp
with sp.ServiceProviderContainer(profile()).rebuilding() as container:
profile.account_manager.add_multi(components)
for comp, type_ in components:
try:
if hasattr(comp, 'enable'):
comp.enable()
else:
comp.enabled = True
except Exception:
print_exc()
try:
on_create = getattr(comp, 'onCreate', None) #CamelCase for GUI code
if on_create is not None:
on_create()
except Exception:
print_exc()
if type_ == 'em':
hooks.notify('digsby.email.new_account', parent = self.Top, protocol = comp.protocol, **info)
container.rebuild()
def on_mousein(self, e):
if e.GetEventObject() is not self:
return
self.CaptureMouse()
def on_mouseout(self, e):
if self.w is not None:
self.w.Hide()
while self.HasCapture():
self.ReleaseMouse()
self.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))
def on_capture_lost(self, e):
pass
class BubbleWindow(wx.Frame):
def __init__(self, internal_size, parent = None, *a, **k):
style = wx.FRAME_SHAPED | wx.BORDER_NONE | wx.FRAME_NO_TASKBAR | \
((parent and wx.FRAME_FLOAT_ON_PARENT) or wx.STAY_ON_TOP)
ret = super(BubbleWindow, self).__init__(parent, style=style, *a, **k)
self.Bind(wx.EVT_PAINT, self.on_paint)
self.internal_size = internal_size
self.point = (10,0)
return ret
def draw_content(self, dc):
pass
@property
def poly(self):
xborder = 0
yborder = 0
startx, starty = (0,10)
px,py = self.point
endx, endy = (2*xborder + self.internal_size[0] + startx), (2*yborder + self.internal_size[1] + starty)
return ((px,py),(px+1,py), (px+11,starty), (endx,starty),(endx,endy),(startx,endy),(startx,starty),(px,starty),(px,py))
def show_point_to(self, point):
x,y = point
xborder = 0
yborder = 0
startx, starty = (0,10)
px,py = self.point
endx, endy = (2*xborder + self.internal_size[0] + startx), (2*yborder + self.internal_size[1] + starty)
with self.Frozen():
self.Position = (x - px, y - py)
self.Size = (endx+1, endy+1)
self.SetShape(get_polyregion(self.poly, endx, endy))
self.ShowNoActivate(True)
def on_paint(self, e):
dc = wx.AutoBufferedPaintDC(self)
# gc = wx.GraphicsContext.Create(dc)
o = wx.Color(254,214,76) # skin!
y = wx.Color(255,251,184)
dc.SetPen(wx.Pen(o))
dc.SetBrush(wx.Brush(y))
dc.DrawPolygon(self.poly)
self.draw_content(dc)
class ServiceProviderBubble(BubbleWindow, ServiceComponentGUIMetaIconUser):
def __init__(self, service_provider, font_adjust = 0, *a, **k):
self.service_provider = IServiceProviderGUIMetaData(service_provider)
super(ServiceProviderBubble, self).__init__((0,0), *a, **k)
self.Font = default_font()
self.font_adjust = font_adjust
self._service_provider = None
self.recalcsize()
def recalcsize(self):
if self.service_provider is self._service_provider:
return
ftitle = self.Font
ftitle.PointSize = int((ftitle.PointSize + self.font_adjust) * 4/3.)
fbase = self.Font
fbase.PointSize += self.font_adjust
h = 0
h += ftitle.LineHeight + 2
h += ftitle.Descent * 2
lh = round(fbase.LineHeight / 4. + .25) * 4 + 2
#^ .25 = .5 - 1/4. (ceil, except when exactly divisible by 4)
h += lh*len(self.service_provider.service_components) - 2
dc = wx.ClientDC(self)
dc.Font = ftitle
w = dc.GetTextExtent(self.service_provider.provider_name)[0]
self.offset = dc.GetTextExtent(" ")[0]
dc.Font = fbase
w2 = max([dc.GetTextExtent(" " + IServiceComponentGUIMetaData(s).service_name)[0]
for s in self.service_provider.service_components] + [0])
w2 += lh
w2 += self.offset
w = max(w, w2)
w += ftitle.Descent * 4
h += 2
self.internal_size = (int(w), int(h))
self._service_provider = self.service_provider
def draw_content(self, dc):
ftitle = self.Font
ftitle.PointSize = int((ftitle.PointSize + self.font_adjust) * 4/3.)
fbase = self.Font
fbase.PointSize += self.font_adjust
dc.Font = ftitle
x,y = ftitle.Descent * 2, 10 + ftitle.Descent * 1
dc.DrawText(self.service_provider.provider_name, x, y)
y += ftitle.LineHeight + 2
lh = int(round(fbase.LineHeight / 4. + .25)) * 4 + 2
dc.Font = fbase
x += self.offset
for s in sorted((IServiceComponentGUIMetaData(sc)
for sc in self.service_provider.service_components),
key = attrgetter('type'),
cmp = component_sort):
dc.DrawBitmap(self.get_icon(s, lh -2), x, y)
dc.DrawText(" " + s.service_name, x+lh, y)
y += lh
def show_point_to(self, point):
self.recalcsize()
return super(ServiceProviderBubble, self).show_point_to(point)
@memoize
def get_polyregion(points, w, h, border=1):
i = wx.EmptyImage(w + border, h + border)
b = i.WXB
m = wx.MemoryDC(b)
m.Clear()
m.SetBrush(wx.Brush(wx.Color(0,0,0)))
m.SetPen(wx.Pen(wx.Color(0,0,0)))
#ceil(border/2)?
m.DrawRectangle(0,0,w + border, h + border)
m.SetBrush(wx.Brush(wx.Color(255,255,255)))
m.SetPen(wx.Pen(wx.Color(255,255,255)))
m.DrawPolygon(points)
m.SelectObject(wx.NullBitmap)
del m
b.SetMaskColour(wx.Color(0,0,0))
return wx.RegionFromBitmap(b)
class IServiceProviderInstance(protocols.Interface):
icon = property()
def grayify(i, grayshade):
return int(round((i + grayshade) / 2.))
def grayify3(tuple_, grayshade):
# assumed behaviour: average each colour channel toward grayshade, as grayify does
return tuple(grayify(c, grayshade) for c in tuple_)
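# Quick sanity check of the blending helpers (inputs chosen arbitrarily):
#   grayify(100, 255)            -> 178  (average of 100 and 255, rounded)
#   grayify3((11, 21, 31), 255)  -> (133, 138, 143), assuming the per-channel
#                                   mapping used in the body above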
lib.nxigetslab_.restype = c_int
lib.nxigetslab_.argtypes = [c_void_p, c_void_p, c_int_p, c_int_p]
def getslab(self, slab_offset, slab_shape):
"""
Get a slab from the data array.
Offsets are 0-origin. Shape can be inferred from the data.
Offset and shape must each have one entry per dimension.
Raises ValueError if this fails.
Corresponds to NXgetslab(handle,data,offset,shape)
"""
# TODO: consider accepting preallocated data so we don't thrash memory
shape,dtype = self.getinfo()
datafn,pdata,size = self._poutput(dtype,slab_shape)
slab_offset = numpy.array(slab_offset,'i')
slab_shape = numpy.array(slab_shape,'i')
status = self.lib.nxigetslab_(self.handle,pdata,
slab_offset.ctypes.data_as(c_int_p),
slab_shape.ctypes.data_as(c_int_p))
#print "slab",offset,size,data
if status == ERROR:
raise ValueError, "Could not read slab: %s"%(self._loc())
return datafn()
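# Hedged usage sketch (untested): reading a large 2-D dataset one row at a
# time with getslab(); "f" stands for an open file object of this class and
# the shape comes straight from getinfo().
#
#   shape, dtype = f.getinfo()
#   nrows, ncols = shape
#   for row in range(nrows):
#       chunk = f.getslab([row, 0], [1, ncols])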
lib.nxiputdata_.restype = c_int
lib.nxiputdata_.argtypes = [c_void_p, c_void_p]
def putdata(self, data):
"""
Write data into the currently open data block.
Raises ValueError if this fails.
Corresponds to NXputdata(handle, data)
"""
shape,dtype = self.getinfo()
data,pdata = self._pinput(data,dtype,shape)
status = self.lib.nxiputdata_(self.handle,pdata)
if status == ERROR:
raise ValueError, "Could not write data: %s"%(self._loc())
lib.nxiputslab_.restype = c_int
lib.nxiputslab_.argtypes = [c_void_p, c_void_p, c_int_p, c_int_p]
def putslab(self, data, slab_offset, slab_shape):
"""
Put a slab into the data array.
Offsets are 0-origin. Shape can be inferred from the data.
Offset and shape must each have one entry per dimension.
Raises ValueError if this fails.
Corresponds to NXputslab(handle,data,offset,shape)
"""
shape,dtype = self.getinfo()
data,pdata = self._pinput(data,dtype,slab_shape)
slab_offset = numpy.array(slab_offset,'i')
slab_shape = numpy.array(slab_shape,'i')
#print "slab",offset,size,data
status = self.lib.nxiputslab_(self.handle,pdata,
slab_offset.ctypes.data_as(c_int_p),
slab_shape.ctypes.data_as(c_int_p))
if status == ERROR:
raise ValueError, "Could not write slab: %s"%(self._loc())
# ==== Attributes ====
lib.nxiinitattrdir_.restype = c_int
lib.nxiinitattrdir_.argtypes = [c_void_p]
def initattrdir(self):
"""
Reset the getnextattr list to the first attribute.
Raises RuntimeError if this fails.
Corresponds to NXinitattrdir(handle)
"""
status = self.lib.nxiinitattrdir_(self.handle)
if status == ERROR:
raise RuntimeError, \
"Could not reset attribute list: %s"%(self._loc())
lib.nxigetattrinfo_.restype = c_int
lib.nxigetattrinfo_.argtypes = [c_void_p, c_int_p]
def getattrinfo(self):
"""
Returns the number of attributes for the currently open
group/data object. Do not call getnextattr() more than
this number of times.
Raises RuntimeError if this fails.
Corresponds to NXgetattrinfo(handle, &n)
"""
n = c_int(0)
status = self.lib.nxigetattrinfo_(self.handle,_ref(n))
if status == ERROR:
raise RuntimeError, "Could not get attr info: %s"%(self._loc())
#print "num attrs",n.value
return n.value
lib.nxigetnextattr_.restype = c_int
lib.nxigetnextattr_.argtypes = [c_void_p, c_char_p, c_int_p, c_int_p]
def getnextattr(self):
"""
Returns the name, length, and data type for the next attribute.
Call getattrinfo to determine the number of attributes before
calling getnextattr. Data type is returned as a string. See
getinfo for details. Length is the number of elements in the
attribute.
Raises RuntimeError if NeXus returns ERROR or EOD.
Corresponds to NXgetnextattr(handle,name,&length,&storage)
but with storage converted from HDF values to numpy compatible
strings.
Note: NeXus API documentation seems to say that length is the number
of bytes required to store the entire attribute.
"""
name = ctypes.create_string_buffer(MAXNAMELEN)
length = c_int(0)
storage = c_int(0)
status = self.lib.nxigetnextattr_(self.handle,name,_ref(length),_ref(storage))
if status == ERROR or status == EOD:
raise RuntimeError, "Could not get next attr: %s"%(self._loc())
dtype = _pytype_code[storage.value]
#print "next attr",name.value,length.value,dtype
return name.value, length.value, dtype
# TODO: Resolve discrepancy between NeXus API documentation and
# TODO: apparent behaviour for getattr/putattr length.
lib.nxigetattr_.restype = c_int
lib.nxigetattr_.argtypes = [c_void_p, c_char_p, c_void_p, c_int_p, c_int_p]
def getattr(self, name, length, dtype):
"""
Returns the value of the named attribute. Requires length and
data type from getnextattr to allocate the appropriate amount of
space for the attribute.
Corresponds to NXgetattr(handle,name,data,&length,&storage)
"""
datafn,pdata,size = self._poutput(str(dtype),[length])
storage = c_int(_nxtype_code[str(dtype)])
#print "retrieving",name,length,dtype,size
size = c_int(size)
status = self.lib.nxigetattr_(self.handle,name,pdata,_ref(size),_ref(storage))
if status == ERROR:
raise ValueError, "Could not read attr %s: %s" % (name,self._loc())
#print "attr",name,datafn(),size
return datafn()
lib.nxiputattr_.restype = c_int
lib.nxiputattr_.argtypes = [c_void_p, c_char_p, c_void_p, c_int, c_int]
def putattr(self, name, value, dtype = None):
"""
Saves the named attribute. The attribute value is a string
or a scalar.
Raises ValueError if the attribute could not be saved.
Corresponds to NXputattr(handle,name,data,length,storage)
Note length is the number of elements to write rather
than the number of bytes to write.
"""
# Establish attribute type
if dtype == None:
# Type is inferred from value
if hasattr(value,'dtype'):
dtype = str(value.dtype)
elif _is_string_like(value):
dtype = 'char'
else:
value = numpy.array(value)
dtype = str(value.dtype)
else:
# Set value to type
dtype = str(dtype)
if dtype == 'char' and not _is_string_like(value):
raise TypeError, "Expected string for 'char' attribute value"
if dtype != 'char':
value = numpy.array(value,dtype=dtype)
# Determine shape
if dtype == 'char':
length = len(value)
data = value
elif numpy.prod(value.shape) != 1:
# NAPI silently ignores attribute arrays
raise TypeError, "Attribute value must be scalar or string"
else:
length = 1
data = value.ctypes.data
# Perform the call
storage = c_int(_nxtype_code[dtype])
status = self.lib.nxiputattr_(self.handle,name,data,length,storage)
if status == ERROR:
raise ValueError, "Could not write attr %s: %s"%(name,self._loc())
def attrs(self):
"""
Iterate over attributes.
for name,value in file.attrs():
process(name,value)
This automatically reads the attributes of the group/data. Do not
change the active group/data while processing the list.
This does not correspond to an existing NeXus API function, but
combines the work of attrinfo/initattrdir/getnextattr/getattr.
"""
self.initattrdir()
n = self.getattrinfo()
for i in range(n):
name,length,dtype = self.getnextattr()
value = self.getattr(name,length,dtype)
yield name,value
# ==== Linking ====
lib.nxigetgroupid_.restype = c_int
lib.nxigetgroupid_.argtypes = [c_void_p, c_NXlink_p]
def getgroupID(self):
"""
Return the id of the current group so we can link to it later.
Raises RuntimeError
Corresponds to NXgetgroupID(handle, &ID)
"""
ID = _NXlink()
status = self.lib.nxigetgroupid_(self.handle,_ref(ID))
if status == ERROR:
raise RuntimeError, "Could not link to group: %s"%(self._loc())
return ID
lib.nxigetdataid_.restype = c_int
lib.nxigetdataid_.argtypes = [c_void_p, c_NXlink_p]
def getdataID(self):
"""
Return the id of the current data so we can link to it later.
Raises RuntimeError
Corresponds to NXgetdataID(handle, &ID)
"""
ID = _NXlink()
status = self.lib.nxigetdataid_(self.handle,_ref(ID))
if status == ERROR:
raise RuntimeError, "Could not link to data: %s"%(self._loc())
return ID
lib.nximakelink_.restype = c_int
lib.nximakelink_.argtypes = [c_void_p, c_NXlink_p]
def makelink(self, ID):
"""
Link the previously captured group/data ID into the currently
open group.
Raises RuntimeError
Corresponds to NXmakelink(handle, &ID)
"""
status = self.lib.nximakelink_(self.handle,_ref(ID))
if status == ERROR:
raise RuntimeError, "Could not make link: %s"%(self._loc())
lib.nximakenamedlink_.restype = c_int
lib.nximakenamedlink_.argtypes = [c_void_p, c_char_p, c_NXlink_p]
def makenamedlink(self,name,ID):
"""
Link the previously captured group/data ID into the currently
open group, but under a different name.
Raises RuntimeError
Corresponds to NXmakenamedlink(handle,name,&ID)
"""
status = self.lib.nximakenamedlink_(self.handle,name,_ref(ID))
if status == ERROR:
raise RuntimeError, "Could not make link %s: %s"%(name,self._loc())
lib.nxisameid_.restype = c_int
lib.nxisameid_.argtypes = [c_void_p, c_NXlink_p, c_NXlink_p]
def sameID(self, ID1, ID2):
"""
Return True if ID1 and ID2 point to the same group/data.
This should not raise any errors.
Corresponds to NXsameID(handle,&ID1,&ID2)
"""
status = self.lib.nxisameid_(self.handle, _ref(ID1), _ref(ID2))
return status == OK
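# Hedged sketch of how the link helpers above fit together ("f" is an open
# file object of this class, already positioned on the item to be linked):
#
#   data_id = f.getdataID()              # capture the currently open data
#   # ... navigate to the group that should receive the link ...
#   f.makelink(data_id)                  # or f.makenamedlink('alias', data_id)
#   f.sameID(data_id, some_other_id)     # True if both IDs refer to one object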
lib.nxiopensourcegroup_.restype = c_int
lib.nxiopensourcegroup_.argtypes = [c_void_p]
def opensourcegroup(self):
"""
If the current node is a linked to another group or data, then
open the group or data that it is linked to.
Note: it is unclear how we can tell whether we are linked, other than
perhaps the existence of a 'target' attribute in the current item.
Raises RuntimeError
Corresponds to NXopensourcegroup(handle)
"""
status = self.lib.nxiopensourcegroup_(self.handle)
if status == ERROR:
raise RuntimeError, "Could not open source group: %s"%(self._loc())
def link(self):
"""
Returns the item which the current item links to, or None if the
current item is not linked. This is equivalent to scanning the
attributes for target and returning it if target is not equal
to self.
This does not correspond to an existing NeXus API function, but
combines the work of attrinfo/initattrdir/getnextattr/getattr.
"""
# To preserve the semantics we must read in the whole list
# first, then process the entries one by one.
pathstr = "/"+"/".join(self.path)
#print "checking for links from",pathstr
n = self.getattrinfo()
self.initattrdir()
for i in range(n):
name,length,dtype = self.getnextattr()
if name == "target":
target = self.getattr(name,length,dtype)
#print "target %s, path %s"%(target,pathstr)
if target != pathstr:
return target
else:
return None
return None
# ==== External linking ====
lib.nxiinquirefile_.restype = c_int
lib.nxiinquirefile_.argtypes = [c_void_p, c_char_p, c_int]
def inquirefile(self, maxnamelen=MAXPATHLEN):
"""
Return the filename for the current file. This may be different
from the file that was opened (file.filename) if the current group or data was reached through an external link.
from __future__ import absolute_import, division
from psychopy import locale_setup
from psychopy import prefs
from psychopy import sound, gui, visual, core, data, event, logging, clock
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np
import pandas as pd
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os
import sys
import time
from psychopy.hardware import keyboard
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
expName = 'cartoon' # from the Builder filename that created this script
expInfo = {'participant': '', '': ''}
dlg = gui.DlgFromDict(dictionary=expInfo, sortKeys=False, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])
frameTolerance = 0.001 # how close to onset before 'same' frame
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(
size=[1440, 900], fullscr=True, screen=0,
winType='pyglet', allowGUI=False, allowStencil=False,
monitor='testMonitor', color=[-1.000,-1.000,-1.000], colorSpace='rgb',
blendMode='avg', useFBO=True,
units='cm')
win.monitor.setSizePix([2560, 1600])
win.monitor.setWidth(33.3)
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
frameDur = 1.0 / round(expInfo['frameRate'])
else:
frameDur = 1.0 / 60.0 # could not measure, so guess
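# For example, on a 60 Hz display frameDur is 1/60 ~= 0.0167 s, so with
# frameTolerance = 0.001 a component whose scheduled onset falls within 1 ms
# of the next flip is treated as starting on that frame.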
# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()
# Initialize components for Routine "Consent"
ConsentClock = core.Clock()
ConsentPage = visual.ImageStim(
win=win,
name='ConsentPage',
image=_thisDir + '/images/Consent.jpeg', mask=None,
ori=0, pos=(0, 0), size=(10.2, 10.0),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False, units="cm",
texRes=128, interpolate=True, depth=0.0)
ConsentResp = keyboard.Keyboard()
# Initialize components for Routine "Break"
BreakClock = core.Clock()
blankscreen = visual.TextStim(win=win, name='blankscreen',
text=None,
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR', units="cm",
depth=0.0);
# Initialize components for Routine "Instructions1"
Instructions1Clock = core.Clock()
textInstructions = visual.TextStim(win=win, name='textInstructions',
text='Task Instructions\n\nIn this experiment, you will switch back and forth between 2 games.\n\n In the first game, press X for letters c & o and N for letters i & l.\n\nIn the second game, press X for letters d & b and N for letters q & p.\n\nYou will switch between both games 4 times.\n\nYou will be reminded of the instructions each time you switch!\n\n The task takes less than 20 minutes! \n\n Press SPACEBAR for more instructions!',
font='Arial',
pos=(0, 0), height=0.5, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR', units="cm",
depth=0.0);
textInstructionsResp = keyboard.Keyboard()
# Initialize components for Routine "Break"
BreakClock = core.Clock()
blankscreen = visual.TextStim(win=win, name='blankscreen',
text=None,
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR', units="cm",
depth=0.0);
# Initialize components for Routine "Instructions2"
Instructions2Clock = core.Clock()
imgInstructions = visual.ImageStim(
win=win,
name='imgInstructions',
image=_thisDir + '/images/Instructions.jpeg', mask=None,
ori=0, pos=(0, 0), size=(11.0, 11.0),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False, units="cm",
texRes=128, interpolate=True, depth=0.0)
imgInstructionsResp = keyboard.Keyboard()
# Initialize components for Routine "Break"
BreakClock = core.Clock()
blankscreen = visual.TextStim(win=win, name='blankscreen',
text=None,
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR', units="cm",
depth=0.0);
# Initialize components for Routine "PracticeWarning"
PracticeWarningClock = core.Clock()
PracticeText = visual.TextStim(win=win, name='PracticeText',
text='PRESS SPACEBAR TO BEGIN PRACTICE!\n\n Remember: \n\n Game 1: Press X for c & o and N for i & l\n\n Game 2: Press X for d & b and N for q & p',
font='Arial',
pos=(0, 0), height=0.5, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR', units="cm",
depth=0.0);
PracticeTextResp = keyboard.Keyboard()
PracticeClock = core.Clock()
# Initialize components for Routine "ReminderLow"
ReminderLowClock = core.Clock()
imageReminderLow = visual.ImageStim(
win=win,
name='imageReminderLow',
image=_thisDir + '/images/LowReminder.jpeg', mask=None,
ori=0, pos=(0, 0), size=(6.0, 4.0),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False, units="cm",
texRes=128, interpolate=True, depth=0.0)
ReminderLow_Resp = keyboard.Keyboard()
# Initialize components for Routine "ReminderHigh"
ReminderHighClock = core.Clock()
imageReminderHigh = visual.ImageStim(
win=win,
name='imageReminderHigh',
image=_thisDir + '/images/HighReminder.jpeg', mask=None,
ori=0, pos=(0, 0), size=(6.0, 4.0),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False, units="cm",
texRes=128, interpolate=True, depth=0.0)
ReminderHigh_Resp = keyboard.Keyboard()
# Initialize components for Routine "LowLoad1"
def item_generator(name, pos, win):
if name == "circle_situator":
h=1.5
else:
h=0.5
item = visual.TextStim(
win=win, name=name,
text='default text',
font='Arial',
pos=pos, height=h, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR', units="cm",
depth=0.0)
return item
LowLoad1Clock = core.Clock()
item1 = item_generator("item1", (0, 4.1), win)
item1_resp = keyboard.Keyboard()
item2 = item_generator("item2", (1.905, 3.5), win)
item3 = item_generator("item3", (3.5, 2.2), win)
item4 = item_generator("item4", (4.2, 0), win)
item5 = item_generator("item5", (3.5, -2.2), win)
item6 = item_generator("item6", (1.905, -3.5), win)
item7 = item_generator("item7", (0, -4.1), win)
item8 = item_generator("item8", (-1.905, -3.5), win)
item9 = item_generator("item9", (-3.5, -2.2), win)
item10 = item_generator("item10", (-4.2, 0), win)
item11 = item_generator("item11", (-3.5, 2.2), win)
item12 = item_generator("item12", (-1.905, 3.5), win)
item13 = item_generator("circle_situator", (0, 4.1), win)
# Initialize components for Routine "Break"
BreakClock = core.Clock()
blankscreen = visual.TextStim(win=win, name='blankscreen',
text=None,
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR', units="cm",
depth=0.0);
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
# ------Prepare to start Routine "Consent"-------
continueRoutine = True
# update component parameters for each repeat
ConsentResp.keys = []
ConsentResp.rt = []
_ConsentResp_allKeys = []
# keep track of which components have finished
ConsentComponents = [ConsentPage, ConsentResp]
for thisComponent in ConsentComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
ConsentClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Consent"-------
while continueRoutine:
# get current time
t = ConsentClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=ConsentClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *ConsentPage* updates
if ConsentPage.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
ConsentPage.frameNStart = frameN # exact frame index
ConsentPage.tStart = t # local t and not account for scr refresh
ConsentPage.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(ConsentPage, 'tStartRefresh') # time at next scr refresh
ConsentPage.setAutoDraw(True)
# *ConsentResp* updates
waitOnFlip = False
if ConsentResp.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
ConsentResp.frameNStart = frameN # exact frame index
ConsentResp.tStart = t # local t and not account for scr refresh
ConsentResp.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(ConsentResp, 'tStartRefresh') # time at next scr refresh
ConsentResp.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(ConsentResp.clock.reset) # t=0 on next screen flip
win.callOnFlip(ConsentResp.clearEvents, eventType='keyboard') # clear events on next screen flip
if ConsentResp.status == STARTED and not waitOnFlip:
theseKeys = ConsentResp.getKeys(keyList=['space'], waitRelease=False)
_ConsentResp_allKeys.extend(theseKeys)
if len(_ConsentResp_allKeys):
ConsentResp.keys = _ConsentResp_allKeys[-1].name # just the last key pressed
ConsentResp.rt = _ConsentResp_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in ConsentComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Consent"-------
for thisComponent in ConsentComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if ConsentResp.keys in ['', [], None]: # No response was made
ConsentResp.keys = None
# the Routine "Consent" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "Break"-------
continueRoutine = True
routineTimer.add(0.500000)
# update component parameters for each repeat
# keep track of which components have finished
BreakComponents = [blankscreen]
for thisComponent in BreakComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
BreakClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Break"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = BreakClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=BreakClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *blankscreen* updates
if blankscreen.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of which components have finished
# Bio/HMM/MarkovModel.py
"""Deal with representations of Markov Models.
"""
# standard modules
import copy
import math
import random
# biopython
from Bio.Seq import MutableSeq
class MarkovModelBuilder:
"""Interface to build up a Markov Model.
This class is designed to try to separate the task of specifying the
Markov Model from the actual model itself. This is in hopes of making
the actual Markov Model classes smaller.
So, this builder class should be used to create Markov models instead
of trying to initiate a Markov Model directly.
"""
# the default pseudo counts to use
DEFAULT_PSEUDO = 1
def __init__(self, state_alphabet, emission_alphabet):
"""Initialize a builder to create Markov Models.
Arguments:
o state_alphabet -- An alphabet containing all of the letters that
can appear in the states
o emission_alphabet -- An alphabet containing all of the letters for
states that can be emitted by the HMM.
"""
self._state_alphabet = state_alphabet
self._emission_alphabet = emission_alphabet
# the probabilities for transitions and emissions
# by default we have no transitions and all possible emissions
self.transition_prob = {}
self.emission_prob = self._all_blank(state_alphabet,
emission_alphabet)
# the default pseudocounts for transition and emission counting
self.transition_pseudo = {}
self.emission_pseudo = self._all_pseudo(state_alphabet,
emission_alphabet)
def _all_blank(self, first_alphabet, second_alphabet):
"""Return a dictionary with all counts set to zero.
This uses the letters in the first and second alphabet to create
a dictionary with keys of two tuples organized as
(letter of first alphabet, letter of second alphabet). The values
are all set to 0.
"""
all_blank = {}
for first_state in first_alphabet.letters:
for second_state in second_alphabet.letters:
all_blank[(first_state, second_state)] = 0
return all_blank
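# For example, with hypothetical alphabets whose letters are "AB" and "xy",
# _all_blank would return
# {('A', 'x'): 0, ('A', 'y'): 0, ('B', 'x'): 0, ('B', 'y'): 0}.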
def _all_pseudo(self, first_alphabet, second_alphabet):
"""Return a dictionary with all counts set to a default value.
This takes the letters in first alphabet and second alphabet and
creates a dictionary with keys of two tuples organized as:
(letter of first alphabet, letter of second alphabet). The values
are all set to the value of the class attribute DEFAULT_PSEUDO.
"""
all_counts = {}
for first_state in first_alphabet.letters:
for second_state in second_alphabet.letters:
all_counts[(first_state, second_state)] = self.DEFAULT_PSEUDO
return all_counts
def get_markov_model(self):
"""Return the markov model corresponding with the current parameters.
Each markov model returned by a call to this function is unique
(ie. they don't influence each other).
"""
transition_prob = copy.deepcopy(self.transition_prob)
emission_prob = copy.deepcopy(self.emission_prob)
transition_pseudo = copy.deepcopy(self.transition_pseudo)
emission_pseudo = copy.deepcopy(self.emission_pseudo)
return HiddenMarkovModel(transition_prob, emission_prob,
transition_pseudo, emission_pseudo)
def set_equal_probabilities(self):
"""Reset all probabilities to be an average value.
This resets the values of all allowed transitions and all allowed
emissions to be equal to 1 divided by the number of possible elements.
This is useful if you just want to initialize a Markov Model to
starting values (ie. if you have no prior notions of what the
probabilities should be -- or if you are just feeling too lazy
to calculate them :-).
Warning 1 -- this will reset all currently set probabilities.
Warning 2 -- This only makes the full set of transition probabilities
(and, separately, emission probabilities) total 1; it does not ensure
that the outgoing transitions from each individual state sum to 1.
"""
# first set the transitions
new_trans_prob = float(1) / float(len(self.transition_prob))
for key in self.transition_prob:
self.transition_prob[key] = new_trans_prob
# now set the emissions
new_emission_prob = float(1) / float(len(self.emission_prob))
for key in self.emission_prob:
self.emission_prob[key] = new_emission_prob
def set_random_probabilities(self):
"""Set all probabilities to randomly generated numbers.
This will reset the value of all allowed transitions and emissions
to random values.
Warning 1 -- This will reset any currently set probabilities.
Warning 2 -- This does not check to ensure that the sum of
all of the probabilities is less than 1. It just randomly assigns
a probability to each transition and emission.
"""
for key in self.transition_prob:
self.transition_prob[key] = random.random()
for key in self.emission_prob:
self.emission_prob[key] = random.random()
# --- functions to deal with the transitions in the sequence
def allow_all_transitions(self):
"""A convenience function to create transitions between all states.
By default all transitions within the alphabet are disallowed; this
is a way to change this to allow all possible transitions.
"""
# first get all probabilities and pseudo counts set
# to the default values
all_probs = self._all_blank(self._state_alphabet,
self._state_alphabet)
all_pseudo = self._all_pseudo(self._state_alphabet,
self._state_alphabet)
# now set any probabilities and pseudo counts that
# were previously set
for set_key in self.transition_prob:
all_probs[set_key] = self.transition_prob[set_key]
for set_key in self.transition_pseudo:
all_pseudo[set_key] = self.transition_pseudo[set_key]
# finally reinitialize the transition probs and pseudo counts
self.transition_prob = all_probs
self.transition_pseudo = all_pseudo
def allow_transition(self, from_state, to_state, probability = None,
pseudocount = None):
"""Set a transition as being possible between the two states.
probability and pseudocount are optional arguments
specifying the probabilities and pseudo counts for the transition.
If these are not supplied, then the values are set to the
default values.
Raises:
KeyError -- if the two states already have an allowed transition.
"""
# check the sanity of adding these states
for state in [from_state, to_state]:
assert state in self._state_alphabet.letters, \
"State %s was not found in the sequence alphabet" % state
# ensure that the states are not already set
if ((from_state, to_state) not in self.transition_prob and
(from_state, to_state) not in self.transition_pseudo):
# set the initial probability
if probability is None:
probability = 0
self.transition_prob[(from_state, to_state)] = probability
# set the initial pseudocounts
if pseudocount is None:
pseudocount = self.DEFAULT_PSEUDO
self.transition_pseudo[(from_state, to_state)] = pseudocount
else:
raise KeyError("Transtion from %s to %s is already allowed."
% (from_state, to_state))
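# Rough usage sketch (state_alpha and emission_alpha stand for alphabet
# objects exposing a .letters attribute, as __init__ expects):
#
#   builder = MarkovModelBuilder(state_alpha, emission_alpha)
#   builder.allow_all_transitions()
#   builder.set_equal_probabilities()
#   mm = builder.get_markov_model()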
def destroy_transition(self, from_state, to_state):
"""Restrict transitions between the two states.
Raises:
KeyError if the transition is not currently allowed.
"""
try:
del self.transition_prob[(from_state, to_state)]
del self.transition_pseudo[(from_state, to_state)]
except KeyError:
raise KeyError("Transition from %s to %s is already disallowed."
% (from_state, to_state))
def set_transition_score(self, from_state, to_state, probability):
"""Set the probability of a transition between two states.
Raises:
KeyError if the transition is not allowed.
"""
if (from_state, to_state) in self.transition_prob:
self.transition_prob[(from_state, to_state)] = probability
else:
raise KeyError("Transition from %s to %s is not allowed."
% (from_state, to_state))
def set_transition_pseudocount(self, from_state, to_state, count):
"""Set the default pseudocount for a transition.
To avoid computational problems, it is helpful to be able to
set a 'default' pseudocount to start with for estimating
transition and emission probabilities (see p62 in Durbin et al
for more discussion on this). By default, all transitions have
a pseudocount of 1.
Raises:
KeyError if the transition is not allowed.
"""
if (from_state, to_state) in self.transition_pseudo:
self.transition_pseudo[(from_state, to_state)] = count
else:
raise KeyError("Transition from %s to %s is not allowed."
% (from_state, to_state))
# --- functions to deal with emissions from the sequence
def set_emission_score(self, seq_state, emission_state, probability):
"""Set the probability of a emission from a particular state.
Raises:
KeyError if the emission from the given state is not allowed.
"""
if (seq_state, emission_state) in self.emission_prob:
self.emission_prob[(seq_state, emission_state)] = probability
else:
raise KeyError("Emission of %s from %s is not allowed."
% (emission_state, seq_state))
def set_emission_pseudocount(self, seq_state, emission_state, count):
"""Set the default pseudocount for an emission.
To avoid computational problems, it is helpful to be able to
set a 'default' pseudocount to start with for estimating
transition and emission probabilities (see p62 in Durbin et al
for more discussion on this). By default, all emissions have
a pseudocount of 1.
Raises:
KeyError if the emission from the given state is not allowed.
"""
if (seq_state, emission_state) in self.emission_pseudo:
self.emission_pseudo[(seq_state, emission_state)] = count
else:
raise KeyError("Emission of %s from %s is not allowed."
% (emission_state, seq_state))
class HiddenMarkovModel:
"""Represent a hidden markov model that can be used for state estimation.
"""
def __init__(self, transition_prob, emission_prob, transition_pseudo,
emission_pseudo):
"""Initialize a Markov Model.
Note: You should use the MarkovModelBuilder class instead of
initiating this class directly.
Arguments:
o transition_prob -- A dictionary of transition probabilities for all
possible transitions in the sequence.
o emission_prob -- A dictionary of emission probabilities for all
possible emissions from the sequence states.
o transition_pseudo -- Pseudo-counts to be used for the transitions,
when counting for purposes of estimating transition and emission probabilities.
The amount of time each upgrade domain has to complete before the upgrade rolls back. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
"""
return pulumi.get(self, "upgrade_domain_timeout")
@property
@pulumi.getter(name="upgradeReplicaSetCheckTimeout")
def upgrade_replica_set_check_timeout(self) -> str:
"""
The maximum amount of time to block processing of an upgrade domain and prevent loss of availability when there are unexpected issues. When this timeout expires, processing of the upgrade domain will proceed regardless of availability loss issues. The timeout is reset at the start of each upgrade domain. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
"""
return pulumi.get(self, "upgrade_replica_set_check_timeout")
@property
@pulumi.getter(name="upgradeTimeout")
def upgrade_timeout(self) -> str:
"""
The amount of time the overall upgrade has to complete before the upgrade rolls back. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
"""
return pulumi.get(self, "upgrade_timeout")
@property
@pulumi.getter(name="deltaHealthPolicy")
def delta_health_policy(self) -> Optional['outputs.ClusterUpgradeDeltaHealthPolicyResponse']:
"""
The cluster delta health policy used when upgrading the cluster.
"""
return pulumi.get(self, "delta_health_policy")
@property
@pulumi.getter(name="forceRestart")
def force_restart(self) -> Optional[bool]:
"""
If true, then processes are forcefully restarted during upgrade even when the code version has not changed (the upgrade only changes configuration or data).
"""
return pulumi.get(self, "force_restart")
@pulumi.output_type
class ClusterVersionDetailsResponse(dict):
"""
The detail of the Service Fabric runtime version result
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "codeVersion":
suggest = "code_version"
elif key == "supportExpiryUtc":
suggest = "support_expiry_utc"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterVersionDetailsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterVersionDetailsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterVersionDetailsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
code_version: Optional[str] = None,
environment: Optional[str] = None,
support_expiry_utc: Optional[str] = None):
"""
The detail of the Service Fabric runtime version result
:param str code_version: The Service Fabric runtime version of the cluster.
:param str environment: Indicates if this version is for Windows or Linux operating system.
:param str support_expiry_utc: The date of expiry of support of the version.
"""
if code_version is not None:
pulumi.set(__self__, "code_version", code_version)
if environment is not None:
pulumi.set(__self__, "environment", environment)
if support_expiry_utc is not None:
pulumi.set(__self__, "support_expiry_utc", support_expiry_utc)
@property
@pulumi.getter(name="codeVersion")
def code_version(self) -> Optional[str]:
"""
The Service Fabric runtime version of the cluster.
"""
return pulumi.get(self, "code_version")
@property
@pulumi.getter
def environment(self) -> Optional[str]:
"""
Indicates if this version is for Windows or Linux operating system.
"""
return pulumi.get(self, "environment")
@property
@pulumi.getter(name="supportExpiryUtc")
def support_expiry_utc(self) -> Optional[str]:
"""
The date of expiry of support of the version.
"""
return pulumi.get(self, "support_expiry_utc")
@pulumi.output_type
class DiagnosticsStorageAccountConfigResponse(dict):
"""
The storage account information for storing Service Fabric diagnostic logs.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "blobEndpoint":
suggest = "blob_endpoint"
elif key == "protectedAccountKeyName":
suggest = "protected_account_key_name"
elif key == "queueEndpoint":
suggest = "queue_endpoint"
elif key == "storageAccountName":
suggest = "storage_account_name"
elif key == "tableEndpoint":
suggest = "table_endpoint"
elif key == "protectedAccountKeyName2":
suggest = "protected_account_key_name2"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DiagnosticsStorageAccountConfigResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DiagnosticsStorageAccountConfigResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DiagnosticsStorageAccountConfigResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
blob_endpoint: str,
protected_account_key_name: str,
queue_endpoint: str,
storage_account_name: str,
table_endpoint: str,
protected_account_key_name2: Optional[str] = None):
"""
The storage account information for storing Service Fabric diagnostic logs.
:param str blob_endpoint: The blob endpoint of the azure storage account.
:param str protected_account_key_name: The protected diagnostics storage key name.
:param str queue_endpoint: The queue endpoint of the azure storage account.
:param str storage_account_name: The Azure storage account name.
:param str table_endpoint: The table endpoint of the azure storage account.
:param str protected_account_key_name2: The secondary protected diagnostics storage key name. If one of the storage account keys is rotated the cluster will fallback to using the other.
"""
pulumi.set(__self__, "blob_endpoint", blob_endpoint)
pulumi.set(__self__, "protected_account_key_name", protected_account_key_name)
pulumi.set(__self__, "queue_endpoint", queue_endpoint)
pulumi.set(__self__, "storage_account_name", storage_account_name)
pulumi.set(__self__, "table_endpoint", table_endpoint)
if protected_account_key_name2 is not None:
pulumi.set(__self__, "protected_account_key_name2", protected_account_key_name2)
@property
@pulumi.getter(name="blobEndpoint")
def blob_endpoint(self) -> str:
"""
The blob endpoint of the azure storage account.
"""
return pulumi.get(self, "blob_endpoint")
@property
@pulumi.getter(name="protectedAccountKeyName")
def protected_account_key_name(self) -> str:
"""
The protected diagnostics storage key name.
"""
return pulumi.get(self, "protected_account_key_name")
@property
@pulumi.getter(name="queueEndpoint")
def queue_endpoint(self) -> str:
"""
The queue endpoint of the azure storage account.
"""
return pulumi.get(self, "queue_endpoint")
@property
@pulumi.getter(name="storageAccountName")
def storage_account_name(self) -> str:
"""
The Azure storage account name.
"""
return pulumi.get(self, "storage_account_name")
@property
@pulumi.getter(name="tableEndpoint")
def table_endpoint(self) -> str:
"""
The table endpoint of the azure storage account.
"""
return pulumi.get(self, "table_endpoint")
@property
@pulumi.getter(name="protectedAccountKeyName2")
def protected_account_key_name2(self) -> Optional[str]:
"""
The secondary protected diagnostics storage key name. If one of the storage account keys is rotated the cluster will fallback to using the other.
"""
return pulumi.get(self, "protected_account_key_name2")
@pulumi.output_type
class EndpointRangeDescriptionResponse(dict):
"""
Port range details
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "endPort":
suggest = "end_port"
elif key == "startPort":
suggest = "start_port"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointRangeDescriptionResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointRangeDescriptionResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointRangeDescriptionResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
end_port: int,
start_port: int):
"""
Port range details
:param int end_port: End port of a range of ports
:param int start_port: Starting port of a range of ports
"""
pulumi.set(__self__, "end_port", end_port)
pulumi.set(__self__, "start_port", start_port)
@property
@pulumi.getter(name="endPort")
def end_port(self) -> int:
"""
End port of a range of ports
"""
return pulumi.get(self, "end_port")
@property
@pulumi.getter(name="startPort")
def start_port(self) -> int:
"""
Starting port of a range of ports
"""
return pulumi.get(self, "start_port")
@pulumi.output_type
class ManagedIdentityResponse(dict):
"""
Describes the managed identities for an Azure resource.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "principalId":
suggest = "principal_id"
elif key == "tenantId":
suggest = "tenant_id"
elif key == "userAssignedIdentities":
suggest = "user_assigned_identities"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ManagedIdentityResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ManagedIdentityResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ManagedIdentityResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
principal_id: str,
tenant_id: str,
type: Optional[str] = None,
user_assigned_identities: Optional[Mapping[str, 'outputs.UserAssignedIdentityResponse']] = None):
"""
Describes the managed identities for an Azure resource.
:param str principal_id: The principal id of the managed identity. This property will only be provided for a system assigned identity.
:param str tenant_id: The tenant id of the managed identity. This property will only be provided for a system assigned identity.
:param str type: The type of managed identity for the resource.
:param Mapping[str, 'UserAssignedIdentityResponse'] user_assigned_identities: The list of user identities associated with the resource. The user identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
"""
pulumi.set(__self__, "principal_id", principal_id)
pulumi.set(__self__, "tenant_id", tenant_id)
if type is not None:
pulumi.set(__self__, "type", type)
if user_assigned_identities is not None:
pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
"""
The principal id of the managed identity. This property will only be provided for a system assigned identity.
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The tenant id of the managed identity. This property will only be provided for a system assigned identity.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
The type of managed identity for the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userAssignedIdentities")
def user_assigned_identities(self) -> Optional[Mapping[str, 'outputs.UserAssignedIdentityResponse']]:
"""
The list of user identities associated with the resource. The user identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
"""
return pulumi.get(self, "user_assigned_identities")
@pulumi.output_type
class NamedPartitionSchemeDescriptionResponse(dict):
"""
Describes the named partition scheme of the service.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "partitionScheme":
suggest = "partition_scheme"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in NamedPartitionSchemeDescriptionResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
NamedPartitionSchemeDescriptionResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
# forest_benchmarking/classical_logic/ripple_carry_adder.py
"""
Circuit primitives for classical reversible logic
At the moment it is primarily using the simple adder construction in
[CDKM96] A new quantum ripple-carry addition circuit
Cuccaro, Draper, Kutin, and Moulton
https://arxiv.org/abs/quant-ph/0410184
There are many other classical logic primitives that can be coded see
e.g.
[VBE96] Quantum networks for elementary arithmetic operations
Vedral, Barenco, Ekert
Phys. Rev. A 54, 147 (1996)
https://doi.org/10.1103/PhysRevA.54.147
https://arxiv.org/abs/quant-ph/9511018
"""
from typing import Sequence, Tuple
import networkx as nx
import numpy as np
from numpy import pi
from scipy.spatial.distance import hamming
from pyquil.gates import CNOT, CCNOT, X, I, H, CZ, MEASURE, RESET
from pyquil import Program
from pyquil.quil import Pragma
from pyquil.api import QuantumComputer
from pyquil.unitary_tools import all_bitstrings
from forest_benchmarking.readout import _readout_group_parameterized_bitstring
from forest_benchmarking.classical_logic.primitives import *
from forest_benchmarking.utils import bit_array_to_int, int_to_bit_array
def assign_registers_to_line_or_cycle(start: int, graph: nx.Graph, num_length: int) \
-> Tuple[Sequence[int], Sequence[int], int, int]:
"""
From the start node assign registers as they are laid out in the ideal circuit diagram in
[CDKM96].
Assumes that the there are no dead ends in the graph, and any available neighbor can be
selected from the start without any further checks.
:param start: a node in the graph from which to start the assignment
:param graph: a graph with an unambiguous assignment from the start node, e.g. a cycle or line
:param num_length: the length of the bitstring representation of one summand
:return: the necessary registers and ancilla labels for implementing an adder program to add
the numbers a and b. The output can be passed directly to adder()
"""
if 2 * num_length + 2 > nx.number_of_nodes(graph):
raise ValueError("There are not enough qubits in the graph to support the computation.")
graph = graph.copy()
register_a = []
register_b = []
# set the node at start, and assign the carry_ancilla to this node.
node = start
carry_ancilla = node
neighbors = list(graph.neighbors(node))
idx = 0
while idx < 2 * num_length:
# remove the last assigned node to ensure it is not reassigned.
last_node = node
graph.remove_node(last_node)
# crawl to an arbitrary neighbor node if possible. If not, the assignment has failed.
if len(neighbors) == 0:
raise ValueError("Encountered dead end; assignment failed.")
node = neighbors[0]
neighbors = list(graph.neighbors(node))
# alternate between assigning nodes to the b register and a register, starting with b
if (idx % 2) == 0:
register_b.append(node)
else:
register_a.append(node)
idx += 1
# assign the z_ancilla to a neighbor of the last assignment to a
z_ancilla = next(graph.neighbors(node))
return register_a, register_b, carry_ancilla, z_ancilla
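# Worked example (not taken from the original tests): a 2-bit adder needs
# 2*2 + 2 = 6 qubits, and on a 6-node path graph starting from node 0 the
# crawl above alternates b/a assignments along the line.
#
#   graph = nx.path_graph(6)   # nodes 0-1-2-3-4-5
#   assign_registers_to_line_or_cycle(0, graph, num_length=2)
#   # -> ([2, 4], [1, 3], 0, 5)  i.e. register_a, register_b, carry, z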
def get_qubit_registers_for_adder(qc: QuantumComputer, num_length: int,
qubits: Sequence[int] = None) \
-> Tuple[Sequence[int], Sequence[int], int, int]:
"""
Searches for a layout among the given qubits for the two n-bit registers and two additional
ancilla that matches the simple layout given in figure 4 of [CDKM96].
This method ignores any considerations of physical characteristics of the qc aside from the
qubit layout. An error is thrown if the appropriate layout is not found.
:param qc: the quantum resource on which an adder program will be executed.
:param num_length: the length of the bitstring representation of one summand
:param qubits: the available qubits on which to run the adder program.
:return: the necessary registers and ancilla labels for implementing an adder
program to add the numbers a and b. The output can be passed directly to adder()
"""
if qubits is None:
unavailable = [] # assume this means all qubits in qc are available
else:
unavailable = [qubit for qubit in qc.qubits() if qubit not in qubits]
graph = qc.qubit_topology()
for qubit in unavailable:
graph.remove_node(qubit)
# network x only provides subgraph isomorphism, but we want a subgraph monomorphism, i.e. we
# specifically want to match the edges desired_layout with some subgraph of graph. To
# accomplish this, we swap the nodes and edges of graph by making a line graph.
line_graph = nx.line_graph(graph)
# We want a path of n nodes, which has n-1 edges. Since we are matching edges of graph with
# nodes of layout we make a layout of n-1 nodes.
num_desired_nodes = 2 * num_length + 2
desired_layout = nx.path_graph(num_desired_nodes - 1)
g_matcher = nx.algorithms.isomorphism.GraphMatcher(line_graph, desired_layout)
try:
# pick out a subgraph isomorphic to the desired_layout if one exists
# this is an isomorphic mapping from edges in graph (equivalently nodes of line_graph) to
# nodes in desired_layout (equivalently edges of a path graph with one more node)
edge_iso = next(g_matcher.subgraph_isomorphisms_iter())
except StopIteration:
raise Exception("An appropriate layout for the qubits could not be found among the "
"provided qubits.")
# pick out the edges of the isomorphism from the original graph
subgraph = nx.Graph(graph.edge_subgraph(edge_iso.keys()))
# pick out an endpoint of our path to start the assignment
start_node = -1
for node in subgraph.nodes:
if subgraph.degree(node) == 1: # found an endpoint
start_node = node
break
return assign_registers_to_line_or_cycle(start_node, subgraph, num_length)
def prepare_bitstring(bitstring: Sequence[int], register: Sequence[int], in_x_basis: bool = False):
"""
Creates a program to prepare the input bitstring on the qubits given by the corresponding
label in the register.
:param bitstring: the sequence of bits (0s and 1s) to prepare
:param register: a list of qubits on which to prepare the bitstring. The first
bit of the bitstring is prepared on the first qubit in the register.
:param in_x_basis: if true, prepare the bitstring-representation of the numbers in the x basis.
:returns: state_prep_prog - program
"""
state_prep_prog = Program()
for bit, qubit_label in zip(bitstring, register):
if bit == 1:
state_prep_prog += X(qubit_label)
# if we are doing logic in X basis, follow each bit preparation with a Hadamard
# H |0> = |+> and H |1> = |-> where + and - label the X basis vectors.
if in_x_basis:
state_prep_prog += H(qubit_label)
return state_prep_prog
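# Small illustration (qubit labels arbitrary): prepare_bitstring([1, 0], [5, 6])
# returns a program containing just X 5; with in_x_basis=True the comment above
# indicates every qubit is additionally followed by an H, encoding the bits as
# |+> / |->.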
def adder(num_a: Sequence[int], num_b: Sequence[int], register_a: Sequence[int],
register_b: Sequence[int], carry_ancilla: int, z_ancilla: int, in_x_basis: bool = False,
use_param_program: bool = False) -> Program:
"""
Produces a program implementing reversible adding on a quantum computer to compute a + b.
This implementation is based on [CDKM96], which is easy to implement, if not the most
efficient. Each register of qubit labels should be provided such that the first qubit in
each register is expected to carry the least significant bit of the respective number. This
method also requires two extra ancilla, one initialized to 0 that acts as a dummy initial
carry bit and another (which also probably ought to be initialized to 0) that stores the most
significant bit of the addition (should there be a final carry). The most straightforward
ordering of the registers and two ancilla for adding n-bit numbers follows the pattern
carry_ancilla
b_0
a_0
...
b_j
a_j
...
b_n
a_n
z_ancilla
With this layout, all gates in the circuit act on sets of three adjacent qubits. Such a
layout is provided by calling get_qubit_registers_for_adder on the quantum resource. Note
that even with this layout some of the gates used to implement the circuit may not be native.
In particular there are CCNOT gates which must be decomposed and CNOT(q1, q3) gates acting on
potentially non-adjacent qubits (the layout only ensures q2 is adjacent to both q1 and q3).
The output of the circuit falls on the qubits initially labeled by the b bits (and z_ancilla).
The default option is to compute the addition in the computational (aka Z) basis. By setting
in_x_basis true, the gates CNOT_X_basis and CCNOT_X_basis (defined above) will replace CNOT
and CCNOT so that the computation happens in the X basis.
[CDKM96]
"A new quantum ripple-carry addition circuit"
Steven A. Cuccaro, Thomas G. Draper, Samuel A. Kutin, David Petrie Moulton
https://arxiv.org/abs/quant-ph/0410184
:param num_a: the bitstring representation of the number a with least significant bit last
:param num_b: the bitstring representation of the number b with least significant bit last
:param register_a: list of qubit labels for register a, with least significant bit labeled first
:param register_b: list of qubit labels for register b, with least significant bit labeled first
:param carry_ancilla: qubit labeling a zero-initialized qubit, ideally adjacent to b_0
:param z_ancilla: qubit label, a zero-initialized qubit, ideally adjacent to register_a[-1]
:param in_x_basis: if true, prepare the bitstring-representation of the numbers in the x basis
and subsequently perform all of the addition logic in the x basis.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
LVM class for performing LVM operations.
"""
import math
import re
from itertools import izip
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils as putils
LOG = logging.getLogger(__name__)
class VolumeGroupNotFound(Exception):
def __init__(self, vg_name):
message = (_('Unable to find Volume Group: %s') % vg_name)
super(VolumeGroupNotFound, self).__init__(message)
class VolumeGroupCreationFailed(Exception):
def __init__(self, vg_name):
message = (_('Failed to create Volume Group: %s') % vg_name)
super(VolumeGroupCreationFailed, self).__init__(message)
class LVM(object):
"""LVM object to enable various LVM related operations."""
def __init__(self,
vg_name,
create_vg=False,
physical_volumes=None,
lvm_type='default',
executor=putils.execute):
"""Initialize the LVM object.
The LVM object is based on an LVM VolumeGroup, one instantiation
for each VolumeGroup you have/use.
:param vg_name: Name of existing VG or VG to create
:param create_vg: Indicates the VG doesn't exist
and we want to create it
:param physical_volumes: List of PVs to build VG on
"""
self.vg_name = vg_name
self.pv_list = []
self.lv_list = []
self.vg_size = 0
self.vg_free_space = 0
self.vg_lv_count = 0
self.vg_uuid = None
self.vg_thin_pool = None
self.vg_thin_pool_size = 0
self._execute = executor
if create_vg and physical_volumes is not None:
self.pv_list = physical_volumes
try:
self._create_vg(physical_volumes)
except putils.ProcessExecutionError as err:
LOG.exception(_('Error creating Volume Group'))
LOG.error(_('Cmd :%s') % err.cmd)
LOG.error(_('StdOut :%s') % err.stdout)
LOG.error(_('StdErr :%s') % err.stderr)
raise VolumeGroupCreationFailed(vg_name=self.vg_name)
if self._vg_exists() is False:
LOG.error(_('Unable to locate Volume Group %s') % vg_name)
raise VolumeGroupNotFound(vg_name=vg_name)
if lvm_type == 'thin':
pool_name = "%s-pool" % self.vg_name
if self.get_volume(pool_name) is None:
self.create_thin_pool(pool_name)
else:
self.vg_thin_pool = pool_name
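# Minimal usage sketch (illustrative only; not part of the original class).
# It assumes an existing volume group named 'cinder-volumes' and that the
# LVM tools invoked through processutils may be run via sudo:
#   vg = LVM('cinder-volumes')
#   vg.update_volume_group_info()
#   vg.create_volume('volume-1234', '10g')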
def _size_str(self, size_in_g):
if '.00' in size_in_g:
size_in_g = size_in_g.replace('.00', '')
if int(size_in_g) == 0:
return '100m'
return '%sg' % size_in_g
def _vg_exists(self):
"""Simple check to see if VG exists.
:returns: True if vg specified in object exists, else False
"""
exists = False
cmd = ['vgs', '--noheadings', '-o', 'name']
(out, err) = self._execute(*cmd, root_helper='sudo', run_as_root=True)
if out is not None:
volume_groups = out.split()
if self.vg_name in volume_groups:
exists = True
return exists
def _create_vg(self, pv_list):
cmd = ['vgcreate', self.vg_name, ','.join(pv_list)]
self._execute(*cmd, root_helper='sudo', run_as_root=True)
def _get_vg_uuid(self):
(out, err) = self._execute('vgs', '--noheadings',
'-o uuid', self.vg_name)
if out is not None:
return out.split()
else:
return []
@staticmethod
def supports_thin_provisioning():
"""Static method to check for thin LVM support on a system.
:returns: True if supported, False otherwise
"""
cmd = ['vgs', '--version']
(out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True)
lines = out.split('\n')
for line in lines:
if 'LVM version' in line:
version_list = line.split()
version = version_list[2]
if '(2)' in version:
version = version.replace('(2)', '')
version_tuple = tuple(map(int, version.split('.')))
if version_tuple >= (2, 2, 95):
return True
return False
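# Example (illustrative): a line such as
#   "LVM version:     2.02.95(2) (2012-03-06)"
# gives version_list[2] == '2.02.95(2)'; stripping '(2)' leaves '2.02.95',
# whose tuple (2, 2, 95) satisfies >= (2, 2, 95), so thin provisioning is
# reported as supported.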
@staticmethod
def get_all_volumes(vg_name=None, no_suffix=True):
"""Static method to get all LV's on a system.
:param vg_name: optional, gathers info for only the specified VG
:param no_suffix: optional, reports sizes in g with no suffix
:returns: List of Dictionaries with LV info
"""
cmd = ['lvs', '--noheadings', '--unit=g', '-o', 'vg_name,name,size']
if no_suffix:
cmd += ['--nosuffix']
if vg_name is not None:
cmd += [vg_name]
(out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True)
lv_list = []
if out is not None:
volumes = out.split()
for vg, name, size in izip(*[iter(volumes)] * 3):
lv_list.append({"vg": vg, "name": name, "size": size})
return lv_list
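# Example (illustrative): with vg_name='vg0' the lvs call above might emit
#   "  vg0 lv_home 10.00\n  vg0 lv_swap 2.00\n"
# out.split() flattens this to ['vg0', 'lv_home', '10.00', 'vg0', 'lv_swap', '2.00']
# and izip(*[iter(volumes)] * 3) regroups the flat token list into triples,
# yielding [{'vg': 'vg0', 'name': 'lv_home', 'size': '10.00'},
#           {'vg': 'vg0', 'name': 'lv_swap', 'size': '2.00'}]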
def get_volumes(self):
"""Get all LV's associated with this instantiation (VG).
:returns: List of Dictionaries with LV info
"""
self.lv_list = self.get_all_volumes(self.vg_name)
return self.lv_list
def get_volume(self, name):
"""Get reference object of volume specified by name.
:returns: dict representation of Logical Volume if exists
"""
ref_list = self.get_volumes()
for r in ref_list:
if r['name'] == name:
return r
@staticmethod
def get_all_physical_volumes(vg_name=None, no_suffix=True):
"""Static method to get all PVs on a system.
:param vg_name: optional, gathers info for only the specified VG
:param no_suffix: optional, reports sizes in g with no suffix
:returns: List of Dictionaries with PV info
"""
cmd = ['pvs', '--noheadings',
'--unit=g',
'-o', 'vg_name,name,size,free',
'--separator', ':']
if no_suffix:
cmd += ['--nosuffix']
if vg_name is not None:
cmd += [vg_name]
(out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True)
pv_list = []
if out is not None:
pvs = out.split()
for pv in pvs:
fields = pv.split(':')
pv_list.append({'vg': fields[0],
'name': fields[1],
'size': fields[2],
'available': fields[3]})
return pv_list
def get_physical_volumes(self):
"""Get all PVs associated with this instantiation (VG).
:returns: List of Dictionaries with PV info
"""
self.pv_list = self.get_all_physical_volumes(self.vg_name)
return self.pv_list
@staticmethod
def get_all_volume_groups(vg_name=None, no_suffix=True):
"""Static method to get all VGs on a system.
:param vg_name: optional, gathers info for only the specified VG
:param no_suffix: optional, reports sizes in g with no suffix
:returns: List of Dictionaries with VG info
"""
cmd = ['vgs', '--noheadings',
'--unit=g', '-o',
'name,size,free,lv_count,uuid',
'--separator', ':']
if no_suffix:
cmd += ['--nosuffix']
if vg_name is not None:
cmd += [vg_name]
(out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True)
vg_list = []
if out is not None:
vgs = out.split()
for vg in vgs:
fields = vg.split(':')
vg_list.append({'name': fields[0],
'size': fields[1],
'available': fields[2],
'lv_count': fields[3],
'uuid': fields[4]})
return vg_list
def update_volume_group_info(self):
"""Update VG info for this instantiation.
Used to update member fields of object and
provide a dict of info for caller.
:returns: Dictionaries of VG info
"""
vg_list = self.get_all_volume_groups(self.vg_name)
if len(vg_list) != 1:
LOG.error(_('Unable to find VG: %s') % self.vg_name)
raise VolumeGroupNotFound(vg_name=self.vg_name)
self.vg_size = vg_list[0]['size']
self.vg_free_space = vg_list[0]['available']
self.vg_lv_count = vg_list[0]['lv_count']
self.vg_uuid = vg_list[0]['uuid']
if self.vg_thin_pool is not None:
for lv in self.get_all_volumes(self.vg_name):
if lv['name'] == self.vg_thin_pool:
self.vg_thin_pool_size = lv['size']
def create_thin_pool(self, name=None, size_str=0):
"""Creates a thin provisioning pool for this VG.
The syntax here is slightly different than the default
lvcreate -T, so we'll just write a custom cmd here
and do it.
:param name: Name to use for pool, default is "<vg-name>-pool"
:param size_str: Size to allocate for pool, default is entire VG
"""
if not self.supports_thin_provisioning():
LOG.error(_('Requested to setup thin provisioning, '
'however current LVM version does not '
'support it.'))
return None
if name is None:
name = '%s-pool' % self.vg_name
if size_str == 0:
self.update_volume_group_info()
size_str = self.vg_size
# NOTE(jdg): lvcreate will round up extents
# to avoid issues, let's chop the size off to an int
size_str = re.sub(r'\.\d*', '', size_str)
pool_path = '%s/%s' % (self.vg_name, name)
cmd = ['lvcreate', '-T', '-L', size_str, pool_path]
self._execute(*cmd,
root_helper='sudo',
run_as_root=True)
self.vg_thin_pool = pool_path
def create_volume(self, name, size_str, lv_type='default', mirror_count=0):
"""Creates a logical volume on the object's VG.
:param name: Name to use when creating Logical Volume
:param size_str: Size to use when creating Logical Volume
:param lv_type: Type of Volume (default or thin)
:param mirror_count: Use LVM mirroring with specified count
"""
if lv_type == 'thin':
pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool)
cmd = ['lvcreate', '-T', '-V', size_str, '-n', name, pool_path]
else:
cmd = ['lvcreate', '-n', name, self.vg_name, '-L', size_str]
if mirror_count > 0:
cmd += ['-m', mirror_count, '--nosync']
terras = int(size_str[:-1]) / 1024.0
if terras >= 1.5:
rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
# NOTE(vish): Next power of two for region size. See:
# http://red.ht/U2BPOD
cmd += ['-R', str(rsize)]
try:
self._execute(*cmd,
root_helper='sudo',
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_('Error creating Volume'))
LOG.error(_('Cmd :%s') % err.cmd)
LOG.error(_('StdOut :%s') % err.stdout)
LOG.error(_('StdErr :%s') % err.stderr)
raise
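# Worked example (sketch): for a 3 TiB mirrored LV, size_str = '3072g' gives
# terras = 3072 / 1024.0 = 3.0 >= 1.5, so rsize = 2 ** ceil(log2(3.0)) = 4 and
# '-R 4' is appended, i.e. the mirror region size is rounded up to the next
# power of two as the NOTE above describes.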
def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
"""Creates a snapshot of a logical volume.
:param name: Name to assign to new snapshot
:param source_lv_name: Name of Logical Volume to snapshot
:param lv_type: Type of LV (default or thin)
"""
source_lvref = self.get_volume(source_lv_name)
if source_lvref is None:
LOG.error(_("Unable to find LV: %s") % source_lv_name)
return False
cmd = ['lvcreate', '--name', name,
'--snapshot', '%s/%s' % (self.vg_name, source_lv_name)]
if lv_type != 'thin':
size = source_lvref['size']
cmd += ['-L', size]
('2012-02-09')
)
db.time_record.create \
( daily_record = dr
, duration = 7.75
, work_location = '5'
, wp = '2'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-10')
)
db.time_record.create \
( daily_record = dr
, duration = 7.5
, work_location = '5'
, wp = '2'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-11')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-12')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-13')
)
db.time_record.create \
( daily_record = dr
, duration = 9.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-14')
)
db.time_record.create \
( daily_record = dr
, duration = 9.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-15')
)
db.time_record.create \
( daily_record = dr
, duration = 9.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-16')
)
db.time_record.create \
( daily_record = dr
, duration = 9.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-17')
)
db.time_record.create \
( daily_record = dr
, duration = 9.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-18')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-19')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-20')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-21')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-22')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-23')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-24')
)
db.time_record.create \
( daily_record = dr
, duration = 9.75
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-25')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-26')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-27')
)
db.time_record.create \
( daily_record = dr
, duration = 8.75
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-28')
)
db.time_record.create \
( daily_record = dr
, duration = 9.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-02-29')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-01')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-02')
)
db.time_record.create \
( daily_record = dr
, duration = 7.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-03')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-04')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-05')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-06')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-07')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-08')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-09')
)
db.time_record.create \
( daily_record = dr
, duration = 9.75
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-10')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-11')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-12')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-13')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-14')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-15')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-16')
)
db.time_record.create \
( daily_record = dr
, duration = 9.75
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-17')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-18')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-19')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-20')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-21')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-22')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-23')
)
db.time_record.create \
( daily_record = dr
, duration = 9.25
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-24')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-25')
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2012-03-26')
)
db.time_record.create \
( daily_record = dr
, duration = 8.0
, work_location = '1'
, wp = '4'
)
dr = db.daily_record.create \
( user = user
# Repository: TheShadow29/vognet-pytorch
"""
Creates the 4-way dataset.
Main motivation:
It is currently unclear whether the models ground based only
on the object name, or whether they really learn the roles of
the visual elements correctly.
We therefore create a 4-way dataset: for every data point with
S-V-O statistics we generate counterfactuals (not sure if this
is the correct name or not). For every image containing, say,
S1-V1-O1, we present it together with other images having the
characteristics S2-V1-O1, S1-V2-O1 and S1-V1-O2. The set can be
reduced when only S-V or O-V are present.
More generally, we would like to create counterfactuals for
anything that can provide evidence.
Additionally, we need to check:
- [x] Location words shouldn't be present
- [x] Perform VERB lemmatization
- [x] Distinguish between what is groundable and
what is not
- [x] Check the groundable verbs
"""
from pathlib import Path
import pandas as pd
from tqdm.auto import tqdm
from collections import Counter
import json
import copy
import ast
import numpy as np
from _init_stuff import CN, yaml
from typing import List
np.random.seed(seed=5)
def create_random_list(cfg, srl_annots, ann_row_idx):
"""
Returns randomly chosen contrastive candidates: up to 10 SRL indices
(from other video segments) for each groundable argument of the given row.
"""
srl_idxs_possible = np.array(srl_annots.index)
vid_segs = srl_annots.vid_seg
vid_seg = vid_segs.loc[ann_row_idx]
srl_row = srl_annots.loc[ann_row_idx]
req_cls_pats = srl_row.req_cls_pats
req_cls_pats_mask = srl_row.req_cls_pats_mask
args_to_use = set(['V', 'ARG0', 'ARG1', 'ARG2', 'ARGM-LOC'])
arg_keys_vis_present = []
arg_keys_lang_present = []
for srl_arg, srl_arg_mask in zip(req_cls_pats, req_cls_pats_mask):
arg_key = srl_arg[0]
arg_keys_lang_present.append(arg_key)
if arg_key == 'V' or arg_key in args_to_use:
arg_keys_vis_present.append(arg_key)
ds4_msk = {}
inds_to_use = {}
num_arg_keys_vis = len(arg_keys_vis_present)
other_anns = np.random.choice(
srl_idxs_possible, size=10 * num_arg_keys_vis,
replace=False
).reshape(num_arg_keys_vis, 10)
for aind, arg_key1 in enumerate(arg_keys_vis_present):
in1 = other_anns[aind].tolist()
assert len(in1) == 10
set1 = set(in1)
set_int = [s for s in set1 if
vid_segs.loc[s] != vid_seg]
# TODO:
# Make replace false, currently true
# because some have low chances of
# appearing
assert len(set_int) > 0
inds_to_use[arg_key1] = set_int
ds4_msk[arg_key1] = 1
return inds_to_use, ds4_msk
def create_similar_list_new(cfg, arg_dicts, srl_annots, ann_row_idx):
"""
Does it for one row. Assumes annotations
exist and can be retrieved via `self`.
The logic:
Each input idx has ARG0, V, ARG1 ...,
(1) Pivot across one argument, say ARG0
(2) Retrieve all other indices such that they
have different ARG0, but same V, ARG1 ... (do
each of them separately)
(3) To retrieve those indices with V, ARG1 same
we can just do intersection of the two sets
To facilitate (2), we first create separate
dictionaries for each V, ARG1 etc. and then
just reference them via self.create_dicts
"""
srl_idxs_possible = np.array(srl_annots.index)
vid_segs = srl_annots.vid_seg
vid_seg = vid_segs.loc[ann_row_idx]
srl_row = srl_annots.loc[ann_row_idx]
req_cls_pats = srl_row.req_cls_pats
req_cls_pats_mask = srl_row.req_cls_pats_mask
args_to_use = set(['V', 'ARG0', 'ARG1', 'ARG2', 'ARGM-LOC'])
some_inds = {}
arg_keys_vis_present = []
arg_keys_lang_present = []
for srl_arg, srl_arg_mask in zip(req_cls_pats, req_cls_pats_mask):
arg_key = srl_arg[0]
arg_keys_lang_present.append(arg_key)
if arg_key == 'V' or arg_key in args_to_use:
arg_keys_vis_present.append(arg_key)
if arg_key in args_to_use:
lemma_key = 'lemma_{}'.format(
arg_key.replace('-', '_').replace('V', 'verb'))
lemma_arg = srl_row[lemma_key]
if isinstance(lemma_arg, list):
assert all([le_arg in arg_dicts[arg_key]
for le_arg in lemma_arg])
if len(lemma_arg) >= 1:
le_arg = lemma_arg
else:
le_arg = cfg.ds.none_word
else:
le_arg = [lemma_arg]
# srl_ind_list = copy.deepcopy(
# arg_dicts[arg_key][le_arg])
# srl_ind_list = arg_dicts[arg_key][le_arg][:]
for le_ar in le_arg:
srl_ind_list = arg_dicts[arg_key][le_ar][:]
srl_ind_list.remove(ann_row_idx)
if arg_key not in some_inds:
some_inds[arg_key] = []
some_inds[arg_key] += srl_ind_list
# # If not groundable but in args_to_use
# else:
# pass
num_arg_keys_vis = len(arg_keys_vis_present)
other_anns = np.random.choice(
srl_idxs_possible, size=10 * num_arg_keys_vis,
replace=False
).reshape(num_arg_keys_vis, 10)
inds_to_use = {}
ds4_msk = {}
for aind, arg_key1 in enumerate(arg_keys_vis_present):
arg_key_to_use = [
ak for ak in arg_keys_vis_present if ak != arg_key1]
set1 = set(some_inds[arg_key_to_use[0]])
set_int1 = set1.intersection(
*[set(some_inds[ak]) for ak in arg_key_to_use[1:]])
curr_set = set(some_inds[arg_key1])
set_int2 = list(set_int1 - curr_set)
set_int = [s for s in set_int2 if
vid_segs.loc[s] != vid_seg]
# TODO:
# Make replace false, currently true
# because some have low chances of
# appearing
if len(set_int) == 0:
# this means similar scenario not found
# inds
ds4_msk[arg_key1] = 0
inds_to_use[arg_key1] = other_anns[aind].tolist()
# inds_to_use[arg_key1] = [-1]
# cfg.ouch += 1
# print('ouch')
else:
ds4_msk[arg_key1] = 1
inds_to_use[arg_key1] = np.random.choice(
set_int, 10, replace=True).tolist()
# cfg.yolo += 1
# print('yolo')
# inds_to_use_lens = [len(v) if v[0] != -1 else 0 for k,
# v in inds_to_use.items()]
# if sum(inds_to_use_lens) == 0:
# cfg.ouch2 += 1
# else:
# cfg.yolo2 += 1
return inds_to_use, ds4_msk
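# Toy walk-through (illustrative only): suppose row 7 has ARG0='man', V='throw',
# ARG1='ball', and the lemma dictionaries give
#   arg_dicts['ARG0']['man']  = [7, 9, 12]
#   arg_dicts['V']['throw']   = [7, 9, 30]
#   arg_dicts['ARG1']['ball'] = [7, 30, 41]
# Pivoting on ARG0, the intersection of the V and ARG1 index lists (row 7
# removed) is {30}; subtracting rows that share the ARG0 lemma and dropping
# rows from the same vid_seg leaves {30} as the contrastive pool for ARG0,
# from which 10 candidates are sampled with replacement.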
def create_similar_list(cfg, arg_dicts, srl_annots, ann_row_idx):
"""
Does it for one row. Assumes annotations
exist and can be retrieved via `self`.
The logic:
Each input idx has ARG0, V, ARG1 ...,
(1) Pivot across one argument, say ARG0
(2) Retrieve all other indices such that they
have different ARG0, but same V, ARG1 ... (do
each of them separately)
(3) To retrieve those indices with V, ARG1 same
we can just do intersection of the two sets
To facilitate (2), we first create separate
dictionaries for each V, ARG1 etc. and then
just reference them via self.create_dicts
"""
srl_idxs_possible = np.array(srl_annots.index)
vid_segs = srl_annots.vid_seg
vid_seg = vid_segs.loc[ann_row_idx]
srl_row = srl_annots.loc[ann_row_idx]
req_cls_pats = srl_row.req_cls_pats
req_cls_pats_mask = srl_row.req_cls_pats_mask
args_to_use = set(['V', 'ARG0', 'ARG1', 'ARG2', 'ARGM-LOC'])
some_inds = {}
arg_keys_vis_present = []
arg_keys_lang_present = []
for srl_arg, srl_arg_mask in zip(req_cls_pats, req_cls_pats_mask):
arg_key = srl_arg[0]
arg_keys_lang_present.append(arg_key)
if arg_key == 'V' or arg_key in args_to_use:
arg_keys_vis_present.append(arg_key)
if arg_key in args_to_use:
lemma_key = 'lemma_{}'.format(
arg_key.replace('-', '_').replace('V', 'verb'))
lemma_arg = srl_row[lemma_key]
if isinstance(lemma_arg, list):
assert all([le_arg in arg_dicts[arg_key]
for le_arg in lemma_arg])
if len(lemma_arg) >= 1:
le_arg = lemma_arg[0]
else:
le_arg = cfg.ds.none_word
else:
le_arg = lemma_arg
# srl_ind_list = copy.deepcopy(
# arg_dicts[arg_key][le_arg])
# srl_ind_list = arg_dicts[arg_key][le_arg][:]
srl_ind_list = arg_dicts[arg_key][le_arg][:]
srl_ind_list.remove(ann_row_idx)
if arg_key not in some_inds:
some_inds[arg_key] = []
some_inds[arg_key] += srl_ind_list
# # If not groundable but in args_to_use
# else:
# pass
num_arg_keys_vis = len(arg_keys_vis_present)
other_anns = np.random.choice(
srl_idxs_possible, size=10 * num_arg_keys_vis,
replace=False
).reshape(num_arg_keys_vis, 10)
inds_to_use = {}
ds4_msk = {}
for aind, arg_key1 in enumerate(arg_keys_vis_present):
arg_key_to_use = [
ak for ak in arg_keys_vis_present if ak != arg_key1]
set1 = set(some_inds[arg_key_to_use[0]])
set_int1 = set1.intersection(
*[set(some_inds[ak]) for ak in arg_key_to_use[1:]])
curr_set = set(some_inds[arg_key1])
set_int2 = list(set_int1 - curr_set)
set_int = [s for s in set_int2 if
vid_segs.loc[s] != vid_seg]
# TODO:
# Make replace false, currently true
# because some have low chances of
# appearing
if len(set_int) == 0:
# this means similar scenario not found
# inds
ds4_msk[arg_key1] = 0
inds_to_use[arg_key1] = other_anns[aind].tolist()
# inds_to_use[arg_key1] = [-1]
# cfg.ouch += 1
# print('ouch')
else:
ds4_msk[arg_key1] = 1
inds_to_use[arg_key1] = np.random.choice(
set_int, 10, replace=True).tolist()
# cfg.yolo += 1
# print('yolo')
# inds_to_use_lens = [len(v) if v[0] != -1 else 0 for k,
# v in inds_to_use.items()]
# if sum(inds_to_use_lens) == 0:
# cfg.ouch2 += 1
# else:
# cfg.yolo2 += 1
return inds_to_use, ds4_msk
class AnetDSCreator:
def __init__(self, cfg, tdir='.'):
self.cfg = cfg
self.tdir = Path(tdir)
def fix_via_ast(self, df):
for k in df.columns:
first_word = df.iloc[0][k]
if isinstance(first_word, str) and (first_word[0] in '[{'):
df[k] = df[k].apply(
lambda x: ast.literal_eval(x))
return df
def get_stats(self, req_args):
"""
Gets the counts for the argument types
"""
c = Counter()
if isinstance(req_args[0], list):
for x in req_args:
c += Counter(x)
else:
c = Counter(req_args)
return c.most_common()
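# Illustrative example of the counting behaviour:
#   get_stats([['ARG0', 'V'], ['V', 'ARG1']]) -> [('V', 2), ('ARG0', 1), ('ARG1', 1)]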
def create_all_similar_lists(self):
self.create_similar_lists(split_type='train')
self.create_similar_lists(split_type='valid')
def create_similar_lists(self, split_type: str = 'train'):
"""
Need to check whether creating these lists only for the
validation set would be enough.
Basically, for each input, this generates a list of other
inputs (idxs) which have the same S, V, O (at least one is the same).
"""
if split_type == 'train':
srl_annot_file = self.tdir / self.cfg.ds.trn_verb_ent_file
ds4_dict_file = self.tdir / self.cfg.ds.trn_ds4_dicts
ds4_ind_file = self.tdir / self.cfg.ds.trn_ds4_inds
elif split_type == 'valid':
srl_annot_file = self.tdir / self.cfg.ds.val_verb_ent_file
ds4_dict_file = self.tdir / self.cfg.ds.val_ds4_dicts
ds4_ind_file = self.tdir / self.cfg.ds.val_ds4_inds
elif split_type == 'trn_val':
srl_annot_file = self.tdir / self.cfg.ds.verb_ent_file
ds4_dict_file = self.tdir / self.cfg.ds.ds4_dicts
ds4_ind_file = self.tdir / self.cfg.ds.ds4_inds
elif split_type == 'only_val':
srl_annot_file = Path('./data/anet_verb/val_1_verb_ent_file.csv')
ds4_dict_file = Path(
'./data/anet_verb/val_1_srl_args_dict_obj_to_ind.json'
)
else:
raise NotImplementedError
# elif split_type == 'test':
# srl_annot_file = self.tdir / self.cfg.ds.test_verb_ent_file
# ds4_dict_file = self.tdir / self.cfg.ds.test_ds4_dicts
# ds4_ind_file = self.tdir / self.cfg.ds.test_ds4_inds
# elif split_type == 'val_test':
# # validation file with validation+test indices
# srl_annot_file = self.tdir / self.cfg.ds.test_verb_ent_file
# ds4_dict_file =
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False
# -*- coding: utf-8 -*
"""
Datastructures to help externalization.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# There are a *lot* of fixme (XXX and the like) in this file.
# Turn those off in general so we can see through the noise.
# pylint:disable=fixme
# pylint:disable=keyword-arg-before-vararg
# stdlib imports
import numbers
import warnings
import six
from six import iteritems
from zope import interface
from zope import schema
from zope.component import getUtility
from zope.schema.interfaces import SchemaNotProvided
from zope.schema.interfaces import IDict
from zope.schema.interfaces import IObject
from nti.schema.interfaces import find_most_derived_interface
from .interfaces import IInternalObjectExternalizer
from .interfaces import IInternalObjectIO
from .interfaces import IInternalObjectIOFinder
from .interfaces import IAnonymousObjectFactory
from .interfaces import StandardInternalFields
# Things imported from cython with matching cimport
from .externalization.dictionary import to_minimal_standard_external_dictionary
from .externalization.dictionary import internal_to_standard_external_dictionary
# Must rename this so it doesn't conflict with method defs;
# that breaks cython
from .externalization.externalizer import to_external_object as _toExternalObject
from .internalization import validate_named_field_value
from .internalization.factories import find_factory_for
from .representation import make_repr
from .factory import AnonymousObjectFactory
from ._base_interfaces import get_standard_external_fields
from ._base_interfaces import get_standard_internal_fields
from ._base_interfaces import get_default_externalization_policy
from ._base_interfaces import NotGiven
from ._interface_cache import cache_for
StandardExternalFields = get_standard_external_fields()
StandardInternalFields = get_standard_internal_fields()
DEFAULT_EXTERNALIZATION_POLICY = get_default_externalization_policy()
IDict_providedBy = IDict.providedBy
IObject_providedBy = IObject.providedBy
__all__ = [
'ExternalizableDictionaryMixin',
'StandardInternalObjectExternalizer',
'AbstractDynamicObjectIO',
'ExternalizableInstanceDict',
'InterfaceObjectIO',
'ModuleScopedInterfaceObjectIO',
]
class ExternalizableDictionaryMixin(object):
"""
Implements a toExternalDictionary method as a base for subclasses.
"""
#: If true, then when asked for the standard dictionary, we will instead
#: produce the *minimal* dictionary. See :func:`~to_minimal_standard_external_dictionary`
__external_use_minimal_base__ = False
def _ext_replacement(self):
"""
Return the object that we are externalizing.
This class returns ``self``, but subclasses will typically override this.
"""
return self
def _ext_standard_external_dictionary(self, replacement, mergeFrom=None, **kwargs):
if self.__external_use_minimal_base__:
return to_minimal_standard_external_dictionary(replacement,
mergeFrom=mergeFrom)
return internal_to_standard_external_dictionary(
replacement,
mergeFrom=mergeFrom,
decorate=kwargs.get('decorate', True),
request=kwargs.get('request', NotGiven),
decorate_callback=kwargs.get('decorate_callback', NotGiven),
policy=kwargs.get("policy", DEFAULT_EXTERNALIZATION_POLICY),
)
def toExternalDictionary(self, mergeFrom=None, *unused_args, **kwargs):
"""
Produce the standard external dictionary for this object.
Uses `_ext_replacement`.
"""
return self._ext_standard_external_dictionary(self._ext_replacement(),
mergeFrom=mergeFrom,
**kwargs)
class StandardInternalObjectExternalizer(ExternalizableDictionaryMixin):
"""
An *adapter* that can be used to implement
:class:`~nti.externalization.interfaces.IInternalObjectExternalizer`.
The result of externalizing is the standard external dictionary
for this adapter's *context* argument.
This can be registered as-is, or subclassed to add additional
items in the external dictionary. In that case, always begin by
calling this implementation first and updating the result.
.. versionadded:: 2.3.0
"""
def __init__(self, context):
"""
The constructor sets ``__external_can_create__`` to `False` (because
creating from just an externalizer makes no sense) and
``__external_class_name__`` to `None` (if you override this value,
it will replace the ``Class`` value in the returned dictionary;
it *must* be a native `str`).
"""
self.context = context
self.__external_can_create__ = False
self.__external_class_name__ = None
def _ext_replacement(self):
"""
Returns this adapter's *context* argument.
"""
return self.context
def toExternalObject(self, **kwargs):
result = self.toExternalDictionary(**kwargs)
if self.__external_class_name__:
result[StandardExternalFields.CLASS] = self.__external_class_name__
return result
interface.classImplements(StandardInternalObjectExternalizer,
IInternalObjectExternalizer)
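# A minimal, hedged subclassing sketch (not part of the original module). It
# follows the pattern described in the class docstring: call this
# implementation first, then update the resulting dictionary. The class name
# and the 'title' attribute below are illustrative assumptions only.
class _ExampleNoteExternalizer(StandardInternalObjectExternalizer):
    def toExternalObject(self, **kwargs):
        result = super(_ExampleNoteExternalizer, self).toExternalObject(**kwargs)
        # add one extra, domain-specific item on top of the standard dictionary
        result['title'] = getattr(self.context, 'title', None)
        return result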
class AbstractDynamicObjectIO(ExternalizableDictionaryMixin):
"""
Base class for objects that externalize based on dynamic information.
Abstractions are in place to allow subclasses to map external and internal names
independently (this type never uses getattr/setattr/hasattr, except for some
standard fields).
See `InterfaceObjectIO` for a complete implementation.
"""
# TODO: there should be some better way to customize this if desired (an explicit list)
# TODO: Play well with __slots__
# TODO: This won't evolve well. Need something more sophisticated,
# probably a meta class.
# Avoid things super handles
# These all *should* be frozenset() and immutable
_excluded_out_ivars_ = frozenset({
StandardInternalFields.ID,
StandardExternalFields.ID,
StandardInternalFields.CREATOR,
StandardExternalFields.CREATOR,
StandardInternalFields.CONTAINER_ID,
'lastModified',
StandardInternalFields.LAST_MODIFIEDU,
StandardInternalFields.CREATED_TIME,
'links'
})
_excluded_in_ivars_ = frozenset({
StandardInternalFields.ID,
StandardExternalFields.ID,
StandardExternalFields.OID,
StandardInternalFields.CREATOR,
StandardExternalFields.CREATOR,
StandardInternalFields.LAST_MODIFIED,
StandardInternalFields.LAST_MODIFIEDU,
# Also the IDCTimes created/modified values
'created', 'modified',
StandardExternalFields.CLASS,
StandardInternalFields.CONTAINER_ID
})
_ext_primitive_out_ivars_ = frozenset()
_prefer_oid_ = False
def find_factory_for_named_value(self, key, value): # pylint:disable=unused-argument
"""
Uses `.find_factory_for` to locate a factory.
This does not take into account the current object (context)
or the *key*. It only handles finding factories based on the
class or MIME type found within *value*.
"""
return find_factory_for(value)
def _ext_replacement(self):
# Redeclare this here for cython
return self
def _ext_all_possible_keys(self):
"""
This method must return a `frozenset` of native strings.
"""
raise NotImplementedError()
def _ext_setattr(self, ext_self, k, value):
raise NotImplementedError()
def _ext_getattr(self, ext_self, k, default=NotGiven):
"""
_ext_getattr(object, name[, default]) -> value
Return the attribute of the *ext_self* object with the internal name *name*.
If the attribute does not exist, should raise (typically :exc:`AttributeError`),
unless *default* is given, in which case it returns that.
.. versionchanged:: 1.0a4
Add the *default* argument.
"""
raise NotImplementedError()
def _ext_replacement_getattr(self, name, default=NotGiven):
"""
Like `_ext_getattr`, but automatically fills in `_ext_replacement`
for the *ext_self* argument.
.. versionadded:: 1.0a4
"""
return self._ext_getattr(self._ext_replacement(), name, default)
def _ext_keys(self):
"""
Return only the names of attributes that should be externalized.
These values will be used as keys in the external dictionary.
See :meth:`_ext_all_possible_keys`. This implementation then filters out
*private* attributes (those beginning with an underscore),
and those listed in ``_excluded_in_ivars_``.
This method must return a set of native strings.
"""
# Sadly, we cannot yet enforce what type _excluded_out_ivars_ is.
# Mostly it is a set or frozen set (depending on how it was
# combined with the declaration in this class) but some overrides
# in the wild have it as a tuple. We need a metaclass to fix that.
excluded = self._excluded_out_ivars_
return [k for k in self._ext_all_possible_keys()
if (k not in excluded # specifically excluded
and not k.startswith('_'))] # private
# and not callable(getattr(ext_self,k)))] # avoid functions
def _ext_primitive_keys(self):
"""
Return a container of string keys whose values are known to be primitive.
This is an optimization for writing.
This method must return a frozenset.
"""
return self._ext_primitive_out_ivars_
def toExternalDictionary(self, mergeFrom=None, *unused_args, **kwargs):
result = super(AbstractDynamicObjectIO, self).toExternalDictionary(mergeFrom=mergeFrom,
**kwargs)
ext_self = self._ext_replacement()
primitive_ext_keys = self._ext_primitive_keys()
for k in self._ext_keys():
if k in result:
# Standard key already added
continue
ext_val = attr_val = self._ext_getattr(ext_self, k)
__traceback_info__ = k, attr_val
if k not in primitive_ext_keys:
ext_val = _toExternalObject(attr_val, **kwargs)
result[k] = ext_val
if ext_val is not attr_val:
# We want to be sure things we externalize have the
# right parent relationship but if we are directly
# externalizing an existing object (e.g., primitive or
# something that uses a replacement) we don't want to
# change the relationship or even set one in the first
# place---if the object gets pickled later on, that
# could really screw things up (One symptom is
# InvalidObjectReference from ZODB across
# transactions/tests) if ILocation.providedBy(
# result[k] ): (throwing is faster than providedBy)
try:
ext_val.__parent__ = ext_self
except AttributeError:
# toExternalObject is schizophrenic about when it converts
# return values to LocatedExternalDict/List. Sometimes it
# does, sometimes it does not.
pass
if (StandardExternalFields.ID in result
and StandardExternalFields.OID in result
and self._prefer_oid_
and result[StandardExternalFields.ID] != result[StandardExternalFields.OID]):
result[StandardExternalFields.ID] = result[StandardExternalFields.OID]
return result
def toExternalObject(self, mergeFrom=None, *args, **kwargs):
return self.toExternalDictionary(mergeFrom, *args, **kwargs)
def _ext_accept_update_key(self, k, ext_self, ext_keys): # pylint:disable=unused-argument
"""
Returns whether or not this key should be accepted for setting
on the object, or silently ignored.
:param ext_keys: As an optimization, the value of :meth:`_ext_all_possible_keys`
is passed. Keys are only accepted if they are in this list.
"""
return k not in self._excluded_in_ivars_ and k in ext_keys
def _ext_accept_external_id(self, ext_self, parsed): # pylint:disable=unused-argument
"""
If the object we're updating does not have an ``id`` set, but there is an
``ID`` in the external object, should we be able to use it?
:return: boolean
"""
return False # false by default
def updateFromExternalObject(self, parsed, *unused_args, **unused_kwargs):
return self._updateFromExternalObject(parsed)
def _updateFromExternalObject(self, parsed):
updated = False
ext_self = self._ext_replacement()
ext_keys = self._ext_all_possible_keys()
for k, v in iteritems(parsed):
if not self._ext_accept_update_key(k, ext_self, ext_keys):
continue
__traceback_info__ = (k, v)
self._ext_setattr(ext_self, k, v)
updated = True
# TODO: Should these go through _ext_setattr?
if (StandardExternalFields.CONTAINER_ID in parsed
and getattr(ext_self, StandardInternalFields.CONTAINER_ID, parsed) is None):
setattr(ext_self,
StandardInternalFields.CONTAINER_ID,
parsed[StandardExternalFields.CONTAINER_ID])
if (StandardExternalFields.CREATOR in parsed
and getattr(ext_self, StandardInternalFields.CREATOR, parsed) is None):
setattr(ext_self,
StandardInternalFields.CREATOR,
parsed[StandardExternalFields.CREATOR])
if (StandardExternalFields.ID in parsed
and getattr(ext_self, StandardInternalFields.ID, parsed) is None
and self._ext_accept_external_id(ext_self, parsed)):
setattr(ext_self,
StandardInternalFields.ID,
parsed[StandardExternalFields.ID])
return updated
interface.classImplements(AbstractDynamicObjectIO, IInternalObjectIOFinder)
class _ExternalizableInstanceDict(AbstractDynamicObjectIO):
# TODO: there should be some better way to customize this if desired (an explicit list)
# TODO: Play well with __slots__? ZODB supports slots, but doesn't recommend them
# TODO: This won't evolve well. Need something more sophisticated, probably a meta class.
import copy
import logging
import os
from pathlib import Path
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from typing import Any, List, Optional, Text, Dict, Tuple, Union
import rasa.utils.io as io_utils
from rasa.core.domain import Domain
from rasa.core.featurizers import (
TrackerFeaturizer,
FullDialogueTrackerFeaturizer,
LabelTokenizerSingleStateFeaturizer,
MaxHistoryTrackerFeaturizer,
)
from rasa.core.policies.policy import Policy
from rasa.core.constants import DEFAULT_POLICY_PRIORITY, DIALOGUE
from rasa.core.trackers import DialogueStateTracker
from rasa.utils import train_utils
from rasa.utils.tensorflow import layers
from rasa.utils.tensorflow.transformer import TransformerEncoder
from rasa.utils.tensorflow.models import RasaModel
from rasa.utils.tensorflow.model_data import RasaModelData, FeatureSignature
from rasa.utils.tensorflow.constants import (
LABEL,
HIDDEN_LAYERS_SIZES,
TRANSFORMER_SIZE,
NUM_TRANSFORMER_LAYERS,
NUM_HEADS,
BATCH_SIZES,
BATCH_STRATEGY,
EPOCHS,
RANDOM_SEED,
RANKING_LENGTH,
LOSS_TYPE,
SIMILARITY_TYPE,
NUM_NEG,
EVAL_NUM_EXAMPLES,
EVAL_NUM_EPOCHS,
NEGATIVE_MARGIN_SCALE,
REGULARIZATION_CONSTANT,
SCALE_LOSS,
USE_MAX_NEG_SIM,
MAX_NEG_SIM,
MAX_POS_SIM,
EMBEDDING_DIMENSION,
DROP_RATE_DIALOGUE,
DROP_RATE_LABEL,
DROP_RATE_ATTENTION,
WEIGHT_SPARSITY,
KEY_RELATIVE_ATTENTION,
VALUE_RELATIVE_ATTENTION,
MAX_RELATIVE_POSITION,
SOFTMAX,
AUTO,
BALANCED,
TENSORBOARD_LOG_DIR,
TENSORBOARD_LOG_LEVEL,
)
logger = logging.getLogger(__name__)
DIALOGUE_FEATURES = f"{DIALOGUE}_features"
LABEL_FEATURES = f"{LABEL}_features"
LABEL_IDS = f"{LABEL}_ids"
SAVE_MODEL_FILE_NAME = "ted_policy"
class TEDPolicy(Policy):
"""Transformer Embedding Dialogue (TED) Policy is described in
https://arxiv.org/abs/1910.00486.
This policy has a pre-defined architecture, which comprises the
following steps:
- concatenate user input (user intent and entities), previous system actions,
slots and active forms for each time step into an input vector to
pre-transformer embedding layer;
- feed it to transformer;
- apply a dense layer to the output of the transformer to get embeddings of a
dialogue for each time step;
- apply a dense layer to create embeddings for system actions for each time
step;
- calculate the similarity between the dialogue embedding and embedded system
actions. This step is based on the StarSpace
(https://arxiv.org/abs/1709.03856) idea.
"""
SUPPORTS_ONLINE_TRAINING = True
# please make sure to update the docs when changing a default parameter
defaults = {
# ## Architecture of the used neural network
# Hidden layer sizes for layers before the dialogue and label embedding layers.
# The number of hidden layers is equal to the length of the corresponding
# list.
HIDDEN_LAYERS_SIZES: {DIALOGUE: [], LABEL: []},
# Number of units in transformer
TRANSFORMER_SIZE: 128,
# Number of transformer layers
NUM_TRANSFORMER_LAYERS: 1,
# Number of attention heads in transformer
NUM_HEADS: 4,
# If 'True' use key relative embeddings in attention
KEY_RELATIVE_ATTENTION: False,
# If 'True' use value relative embeddings in attention
VALUE_RELATIVE_ATTENTION: False,
# Max position for relative embeddings
MAX_RELATIVE_POSITION: None,
# ## Training parameters
# Initial and final batch sizes:
# Batch size will be linearly increased for each epoch.
BATCH_SIZES: [8, 32],
# Strategy used when creating batches.
# Can be either 'sequence' or 'balanced'.
BATCH_STRATEGY: BALANCED,
# Number of epochs to train
EPOCHS: 1,
# Set random seed to any 'int' to get reproducible results
RANDOM_SEED: None,
# ## Parameters for embeddings
# Dimension size of embedding vectors
EMBEDDING_DIMENSION: 20,
# The number of incorrect labels. The algorithm will minimize
# their similarity to the user input during training.
NUM_NEG: 20,
# Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'.
SIMILARITY_TYPE: AUTO,
# The type of the loss function, either 'softmax' or 'margin'.
LOSS_TYPE: SOFTMAX,
# Number of top actions to normalize scores for loss type 'softmax'.
# Set to 0 to turn off normalization.
RANKING_LENGTH: 10,
# Indicates how similar the algorithm should try to make embedding vectors
# for correct labels.
# Should be 0.0 < ... < 1.0 for 'cosine' similarity type.
MAX_POS_SIM: 0.8,
# Maximum negative similarity for incorrect labels.
# Should be -1.0 < ... < 1.0 for 'cosine' similarity type.
MAX_NEG_SIM: -0.2,
# If 'True' the algorithm only minimizes maximum similarity over
# incorrect intent labels, used only if 'loss_type' is set to 'margin'.
USE_MAX_NEG_SIM: True,
# If 'True' scale loss inverse proportionally to the confidence
# of the correct prediction
SCALE_LOSS: True,
# ## Regularization parameters
# The scale of regularization
REGULARIZATION_CONSTANT: 0.001,
# The scale of how important is to minimize the maximum similarity
# between embeddings of different labels,
# used only if 'loss_type' is set to 'margin'.
NEGATIVE_MARGIN_SCALE: 0.8,
# Dropout rate for embedding layers of dialogue features.
DROP_RATE_DIALOGUE: 0.1,
# Dropout rate for embedding layers of label, e.g. action, features.
DROP_RATE_LABEL: 0.0,
# Dropout rate for attention.
DROP_RATE_ATTENTION: 0,
# Sparsity of the weights in dense layers
WEIGHT_SPARSITY: 0.8,
# ## Evaluation parameters
# How often to calculate validation accuracy.
# Small values may hurt performance, e.g. model accuracy.
EVAL_NUM_EPOCHS: 20,
# How many examples to use for hold out validation set
# Large values may hurt performance, e.g. model accuracy.
EVAL_NUM_EXAMPLES: 0,
# If you want to use tensorboard to visualize training and validation metrics,
# set this option to a valid output directory.
TENSORBOARD_LOG_DIR: None,
# Define when training metrics for tensorboard should be logged.
# Either after every epoch or for every training step.
# Valid values: 'epoch' and 'minibatch'
TENSORBOARD_LOG_LEVEL: "epoch",
}
@staticmethod
def _standard_featurizer(max_history: Optional[int] = None) -> TrackerFeaturizer:
if max_history is None:
return FullDialogueTrackerFeaturizer(LabelTokenizerSingleStateFeaturizer())
else:
return MaxHistoryTrackerFeaturizer(
LabelTokenizerSingleStateFeaturizer(), max_history=max_history
)
def __init__(
self,
featurizer: Optional[TrackerFeaturizer] = None,
priority: int = DEFAULT_POLICY_PRIORITY,
max_history: Optional[int] = None,
model: Optional[RasaModel] = None,
**kwargs: Any,
) -> None:
"""Declare instance variables with default values."""
if not featurizer:
featurizer = self._standard_featurizer(max_history)
super().__init__(featurizer, priority)
self._load_params(**kwargs)
self.model = model
self._label_data: Optional[RasaModelData] = None
self.data_example: Optional[Dict[Text, List[np.ndarray]]] = None
def _load_params(self, **kwargs: Dict[Text, Any]) -> None:
self.config = copy.deepcopy(self.defaults)
self.config.update(kwargs)
self.config = train_utils.check_deprecated_options(self.config)
self.config = train_utils.update_similarity_type(self.config)
self.config = train_utils.update_evaluation_parameters(self.config)
# data helpers
# noinspection PyPep8Naming
@staticmethod
def _label_ids_for_Y(data_Y: np.ndarray) -> np.ndarray:
"""Prepare Y data for training: extract label_ids.
label_ids are indices of labels, while `data_Y` contains one-hot encodings.
"""
return data_Y.argmax(axis=-1)
# noinspection PyPep8Naming
def _label_features_for_Y(self, label_ids: np.ndarray) -> np.ndarray:
"""Prepare Y data for training: features for label_ids."""
all_label_features = self._label_data.get(LABEL_FEATURES)[0]
is_full_dialogue_featurizer_used = len(label_ids.shape) == 2
if is_full_dialogue_featurizer_used:
return np.stack(
[
np.stack(
[all_label_features[label_idx] for label_idx in seq_label_ids]
)
for seq_label_ids in label_ids
]
)
# max history featurizer is used
return np.stack([all_label_features[label_idx] for label_idx in label_ids])
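# Shape sketch (illustrative): with a max-history featurizer, label_ids is
# 1-D, e.g. [2, 0], and the result stacks the matching rows of
# all_label_features into (batch_size, feature_dim); with the full-dialogue
# featurizer, label_ids is (batch_size, dialogue_len) and the result gains an
# extra dialogue_len axis.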
# noinspection PyPep8Naming
def _create_model_data(
self, data_X: np.ndarray, data_Y: Optional[np.ndarray] = None
) -> RasaModelData:
"""Combine all model related data into RasaModelData."""
label_ids = np.array([])
Y = np.array([])
if data_Y is not None:
label_ids = self._label_ids_for_Y(data_Y)
Y = self._label_features_for_Y(label_ids)
# explicitly add last dimension to label_ids
# to track correctly dynamic sequences
label_ids = np.expand_dims(label_ids, -1)
model_data = RasaModelData(label_key=LABEL_IDS)
model_data.add_features(DIALOGUE_FEATURES, [data_X])
model_data.add_features(LABEL_FEATURES, [Y])
model_data.add_features(LABEL_IDS, [label_ids])
return model_data
def _create_label_data(self, domain: Domain) -> RasaModelData:
# encode all label_ids with policies' featurizer
state_featurizer = self.featurizer.state_featurizer
all_labels = state_featurizer.create_encoded_all_actions(domain)
all_labels = all_labels.astype(np.float32)
label_data = RasaModelData()
label_data.add_features(LABEL_FEATURES, [all_labels])
return label_data
def train(
self,
training_trackers: List[DialogueStateTracker],
domain: Domain,
**kwargs: Any,
) -> None:
"""Train the policy on given training trackers."""
# dealing with training data
training_data = self.featurize_for_training(training_trackers, domain, **kwargs)
self._label_data = self._create_label_data(domain)
# extract actual training data to feed to model
model_data = self._create_model_data(training_data.X, training_data.y)
if model_data.is_empty():
logger.error(
f"Can not train '{self.__class__.__name__}'. No data was provided. "
f"Skipping training of the policy."
)
return
# keep one example for persisting and loading
self.data_example = model_data.first_data_example()
self.model = TED(
model_data.get_signature(),
self.config,
isinstance(self.featurizer, MaxHistoryTrackerFeaturizer),
self._label_data,
)
self.model.fit(
model_data,
self.config[EPOCHS],
self.config[BATCH_SIZES],
self.config[EVAL_NUM_EXAMPLES],
self.config[EVAL_NUM_EPOCHS],
batch_strategy=self.config[BATCH_STRATEGY],
)
def predict_action_probabilities(
self, tracker: DialogueStateTracker, domain: Domain
) -> List[float]:
"""Predict the next action the bot should take.
Return the list of probabilities for the next actions.
"""
if self.model is None:
return self._default_predictions(domain)
# create model data from tracker
data_X = self.featurizer.create_X([tracker], domain)
model_data = self._create_model_data(data_X)
output = self.model.predict(model_data)
confidence = output["action_scores"].numpy()
# remove batch dimension and take the last prediction in the sequence
confidence = confidence[0, -1, :]
if self.config[LOSS_TYPE] == SOFTMAX and self.config[RANKING_LENGTH] > 0:
confidence = train_utils.normalize(confidence, self.config[RANKING_LENGTH])
return confidence.tolist()
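# Rough sketch of the effect (assuming train_utils.normalize keeps only the
# top `ranking_length` scores and rescales them to sum to one): confidences
# [0.6, 0.3, 0.05, 0.05] with RANKING_LENGTH=2 become [0.667, 0.333, 0.0, 0.0].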
def persist(self, path: Text) -> None:
"""Persists the policy to a storage."""
if self.model is None:
logger.debug(
"Method `persist(...)` was called "
"without a trained model present. "
"Nothing to persist then!"
)
return
model_path = Path(path)
tf_model_file = model_path / f"{SAVE_MODEL_FILE_NAME}.tf_model"
io_utils.create_directory_for_file(tf_model_file)
self.featurizer.persist(path)
self.model.save(str(tf_model_file))
io_utils.json_pickle(
model_path / f"{SAVE_MODEL_FILE_NAME}.priority.pkl", self.priority
)
io_utils.pickle_dump(
model_path / f"{SAVE_MODEL_FILE_NAME}.meta.pkl", self.config
)
io_utils.json_pickle(
model_path / f"{SAVE_MODEL_FILE_NAME}.data_example.pkl", self.data_example
)
io_utils.json_pickle(
model_path / f"{SAVE_MODEL_FILE_NAME}.label_data.pkl", self._label_data
)
@classmethod
def load(cls, path: Text) -> "TEDPolicy":
"""Loads a policy from the | |
1, 0, 1, 1)
self.comboBox_23 = QComboBox(self.frame_24)
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.addItem("")
self.comboBox_23.setObjectName(u"comboBox_23")
self.comboBox_23.setStyleSheet(u"color: rgb(255, 255, 255);\n"
"font: 14pt \"3ds\";")
self.gridLayout_33.addWidget(self.comboBox_23, 1, 1, 1, 1)
self.verticalLayout_16.addWidget(self.frame_24)
self.scrollArea_7.setWidget(self.scrollAreaWidgetContents_7)
self.verticalLayout_17.addWidget(self.scrollArea_7)
self.gridLayout_10.addWidget(self.frame_21, 6, 1, 1, 1)
self.lineEdit_31 = QLineEdit(self.scrollAreaWidgetContents_3)
self.lineEdit_31.setObjectName(u"lineEdit_31")
self.lineEdit_31.setMinimumSize(QSize(500, 50))
self.lineEdit_31.setFont(font13)
self.lineEdit_31.setAlignment(Qt.AlignCenter)
self.gridLayout_10.addWidget(self.lineEdit_31, 5, 1, 1, 1)
self.lineEdit_30 = QLineEdit(self.scrollAreaWidgetContents_3)
self.lineEdit_30.setObjectName(u"lineEdit_30")
self.lineEdit_30.setMinimumSize(QSize(500, 50))
self.lineEdit_30.setFont(font13)
self.lineEdit_30.setAlignment(Qt.AlignCenter)
self.gridLayout_10.addWidget(self.lineEdit_30, 9, 0, 1, 2)
self.lineEdit_24 = QLineEdit(self.scrollAreaWidgetContents_3)
self.lineEdit_24.setObjectName(u"lineEdit_24")
self.lineEdit_24.setMinimumSize(QSize(500, 50))
self.lineEdit_24.setFont(font13)
self.lineEdit_24.setAlignment(Qt.AlignCenter)
self.gridLayout_10.addWidget(self.lineEdit_24, 1, 1, 1, 1)
self.lineEdit_21 = QTextEdit(self.scrollAreaWidgetContents_3)
self.lineEdit_21.setObjectName(u"lineEdit_21")
self.lineEdit_21.setMinimumSize(QSize(0, 300))
self.lineEdit_21.setFont(font13)
self.lineEdit_21.setStyleSheet(u"QTextEdit {\n"
" background-color: transparent;\n"
" border: 2px solid rgb(30, 30, 50);\n"
" padding: 10px;\n"
"}\n"
"QTextEdit:hover {\n"
" border: 2px solid rgb(64, 71, 88);\n"
"}\n"
"QTextEdit:focus {\n"
" border: 2px solid rgb(91, 101, 124);\n"
"}")
self.lineEdit_21.setSizeAdjustPolicy(QAbstractScrollArea.AdjustIgnored)
self.gridLayout_10.addWidget(self.lineEdit_21, 11, 0, 1, 2)
self.lineEdit_28 = QLineEdit(self.scrollAreaWidgetContents_3)
self.lineEdit_28.setObjectName(u"lineEdit_28")
self.lineEdit_28.setMinimumSize(QSize(500, 50))
self.lineEdit_28.setFont(font13)
self.lineEdit_28.setAlignment(Qt.AlignCenter)
self.gridLayout_10.addWidget(self.lineEdit_28, 2, 0, 1, 2)
self.frame_4 = QFrame(self.scrollAreaWidgetContents_3)
self.frame_4.setObjectName(u"frame_4")
self.frame_4.setMinimumSize(QSize(0, 100))
self.frame_4.setFrameShape(QFrame.StyledPanel)
self.frame_4.setFrameShadow(QFrame.Raised)
self.gridLayout_12 = QGridLayout(self.frame_4)
self.gridLayout_12.setObjectName(u"gridLayout_12")
self.btn_add = QPushButton(self.frame_4)
self.btn_add.setObjectName(u"btn_add")
self.btn_add.setEnabled(True)
self.btn_add.setMinimumSize(QSize(0, 50))
font18 = QFont()
font18.setFamily(u"Segoe UI")
font18.setPointSize(12)
font18.setBold(False)
font18.setItalic(False)
font18.setWeight(50)
self.btn_add.setFont(font18)
self.btn_add.setCursor(QCursor(Qt.ArrowCursor))
self.btn_add.setMouseTracking(False)
self.btn_add.setLayoutDirection(Qt.LeftToRight)
self.btn_add.setAutoFillBackground(False)
self.btn_add.setStyleSheet(u"QPushButton {\n"
" background-color: rgb(53, 50, 67);\n"
" color: rgb(208, 208, 208);\n"
" font: 12pt \"Segoe UI\";\n"
"\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" \n"
" \n"
" background-color: rgb(55, 52, 70);\n"
"\n"
"}")
self.btn_add.setCheckable(False)
self.btn_add.setAutoRepeat(False)
self.btn_add.setAutoExclusive(False)
self.btn_add.setAutoDefault(True)
self.btn_add.setFlat(False)
self.gridLayout_12.addWidget(self.btn_add, 0, 0, 1, 1)
self.gridLayout_10.addWidget(self.frame_4, 12, 0, 1, 2)
self.frame_16 = QFrame(self.scrollAreaWidgetContents_3)
self.frame_16.setObjectName(u"frame_16")
self.frame_16.setMinimumSize(QSize(0, 600))
self.frame_16.setFrameShape(QFrame.StyledPanel)
self.frame_16.setFrameShadow(QFrame.Raised)
self.verticalLayout_18 = QVBoxLayout(self.frame_16)
self.verticalLayout_18.setSpacing(9)
self.verticalLayout_18.setObjectName(u"verticalLayout_18")
self.scrollArea_6 = QScrollArea(self.frame_16)
self.scrollArea_6.setObjectName(u"scrollArea_6")
self.scrollArea_6.setWidgetResizable(True)
self.scrollAreaWidgetContents_6 = QWidget()
self.scrollAreaWidgetContents_6.setObjectName(u"scrollAreaWidgetContents_6")
self.scrollAreaWidgetContents_6.setGeometry(QRect(0, -540, 791, 1203))
self.gridLayout_22 = QGridLayout(self.scrollAreaWidgetContents_6)
self.gridLayout_22.setObjectName(u"gridLayout_22")
self.gridLayout_22.setVerticalSpacing(100)
self.frame_17 = QFrame(self.scrollAreaWidgetContents_6)
self.frame_17.setObjectName(u"frame_17")
self.frame_17.setFrameShape(QFrame.StyledPanel)
self.frame_17.setFrameShadow(QFrame.Raised)
self.gridLayout_23 = QGridLayout(self.frame_17)
self.gridLayout_23.setObjectName(u"gridLayout_23")
self.label_17 = QLabel(self.frame_17)
self.label_17.setObjectName(u"label_17")
self.label_17.setFont(font13)
self.gridLayout_23.addWidget(self.label_17, 0, 0, 1, 1)
self.label_19 = QLabel(self.frame_17)
self.label_19.setObjectName(u"label_19")
font19 = QFont()
font19.setFamily(u"3ds")
font19.setPointSize(12)
self.label_19.setFont(font19)
self.label_19.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
self.gridLayout_23.addWidget(self.label_19, 0, 1, 1, 1)
self.comboBox_3 = QComboBox(self.frame_17)
self.comboBox_3.setObjectName(u"comboBox_3")
self.comboBox_3.setMinimumSize(QSize(180, 0))
self.comboBox_3.setStyleSheet(u"color: rgb(255, 255, 255);\n"
"font: 14pt \"3ds\";")
self.gridLayout_23.addWidget(self.comboBox_3, 0, 2, 1, 1)
self.label_20 = QLabel(self.frame_17)
self.label_20.setObjectName(u"label_20")
self.label_20.setMaximumSize(QSize(16777215, 50))
self.label_20.setFont(font13)
self.label_20.setAlignment(Qt.AlignCenter)
self.gridLayout_23.addWidget(self.label_20, 1, 0, 1, 1)
self.textEdit_3 = QTextEdit(self.frame_17)
self.textEdit_3.setObjectName(u"textEdit_3")
self.textEdit_3.setFont(font17)
self.textEdit_3.setStyleSheet(u"QTextEdit {\n"
" background-color: transparent;\n"
" border: 2px solid rgb(30, 30, 50);\n"
" padding: 10px;\n"
"}\n"
"QTextEdit:hover {\n"
" border: 2px solid rgb(64, 71, 88);\n"
"}\n"
"QTextEdit:focus {\n"
" border: 2px solid rgb(91, 101, 124);\n"
"}")
self.gridLayout_23.addWidget(self.textEdit_3, 1, 1, 1, 2)
self.gridLayout_22.addWidget(self.frame_17, 0, 0, 1, 1)
self.frame_26 = QFrame(self.scrollAreaWidgetContents_6)
self.frame_26.setObjectName(u"frame_26")
self.frame_26.setFrameShape(QFrame.StyledPanel)
self.frame_26.setFrameShadow(QFrame.Raised)
self.gridLayout_24 = QGridLayout(self.frame_26)
self.gridLayout_24.setObjectName(u"gridLayout_24")
self.label_21 = QLabel(self.frame_26)
self.label_21.setObjectName(u"label_21")
self.label_21.setFont(font13)
self.gridLayout_24.addWidget(self.label_21, 0, 0, 1, 1)
self.label_28 = QLabel(self.frame_26)
self.label_28.setObjectName(u"label_28")
self.label_28.setFont(font19)
self.label_28.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
self.gridLayout_24.addWidget(self.label_28, 0, 1, 1, 1)
self.comboBox_5 = QComboBox(self.frame_26)
self.comboBox_5.setObjectName(u"comboBox_5")
self.comboBox_5.setMinimumSize(QSize(180, 0))
self.comboBox_5.setStyleSheet(u"color: rgb(255, 255, 255);\n"
"font: 14pt \"3ds\";")
self.gridLayout_24.addWidget(self.comboBox_5, 0, 2, 1, 1)
self.label_31 = QLabel(self.frame_26)
self.label_31.setObjectName(u"label_31")
self.label_31.setMaximumSize(QSize(16777215, 50))
self.label_31.setFont(font13)
self.label_31.setAlignment(Qt.AlignCenter)
self.gridLayout_24.addWidget(self.label_31, 1, 0, 1, 1)
self.textEdit_2 = QTextEdit(self.frame_26)
self.textEdit_2.setObjectName(u"textEdit_2")
self.textEdit_2.setFont(font17)
self.textEdit_2.setStyleSheet(u"QTextEdit {\n"
" background-color: transparent;\n"
" border: 2px solid rgb(30, 30, 50);\n"
" padding: 10px;\n"
"}\n"
"QTextEdit:hover {\n"
" border: 2px solid rgb(64, 71, 88);\n"
"}\n"
"QTextEdit:focus {\n"
" border: 2px solid rgb(91, 101, 124);\n"
"}")
self.gridLayout_24.addWidget(self.textEdit_2, 1, 1, 1, 2)
self.gridLayout_22.addWidget(self.frame_26, 1, 0, 1, 1)
self.frame_29 = QFrame(self.scrollAreaWidgetContents_6)
self.frame_29.setObjectName(u"frame_29")
self.frame_29.setFrameShape(QFrame.StyledPanel)
self.frame_29.setFrameShadow(QFrame.Raised)
self.gridLayout_31 = QGridLayout(self.frame_29)
self.gridLayout_31.setObjectName(u"gridLayout_31")
self.label_49 = QLabel(self.frame_29)
self.label_49.setObjectName(u"label_49")
self.label_49.setFont(font13)
self.gridLayout_31.addWidget(self.label_49, 0, 0, 1, 1)
self.label_50 = QLabel(self.frame_29)
self.label_50.setObjectName(u"label_50")
self.label_50.setFont(font19)
self.label_50.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
self.gridLayout_31.addWidget(self.label_50, 0, 1, 1, 1)
self.comboBox_8 = QComboBox(self.frame_29)
self.comboBox_8.setObjectName(u"comboBox_8")
self.comboBox_8.setMinimumSize(QSize(180, 0))
self.comboBox_8.setStyleSheet(u"color: rgb(255, 255, 255);\n"
"font: 14pt \"3ds\";")
self.gridLayout_31.addWidget(self.comboBox_8, 0, 2, 1, 1)
self.label_51 = QLabel(self.frame_29)
self.label_51.setObjectName(u"label_51")
self.label_51.setMaximumSize(QSize(16777215, 50))
self.label_51.setFont(font13)
self.label_51.setAlignment(Qt.AlignCenter)
self.gridLayout_31.addWidget(self.label_51, 1, 0, 1, 1)
self.textEdit_5 = QTextEdit(self.frame_29)
self.textEdit_5.setObjectName(u"textEdit_5")
self.textEdit_5.setFont(font17)
self.textEdit_5.setStyleSheet(u"QTextEdit {\n"
" background-color: transparent;\n"
" border: 2px solid rgb(30, 30, 50);\n"
" padding: 10px;\n"
"}\n"
"QTextEdit:hover {\n"
" border: 2px solid rgb(64, 71, 88);\n"
"}\n"
"QTextEdit:focus {\n"
" border: 2px solid rgb(91, 101, 124);\n"
"}")
self.gridLayout_31.addWidget(self.textEdit_5, 1, 1, 1, 2)
self.gridLayout_22.addWidget(self.frame_29, 4, 0, 1, 1)
self.frame_28 = QFrame(self.scrollAreaWidgetContents_6)
self.frame_28.setObjectName(u"frame_28")
self.frame_28.setFrameShape(QFrame.StyledPanel)
self.frame_28.setFrameShadow(QFrame.Raised)
self.gridLayout_30 = QGridLayout(self.frame_28)
self.gridLayout_30.setObjectName(u"gridLayout_30")
self.label_35 = QLabel(self.frame_28)
self.label_35.setObjectName(u"label_35")
self.label_35.setFont(font13)
self.gridLayout_30.addWidget(self.label_35, 0, 0, 1, 1)
self.label_36 = QLabel(self.frame_28)
self.label_36.setObjectName(u"label_36")
self.label_36.setFont(font19)
self.label_36.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
self.gridLayout_30.addWidget(self.label_36, 0, 1, 1, 1)
self.comboBox_7 = QComboBox(self.frame_28)
self.comboBox_7.setObjectName(u"comboBox_7")
self.comboBox_7.setMinimumSize(QSize(180, 0))
self.comboBox_7.setStyleSheet(u"color: rgb(255, 255, 255);\n"
"font: 14pt \"3ds\";")
self.gridLayout_30.addWidget(self.comboBox_7, 0, 2, 1, 1)
self.label_48 = QLabel(self.frame_28)
self.label_48.setObjectName(u"label_48")
self.label_48.setMaximumSize(QSize(16777215, 50))
self.label_48.setFont(font13)
self.label_48.setAlignment(Qt.AlignCenter)
self.gridLayout_30.addWidget(self.label_48, 1, 0, 1, 1)
self.textEdit_4 = QTextEdit(self.frame_28)
self.textEdit_4.setObjectName(u"textEdit_4")
self.textEdit_4.setFont(font17)
self.textEdit_4.setStyleSheet(u"QTextEdit {\n"
" background-color: transparent;\n"
" border: 2px solid rgb(30, 30, 50);\n"
" padding: 10px;\n"
"}\n"
"QTextEdit:hover {\n"
" border: 2px solid rgb(64, 71, 88);\n"
"}\n"
"QTextEdit:focus {\n"
" border: 2px solid rgb(91, 101, 124);\n"
"}")
self.gridLayout_30.addWidget(self.textEdit_4, 1, 1, 1, 2)
self.gridLayout_22.addWidget(self.frame_28, 3, 0, 1, 1)
self.frame_27 = QFrame(self.scrollAreaWidgetContents_6)
self.frame_27.setObjectName(u"frame_27")
self.frame_27.setFrameShape(QFrame.StyledPanel)
self.frame_27.setFrameShadow(QFrame.Raised)
self.gridLayout_25 = QGridLayout(self.frame_27)
self.gridLayout_25.setObjectName(u"gridLayout_25")
self.label_32 = QLabel(self.frame_27)
self.label_32.setObjectName(u"label_32")
self.label_32.setFont(font13)
self.gridLayout_25.addWidget(self.label_32, 0, 0, 1, 1)
self.label_33 = QLabel(self.frame_27)
self.label_33.setObjectName(u"label_33")
self.label_33.setFont(font19)
self.label_33.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
self.gridLayout_25.addWidget(self.label_33, 0, 1, 1, 1)
self.comboBox_6 = QComboBox(self.frame_27)
self.comboBox_6.setObjectName(u"comboBox_6")
self.comboBox_6.setMinimumSize(QSize(180, 0))
self.comboBox_6.setStyleSheet(u"color: rgb(255, 255, 255);\n"
"font: 14pt \"3ds\";")
self.gridLayout_25.addWidget(self.comboBox_6, 0, 2, 1, 1)
self.label_34 = QLabel(self.frame_27)
self.label_34.setObjectName(u"label_34")
self.label_34.setMaximumSize(QSize(16777215, 50))
self.label_34.setFont(font13)
self.label_34.setAlignment(Qt.AlignCenter)
self.gridLayout_25.addWidget(self.label_34, 1, 0, 1, 1)
self.textEdit = QTextEdit(self.frame_27)
self.textEdit.setObjectName(u"textEdit")
self.textEdit.setFont(font17)
self.textEdit.setStyleSheet(u"QTextEdit {\n"
" background-color: transparent;\n"
" border: 2px solid rgb(30, 30, 50);\n"
" padding: 10px;\n"
"}\n"
"QTextEdit:hover {\n"
" border: 2px solid rgb(64, 71, 88);\n"
"}\n"
"QTextEdit:focus {\n"
" border: 2px solid rgb(91, 101, 124);\n"
"}")
self.gridLayout_25.addWidget(self.textEdit, 1, 1, 1, 2)
self.gridLayout_22.addWidget(self.frame_27, 2, 0, 1, 1)
self.scrollArea_6.setWidget(self.scrollAreaWidgetContents_6)
self.verticalLayout_18.addWidget(self.scrollArea_6)
self.gridLayout_10.addWidget(self.frame_16, 10, 0, 1, 2)
self.scrollArea_4.setWidget(self.scrollAreaWidgetContents_3)
self.verticalLayout_9.addWidget(self.scrollArea_4)
self.stackedWidget.addWidget(self.page_add_client)
self.page_suppr_client = QWidget()
self.page_suppr_client.setObjectName(u"page_suppr_client")
self.gridLayout_8 = QGridLayout(self.page_suppr_client)
self.gridLayout_8.setObjectName(u"gridLayout_8")
self.frame_7 = QFrame(self.page_suppr_client)
self.frame_7.setObjectName(u"frame_7")
self.frame_7.setFrameShape(QFrame.StyledPanel)
self.frame_7.setFrameShadow(QFrame.Raised)
self.gridLayout_16 = QGridLayout(self.frame_7)
self.gridLayout_16.setObjectName(u"gridLayout_16")
self.label_26 = QLabel(self.frame_7)
self.label_26.setObjectName(u"label_26")
font20 = QFont()
font20.setFamily(u"Meloriac")
font20.setPointSize(40)
self.label_26.setFont(font20)
self.label_26.setLayoutDirection(Qt.LeftToRight)
self.label_26.setStyleSheet(u"")
self.label_26.setAlignment(Qt.AlignCenter)
self.gridLayout_16.addWidget(self.label_26, 0, 0, 1, 1)
self.gridLayout_8.addWidget(self.frame_7, 0, 0, 1, 1)
self.frame_8 = QFrame(self.page_suppr_client)
self.frame_8.setObjectName(u"frame_8")
self.frame_8.setFrameShape(QFrame.StyledPanel)
self.frame_8.setFrameShadow(QFrame.Raised)
self.gridLayout_17 = QGridLayout(self.frame_8)
self.gridLayout_17.setObjectName(u"gridLayout_17")
self.label_27 = QLabel(self.frame_8)
self.label_27.setObjectName(u"label_27")
self.label_27.setEnabled(False)
self.label_27.setFont(font15)
self.label_27.setLayoutDirection(Qt.LeftToRight)
self.label_27.setStyleSheet(u"")
self.label_27.setAlignment(Qt.AlignCenter)
self.gridLayout_17.addWidget(self.label_27, 0, 0, 1, 1)
self.gridLayout_8.addWidget(self.frame_8, 2, 0, 1, 1)
self.frame_10 = QFrame(self.page_suppr_client)
self.frame_10.setObjectName(u"frame_10")
self.frame_10.setFrameShape(QFrame.StyledPanel)
self.frame_10.setFrameShadow(QFrame.Raised)
self.gridLayout_18 = QGridLayout(self.frame_10)
self.gridLayout_18.setObjectName(u"gridLayout_18")
self.gridLayout_18.setContentsMargins(300, -1, 300, -1)
self.lineEdit_suppr_client = QLineEdit(self.frame_10)
self.lineEdit_suppr_client.setObjectName(u"lineEdit_suppr_client")
self.lineEdit_suppr_client.setMinimumSize(QSize(0, 50))
self.lineEdit_suppr_client.setStyleSheet(u"border-color: rgb(255, 255, 255);")
self.lineEdit_suppr_client.setAlignment(Qt.AlignCenter)
self.gridLayout_18.addWidget(self.lineEdit_suppr_client, 0, 0, 1, 1)
self.gridLayout_8.addWidget(self.frame_10, 1, 0, 1, 1)
self.stackedWidget.addWidget(self.page_suppr_client)
self.page_modif_client = QWidget()
self.page_modif_client.setObjectName(u"page_modif_client")
self.verticalLayout_13 = QVBoxLayout(self.page_modif_client)
self.verticalLayout_13.setObjectName(u"verticalLayout_13")
self.label_11 = QLabel(self.page_modif_client)
self.label_11.setObjectName(u"label_11")
self.label_11.setFont(font16)
self.label_11.setLayoutDirection(Qt.LeftToRight)
self.label_11.setStyleSheet(u"")
self.label_11.setScaledContents(False)
self.label_11.setAlignment(Qt.AlignCenter)
self.verticalLayout_13.addWidget(self.label_11)
self.frame_12 = QFrame(self.page_modif_client)
self.frame_12.setObjectName(u"frame_12")
self.frame_12.setMinimumSize(QSize(0, 50))
self.frame_12.setStyleSheet(u"")
self.frame_12.setFrameShape(QFrame.StyledPanel)
self.frame_12.setFrameShadow(QFrame.Raised)
self.gridLayout_19 = QGridLayout(self.frame_12)
self.gridLayout_19.setObjectName(u"gridLayout_19")
self.gridLayout_19.setContentsMargins(300, 50, 300, -1)
self.lineEdit_modify_client = QLineEdit(self.frame_12)
self.lineEdit_modify_client.setObjectName(u"lineEdit_modify_client")
self.lineEdit_modify_client.setMinimumSize(QSize(0, 50))
self.lineEdit_modify_client.setStyleSheet(u"border-color: rgb(255, 255, 255);")
self.lineEdit_modify_client.setAlignment(Qt.AlignCenter)
self.gridLayout_19.addWidget(self.lineEdit_modify_client, 0, 0, 1, 1)
self.verticalLayout_13.addWidget(self.frame_12)
self.scrollArea_5 = QScrollArea(self.page_modif_client)
self.scrollArea_5.setObjectName(u"scrollArea_5")
self.scrollArea_5.setWidgetResizable(True)
self.scrollAreaWidgetContents_5 = QWidget()
self.scrollAreaWidgetContents_5.setObjectName(u"scrollAreaWidgetContents_5")
self.scrollAreaWidgetContents_5.setGeometry(QRect(0, -2160, 1031, 3450))
self.gridLayout_7 = QGridLayout(self.scrollAreaWidgetContents_5)
self.gridLayout_7.setObjectName(u"gridLayout_7")
self.gridLayout_7.setVerticalSpacing(100)
self.gridLayout_7.setContentsMargins(50, 100, 50, 100)
self.lineEdit_34 = QTextEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_34.setObjectName(u"lineEdit_34")
self.lineEdit_34.setMinimumSize(QSize(0, 300))
self.lineEdit_34.setFont(font13)
self.lineEdit_34.setStyleSheet(u"QTextEdit {\n"
" background-color: transparent;\n"
" border: 2px solid rgb(30, 30, 50);\n"
" padding: 10px;\n"
"}\n"
"QTextEdit:hover {\n"
" border: 2px solid rgb(64, 71, 88);\n"
"}\n"
"QTextEdit:focus {\n"
" border: 2px solid rgb(91, 101, 124);\n"
"}")
self.gridLayout_7.addWidget(self.lineEdit_34, 11, 0, 1, 1)
self.lineEdit_42 = QLineEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_42.setObjectName(u"lineEdit_42")
self.lineEdit_42.setMinimumSize(QSize(500, 50))
self.lineEdit_42.setFont(font13)
self.lineEdit_42.setAlignment(Qt.AlignCenter)
self.gridLayout_7.addWidget(self.lineEdit_42, 2, 0, 1, 1)
self.lineEdit_38 = QLineEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_38.setObjectName(u"lineEdit_38")
self.lineEdit_38.setMinimumSize(QSize(500, 50))
self.lineEdit_38.setFont(font13)
self.lineEdit_38.setAlignment(Qt.AlignCenter)
self.gridLayout_7.addWidget(self.lineEdit_38, 8, 0, 1, 1)
self.lineEdit_37 = QLineEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_37.setObjectName(u"lineEdit_37")
self.lineEdit_37.setMinimumSize(QSize(500, 50))
self.lineEdit_37.setFont(font13)
self.lineEdit_37.setAlignment(Qt.AlignCenter)
self.gridLayout_7.addWidget(self.lineEdit_37, 9, 0, 1, 1)
self.lineEdit_32 = QLineEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_32.setObjectName(u"lineEdit_32")
self.lineEdit_32.setMinimumSize(QSize(500, 50))
self.lineEdit_32.setFont(font13)
self.lineEdit_32.setAlignment(Qt.AlignCenter)
self.gridLayout_7.addWidget(self.lineEdit_32, 3, 0, 1, 1)
self.lineEdit_35 = QLineEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_35.setObjectName(u"lineEdit_35")
self.lineEdit_35.setMinimumSize(QSize(500, 50))
self.lineEdit_35.setFont(font15)
self.lineEdit_35.setAlignment(Qt.AlignCenter)
self.gridLayout_7.addWidget(self.lineEdit_35, 0, 0, 1, 1)
self.lineEdit_36 = QLineEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_36.setObjectName(u"lineEdit_36")
self.lineEdit_36.setMinimumSize(QSize(500, 50))
self.lineEdit_36.setFont(font13)
self.lineEdit_36.setAlignment(Qt.AlignCenter)
self.gridLayout_7.addWidget(self.lineEdit_36, 5, 0, 1, 1)
self.frame_5 = QFrame(self.scrollAreaWidgetContents_5)
self.frame_5.setObjectName(u"frame_5")
self.frame_5.setMinimumSize(QSize(0, 100))
self.frame_5.setFrameShape(QFrame.StyledPanel)
self.frame_5.setFrameShadow(QFrame.Raised)
self.gridLayout_13 = QGridLayout(self.frame_5)
self.gridLayout_13.setObjectName(u"gridLayout_13")
self.btn_modify = QPushButton(self.frame_5)
self.btn_modify.setObjectName(u"btn_modify")
self.btn_modify.setEnabled(True)
self.btn_modify.setMinimumSize(QSize(0, 50))
self.btn_modify.setFont(font18)
self.btn_modify.setCursor(QCursor(Qt.ArrowCursor))
self.btn_modify.setMouseTracking(False)
self.btn_modify.setLayoutDirection(Qt.LeftToRight)
self.btn_modify.setAutoFillBackground(False)
self.btn_modify.setStyleSheet(u"QPushButton {\n"
" background-color: rgb(53, 50, 67);\n"
" color: rgb(208, 208, 208);\n"
" font: 12pt \"Segoe UI\";\n"
"\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" \n"
" \n"
" background-color: rgb(55, 52, 70);\n"
"\n"
"}")
self.btn_modify.setCheckable(False)
self.btn_modify.setAutoRepeat(False)
self.btn_modify.setAutoExclusive(False)
self.btn_modify.setAutoDefault(True)
self.btn_modify.setFlat(False)
self.gridLayout_13.addWidget(self.btn_modify, 0, 0, 1, 1)
self.gridLayout_7.addWidget(self.frame_5, 12, 0, 1, 1)
self.lineEdit_33 = QLineEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_33.setObjectName(u"lineEdit_33")
self.lineEdit_33.setMinimumSize(QSize(500, 50))
self.lineEdit_33.setFont(font13)
self.lineEdit_33.setAlignment(Qt.AlignCenter)
self.gridLayout_7.addWidget(self.lineEdit_33, 7, 0, 1, 1)
self.lineEdit_41 = QLineEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_41.setObjectName(u"lineEdit_41")
self.lineEdit_41.setMinimumSize(QSize(500, 50))
self.lineEdit_41.setFont(font13)
self.lineEdit_41.setAlignment(Qt.AlignCenter)
self.gridLayout_7.addWidget(self.lineEdit_41, 4, 0, 1, 1)
self.lineEdit_39 = QLineEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_39.setObjectName(u"lineEdit_39")
self.lineEdit_39.setMinimumSize(QSize(500, 50))
self.lineEdit_39.setFont(font13)
self.lineEdit_39.setAlignment(Qt.AlignCenter)
self.gridLayout_7.addWidget(self.lineEdit_39, 1, 0, 1, 1)
self.frame_15 = QFrame(self.scrollAreaWidgetContents_5)
self.frame_15.setObjectName(u"frame_15")
self.frame_15.setMinimumSize(QSize(0, 600))
self.frame_15.setFrameShape(QFrame.StyledPanel)
self.frame_15.setFrameShadow(QFrame.Raised)
self.verticalLayout_14 = QVBoxLayout(self.frame_15)
self.verticalLayout_14.setObjectName(u"verticalLayout_14")
self.frame_36 = QFrame(self.frame_15)
self.frame_36.setObjectName(u"frame_36")
self.frame_36.setFrameShape(QFrame.StyledPanel)
self.frame_36.setFrameShadow(QFrame.Raised)
self.gridLayout_43 = QGridLayout(self.frame_36)
self.gridLayout_43.setObjectName(u"gridLayout_43")
self.label_47 = QLabel(self.frame_36)
self.label_47.setObjectName(u"label_47")
self.label_47.setFont(font17)
self.gridLayout_43.addWidget(self.label_47, 0, 0, 1, 1)
self.comboBox_39 = QComboBox(self.frame_36)
self.comboBox_39.addItem("")
self.comboBox_39.addItem("")
self.comboBox_39.addItem("")
self.comboBox_39.addItem("")
self.comboBox_39.addItem("")
self.comboBox_39.addItem("")
self.comboBox_39.setObjectName(u"comboBox_39")
self.comboBox_39.setStyleSheet(u"color: rgb(255, 255, 255);\n"
"font: 14pt \"3ds\";")
self.gridLayout_43.addWidget(self.comboBox_39, 0, 1, 1, 1)
self.verticalLayout_14.addWidget(self.frame_36)
self.scrollArea_9 = QScrollArea(self.frame_15)
self.scrollArea_9.setObjectName(u"scrollArea_9")
self.scrollArea_9.setMinimumSize(QSize(0, 500))
self.scrollArea_9.setWidgetResizable(True)
self.scrollArea_9.setAlignment(Qt.AlignCenter)
self.scrollAreaWidgetContents_9 = QWidget()
self.scrollAreaWidgetContents_9.setObjectName(u"scrollAreaWidgetContents_9")
self.scrollAreaWidgetContents_9.setGeometry(QRect(0, 0, 891, 1109))
self.verticalLayout_20 = QVBoxLayout(self.scrollAreaWidgetContents_9)
self.verticalLayout_20.setSpacing(100)
self.verticalLayout_20.setObjectName(u"verticalLayout_20")
self.verticalLayout_20.setContentsMargins(-1, 50, -1, -1)
self.frame_25 = QFrame(self.scrollAreaWidgetContents_9)
self.frame_25.setObjectName(u"frame_25")
self.frame_25.setMinimumSize(QSize(0, 130))
self.frame_25.setFrameShape(QFrame.StyledPanel)
self.frame_25.setFrameShadow(QFrame.Raised)
self.gridLayout_38 = QGridLayout(self.frame_25)
self.gridLayout_38.setObjectName(u"gridLayout_38")
self.label_37 = QLabel(self.frame_25)
self.label_37.setObjectName(u"label_37")
self.label_37.setFont(font17)
self.gridLayout_38.addWidget(self.label_37, 0, 0, 1, 1)
self.comboBox_4 = QComboBox(self.frame_25)
self.comboBox_4.setObjectName(u"comboBox_4")
self.comboBox_4.setStyleSheet(u"color: rgb(255, 255, 255);\n"
"font: 14pt \"3ds\";")
self.gridLayout_38.addWidget(self.comboBox_4, 0, 1, 1, 1)
self.label_38 = QLabel(self.frame_25)
self.label_38.setObjectName(u"label_38")
self.label_38.setFont(font17)
self.gridLayout_38.addWidget(self.label_38, 1, 0, 1, 1)
self.comboBox_30 = QComboBox(self.frame_25)
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.addItem("")
self.comboBox_30.setObjectName(u"comboBox_30")
self.comboBox_30.setStyleSheet(u"color: rgb(255, 255, 255);\n"
"font: 14pt \"3ds\";")
self.gridLayout_38.addWidget(self.comboBox_30, 1, 1, 1, 1)
self.verticalLayout_20.addWidget(self.frame_25)
self.frame_32
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.utils.html import format_html
from .helpers import facebook_get_json, facebook_post_json, facebook_base_get_json
from .settings import SERVER_URL
class FacebookPage(models.Model):
facebook_id = models.BigIntegerField(primary_key=True)
name = models.CharField(max_length=256)
categories = models.ManyToManyField('OperaCategory')
class Meta:
verbose_name = "Facebook Page"
def __repr__(self):
return "FacebookPage({},{})".format(self.facebook_id, self.name)
def __str__(self):
return str(self.name)
@property
def facebook_url(self):
return "https://facebook.com/{}".format(self.facebook_id)
def facebook_link(self):
return format_html(
'<a href="{}">Facebook Page</a>',
self.facebook_url)
class Question(models.Model):
id = models.IntegerField(primary_key=True)
text = models.CharField(max_length=128)
next_question = models.OneToOneField('Question', null=True)
def __repr__(self):
return "Question({},{})".format(self.id, self.text)
def __str__(self):
return str(self.text)
class AnswerCategoryWeighting(models.Model):
"""Class which manages weighting between Answer and Category."""
answer = models.ForeignKey('Answer')
category = models.ForeignKey('OperaCategory')
weighting = models.FloatField()
class Answer(models.Model):
id = models.AutoField(primary_key=True)
text = models.CharField(max_length=128)
question = models.ForeignKey(Question, on_delete=models.CASCADE)
categories = models.ManyToManyField('OperaCategory', through=AnswerCategoryWeighting)
def __repr__(self):
return "Answer({},{},{})".format(self.id, self.text, repr(self.question))
@property
def str_pos_associations(self):
pos = self.categories.filter(answercategoryweighting__weighting__gt=0)
pos = [x.name for x in pos]
return ", ".join(pos)
@property
def str_neg_associations(self):
neg = self.categories.filter(answercategoryweighting__weighting__lt=0)
neg = [x.name for x in neg]
return ", ".join(neg)
str_neg_associations.fget.short_description = "Negative associations"
str_pos_associations.fget.short_description = "Positive associations"
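# Illustrative sketch (not part of the original app): creating a weighted
# Answer -> OperaCategory association through the AnswerCategoryWeighting
# model defined above. The ids, texts and category name are hypothetical, and
# OperaCategory is assumed to expose a `name` field, as used elsewhere in this
# module.
def _example_weighted_answer():
    question, _ = Question.objects.get_or_create(id=999, text="Do you enjoy tragic endings?")
    answer = Answer.objects.create(text="Yes", question=question)
    tragedy, _ = OperaCategory.objects.get_or_create(name="Tragedy")
    AnswerCategoryWeighting.objects.create(answer=answer, category=tragedy, weighting=0.8)
    return answer.str_pos_associations  # "Tragedy"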
class FinancialCategory(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=128, unique=True)
def __str__(self):
return self.name
class Meta:
verbose_name = "Financial Status"
class OperaGoerCategory(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=128, unique=True)
def __str__(self):
return self.name
class Meta:
verbose_name = "Opera Attendance Status"
class ApproachCultureCategory(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=128, unique=True)
def __str__(self):
return self.name
class Meta:
verbose_name = "Approach to Culture"
class DidYouKnowQuestion(models.Model):
id = models.AutoField(primary_key=True)
text = models.CharField(max_length=256)
approach_culture = models.ManyToManyField(ApproachCultureCategory, blank=True)
opera_goer = models.ManyToManyField(OperaGoerCategory, blank=True)
financial = models.ManyToManyField(FinancialCategory, blank=True)
image_url = models.CharField(max_length=256, default='')
link_url = models.CharField(max_length=256, default='')
class Profile(models.Model):
"""
Profile class. Anyone writing a message will have a profile class created.
Profile.user is an instance of User.
User.profile is an instance of Profile.
Example usage:
user, _ = User.objects.get_or_create(username="foobar")
user.profile.greet()
user.profile.send_message("Hello there!")"""
user = models.OneToOneField(User, on_delete=models.CASCADE)
facebook_psid = models.BigIntegerField(primary_key=True)
first_name = models.CharField(max_length=128)
last_name = models.CharField(max_length=128)
likes = models.ManyToManyField(FacebookPage, verbose_name="Facebook Likes")
recorded_answers = models.ManyToManyField(Question, through='UserAnswer')
financial = models.ForeignKey(FinancialCategory, null=True, verbose_name="Financial sensitivity")
opera_goer = models.ForeignKey(OperaGoerCategory, null=True, verbose_name="Opera Attendance Status")
approach_culture = models.ForeignKey(ApproachCultureCategory, null=True, verbose_name="Approach to Culture")
did_you_know = models.ManyToManyField(DidYouKnowQuestion, blank=True)
class Meta:
verbose_name = "User"
def __repr__(self):
return "Profile({}, {}, {})".format(self.facebook_psid, self.first_name, self.last_name)
def __str__(self):
return "{} {}".format(self.first_name, self.last_name)
@property
def facebook_asid(self):
"""Fetches the user's asid from social_auth."""
return self.user.social_auth.get(provider='facebook').uid
def send_message(self, message_text):
"""Sends a Facebook message to the user."""
response = facebook_post_json('me/messages', {"recipient": {"id": self.facebook_psid},
"message": {"text": message_text}})
if response.ok:
print("Message '{}' successfully sent to user {}".format(message_text, str(self)))
else:
print("Problem sending message '{}' to user {}".format(message_text, str(self)))
def sender_action(self, action_text):
"""Sends a sender action to Facebook."""
assert action_text in ["typing_on", "typing_off", "mark_seen"]
response = facebook_post_json('me/messages', {"recipient": {
"id": self.facebook_psid},
"sender_action": action_text})
if response.ok:
print("Sender action {} successfully sent to user {}".format(action_text, str(self)))
def get_info(self):
"""Fetches the user's basic information from Facebook"""
response = facebook_get_json(self.facebook_psid)
self.first_name = response['first_name']
self.last_name = response['last_name']
self.save()
def greet(self):
self.get_info()
self.send_message("Hello, {}!".format(self.first_name))
self.send_message("We'd like to help you find an opera you might like, based on your interests.")
def request_oauth(self):
"""Sends a message to the user requesting that they provide OAuth authentication.
This enables us to access their Facebook likes.
"""
# TODO Domain whitelisting doesn't have to be done on every occasion
facebook_post_json('me/messenger_profile', {"whitelisted_domains": SERVER_URL})
self.send_buttons(
"To help us match operas to you, we'd like to look at what you've liked on Facebook. Is that OK? This might take a second. π
",
[
{
"type": "web_url",
"url": "https://" + SERVER_URL + "/oauth/login/facebook/",
"title": "Sure! π"
},
{
"type": "postback",
"title": "No thanks. π",
"payload": "oauth_refused"
}
])
def get_likes(self):
"""Gets the user's Facebook likes and echos back the first couple."""
# Get the first page of user's likes
j = self.facebook_get_user_json('likes')
# Note: only this first page of likes is processed (no pagination).
for x in j['data']:
# We're only interested in the Facebook pages that we actually have information on.
try:
facebook_page = FacebookPage.objects.get(facebook_id=int(x['id']), name=x['name'])
self.likes.add(facebook_page)
except FacebookPage.DoesNotExist:
pass
self.save()
def send_buttons(self, text, buttons):
"""Sends a Facebook message with buttons to the user."""
message = {"recipient": {"id": self.facebook_psid},
"message": {
"attachment": {
"type": "template",
"payload": {
"template_type": "button",
"text": text,
"buttons": buttons
}
}
}
}
response = facebook_post_json('me/messages', message)
if response.ok:
print("Message '{}' successfully sent to user {}".format(message, str(self)))
else:
print(response.status_code, response.text)
print("Problem sending message '{}' to user {}".format(message, str(self)))
def send_quick_replies(self, text, replies):
"""Sends a Facebook message with quick replies to the user."""
buttons = [
{
"content_type": "text",
"title": x[0],
"payload": x[1]
}
for x in replies]
message = {"recipient": {"id": self.facebook_psid},
"message": {
"text": text,
"quick_replies": buttons}
}
response = facebook_post_json('me/messages', message)
if response.ok:
print("Message '{}' successfully sent to user {}".format(message, str(self)))
else:
print(response.status_code, response.text)
print("Problem sending message '{}' to user {}".format(message, str(self)))
def facebook_get_user_json(self, endpoint):
"""Makes a GET request to Facebook at /{asid}/{endpoint}"""
url = "".join(['/', str(self.facebook_asid), '/', endpoint])
print(url)
return facebook_base_get_json(url, params={
"access_token": self.user.social_auth.get(provider='facebook').extra_data['access_token']})
def ask_to_start(self, oauth):
"""Asks the user if they want to start the quiz."""
if oauth:
self.send_message("Thanks for that!")
else:
self.send_message("No problem!")
# Now send the user a message about the Opera Finder
self.send_quick_replies("Do you want to play Opera Finder?",
[("Yes", "operafinder_yes"),
("No", "operafinder_no"),
("Tell me more?", "operafinder_more")])
def start_quiz(self):
"""Method that will start a quiz for the user."""
self.send_question(Question.objects.get(id=1))
def send_question(self, question):
self.send_quick_replies(question.text,
[(answer.text, "question_{}_{}".format(question.id, answer.text))
for answer in Answer.objects.filter(question=question).order_by('id')])
def end_quiz(self):
"""Initial opera recommendation process."""
user_category_weightings = {x.name: 0 for x in OperaCategory.objects.all()}
user_answers = UserAnswer.objects.filter(profile=self)
for x in user_answers:
for category in x.answer.categories.all():
# For all categories in the weighting, add the answer's weighting
answer_weighting = AnswerCategoryWeighting.objects.get(answer=x.answer, category=category).weighting
user_category_weightings[category.name] += answer_weighting
user_facebook_likes = self.likes.all()
for x in user_facebook_likes:
for category in x.categories.all():
user_category_weightings[category.name] += 0.51
print(user_category_weightings)
favourite = [x for x in user_category_weightings.items() if x[1] > 0]
favourite.sort(key=lambda x: x[1])
if favourite:
self.send_message(f"Looks like you love {favourite[-1][0].lower()}...")
all_operas = [opera for opera in Opera.objects.all() if opera.url]
highest_opera = all_operas[0]
highest_opera_value = 0
for opera in all_operas:
opera_value = 0
categories = [x.name for x in opera.categories.all()]
# Add to the value where the opera is in there.
for x in categories:
opera_value += user_category_weightings[x]
if opera_value > highest_opera_value:
highest_opera, highest_opera_value = opera, opera_value
buttons = [
{
"type": "web_url",
"url": highest_opera.url,
"title": "Find out more"
}]
self.send_buttons("Based on your answers, we recommend {}! {}".format(highest_opera.name,
highest_opera.description),
buttons)
self.send_quick_replies("Thanks so much for trying our quizbot. "
"Would you like to try again?",
[("Yes", "restart_yes")])
# buttons = [
# {
# "type": "web_url",
# "url": "https://takke.typeform.com/to/l2AdhA",
# "title": "Go to survey"
# }]
# self.send_buttons("Thanks so much for trying out our quizbot. We'd really appreciate your feedback, so it would be great if you could fill in our survey, if you don't mind! Thank you π", buttons)
def send_operafinder_no(self):
self.send_quick_replies(
"Oh, no. π Opera Finder is just a quiz-game that we use to help you explore opera, "
"by suggesting a production you might enjoy - are you sure you donβt want to play?",
[("Yes", "operafinder_yes"),
("No", "operafinder_nono")])
def send_operafinder_info(self):
self.send_message(
"Of course! Opera Finder is a quiz-game that we use to help you explore opera, by suggesting a production you might enjoy. We consider your interests, as well as plot and music preferences - and then find your opera match! If you are interested, then, you can always read more about your match on our website.")
self.send_quick_replies("Would you like to play?",
[("Yes", "operafinder_yes"),
("No", "operafinder_nono")])
def send_operafinder_info_inquiz(self):
self.send_message(
"Of course! Opera Finder is a quiz-game that we use to help you explore opera, by suggesting a production you might enjoy. We consider your interests, as well as plot and music preferences - and then find your opera match! If you are interested, then, you can always read more about your match on our website.")
question = self.recorded_answers.all().order_by('-id')[0].id
self.send_quick_replies("Would you like to go back to the quiz?",
[("Resume quiz", f"question_{question}_none"),
("Restart quiz", "restart_yes")])
def outofscope_message(self):
text = "Sorry, I didn't understand that. π§"
# Test if user has already started quiz
if self.recorded_answers.exists():
# Get the last question they answered
def quoteIdent(self, ident):
return '"%s"' % ident # escapes?
def null(self):
return r'\N'
def serialType(self, type):
return is_serial(type)
def intType(self, type):
t = type.lower()
return Database.intType(self, t) or \
self.serialType(type) or is_int(t)
def eanType(self, type):
# return the expected byte size
t = type.upper()
return 0 if not re.match('(' + RE_EAN + ')', t) else \
12 if t == 'UPC' else \
13 if t[-2:] == '13' else \
8 if t == 'ISSN' else \
10
def blobValue(self, lo):
return r'\\x' + ''.join("{0:02x}".format(o) for o in lo)
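# Example (illustrative): blobValue(b"\x01\xff") returns the text r"\\x01ff";
# COPY's text format unescapes the doubled backslash, so PostgreSQL receives
# the bytea hex literal \x01ff.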
def analyse(self, name):
return "ANALYZE %s;" % name
class MySQL(Database):
"""MySQL database (experimental)."""
# % and _ left out, just for pattern matching
CMAP = { '\0':r'\0', '\'':r'\'', '\"':r'\"', '\b':r'\b', '\n':r'\n',
'\r':r'\r', '\t':r'\t', chr(26):r'\Z', '\\':r'\\' }
def echo(self, s):
return 'system echo %s' % s
def begin(self):
return 'START TRANSACTION;'
def commit(self):
return 'COMMIT;'
def insertBegin(self, table):
return "INSERT INTO %s (%s) VALUES" % \
(table.getName(), ','.join(a.getName() for a in table.att_list))
def insertValue(self, table, value, isLast):
qv = []
for v in value:
qv.append(self.null() if v is None else
self.boolValue(v) if type(v) is bool else
self.quoteLiteral(v) if type(v) in [str, defstr] else
str(v))
s = ' (' + ','.join(str(v) for v in qv) + ')'
if not isLast:
s += ','
return s
def quoteLiteral(self, literal):
return "'" + self.quoteString(literal, MySQL.CMAP), + "'"
def insertEnd(self):
return ';'
def null(self):
return 'NULL'
def serialType(self, type):
return is_serial(type)
def intType(self, type):
t = type.lower()
return Database.intType(self, t) or self.serialType(t) or is_int(t)
def timestampType(self, type):
return type.lower() == 'datetime' or re.match(RE_TSTZ + '$', type, re.I)
def setSequence(self, tab, att, number):
return ''
# class SQLite(Database):
def debug(level, message):
"""Print a debug message, maybe."""
if opts.debug >= level:
sys.stderr.write("*" * level + " " + message + "\n")
# option management
# --size=1000
# --target=postgresql|mysql
# --help is automatic
import sys
import argparse
#version="version {0}".format(version),
opts = argparse.ArgumentParser(
description='Fill database tables with random data.')
opts.add_argument('-s', '--size', type=int, default=None,
help='scale to size')
opts.add_argument('-t', '--target', default='postgresql',
help='generate for this engine')
opts.add_argument('-e', '--encoding', type=str, default=None,
help='set input & output encoding')
opts.add_argument('-f', '--filter', action='store_true', default=False,
help='also include input in output')
opts.add_argument('--no-filter', action='store_true', default=False,
help='do turn off filtering, whatever!')
opts.add_argument('--freeze', action='store_true', default=True,
help='use PostgreSQL COPY FREEZE')
opts.add_argument('--no-freeze', dest='freeze', action='store_false',
help='do not use PostgreSQL COPY FREEZE')
opts.add_argument('-T', '--transaction', action='store_true',
help='wrap output in a transaction')
opts.add_argument('-S', '--seed', default=None,
help='random generator seed')
opts.add_argument('-O', '--offset', type=int, default=None,
help='set global offset for integer primary keys')
opts.add_argument('--truncate', action='store_true', default=False,
help='truncate table contents before loading')
opts.add_argument('--drop', action='store_true', default=False,
help='drop tables before reloading')
opts.add_argument('-D', '--debug', action='count', default=0,
help='set debug mode')
opts.add_argument('-m', '--man', action='store_const', const=2,
help='show man page')
opts.add_argument('-n', '--null', type=float, default=None,
help='probability of generating a NULL value')
opts.add_argument('--pod', type=str, default='pod2usage -verbose 3',
help='override pod2usage command')
opts.add_argument('-q', '--quiet', action='store_true', default=False,
help='less verbose output')
opts.add_argument('--csv-null', type=str, default='NULL',
help='how to encode NULL in CSV output')
opts.add_argument('--csv-separator', type=str, default=',',
help='separator for CSV output')
opts.add_argument('--self-test', action='store_true', default=False,
help='run automatic self test')
opts.add_argument('--self-test-hack', action='store_true', default=False,
help='override system newline for self-test')
opts.add_argument('--self-test-python', type=str, default=None,
help="self-test must run with this python")
opts.add_argument('-X', '--test', action='append',
help='show generator output for directives')
opts.add_argument('--tries', type=int, default=10,
help='how hard to try to satisfy unique constraints')
opts.add_argument('--type', action='append', default=[],
help='add custom type')
opts.add_argument('--validate', type=str, default=None,
help='shortcut for script validation')
opts.add_argument('-V', action='store_true', default=False,
help='show short version on stdout and exit')
opts.add_argument('-v', '--version', action='version',
version="version %s" % version,
help='show version information')
opts.add_argument('file', nargs='*',
help='process files, or stdin if empty')
opts = opts.parse_args()
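# Typical invocation (illustrative; file and database names are hypothetical):
# generate data for an annotated schema at 10x the default scale and load it
# directly, e.g.
#   datafiller.py --size=10 --filter schema.sql | psql mydb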
if opts.V:
print(VERSION)
sys.exit(0)
# set database target
db = None
if opts.target == 'postgresql':
db = PostgreSQL()
elif opts.target == 'mysql':
db = MySQL()
elif opts.target == 'csv':
db = CSV()
elif opts.target == 'tsv':
db = CSV()
opts.csv_separator = '\t'
else:
raise Exception("unexpected target database %s" % opts.target)
# fix some options for consistency
if opts.validate:
opts.transaction = True
opts.filter = True
if opts.self_test_hack:
# ensure some settings under self-test for determinism
if opts.seed:
random.seed(opts.seed)
opts.quiet = True
opts.freeze = False
# consistent \n and unicode, see print overriding below
assert not opts.encoding, "no --encoding under self-test"
opts.encoding = 'utf-8'
import os
os.linesep = '\n'
# file generator test
if opts.validate == 'internal':
generate_some_tmp_files()
if not opts.encoding:
# note: this is ignored by python3 on input(?)
opts.encoding = sys.getfilesystemencoding() if opts.file else \
sys.stdin.encoding
if opts.encoding:
# sys.stdout.encoding = opts.encoding
import os
if sys.version_info[0] == 3:
def __encoded_print(s, end=os.linesep):
# tell me there is something better than this...
sys.stdout.buffer.write(s.encode(opts.encoding))
sys.stdout.buffer.write(end.encode(opts.encoding))
sys.stdout.flush() # for python 3.4?
print = __encoded_print
else: # python 2
# simpler, although not sure why it works
print = lambda s, end=os.linesep: \
sys.stdout.write(s.encode(opts.encoding) + end)
# macros: { 'name':{...} }
df_macro = {}
# some example predefined macros
df_macro['cfr'] = getParams("gen=int:scale rate=0.17")
df_macro['french'] = getParams("chars='esaitnrulodcpmvqfbghjxyzwk' cgen=cfr")
df_macro['cen'] = getParams("gen=int:scale rate=0.15")
df_macro['english'] = getParams("chars='etaonrishdlfcmugypwbvkjxqz' cgen=cen")
# simple generator unit tests
if opts.test:
AssertionError = StdoutExitError
ntest = 0
# process decoded tests
for t in map(u, opts.test):
ntest += 1
print(u8("-- test %s: %s") % (ntest, t))
# macro definition
m = re.match(r'\s*([\w\.]+)\s*:\s*(.*)', t)
if m:
name = m.group(1)
assert not name in GENERATORS, \
"do not use generator name '%s' as a macro name!" % name
df_macro[name] = getParams(m.group(2))
continue
# else some directives
d = re.match(r'\s*([!-]?)\s*(.*)', t)
h, params = d.group(1), getParams(d.group(2))
g = findGenerator('test', params)
if not g and 'type' in params:
g = findGeneratorType(params['type'])
assert g, "unknown type %s (%s)" % (params['type'], t)
assert g, "must specify a generator: %s" % t
Model.checkPARAMS('--test', params, Attribute.PARAMS)
gen = createGenerator(None, g, params)
if isinstance(gen, IntGenerator) and gen.size is None:
gen.setSize(opts.size if opts.size else 10)
assert not gen.params, "unused parameter: %s" % gen.params
if h == '!': # show histogram
n = opts.size if opts.size else 10000
vals = {}
for i in range(n):
tuple_count += 1
gen.mapGen(lambda s: s.shareSeed())
v = gen.getData()
vals[v] = 0 if not v in vals else vals[v] + 1
print("histogram on %s draws" % n)
for v in sorted(vals):
print(u8("{0}: {1:6.3f} %").format(v, 100.0*vals[v]/n))
else: # show values
for i in range(opts.size if opts.size else 10):
tuple_count += 1
gen.mapGen(lambda s: s.shareSeed())
d = db.showValue(gen.getData())
if h == '-': # short
print(str(d), end=' ') # '\t' ?
else:
print(u8("%s: %s") % (i, d))
if h == '-':
print('')
# just in case
cleanup_some_tmp_files()
sys.exit(0)
# option consistency
if opts.drop or opts.test:
opts.filter = True
if opts.no_filter: # may be forced back for some tests
opts.filter = False
assert not (opts.filter and opts.truncate), \
"option truncate does not make sense with option filter"
if opts.man:
# Let us use Perl's POD from Python:-)
import os, tempfile as tmp
pod = tmp.NamedTemporaryFile(prefix='datafiller_', mode='w')
name = sys.argv[0].split(os.sep)[-1]
pod.write(POD.format(comics=COMICS, pgbench=PGBENCH, library=LIBRARY_NODIR,
name=name, DOT=RE_DOT, NGENS=len(GENERATORS),
GLIST=' '.join("B<%s>" % g for g in sorted(GENERATORS)),
version=version, year=revyear))
pod.flush()
os.system(opts.pod + ' ' + pod.name)
pod.close()
sys.exit(0)
# auto run a test with some options
def self_run(validate=None, seed=None, pipe=False, op=[]):
import subprocess
# command to run
cmd = [ sys.argv[0] ] + op
if isinstance(validate, list):
cmd += map(lambda t: '--test=' + t, validate)
else: # must be str
cmd += [ '--validate='+validate ]
if opts.self_test_python:
cmd += [ '--self-test-python=' + opts.self_test_python ]
if seed:
cmd.append('--seed='+seed)
if opts.self_test_python:
cmd.insert(0, opts.self_test_python)
if opts.debug: debug(1, "self-test cmd: %s" % cmd)
return subprocess.Popen(cmd, stdout=subprocess.PIPE if pipe else None)
# the self-test allows to test the script on hosts without a database
def self_test(validate=None, seed='Calvin', D=None):
import subprocess, hashlib, time
start = time.time()
h = hashlib.sha256()
p = self_run(validate, seed=seed, pipe=True, op=['--self-test-hack'])
for line in p.stdout:
h.update(line)
okay = p.wait() == 0
d = h.hexdigest()[0:16]
end = time.time()
print("self-test %s seed=%s hash=%s seconds=%.2f: %s" %
(validate, seed, d, end-start, 'PASS' if okay and d == D else 'FAIL'))
return okay and d == D
if opts.self_test:
# self test results for python 2 & 3
TESTS = [
# [test, seed, [ py2h, py3h ]]
['unit', 'Wormwood!', ['73d9b211839c90d6', '05183e0be6ac649e']],
['internal', 'Moe!!', ['8b56d03d6220dca3', 'aa7ccdc56a147cc4']],
['library', 'Calvin', ['d778fe6adc57eea7', 'adc029f5d9a3a0fb']],
['comics', 'Hobbes!', ['fff09e9ac33a4e2a', '49edd6998e04494e']],
['pgbench', 'Susie!', ['891cd4a00d89d501', '55d66893247d3461']]]
fail = 0
for test, seed, hash in TESTS:
if not opts.validate or opts.validate == test:
fail += not self_test(test, seed, hash[sys.version_info[0]-2])
sys.exit(fail)
# reset arguments for fileinput
sys.argv[1:] = opts.file
#
# global variables while parsing
#
# list of tables in occurrence order, for regeneration
tables = []
all_tables = {}
# parser status
current_table = None
current_attribute = None
current_enum = None
dfstuff = None
att_number
# GT in world frame for accuracy computation
yaw_only_objects, max_min_dict, transformed_annotations, _ = \
fat_image.visualize_pose_ros(
image_data, annotations, frame='table', camera_optical_frame=False,
input_camera_pose=camera_pose, ros_publish=True
)
max_min_dict['ymax'] = 0.85
max_min_dict['ymin'] = -0.85
max_min_dict['xmax'] = 0.5
max_min_dict['xmin'] = -0.5
# max_min_dict['ymax'] += 0.6
# max_min_dict['ymin'] -= 0.6
# max_min_dict['xmax'] += 0.6
# max_min_dict['xmin'] -= 0.6
fat_image.search_resolution_translation = 0.08
if dataset_cfg["device"] == "gpu":
perch_annotations, stats = fat_image.visualize_perch_output(
image_data, annotations, max_min_dict, frame='table',
use_external_render=0, required_object=required_objects,
camera_optical_frame=False, use_external_pose_list=0, gt_annotations=transformed_annotations,
input_camera_pose=camera_pose, table_height=0.006, num_cores=0,
compute_type=1
)
elif dataset_cfg["device"] == "icp":
perch_annotations, stats = fat_image.visualize_perch_output(
image_data, annotations, max_min_dict, frame='table',
use_external_render=0, required_object=required_objects,
camera_optical_frame=False, use_external_pose_list=0, gt_annotations=transformed_annotations,
input_camera_pose=camera_pose, table_height=0.006, num_cores=0,
compute_type=0
)
# print(perch_annotations)
# print(transformed_annotations)
f_accuracy.write("{},".format(image_data['file_name']))
add_dict, add_s_dict = fat_image.compare_clouds(transformed_annotations, perch_annotations, downsample=True, use_add_s=True)
for object_name in required_objects:
if (object_name in add_dict) and (object_name in add_s_dict):
f_accuracy.write("{},{},".format(add_dict[object_name], add_s_dict[object_name]))
else:
f_accuracy.write(" , ,")
f_accuracy.write("\n")
if stats is not None:
f_runtime.write("{} {} {} {} {}".format(image_data['file_name'], stats['expands'], stats['runtime'], stats['icp_runtime'], stats['peak_gpu_mem']))
f_runtime.write("\n")
f_runtime.close()
def run_sameshape():
## Running on PERCH only with synthetic color dataset - shape
# Use normalize cost to get best results
base_dir = "/media/aditya/A69AFABA9AFA85D9/Cruzr/code/Dataset_Synthesizer/Test/Zed"
# base_dir = "/media/sbpl/Data/Aditya/datasets/Zed"
image_directory = base_dir
annotation_file = base_dir + '/instances_newmap1_turbosquid_2018.json'
# annotation_file = base_dir + '/instances_newmap1_turbosquid_can_only_2018.json'
model_dir = "/media/aditya/A69AFABA9AFA85D9/Datasets/SameShape/turbosquid/models"
# model_dir = "/media/sbpl/Data/Aditya/datasets/turbosquid/models"
fat_image = FATImage(
coco_annotation_file=annotation_file,
coco_image_directory=image_directory,
depth_factor=100,
model_dir=model_dir,
model_mesh_in_mm=True,
# model_mesh_scaling_factor=0.005,
model_mesh_scaling_factor=1,
models_flipped=False
)
f_runtime = open('runtime.txt', "w")
f_accuracy = open('accuracy.txt', "w")
f_runtime.write("{} {} {}\n".format('name', 'expands', 'runtime'))
# required_objects = ['coke_can', 'coke_bottle', 'pepsi_can']
# required_objects = ['coke_bottle', 'sprite_bottle']
required_objects = ['coke_bottle', 'sprite_bottle', 'pepsi_can', 'coke_can']
# required_objects = ['pepsi_can', 'coke_can', '7up_can', 'sprite_can']
f_accuracy.write("name ")
for object_name in required_objects:
f_accuracy.write("{}-add {}-adds ".format(object_name, object_name))
f_accuracy.write("\n")
# for img_i in ['14']:
# for img_i in ['14', '20', '25', '32', '33', '38', '48']:
read_results_only = False
# for img_i in range(0,50):
for img_i in range(7,8):
# for img_i in ['30', '31', '34', '35', '36', '37', '39', '40']:
# for img_i in ['15', '16', '17', '18', '19', '21', '22', '23', '24', '26', '27', '28', '29', '41', '42', '43', '44', '45', '46', '47', '49']:
# for img_i in list(range(0,13)) + ['30', '31', '34', '35', '36', '37', '39', '40', '15', '16', '17', '18', '19', '21', '22', '23', '24', '26', '27', '28', '29', '41', '42', '43', '44', '45', '46', '47', '49']:
# if img_i == 10 or img_i == 14 or img_i == 15 or img_i == 18 or img_i == 20:
# # mising in icp run
# continue
image_name = 'NewMap1_turbosquid/0000{}.left.png'.format(str(img_i).zfill(2))
# image_name = 'NewMap1_turbosquid_can_only/0000{}.left.png'.format(str(img_i).zfill(2))
image_data, annotations = fat_image.get_random_image(name=image_name, required_objects=required_objects)
yaw_only_objects, max_min_dict, transformed_annotations = \
fat_image.visualize_pose_ros(image_data, annotations, frame='table', camera_optical_frame=False)
if read_results_only == False:
max_min_dict['ymax'] = 1.5
max_min_dict['ymin'] = -1.5
max_min_dict['xmax'] = 0.5
max_min_dict['xmin'] = -0.5
fat_image.search_resolution_translation = 0.08
perch_annotations, stats = fat_image.visualize_perch_output(
image_data, annotations, max_min_dict, frame='table',
use_external_render=0, required_object=required_objects,
# use_external_render=0, required_object=['coke', 'sprite', 'pepsi'],
# use_external_render=0, required_object=['sprite', 'coke', 'pepsi'],
camera_optical_frame=False, use_external_pose_list=0, gt_annotations=transformed_annotations
)
else:
output_dir_name = os.path.join("final_comp", "color_lazy_histogram", fat_image.get_clean_name(image_data['file_name']))
perch_annotations, stats = fat_image.read_perch_output(output_dir_name)
# print(perch_annotations)
# print(transformed_annotations)
f_accuracy.write("{},".format(image_data['file_name']))
add_dict, add_s_dict = fat_image.compare_clouds(transformed_annotations, perch_annotations, downsample=True, use_add_s=True)
for object_name in required_objects:
if (object_name in add_dict) and (object_name in add_s_dict):
f_accuracy.write("{},{},".format(add_dict[object_name], add_s_dict[object_name]))
else:
f_accuracy.write(" , ,")
f_accuracy.write("\n")
f_runtime.write("{} {} {}\n".format(image_name, stats['expands'], stats['runtime']))
f_runtime.close()
f_accuracy.close()
def run_sameshape_can_only():
## Running on PERCH only with synthetic color dataset - shape
# Use normalize cost to get best results
base_dir = "/media/aditya/A69AFABA9AFA85D9/Cruzr/code/Dataset_Synthesizer/Test/Zed"
# base_dir = "/media/sbpl/Data/Aditya/datasets/Zed"
image_directory = base_dir
annotation_file = base_dir + '/instances_newmap1_turbosquid_can_only_2018.json'
model_dir = "/media/aditya/A69AFABA9AFA85D9/Datasets/SameShape/turbosquid/models"
# model_dir = "/media/sbpl/Data/Aditya/datasets/turbosquid/models"
fat_image = FATImage(
coco_annotation_file=annotation_file,
coco_image_directory=image_directory,
depth_factor=100,
model_dir=model_dir,
model_mesh_in_mm=True,
# model_mesh_scaling_factor=0.005,
model_mesh_scaling_factor=1,
models_flipped=False
)
f_runtime = open('runtime.txt', "w")
f_accuracy = open('accuracy.txt', "w")
f_runtime.write("{} {} {}\n".format('name', 'expands', 'runtime'))
# required_objects = ['coke_can', 'pepsi_can']
required_objects = ['7up_can', 'sprite_can', 'pepsi_can', 'coke_can']
f_accuracy.write("name ")
for object_name in required_objects:
f_accuracy.write("{}-add {}-adds ".format(object_name, object_name))
f_accuracy.write("\n")
for img_i in range(0,25):
image_name = 'NewMap1_turbosquid_can_only/0000{}.left.png'.format(str(img_i).zfill(2))
image_data, annotations = fat_image.get_random_image(name=image_name, required_objects=required_objects)
yaw_only_objects, max_min_dict, transformed_annotations = \
fat_image.visualize_pose_ros(image_data, annotations, frame='table', camera_optical_frame=False)
max_min_dict['ymax'] = 1.5
max_min_dict['ymin'] = -1.5
max_min_dict['xmax'] = 0.5
max_min_dict['xmin'] = -0.5
fat_image.search_resolution_translation = 0.08
perch_annotations, stats = fat_image.visualize_perch_output(
image_data, annotations, max_min_dict, frame='table',
use_external_render=0, required_object=required_objects,
# use_external_render=0, required_object=['coke', 'sprite', 'pepsi'],
# use_external_render=0, required_object=['sprite', 'coke', 'pepsi'],
camera_optical_frame=False, use_external_pose_list=0, gt_annotations=transformed_annotations
)
# print(perch_annotations)
# print(transformed_annotations)
f_accuracy.write("{} ".format(image_data['file_name']))
add_dict, add_s_dict = fat_image.compare_clouds(transformed_annotations, perch_annotations, downsample=False, use_add_s=True)
for object_name in required_objects:
if (object_name in add_dict) and (object_name in add_s_dict):
f_accuracy.write("{},{},".format(add_dict[object_name], add_s_dict[object_name]))
else:
f_accuracy.write(" , ,")
f_accuracy.write("\n")
f_runtime.write("{} {} {}\n".format(image_name, stats['expands'], stats['runtime']))
f_runtime.close()
f_accuracy.close()
def run_dope_sameshape():
base_dir = "/media/aditya/A69AFABA9AFA85D9/Cruzr/code/Dataset_Synthesizer/Test/Zed"
# base_dir = "/media/sbpl/Data/Aditya/datasets/Zed"
image_directory = base_dir
annotation_file = base_dir + '/instances_newmap1_turbosquid_can_only_2018.json'
# annotation_file = base_dir + '/instances_newmap1_turbosquid_2018.json'
model_dir = "/media/aditya/A69AFABA9AFA85D9/Datasets/SameShape/turbosquid/models"
# model_dir = "/media/sbpl/Data/Aditya/datasets/turbosquid/models"
fat_image = FATImage(
coco_annotation_file=annotation_file,
coco_image_directory=image_directory,
depth_factor=100,
model_dir=model_dir,
model_mesh_in_mm=True,
# model_mesh_scaling_factor=0.005,
model_mesh_scaling_factor=1,
models_flipped=False,
distance_scale=100,
img_width=960,
img_height=540,
dataset_type="ndds"
)
f_runtime = open('runtime.txt', "w", 1)
f_accuracy = open('accuracy.txt', "w", 1)
f_runtime.write("{},{},{}\n".format('name', 'expands', 'runtime'))
# required_objects = ['coke_can', 'pepsi_can']
required_objects = ['7up_can', 'sprite_can', 'pepsi_can', 'coke_can']
# required_objects = ['coke_can', 'pepsi_can']
# required_objects = ['pepsi_can', 'coke_can']
# required_objects = ['coke_bottle', 'pepsi_can', 'coke_can', 'sprite_bottle']
f_accuracy.write("name,")
# for object_name in required_objects:
# f_accuracy.write("{} ".format(object_name))
# f_accuracy.write("\n")
for object_name in required_objects:
f_accuracy.write("{}-add,{}-adds,".format(object_name, object_name))
f_accuracy.write("\n")
fat_image.init_dope_node()
for img_i in range(0,50):
# for img_i in [5]:
image_name = 'NewMap1_turbosquid_can_only/0000{}.left.png'.format(str(img_i).zfill(2))
# image_name = 'NewMap1_turbosquid/0000{}.left.png'.format(str(img_i).zfill(2))
image_data, annotations = fat_image.get_random_image(name=image_name, required_objects=required_objects)
yaw_only_objects, max_min_dict, transformed_annotations, _ = \
fat_image.visualize_pose_ros(image_data, annotations, frame='camera', camera_optical_frame=False, ros_publish=True, num_publish=1)
# print(transformed_annotations)
# dopenode = DopeNode()
# color_img_path = os.path.join(self.coco_image_directory, image_data['file_name'])
# dopenode.run_on_image(color_img_path)
dope_annotations, runtime = fat_image.visualize_dope_output(image_data)
# print(dope_annotations)
f_accuracy.write("{},".format(image_data['file_name']))
# Dope assumes that model of object is according to camera frame (NDDS) and GT is also in that
# So no need to apply any fixed transform when comparing the clouds by converting the model
add_dict, add_s_dict = fat_image.compare_clouds(
transformed_annotations, dope_annotations, downsample=True, use_add_s=True, convert_annotation_2=True
)
for object_name in required_objects:
if (object_name in add_dict) and (object_name in add_s_dict):
f_accuracy.write("{},{},".format(add_dict[object_name], add_s_dict[object_name]))
else:
f_accuracy.write(" , ,")
f_accuracy.write("\n")
f_runtime.write("{},{},{}\n".format(image_data['file_name'], 0, runtime))
f_accuracy.close()
f_runtime.close()
return
def run_dope_6d():
image_directory = '/media/aditya/A69AFABA9AFA85D9/Datasets/fat/mixed/extra'
annotation_file = '/media/aditya/A69AFABA9AFA85D9/Datasets/fat/mixed/extra/instances_fat_val_pose_6_obj_2018.json'
fat_image = FATImage(
coco_annotation_file=annotation_file,
coco_image_directory=image_directory,
depth_factor=10000,
model_dir='/media/aditya/A69AFABA9AFA85D9/Datasets/YCB_Video_Dataset/aligned_cm',
model_mesh_in_mm=False,
model_mesh_scaling_factor=1,
models_flipped=False
)
f_runtime = open('runtime.txt', "w")
f_accuracy = open('accuracy.txt', "w")
f_runtime.write("{} {} {}\n".format('name', 'expands', 'runtime'))
# required_objects = ['coke_can', 'pepsi_can']
# required_objects = ['7up_can', 'sprite_can', 'pepsi_can', 'coke_can']
# required_objects = ['coke_can', 'pepsi_can']
required_objects = fat_image.category_names
# filter_objects = ["003_cracker_box","010_potted_meat_can", "002_master_chef_can", '006_mustard_bottle', "025_mug"]
# filter_objects = ["010_potted_meat_can", "002_master_chef_can", '006_mustard_bottle']
filter_objects = None
f_accuracy.write("name ")
# for object_name in required_objects:
# f_accuracy.write("{} ".format(object_name))
# f_accuracy.write("\n")
for object_name in required_objects:
f_accuracy.write("{}-add {}-adds ".format(object_name, object_name))
f_accuracy.write("\n")
fat_image.init_dope_node()
skip_list = ['kitchen_4/000006.left.jpg', 'kitchen_4/000014.left.jpg', 'kitchen_4/000169.left.jpg', 'kitchen_4/000177.left.jpg']
for img_i in range(0,2000):
# for img_i in [5]:
image_name = 'kitchen_4/00{}.left.jpg'.format(str(img_i).zfill(4))
if image_name in skip_list:
continue
image_data, annotations = fat_image.get_random_image(name=image_name, required_objects=required_objects)
if image_data is None or annotations is None:
continue
if filter_objects is not None:
found_filter_object = False
for anno in annotations:
if fat_image.category_id_to_names[anno['category_id']]['name'] in filter_objects:
found_filter_object = True
if found_filter_object == False:
continue
yaw_only_objects, max_min_dict, transformed_annotations = \
fat_image.visualize_pose_ros(image_data, annotations, frame='camera', camera_optical_frame=False, ros_publish=False, num_publish=1)
dope_annotations, _ = fat_image.visualize_dope_output(image_data)  # second return value (runtime) is unused here
f_accuracy.write("{},".format(image_data['file_name']))
add_dict, add_s_dict = fat_image.compare_clouds(
transformed_annotations, dope_annotations, downsample=True, use_add_s=True, convert_annotation_2=True
)
if add_dict is not None and add_s_dict is not None:
for object_name in required_objects:
if (object_name in add_dict) and (object_name in add_s_dict):
f_accuracy.write("{},{},".format(add_dict[object_name], add_s_dict[object_name]))
else:
f_accuracy.write(" , ,")
f_accuracy.write("\n")
# yaw_only_objects, max_min_dict, transformed_annotations = \
# fat_image.visualize_pose_ros(image_data, dope_annotations, frame='camera', camera_optical_frame=False)
f_accuracy.close()
return
def run_sameshape_gpu(dataset_cfg=None):
## Running on PERCH only with synthetic color dataset - shape
image_directory = dataset_cfg['image_dir']
annotation_file = image_directory + '/instances_newmap1_turbosquid_can_only_2018.json'
# annotation_file = image_directory + '/instances_newmap1_turbosquid_2018.json'
model_dir = dataset_cfg['model_dir']
fat_image = FATImage(
coco_annotation_file=annotation_file,
coco_image_directory=image_directory,
depth_factor=100,
model_dir=model_dir,
model_mesh_in_mm=True,
# model_mesh_scaling_factor=0.005,
model_mesh_scaling_factor=1,
models_flipped=False,
img_width=960,
img_height=540,
distance_scale=100,
env_config="pr2_gpu_env_config.yaml",
planner_config="pr2_planner_config.yaml",
perch_debug_dir=dataset_cfg["perch_debug_dir"],
python_debug_dir=dataset_cfg["python_debug_dir"],
dataset_type=dataset_cfg["type"]
)
f_runtime = open('runtime.txt', "w", 1)
f_accuracy = open('accuracy.txt', "w", 1)
f_runtime.write("{} {} {}\n".format('name', 'expands', 'runtime'))
# required_objects = ['coke_can', 'coke_bottle', 'pepsi_can']
# required_objects = ['coke_bottle']
# required_objects = ['010_potted_meat_can', '008_pudding_box']
# required_objects = ['010_potted_meat_can']
# required_objects = ['coke_bottle', 'sprite_bottle', 'pepsi_can', 'coke_can']
# required_objects = ['sprite_bottle']
required_objects = ['pepsi_can', 'coke_can', '7up_can', 'sprite_can']
# required_objects = ['sprite_can']
# required_objects = ['pepsi_can', 'sprite_bottle', 'coke_bottle']
f_accuracy.write("name,")
for object_name in required_objects:
f_accuracy.write("{}-add,{}-adds,".format(object_name, object_name))
f_accuracy.write("\n")
read_results_only = False
# fat_image.search_resolution_yaw = 1.57
# 5 in can only
for img_i in range(18,50):
image_name = 'NewMap1_turbosquid_can_only/0000{}.left.png'.format(str(img_i).zfill(2))
# image_name = 'NewMap1_turbosquid/0000{}.left.png'.format(str(img_i).zfill(2))
image_data, annotations = | |
<filename>spharpy/special.py
"""
Subpackage implementing or wrapping special functions required in the
spharpy package.
"""
from itertools import count
import numpy as np
import scipy.special as _spspecial
from scipy.optimize import brentq
def spherical_bessel(n, z, derivative=False):
r"""
Spherical bessel function of order n evaluated at z.
.. math::
j_n(z) = \sqrt{\frac{\pi}{2z}} J_{n+\frac{1}{2}} (z)
Parameters
----------
n : int, ndarray
Order of the spherical bessel function
z : double, ndarray
Argument of the spherical bessel function. Has to be real valued.
derivative : bool
Return the derivative of the spherical Bessel function
Returns
-------
jn : double, ndarray
Spherical bessel function. Array with dimensions [N x Z], where N is
the number of elements in n and Z is the number of elements in z.
Note
----
This is a wrapper around the Scipy implementation of the spherical Bessel
function.
"""
ufunc = _spspecial.spherical_jn
n = np.asarray(n, dtype=int)  # built-in int/complex: np.int and np.complex were removed from NumPy
z = np.asarray(z, dtype=np.double)
bessel = np.zeros((n.size, z.size), dtype=complex)
if n.size > 1:
for idx, order in zip(count(), n):
bessel[idx, :] = ufunc(order, z, derivative=derivative)
else:
bessel = ufunc(n, z, derivative=derivative)
if z.ndim <= 1 or n.ndim <= 1:
bessel = np.squeeze(bessel)
return bessel
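# Illustrative usage sketch: spherical_bessel broadcasts over several orders at
# once and returns an [N x Z] array. The helper name below is an assumption and
# not part of the spharpy API.
def _example_spherical_bessel():
    orders = np.array([0, 1, 2])
    z = np.linspace(0.5, 10.0, 50)
    jn = spherical_bessel(orders, z)                    # shape (3, 50)
    djn = spherical_bessel(orders, z, derivative=True)  # derivatives, same shape
    return jn, djn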
def spherical_bessel_zeros(n_max, n_zeros):
"""Compute the zeros of the spherical Bessel function.
This function always starts at order zero, which is equal
to sin(x)/x, and iteratively computes the roots for higher orders.
The roots are computed using Brent's algorithm from the scipy package.
Parameters
----------
n_max : int
The order of the spherical bessel function
n_zeros : int
The number of roots to be computed
Returns
-------
roots : ndarray, double
The roots of the spherical bessel function
"""
def func(x, n):
return _spspecial.spherical_jn(n, x)
zerosj = np.zeros((n_max+1, n_zeros), dtype=np.double)
zerosj[0] = np.arange(1, n_zeros+1)*np.pi
points = np.arange(1, n_zeros+n_max+1)*np.pi
roots = np.zeros(n_zeros+n_max, dtype=np.double)
for i in range(1, n_max+1):
for j in range(n_zeros+n_max-i):
roots[j] = brentq(func, points[j], points[j+1], (i,), maxiter=5000)
points = roots
zerosj[i, :n_zeros] = roots[:n_zeros]
return zerosj
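# Illustrative usage sketch: the roots of j_0 are the multiples of pi, while
# the first root of j_1 lies near 4.49. The helper name is an assumption.
def _example_spherical_bessel_zeros():
    zeros = spherical_bessel_zeros(n_max=1, n_zeros=3)  # shape (2, 3)
    return zeros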
def spherical_hankel(n, z, kind=2, derivative=False):
r"""
Spherical Hankel function of order n evaluated at z.
.. math::
h_n^{(\mathrm{kind})}(z) = \sqrt{\frac{\pi}{2z}} H^{(\mathrm{kind})}_{n+\frac{1}{2}} (z)
Parameters
----------
n : int, ndarray
Order of the spherical Hankel function
z : double, ndarray
Argument of the spherical Hankel function. Has to be real valued.
kind : int, optional
Kind of the spherical Hankel function, either 1 or 2. Defaults to 2.
derivative : bool
Return the derivative of the spherical Hankel function
Returns
-------
hn : complex, ndarray
Spherical Hankel function. Array with dimensions [N x Z], where N is
the number of elements in n and Z is the number of elements in z.
Note
----
This is based on the Hankel functions implemented in the scipy package.
"""
if kind not in (1, 2):
raise ValueError("The spherical hankel function can \
only be of first or second kind.")
n = np.asarray(n, dtype=int)
z = np.asarray(z, dtype=np.double)
if derivative:
ufunc = _spherical_hankel_derivative
else:
ufunc = _spherical_hankel
if n.size > 1:
hankel = np.zeros((n.size, z.size), dtype=complex)
for idx, order in zip(count(), n):
hankel[idx, :] = ufunc(order, z, kind)
else:
hankel = ufunc(n, z, kind)
if z.ndim <= 1 or n.ndim <= 1:
hankel = np.squeeze(hankel)
return hankel
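# Illustrative usage sketch: the second-kind spherical Hankel function as it
# appears e.g. in radiating spherical wave expansions. Helper name assumed.
def _example_spherical_hankel():
    n = np.array([0, 1])
    kr = np.linspace(0.5, 5.0, 20)
    hn = spherical_hankel(n, kr, kind=2)
    dhn = spherical_hankel(n, kr, kind=2, derivative=True)
    return hn, dhn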
def _spherical_hankel(n, z, kind):
if kind == 1:
hankel = _spspecial.hankel1(n+0.5, z)
elif kind == 2:
hankel = _spspecial.hankel2(n+0.5, z)
hankel = np.sqrt(np.pi/2/z) * hankel
return hankel
def _spherical_hankel_derivative(n, z, kind):
hankel = _spherical_hankel(n-1, z, kind) - \
(n+1)/z * _spherical_hankel(n, z, kind)
return hankel
def spherical_harmonic(n, m, theta, phi):
"""The spherical harmonics of order n and degree m.
Parameters
----------
n : unsigned int
The spherical harmonic order
m : int
The spherical harmonic degree
theta : ndarray, double
The elevation angle
phi : ndarray, double
The azimuth angle
Returns
-------
spherical_harmonic : ndarray, double
The complex valued spherical harmonic of order n and degree m
Note
----
This function wraps the spherical harmonic implementation from scipy.
The only difference is that we return zeros instead of nan values
if $n < |m|$.
"""
theta = np.asarray(theta, dtype=np.double)
phi = np.asarray(phi, dtype=np.double)
if n < np.abs(m):
sph_harm = np.zeros(theta.shape)
else:
sph_harm = _spspecial.sph_harm(m, n, phi, theta)
return sph_harm
def spherical_harmonic_real(n, m, theta, phi):
r"""Real valued spherical harmonic function of order n and degree m
evaluated at the angles theta and phi.
The spherical harmonic functions are fully normalized (N3D) and follow
the AmbiX phase convention [1]_.
.. math::
Y_n^m(\theta, \phi) = \sqrt{\frac{2n+1}{4\pi} \frac{(n-|m|)!}{(n+|m|)!}} P_n^{|m|}(\cos \theta)
\begin{cases}
\displaystyle \cos(|m|\phi), & \text{if $m \ge 0$} \newline
\displaystyle \sin(|m|\phi) , & \text{if $m < 0$}
\end{cases}
References
----------
.. [1] <NAME>, <NAME>, <NAME>, and <NAME>, "Ambix - A
Suggested Ambisonics Format (revised by <NAME>)," International
Symposium on Ambisonics and Spherical Acoustics,
vol. 3, pp. 1-11, 2011.
Parameters
----------
n : unsigned int
The spherical harmonic order
m : int
The spherical harmonic degree
theta : ndarray, double
The elevation angle
phi : ndarray, double
The azimuth angle
Returns
-------
spherical_harmonic : ndarray, double
The real valued spherical harmonic of order n and degree m
"""
# careful here, scipy uses phi as the elevation angle and
# theta as the azimuth angle
Y_nm_cplx = _spspecial.sph_harm(m, n, phi, theta)
if m == 0:
Y_nm = np.real(Y_nm_cplx)
elif m > 0:
Y_nm = np.real(Y_nm_cplx) * np.sqrt(2)
elif m < 0:
Y_nm = np.imag(Y_nm_cplx) * np.sqrt(2) * (-1.0)**(m+1)
Y_nm *= ((-1.0)**(m))
return Y_nm
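# Illustrative usage sketch: a real valued (N3D, AmbiX) spherical harmonic
# evaluated for a few directions. Helper name assumed; theta is the colatitude.
def _example_spherical_harmonic_real():
    theta = np.array([0.25, 0.5, 0.75]) * np.pi
    phi = np.array([0.0, 0.5, 1.0]) * np.pi
    return spherical_harmonic_real(n=2, m=-1, theta=theta, phi=phi)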
def spherical_harmonic_derivative_phi(n, m, theta, phi):
"""Calculate the derivative of the spherical harmonics with respect to
the azimuth angle phi.
Parameters
----------
n : int
Spherical harmonic order
m : int
Spherical harmonic degree
theta : double
Elevation angle 0 < theta < pi
phi : double
Azimuth angle 0 < phi < 2*pi
Returns
-------
sh_diff : complex double
Spherical harmonic derivative
"""
if m == 0 or n == 0:
res = np.zeros(phi.shape, dtype=complex)
else:
res = spherical_harmonic(n, m, theta, phi) * 1j * m
return res
def spherical_harmonic_gradient_phi(n, m, theta, phi):
"""Calculate the derivative of the spherical harmonics with respect to
the azimuth angle phi divided by sin(theta)
Parameters
----------
n : int
Spherical harmonic order
m : int
Spherical harmonic degree
theta : double
Elevation angle 0 < theta < pi
phi : double
Azimuth angle 0 < phi < 2*pi
Returns
-------
sh_diff : complex double
Spherical harmonic derivative
"""
if m == 0:
res = np.zeros(theta.shape, dtype=complex)
else:
factor = np.sqrt((2*n+1)/(2*n-1))/2
exp_phi = np.exp(1j*phi)
first = np.sqrt((n+m)*(n+m-1)) * exp_phi * \
spherical_harmonic(n-1, m-1, theta, phi)
second = np.sqrt((n-m) * (n-m-1)) / exp_phi * \
spherical_harmonic(n-1, m+1, theta, phi)
Ynm_sin_theta = (-1) * factor * (first + second)
res = Ynm_sin_theta * 1j
return res
def spherical_harmonic_derivative_theta(n, m, theta, phi):
"""Calculate the derivative of the spherical harmonics with respect to
the elevation angle theta.
Parameters
----------
n : int
Spherical harmonic order
m : int
Spherical harmonic degree
theta : double
Elevation angle 0 < theta < pi
phi : double
Azimuth angle 0 < phi < 2*pi
Returns
-------
sh_diff : complex double
Spherical harmonic derivative
"""
if n == 0:
res = np.zeros(theta.shape, dtype=complex)
else:
exp_phi = np.exp(1j*phi)
first = np.sqrt((n-m+1) * (n+m)) * exp_phi * \
spherical_harmonic(n, m-1, theta, phi)
second = np.sqrt((n-m) * (n+m+1)) / exp_phi * \
spherical_harmonic(n, m+1, theta, phi)
res = (first-second)/2 * (-1)
return res
def legendre_function(n, m, z, cs_phase=True):
r"""Legendre function of order n and degree m with argument z.
.. math::
P_n^m(z) = (-1)^m(1-z^2)^{m/2}\frac{d^m}{dz^m}P_n{z}
where the Condon-Shortley phase term $(-1)^m$ is dropped when cs_phase=False
is used.
Parameters
----------
n : int
The order
m : int
The degree
z : ndarray, double
The argument as an array
cs_phase : bool, optional
Whether to include the Condon-Shortley phase term (-1)^m or not
Returns
-------
legendre : ndarray, double
The Legendre function. This will return zeros if $|m| > n$.
Note
----
This is a wrapper for the Legendre function implementation from scipy. The
scipy implementation uses the Condon-Shortley phase. Therefore, the sign
needs to be flipped here for odd degrees when dropping the
Condon-Shortley phase.
"""
z = np.atleast_1d(z)
if np.abs(m) > n:
legendre = np.zeros(z.shape)
else:
legendre = | |
import re
from pathlib import Path
from functools import partial
from multiprocessing import Pool
from itertools import zip_longest
from articlenizer import articlenizer, encode_string, corrections, sentenize, util
from articlenizer.util import chunk_list
def annotation_to_dict(annotation):
"""Read BRAT annotation line by line and transform it in a dictionary.
Events are currently no supported.
Args:
annotation (string): String formatted in BRAT format (content of .ann file)
Returns:
dictionary: contains 'entities' and 'relations' as separated nested dictionaries
"""
annotation_dict = {
'entities': {},
'relations': {}
}
lines = annotation.split('\n')
for line in lines:
if line.rstrip():
line_split = line.split('\t')
if len(line_split) != 3:
raise(RuntimeError('Line in unsupported format: "{}"'.format(line)))
if line.startswith('T'):
ann_label, ann_beg, ann_end = line_split[1].split()
annotation_dict['entities'][line_split[0]] = {
'label': ann_label,
'beg': int(ann_beg),
'end': int(ann_end),
'string': line_split[2].rstrip()
}
elif line.startswith('R'):
rel_type, arg1, arg2 = line_split[1].split()
annotation_dict['relations'][line_split[0]] = {
'label': rel_type,
'arg1': arg1.split(':')[-1],
'arg2': arg2.split(':')[-1]
}
else:
raise(RuntimeError('Got unsupported annotation type in line "{}"'.format(line)))
return annotation_dict
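# Illustrative usage sketch with made-up annotation strings: the parser above
# requires three tab-separated fields per line, which is why the relation line
# carries a trailing tab. Helper name and text are assumptions, not part of
# articlenizer.
def _example_annotation_to_dict():
    ann = (
        "T1\tApplication 0 4\tSPSS\n"
        "T2\tVersion 5 7\t25\n"
        "R1\tVersion_of Arg1:T2 Arg2:T1\t\n"
    )
    return annotation_to_dict(ann)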
def _adjust_strings(annotation, text):
"""Adjust annotation string of an entity.
If entity boundaries are readjusted the resulting annotation string is recalculated by this function.
Dictionary is adjusted in place
Args:
annotation (dictionary): annotation dictionary (result of calling annotation_to_dict)
text (string): plain text corresponding to the annotated entities
"""
for _, entity in annotation['entities'].items():
entity['string'] = text[entity['beg']:entity['end']]
def _remove_characters(annotation, drops):
"""Update annotation boundaries based on given indices where a character was dropped.
Args:
annotation (dictionary): annotation dictionary (result of calling annotation_to_dict)
drops (list): list of integer indicies
"""
for _, entity in annotation['entities'].items():
entity['beg_org'] = entity['beg']
entity['end_org'] = entity['end']
for drop in drops:
drop_position, _ = drop
for _, entity in annotation['entities'].items():
if drop_position <= entity['beg_org']:
entity['beg'] -= 1
entity['end'] -= 1
elif drop_position <= entity['end_org']:
entity['end'] -= 1
def _add_characters(annotation, adds):
"""Update annotation boundaries based on given indices where a character was added.
Args:
annotation (dictionary): annotation dictionary (result of calling annotation_to_dict)
adds (list): list of integer indicies
"""
for _, entity in annotation['entities'].items():
entity['beg_org'] = entity['beg']
entity['end_org'] = entity['end']
for add in adds:
for _, entity in annotation['entities'].items():
if add <= entity['beg_org']:
entity['beg'] += 1
entity['end'] += 1
elif add < entity['end_org']:
entity['end'] += 1
def _replace_segments(annotation, replacements):
"""Update annotation based on string substitution at a specific position (general case)
Args:
annotation (dictionary): annotation dictionary (result of calling annotation_to_dict)
replacements (list): contains lists of [string, replacement, start_ind, end_ind]
"""
for drop in replacements:
drop_string, drop_repl, drop_start, drop_end = drop
drop_diff = len(drop_repl) - len(drop_string)
for _, entity in annotation['entities'].items():
if drop_end < entity['beg']:
entity['beg'] += drop_diff
entity['end'] += drop_diff
elif drop_start >= entity['end']:
pass
elif drop_start < entity['beg'] and drop_end <= entity['end']:
entity['beg'] = drop_start + 1
entity['end'] += drop_diff
elif drop_start >= entity['beg'] and drop_end > entity['end']:
entity['end'] = drop_end + drop_diff
elif drop_start < entity['beg'] and drop_end > entity['end']:
entity['beg'] = drop_start
entity['end'] = drop_end + drop_diff
elif drop_start >= entity['beg'] and drop_end <= entity['end']:
entity['end'] += drop_diff
else:
raise(RuntimeError("Unknown case occurred on {} {}-{} with replacement {}".format(drop_string, drop_start, drop_end, drop_repl)))
def _switch_characters(annotation, switches):
"""Adjust annotation boundaries to switching spans in the text
Args:
annotation (dictionary): annotation dictionary (result of calling annotation_to_dict)
switches ([type]): list of replacement span tuples [(b1, e1), (b2, e2)] with e1 <= b2
"""
for switch in switches:
for _, entity in annotation['entities'].items():
if entity['beg'] >= switch[0][0] and entity['end'] <= switch[0][1]:
entity['beg'] += (switch[1][1] - switch[1][0])
entity['end'] += (switch[1][1] - switch[1][0])
elif entity['beg'] >= switch[1][0] and entity['end'] <= switch[1][1]:
entity['beg'] -= (switch[0][1] - switch[0][0])
entity['end'] -= (switch[0][1] - switch[0][0])
elif ( entity['beg'] < switch[0][0] and entity['end'] > switch[0][0] ) or ( entity['beg'] < switch[0][1] and entity['end'] > switch[0][1] ) or ( entity['beg'] < switch[1][0] and entity['end'] > switch[1][0] ) or ( entity['beg'] < switch[1][1] and entity['end'] > switch[1][1] ):
print(RuntimeWarning("For {} switch and {} entity there is an overlap that cannot be handled.".format(switch, entity)))
def get_sentence_entities(beg, end, annotations):
"""Get annotation for each individual sentence and adjust indices to start from 0 for each sentence.
Args:
beg (int): begin index of sentence in text
end (int): end index of sentence in text
annotation (dictionary): annotation dictionary (result of calling annotation_to_dict)
Returns:
dictionary: entities
"""
entities = {}
for k, v in annotations['entities'].items():
if v['beg'] >= beg and v['end'] <= end + 1:
entities[k] = {
'label': v['label'],
'beg': v['beg'] - beg,
'end': v['end'] - beg,
'string': v['string']
}
elif v['beg'] <= end and v['end'] > end:
print(RuntimeWarning("Annotation span stretches over more than one sentence according to the sentence split: {} and {}".format(k, v)))
entities[k] = {
'label': v['label'],
'beg': v['beg'] - beg,
'end': end - 1 - beg,
'string': v['string'][:v['end']-end-1]
}
elif v['beg'] <= beg and v['end'] >= beg:
print(RuntimeWarning("Annotation span stretches over more than one sentence, ingoring the second part!"))
#print(annotations)
entities = {k: v for k, v in sorted(entities.items(), key=lambda item: item[1]['beg'])}
for idx, (k, v) in enumerate(entities.items()):
v['idx'] = idx
return entities
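# Illustrative usage sketch: document level entity offsets are re-based to a
# sentence spanning characters 10-40; the second entity lies outside the
# sentence and is dropped. Helper name and values are assumptions.
def _example_get_sentence_entities():
    annotations = {
        "entities": {
            "T1": {"label": "Application", "beg": 12, "end": 16, "string": "SPSS"},
            "T2": {"label": "Version", "beg": 50, "end": 52, "string": "25"},
        },
        "relations": {},
    }
    return get_sentence_entities(beg=10, end=40, annotations=annotations)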
def get_sentence_relations(annotations, entities):
"""Get relations from annotation dictonary and combine it with information from the corresponding entities
Args:
annotations (dictionary): annotation dictionary (result of calling annotation_to_dict)
entities (dictionary): entity annotation (result of get_sentence_entities)
Returns:
dictionary: extracted and enhanced relations
"""
ann_keys = list(entities.keys())
relations = {}
for k, v in annotations['relations'].items():
if v['arg1'] in ann_keys and v['arg2'] in ann_keys:
relations[k] = {
'label': v['label'],
'arg1_old': v['arg1'],
'arg2_old': v['arg2'],
'arg1': entities[v['arg1']]['string'],
'arg2': entities[v['arg2']]['string'],
'pos1': entities[v['arg1']]['new_beg'] if 'new_beg' in entities[v['arg1']] else entities[v['arg1']]['beg'],
'pos2': entities[v['arg2']]['new_beg'] if 'new_beg' in entities[v['arg2']] else entities[v['arg2']]['beg'],
'ent1': entities[v['arg1']]['idx'],
'ent2': entities[v['arg2']]['idx']
}
elif (v['arg1'] in ann_keys and not v['arg2'] in ann_keys) or (v['arg2'] in ann_keys and not v['arg1'] in ann_keys):
pass
#print(RuntimeWarning("Relation {}: {} spans over two sentences".format(k, v)))
return relations
def bio_annotate(tokens, entities):
"""Create BIO annotation for tokens and entities.
Args:
tokens (list): sentence split in token strings
entities (dictionary): entity annotation for a given sentence
Returns:
list, list, list: adjusted tokens, token names, bio labels
"""
out_tokens = []
out_names = []
out_labels = []
offset = 0
new_offset = 0
space_before = True
for token in tokens:
if token.rstrip() and not space_before:
new_offset += 1
current_end = offset + len(token)
current_label = 'O'
token_name = 'O'
if token.rstrip():
for ann_key, ann in entities.items():
if offset == ann['beg']:
if current_label != 'O':
raise(RuntimeError("Multiple annotations for a span."))
current_label = 'B-{}'.format(ann['label'])
token_name = ann_key
ann['new_beg'] = new_offset
ann['new_end'] = new_offset + len(token)
elif offset > ann['beg'] and current_end <= ann['end']:
if current_label != 'O':
raise(RuntimeError("Multiple annotations for a span."))
current_label = 'I-{}'.format(ann['label'])
token_name = ann_key
ann['new_end'] = new_offset + len(token)
elif offset < ann['beg'] and current_end > ann['beg'] or offset < ann['end'] and current_end > ann['end']:
print(RuntimeWarning("Annotation does not match the token split, token: {}, entities: {}".format(token, entities)))
if out_labels[-1].startswith('B-') and out_labels[-1].split('-', maxsplit=1)[-1] == ann['label']:
print("Treating as I..")
current_label = 'I-{}'.format(ann['label'])
token_name = ann_key
ann['new_end'] = new_offset + len(token)
else:
print("Treating as B..")
current_label = 'B-{}'.format(ann['label'])
token_name = ann_key
ann['new_beg'] = new_offset
ann['new_end'] = new_offset + len(token)
else:
pass
out_names.append(token_name)
out_labels.append(current_label)
out_tokens.append(token)
offset = current_end
new_offset += len(token)
if not token.rstrip():
space_before = True
else:
space_before = False
return out_tokens, out_names, out_labels
def brat_to_bio(text, annotation, process_unicode=True, replace_math=True, correct=True, corr_cite=True):
"""Transform a document annotated in BRAT format into a sentence based BIO format that also considers relations.
Args:
text (string): plain text of the BRAT annotation (content of .txt file)
annotation (string): BRAT annotation (content of .ann file)
process_unicode (bool, optional): replace unicodes. Defaults to True.
replace_math (bool, optional): replace math equations. Defaults to True.
correct (bool, optional): replace string errors. Defaults to True.
corr_cite (bool, optional): correct citation errors. Defaults to True.
Returns:
list of dictionaries: sentences information for each sentence in text
"""
annotation_dict = annotation_to_dict(annotation)
if process_unicode:
text, replacements = encode_string.handle_unicode_characters(text)
_remove_characters(annotation_dict, replacements)
_adjust_strings(annotation_dict, text)
if replace_math:
text, replacements = corrections.remove_math_expr(text)
_replace_segments(annotation_dict, replacements)
_adjust_strings(annotation_dict, text)
if correct:
text, replacements = corrections.correct_with_index(text)
_add_characters(annotation_dict, replacements)
_adjust_strings(annotation_dict, text)
if corr_cite:
text, switched_segments = corrections.correct_citations(text)
_switch_characters(annotation_dict, switched_segments)
_adjust_strings(annotation_dict, text)
| |
= None, reply_markup: Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, ] = None, parse_mode: str = None, caption_entities: list = None, duration: int = None, performer: str = None, title: str = None, thumb: Union[types.InputFile, str, ] = None, disable_notification: bool = None, reply_to_message_id: int = None, allow_sending_without_reply: bool = None):
"""Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .MP3 or .M4A format. On success, the sent Message is returned. Bots can currently send audio files of up to 50 MB in size, this limit may be changed in the future.
For sending voice messages, use the sendVoice method instead. [See Telegram API](https://core.telegram.org/bots/api#sendaudio)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target chat or username of the target channel (in the format @channelusername)
- `audio` :`Union[types.InputFile,str,]` Audio file to send. Pass a file_id as String to send an audio file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an audio file from the Internet, or upload a new one using multipart/form-data. More info on Sending Files »
- `caption` :`str` Audio caption, 0-1024 characters after entities parsing
- `reply_markup` :`Union[types.InlineKeyboardMarkup,types.ReplyKeyboardMarkup,types.ReplyKeyboardRemove,types.ForceReply,]` Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
- `parse_mode` :`str` Mode for parsing entities in the audio caption. See formatting options for more details.
- `caption_entities` :`list` A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode
- `duration` :`int` Duration of the audio in seconds
- `performer` :`str` Performer
- `title` :`str` Track name
- `thumb` :`Union[types.InputFile,str,]` Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More info on Sending Files »
- `disable_notification` :`bool` Sends the message silently. Users will receive a notification with no sound.
- `reply_to_message_id` :`int` If the message is a reply, ID of the original message
- `allow_sending_without_reply` :`bool` Pass True, if the message should be sent even if the specified replied-to message is not found
**Returns:**
- A `tuple`, on success a `types.Message` as first member and a botApiResponse object as second member
"""
if parse_mode is None:
parse_mode = self.default_parse_mode
if disable_notification is None:
disable_notification = self.default_disable_notifications
data = {
"chat_id": chat_id,
"audio": helper.toDict(audio, True),
"caption": caption,
"parse_mode": parse_mode,
"caption_entities": caption_entities,
"duration": duration,
"performer": performer,
"title": title,
"thumb": helper.toDict(thumb, True),
"disable_notification": disable_notification,
"reply_to_message_id": reply_to_message_id,
"allow_sending_without_reply": allow_sending_without_reply,
"reply_markup": helper.toDict(reply_markup, True),
}
return self.response(self.sendRequest("sendAudio", data), types.Message)
def sendDocument(self, chat_id: Union[int, str, ], document: Union[types.InputFile, str, ], caption: str = None, reply_markup: Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, ] = None, parse_mode: str = None, thumb: Union[types.InputFile, str, ] = None, caption_entities: list = None, disable_content_type_detection: bool = None, disable_notification: bool = None, reply_to_message_id: int = None, allow_sending_without_reply: bool = None):
"""Use this method to send general files. On success, the sent Message is returned. Bots can currently send files of any type of up to 50 MB in size, this limit may be changed in the future. [See Telegram API](https://core.telegram.org/bots/api#senddocument)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target chat or username of the target channel (in the format @channelusername)
- `document` :`Union[types.InputFile,str,]` File to send. Pass a file_id as String to send a file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. More info on Sending Files »
- `caption` :`str` Document caption (may also be used when resending documents by file_id), 0-1024 characters after entities parsing
- `reply_markup` :`Union[types.InlineKeyboardMarkup,types.ReplyKeyboardMarkup,types.ReplyKeyboardRemove,types.ForceReply,]` Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
- `parse_mode` :`str` Mode for parsing entities in the document caption. See formatting options for more details.
- `thumb` :`Union[types.InputFile,str,]` Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More info on Sending Files »
- `caption_entities` :`list` A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode
- `disable_content_type_detection` :`bool` Disables automatic server-side content type detection for files uploaded using multipart/form-data
- `disable_notification` :`bool` Sends the message silently. Users will receive a notification with no sound.
- `reply_to_message_id` :`int` If the message is a reply, ID of the original message
- `allow_sending_without_reply` :`bool` Pass True, if the message should be sent even if the specified replied-to message is not found
**Returns:**
- A `tuple`, on success a `types.Message` as first member and a botApiResponse object as second member
"""
if parse_mode is None:
parse_mode = self.default_parse_mode
if disable_notification is None:
disable_notification = self.default_disable_notifications
data = {
"chat_id": chat_id,
"document": helper.toDict(document, True),
"thumb": helper.toDict(thumb, True),
"caption": caption,
"parse_mode": parse_mode,
"caption_entities": caption_entities,
"disable_content_type_detection": disable_content_type_detection,
"disable_notification": disable_notification,
"reply_to_message_id": reply_to_message_id,
"allow_sending_without_reply": allow_sending_without_reply,
"reply_markup": helper.toDict(reply_markup, True),
}
return self.response(self.sendRequest("sendDocument", data), types.Message)
def sendVideo(self, chat_id: Union[int, str, ], video: Union[types.InputFile, str, ], caption: str = None, reply_markup: Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, ] = None, parse_mode: str = None, duration: int = None, width: int = None, height: int = None, thumb: Union[types.InputFile, str, ] = None, caption_entities: list = None, supports_streaming: bool = None, disable_notification: bool = None, reply_to_message_id: int = None, allow_sending_without_reply: bool = None):
"""Use this method to send video files, Telegram clients support mp4 videos (other formats may be sent as Document). On success, the sent Message is returned. Bots can currently send video files of up to 50 MB in size, this limit may be changed in the future. [See Telegram API](https://core.telegram.org/bots/api#sendvideo)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target chat or username of the target channel (in the format @channelusername)
- `video` :`Union[types.InputFile,str,]` Video to send. Pass a file_id as String to send a video that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a video from the Internet, or upload a new video using multipart/form-data. More info on Sending Files »
- `caption` :`str` Video caption (may also be used when resending videos by file_id), 0-1024 characters after entities parsing
- `reply_markup` :`Union[types.InlineKeyboardMarkup,types.ReplyKeyboardMarkup,types.ReplyKeyboardRemove,types.ForceReply,]` Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
- `parse_mode` :`str` Mode for parsing entities in the video caption. See formatting options for more details.
- `duration` :`int` Duration of sent video in seconds
- `width` :`int` Video width
- `height` :`int` Video height
- `thumb` :`Union[types.InputFile,str,]` Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. | |
self._validate_dimensions(labels, outputs)
labels = self.binarize(labels, self.label_thresh)
outputs = self.binarize(outputs, self.output_thresh)
# Calculate and sum each intersections and unions
# zero if Truth=0 or Prediction=0
intersection_sum = (outputs & labels).float().sum()
union_sum = (outputs | labels).float().sum() # zero if both are 0
# Smoothed division to avoid division by 0:
set_iou_tensor = self.smooth_division(intersection_sum, union_sum)
return set_iou_tensor
class IoU(AbstractIoUMetric):
r"""Calc sample-wise intersection over union (IoU) values output batch.
The intersection over union for one instance calculates as
.. math::
\frac{intersection}{union} = \frac{TP} {(TP + FP + FN)}
with
- FP / TP: false / true positives,
i.e. in- / correctly predicted foreground pixels
- FN / TN: false / true negatives,
i.e. in- / correctly predicted background pixels
The following tensor dimensions are allowed:
- 1D: The tensor is assumed to be 1D without batch dimension.
- 2D: The tensor is assumed to be 2D without batch dimension.
- >2D: The tensor is assumed to be 2D with batch dimension 0,
width dim. -1, height dim. -2.
"""
def __init__(
self,
reduction: Union[
BatchReduction, Callable[[torch.Tensor], torch.Tensor]
] = BatchReduction.mean,
output_thresh: float = 0.5, label_thresh: float = 0.,
smooth: float = 1e-6):
"""Init.
:param reduction: reduction method to aggregate the instance-wise
results of the batch;
must be a callable on a tensor which reduces the 0th dimension;
see BatchReduction instances for examples
:param output_thresh: threshold for binarizing the output
:param label_thresh: threshold for binarizing the labels
:param smooth: summand to smooth the IoU value (evade division by 0)
"""
super(IoU, self).__init__(output_thresh=output_thresh,
label_thresh=label_thresh,
smooth=smooth)
self.reduction: Union[
BatchReduction,
Callable[[torch.Tensor], torch.Tensor]
] = reduction
"""Reduction method to aggregate the instance-wise results of the
batch into one value."""
def forward(self, outputs: torch.Tensor,
labels: torch.Tensor) -> torch.Tensor:
"""Sample-wise reduced IoU between binarized in- and output.
Applied reduction is :py:attr:`reduction`.
:param outputs: Output tensors of shape ``(BATCH x H x W)``;
values must be in [0, 1], and a pixel value > output_thresh means
it is foreground
:param labels: Label tensors of shape ``(BATCH x H x W)``;
values must be in [0, 1], and a pixel value > label_thresh means
it is foreground
:return: tensor containing IoU for each sample along axis 0 reduced
by reduction scheme
"""
# Validate, binarize and turn into integer tensors:
self._validate_dimensions(labels, outputs)
labels = self.binarize(labels, self.label_thresh)
outputs = self.binarize(outputs, self.output_thresh)
# Get axes that describe width (and height), i.e. 2D or 1D areas to
# test on IoU
area_axes = self.get_area_axes(outputs)
# Calculate IoU per sample
# intersections for each area:
intersections = (outputs & labels).float().sum(area_axes)
# unions for each area:
unions = (outputs | labels).float().sum(area_axes)
# smoothed set IoU for each area:
ious = self.smooth_division(intersections, unions)
return self.reduction(ious)
@property
def settings(self):
"""Settings dict for reproduction of instance."""
return dict(**super(IoU, self).settings, reduction=self.reduction)
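# Illustrative usage sketch (helper name assumed; torch is imported by this
# module, as the type hints above indicate): sample-wise IoU for a batch of two
# 4x4 masks, reduced with the default batch mean. forward() is called directly
# to stay agnostic of the exact base class.
def _example_iou_metric():
    outputs = torch.zeros(2, 4, 4)
    labels = torch.zeros(2, 4, 4)
    outputs[0, :2, :2] = 1.0  # predicted foreground in sample 0
    labels[0, :2, :] = 1.0    # ground truth foreground in sample 0
    metric = IoU()            # output_thresh=0.5, label_thresh=0.0
    return metric.forward(outputs, labels)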
class AbstractIoULoss(AbstractIoULike):
"""Shared settings for intersection over union based losses.
The difference to IoU based metrics is that only the targets are binarized,
not the outputs.
Thus, the function on the DNN outputs stays smoothly differentiable.
"""
def __init__(self,
reduction: Union[
BatchReduction, Callable[[torch.Tensor], torch.Tensor]
] = BatchReduction.mean,
target_thresh: float = 0.):
"""Init.
:param target_thresh: threshold to binarize targets
:param reduction: reduction method to aggregate the instance-wise
results of the batch;
must be a callable on a tensor which reduces the 0th dimension;
for examples see BatchReduction instances
"""
super(AbstractIoULoss, self).__init__()
self.target_thresh: float = target_thresh
"""Threshold to binarize the targets."""
self.reduction: Union[BatchReduction,
Callable[[torch.Tensor], torch.Tensor]] = \
reduction
"""Reduction method to aggregate the instance-wise results of the batch.
"""
@property
def settings(self) -> Dict[str, Any]:
"""Settings dict to reproduce instance."""
return dict(target_thresh=self.target_thresh, reduction=self.reduction)
@abc.abstractmethod
def forward(self, *inp: Any, **kwargs: Any) -> Any:
"""Loss function definition in sub-classes."""
raise NotImplementedError()
class TverskyLoss(AbstractIoULoss): # TODO: tests
# noinspection SpellCheckingInspection
r"""Calc Tversky loss (balanced Dice loss) for given outputs amd targets.
The Tversky loss [Salehi2017]_ works on masks of prediction and ground
truth (gt) indicating the foreground (fg) area.
The masks may be binary, non-binary or mixed.
The target masks are binarized.
Given a balancing factor b, the loss is calculated for one instance as
.. math::
:label: tversky
\text{Tversky} = \frac{TP} {(TP + b\cdot FP + (1-b) \cdot FN)}
with
- TP: true positives,
respectively the intersection of predicted fg area and gt fg area
- FP: false positives,
respectively the predicted fg area minus the gt fg area
- FN: false negatives,
respectively the gt fg area minus the predicted fg area
For b=0.5 this is regular Dice loss.
The following tensor dimensions are allowed:
- 1D: The tensor is assumed to be 1D without batch dimension.
- 2D: The tensor is assumed to be 2D without batch dimension.
- >2D: The tensor is assumed to be 2D with batch dimension 0,
width dim. -1, height dim. -2.
.. [Salehi2017] <NAME>, <NAME>, and <NAME>.
Tversky loss function for image segmentation using 3D fully
convolutional deep networks, 2017.
https://arxiv.org/abs/1706.05721
"""
def __init__(self,
factor_false_positives: float = 0.7,
reduction: Union[
BatchReduction, Callable[[torch.Tensor], torch.Tensor]
] = BatchReduction.mean,
target_thresh: float = 0.):
"""Init.
:param target_thresh: threshold to binarize targets
:param factor_false_positives: factor in [0,1] applied to the false
positives (see Tversky loss formula :math:numref:`tversky`)
:param reduction: reduction method to aggregate the instance-wise
results of the batch;
must be a callable on a tensor which reduces the 0th dimension;
for examples see instances of
:py:class:`~hybrid_learning.concepts.kpis.BatchReduction`.
"""
# Value check:
if not 0 <= factor_false_positives <= 1:
raise ValueError(("factor_false_positives must be in [0,1] but "
"was {}").format(factor_false_positives))
super(TverskyLoss, self).__init__(target_thresh=target_thresh,
reduction=reduction)
self.factor_false_positives: float = factor_false_positives
"""Factor applied to the false positives"""
@property
def settings(self) -> Dict[str, Any]:
"""Settings to reproduce the instance."""
return dict(factor_false_positives=self.factor_false_positives,
**super(TverskyLoss, self).settings)
def forward(self, outputs: torch.Tensor,
targets: torch.Tensor) -> torch.Tensor:
"""Tversky loss :math:numref:`tversky` calculation.
:param outputs: input tensor (at least 1D); items must be floats
in the range [0,1]
:param targets: targets to compare outputs with (at least 1D;
same dimension as input)
:return: aggregated Tversky loss :math:numref:`tversky` of outputs
for given targets
"""
# Validate dimensions and binarize targets:
self._validate_dimensions(outputs, targets)
targets: torch.Tensor = self.binarize(targets, self.target_thresh)
# Get axes to work on (i.e. 2D or 1D areas to test on IoU)
area_axes: Tuple[int] = self.get_area_axes(outputs)
# Calculate Tversky loss
factor_false_negatives = 1.0 - self.factor_false_positives
true_pos = (targets * outputs).sum(area_axes)
false_pos = (- (targets - 1) * outputs).sum(area_axes)
false_neg = (- targets * (outputs - 1)).sum(area_axes)
tversky = (true_pos / (true_pos +
self.factor_false_positives * false_pos +
factor_false_negatives * false_neg))
loss = - tversky + 1
# reduction
loss = self.reduction(loss)
return loss
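# Illustrative usage sketch (helper name assumed): Tversky loss on a random toy
# batch; with factor_false_positives=0.5 the term reduces to a plain Dice loss.
def _example_tversky_loss():
    outputs = torch.rand(2, 8, 8)                  # soft predictions in [0, 1]
    targets = (torch.rand(2, 8, 8) > 0.5).float()  # binary ground truth masks
    loss_fn = TverskyLoss(factor_false_positives=0.7)
    return loss_fn.forward(outputs, targets)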
class Net2VecLoss(AbstractIoULoss): # TODO: tests
# noinspection SpellCheckingInspection,SpellCheckingInspection
r"""Simplified intersection over union as loss.
This loss is the one used for the
`original implementation <https://github.com/ruthcfong/net2vec>`_ of the
Net2Vec framework [Fong2018]_
*(even though this is a rewrite and no code is used from there)*.
It works on masks of prediction and ground truth (gt) indicating the
foreground (fg) area.
The masks may be binary, non-binary or mixed.
The target masks are binarized.
For an instance, with balancing factor b, it calculates as
.. math::
:label: net2vec
\text{Net2Vec}(instance) = b \cdot TP + (1-b) \cdot TN
with
- TP: true positives, resp. the intersection of predicted fg area and
gt fg area
- TN: true negatives, resp. the intersection of predicted background (bg)
area and gt bg area
The following tensor dimensions are allowed:
- 1D: The tensor is assumed to be 1D without batch dimension.
- 2D: The tensor is assumed to be 2D without batch dimension.
- >2D: The tensor is assumed to be 2D with batch dimension 0,
width dim. -1, height dim. -2.
.. [Fong2018] <NAME> and <NAME>, "Net2Vec: Quantifying and explaining
how concepts are encoded by filters in deep neural networks,"
in Proc. 2018 IEEE Conf. Comput. Vision and Pattern Recognition,
Salt Lake City, UT, USA, 2018, pp. 8730-8738,
https://arxiv.org/abs/1801.03454
"""
def __init__(
self,
factor_pos_class: float = 0.5,
reduction: Union[
BatchReduction, Callable[[torch.Tensor], torch.Tensor]
] = BatchReduction.mean,
target_thresh: float = 0.):
"""Init.
:param target_thresh: threshold to binarize targets
:param factor_pos_class: balancing factor :math:`b` | |
{"gene1"},
{"gene1", "featgrp1"},
{},
),
)
def test_query_by_identifiers(self, ids):
obj = self.annot.to_annotation_collection()
r = obj.query_by_feature_identifiers(ids)
if r.is_empty:
assert len(ids) == 0
else:
assert {x.gene_id for x in r.genes} | {x.feature_collection_id for x in r.feature_collections} == ids
@pytest.mark.parametrize(
"i",
(
"gene1",
"featgrp1",
),
)
def test_query_by_identifiers_str(self, i):
obj = self.annot.to_annotation_collection()
r = obj.query_by_feature_identifiers(i)
assert {x.gene_id for x in r.genes} | {x.feature_collection_id for x in r.feature_collections} == {i}
def test_query_by_identifiers_with_extraneous(self):
obj = self.annot.to_annotation_collection()
r = obj.query_by_feature_identifiers(["gene1", "abc"])
assert len(r.genes) == 1 and r.genes[0].gene_id == "gene1"
def test_hierarchical_children_guids(self):
obj = self.annot.to_annotation_collection()
assert obj.hierarchical_children_guids == {
UUID("c0596ed0-9583-d323-13b1-593e0b674414"): {
UUID("6fc905fb-4221-0283-adbe-d37981818699"),
UUID("d102e6e0-4f81-df14-1e07-4a09a2a6fa60"),
},
UUID("cb4d2cd9-25b6-f7c5-8eb5-9faf58e64bd0"): {
UUID("88ffa8c0-1761-5b2a-8468-69a7ecfa1265"),
UUID("f0a1f091-8e54-3f2a-812d-029481ae22fc"),
},
UUID("ab3404f4-63a3-be08-362c-28ea7ed56edb"): {UUID("f5c3cdbc-ee03-7bf9-b726-606b28778299")},
}
def test_query_by_interval_guids(self):
obj = self.annot.to_annotation_collection()
# only one isoform of gene1
a = obj.query_by_interval_guids(UUID("6fc905fb-4221-0283-adbe-d37981818699"))
assert len(a.genes) == 1 and a.genes[0].identifiers == {"gene1"} and len(a.genes[0].transcripts) == 1
# both isoforms of gene1
b = obj.query_by_interval_guids(
[UUID("6fc905fb-4221-0283-adbe-d37981818699"), UUID("d102e6e0-4f81-df14-1e07-4a09a2a6fa60")]
)
assert len(b.genes) == 1 and b.genes[0].identifiers == {"gene1"} and len(b.genes[0].transcripts) == 2
# one isoform of gene1 and one isoform of featgrp2
c = obj.query_by_interval_guids(
[UUID("6fc905fb-4221-0283-adbe-d37981818699"), UUID("f5c3cdbc-ee03-7bf9-b726-606b28778299")]
)
assert len(c.genes) == 1 and c.genes[0].identifiers == {"gene1"} and len(c.genes[0].transcripts) == 1
assert len(c.feature_collections) == 1 and c.feature_collections[0].identifiers == {"featgrp2"}
def test_interval_guids_to_collections(self):
obj = self.annot.to_annotation_collection()
m = obj.interval_guids_to_collections
m = {key: val.to_dict() for key, val in m.items()}
assert m == {
UUID("6fc905fb-4221-0283-adbe-d37981818699"): {
"transcripts": [
{
"exon_starts": [12],
"exon_ends": [28],
"strand": "PLUS",
"cds_starts": [15],
"cds_ends": [19],
"cds_frames": ["ZERO"],
"qualifiers": None,
"is_primary_tx": None,
"transcript_id": None,
"transcript_symbol": "tx1",
"transcript_type": None,
"sequence_name": None,
"sequence_guid": None,
"protein_id": None,
"product": None,
"transcript_guid": None,
"transcript_interval_guid": UUID("6fc905fb-4221-0283-adbe-d37981818699"),
},
{
"exon_starts": [12, 17, 22],
"exon_ends": [16, 20, 25],
"strand": "PLUS",
"cds_starts": [14, 17, 22],
"cds_ends": [16, 20, 23],
"cds_frames": ["ZERO", "TWO", "TWO"],
"qualifiers": None,
"is_primary_tx": None,
"transcript_id": None,
"transcript_symbol": "tx2",
"transcript_type": None,
"sequence_name": None,
"sequence_guid": None,
"protein_id": None,
"product": None,
"transcript_guid": None,
"transcript_interval_guid": UUID("d102e6e0-4f81-df14-1e07-4a09a2a6fa60"),
},
],
"gene_id": "gene1",
"gene_symbol": None,
"gene_type": None,
"locus_tag": None,
"qualifiers": None,
"sequence_name": None,
"sequence_guid": None,
"gene_guid": UUID("c0596ed0-9583-d323-13b1-593e0b674414"),
},
UUID("d102e6e0-4f81-df14-1e07-4a09a2a6fa60"): {
"transcripts": [
{
"exon_starts": [12],
"exon_ends": [28],
"strand": "PLUS",
"cds_starts": [15],
"cds_ends": [19],
"cds_frames": ["ZERO"],
"qualifiers": None,
"is_primary_tx": None,
"transcript_id": None,
"transcript_symbol": "tx1",
"transcript_type": None,
"sequence_name": None,
"sequence_guid": None,
"protein_id": None,
"product": None,
"transcript_guid": None,
"transcript_interval_guid": UUID("6fc905fb-4221-0283-adbe-d37981818699"),
},
{
"exon_starts": [12, 17, 22],
"exon_ends": [16, 20, 25],
"strand": "PLUS",
"cds_starts": [14, 17, 22],
"cds_ends": [16, 20, 23],
"cds_frames": ["ZERO", "TWO", "TWO"],
"qualifiers": None,
"is_primary_tx": None,
"transcript_id": None,
"transcript_symbol": "tx2",
"transcript_type": None,
"sequence_name": None,
"sequence_guid": None,
"protein_id": None,
"product": None,
"transcript_guid": None,
"transcript_interval_guid": UUID("d102e6e0-4f81-df14-1e07-4a09a2a6fa60"),
},
],
"gene_id": "gene1",
"gene_symbol": None,
"gene_type": None,
"locus_tag": None,
"qualifiers": None,
"sequence_name": None,
"sequence_guid": None,
"gene_guid": UUID("c0596ed0-9583-d323-13b1-593e0b674414"),
},
UUID("88ffa8c0-1761-5b2a-8468-69a7ecfa1265"): {
"feature_intervals": [
{
"interval_starts": [12],
"interval_ends": [15],
"strand": "PLUS",
"qualifiers": None,
"feature_id": None,
"feature_name": "feat1",
"feature_types": ["a", "b"],
"sequence_name": None,
"sequence_guid": None,
"feature_interval_guid": UUID("88ffa8c0-1761-5b2a-8468-69a7ecfa1265"),
"feature_guid": None,
"is_primary_feature": None,
},
{
"interval_starts": [12, 17, 22],
"interval_ends": [16, 20, 25],
"strand": "PLUS",
"qualifiers": None,
"feature_id": None,
"feature_name": "feat2",
"feature_types": ["b"],
"sequence_name": None,
"sequence_guid": None,
"feature_interval_guid": UUID("f0a1f091-8e54-3f2a-812d-029481ae22fc"),
"feature_guid": None,
"is_primary_feature": None,
},
],
"feature_collection_name": None,
"feature_collection_id": "featgrp1",
"feature_collection_type": None,
"locus_tag": None,
"qualifiers": None,
"sequence_name": None,
"sequence_guid": None,
"feature_collection_guid": UUID("cb4d2cd9-25b6-f7c5-8eb5-9faf58e64bd0"),
},
UUID("f0a1f091-8e54-3f2a-812d-029481ae22fc"): {
"feature_intervals": [
{
"interval_starts": [12],
"interval_ends": [15],
"strand": "PLUS",
"qualifiers": None,
"feature_id": None,
"feature_name": "feat1",
"feature_types": ["a", "b"],
"sequence_name": None,
"sequence_guid": None,
"feature_interval_guid": UUID("88ffa8c0-1761-5b2a-8468-69a7ecfa1265"),
"feature_guid": None,
"is_primary_feature": None,
},
{
"interval_starts": [12, 17, 22],
"interval_ends": [16, 20, 25],
"strand": "PLUS",
"qualifiers": None,
"feature_id": None,
"feature_name": "feat2",
"feature_types": ["b"],
"sequence_name": None,
"sequence_guid": None,
"feature_interval_guid": UUID("f0a1f091-8e54-3f2a-812d-029481ae22fc"),
"feature_guid": None,
"is_primary_feature": None,
},
],
"feature_collection_name": None,
"feature_collection_id": "featgrp1",
"feature_collection_type": None,
"locus_tag": None,
"qualifiers": None,
"sequence_name": None,
"sequence_guid": None,
"feature_collection_guid": UUID("cb4d2cd9-25b6-f7c5-8eb5-9faf58e64bd0"),
},
UUID("f5c3cdbc-ee03-7bf9-b726-606b28778299"): {
"feature_intervals": [
{
"interval_starts": [35],
"interval_ends": [40],
"strand": "MINUS",
"qualifiers": None,
"feature_id": None,
"feature_name": "feat3",
"feature_types": ["a"],
"sequence_name": None,
"sequence_guid": None,
"feature_interval_guid": UUID("f5c3cdbc-ee03-7bf9-b726-606b28778299"),
"feature_guid": None,
"is_primary_feature": None,
}
],
"feature_collection_name": None,
"feature_collection_id": "featgrp2",
"feature_collection_type": None,
"locus_tag": None,
"qualifiers": None,
"sequence_name": None,
"sequence_guid": None,
"feature_collection_guid": UUID("ab3404f4-63a3-be08-362c-28ea7ed56edb"),
},
}
def test_extract_sequence(self):
obj = self.annot.to_annotation_collection(parent_or_seq_chunk_parent=parent_genome)
seq = obj.get_reference_sequence()
assert str(seq) == genome[2:40]
# with inferred range, now sequence is cut down based on bounds of transcripts
obj = self.annot_no_range.to_annotation_collection(parent_or_seq_chunk_parent=parent_genome)
seq = obj.get_reference_sequence()
assert str(seq) == genome[12:40]
# however, if we use a parent whose location is provided, then we retain more information
obj = self.annot_no_range.to_annotation_collection(parent_or_seq_chunk_parent=parent_genome_with_location)
seq = obj.get_reference_sequence()
assert str(seq) == genome
def test_query_by_ids(self):
obj = self.annot.to_annotation_collection()
my_ids = list(obj.hierarchical_children_guids.keys())
# query them all
assert obj.query_by_guids(my_ids).children_guids == set(my_ids)
# query one
assert obj.query_by_guids([my_ids[0]]).children_guids == {my_ids[0]}
# query none
assert obj.query_by_guids([]).children_guids == set()
def test_gff3_export(self, test_data_dir):
obj = self.annot.to_annotation_collection()
# populate sequence names; normally this is done via the model constructors
obj.sequence_name = "chr1"
for item in obj:
item.sequence_name = "chr1"
for subitem in item:
subitem.sequence_name = "chr1"
if hasattr(subitem, "cds"):
subitem.cds.sequence_name = "chr1"
with open(test_data_dir / "collection_gff3_export_chromosome_coordinates.gff") as fh:
assert fh.read() == "\n".join(str(x) for x in obj.to_gff())
def test_gff3_export_chunk_relative(self, test_data_dir):
obj = self.annot.to_annotation_collection(parent_genome_10_49)
# populate sequence names; normally this is done via the model constructors
obj.sequence_name = "chr1"
for item in obj:
item.sequence_name = "chr1"
for subitem in item:
subitem.sequence_name = "chr1"
if hasattr(subitem, "cds"):
subitem.cds.sequence_name = "chr1"
with open(test_data_dir / "collection_gff3_export_chunk_relative.gff") as fh:
assert fh.read() == "\n".join(str(x) for x in obj.to_gff(chromosome_relative_coordinates=False))
def test_gff3_export_exception(self, test_data_dir):
"""Cannot export to GFF3 in relative coordinates without having sequence."""
obj = self.annot.to_annotation_collection()
obj.sequence_name = "chr1"
for item in obj:
item.sequence_name = "chr1"
for subitem in item:
subitem.sequence_name = "chr1"
with pytest.raises(NoSuchAncestorException):
_ = "\n".join(str(x) for x in obj.to_gff(chromosome_relative_coordinates=False))
def test_reset_parent_noop(self):
obj = self.annot.to_annotation_collection()
# no-op
obj._reset_parent()
# equivalent
obj._reset_parent(None)
def test_reset_parent_null(self):
obj = self.annot.to_annotation_collection(parent_genome)
for child in obj:
assert child._location.parent
obj._reset_parent()
for child in obj:
assert not child._location.parent
def test_reset_parent(self):
obj = self.annot.to_annotation_collection(parent_genome)
obj2 = obj.query_by_position(20, 40)
obj2._reset_parent(parent_genome)
# the coordinates are now broken, so the sequences are wrong
for rec in obj2:
orig_rec = next(obj.query_by_guids([rec.guid]).__iter__()).feature_intervals[0]
assert orig_rec.get_spliced_sequence() != rec.feature_intervals[0].get_spliced_sequence()
def test_iterator(self):
obj = self.annot.to_annotation_collection(parent_genome)
assert list(obj.iter_children()) == list(obj)
assert len(list(obj)) == 3
def test_nonstandard_parents(self):
obj0 = self.annot.to_annotation_collection(parent_genome)
obj1 = self.annot.to_annotation_collection(parent_no_seq)
obj2 = self.annot.to_annotation_collection(parent_nonstandard_type)
obj3 = self.annot.to_annotation_collection(parent_nonstandard_type_with_sequence)
with pytest.raises(NullSequenceException):
_ = obj1.get_reference_sequence()
with pytest.raises(NullSequenceException):
_ = obj2.get_reference_sequence()
assert obj0.get_reference_sequence() == obj3.get_reference_sequence()
assert obj0.chromosome_location == obj0.chunk_relative_location
assert obj1.chromosome_location == obj1.chunk_relative_location
# not the same because of the non-standard parent
assert obj2.chromosome_location != obj2.chunk_relative_location
assert obj2._chunk_relative_bounded_chromosome_location == obj2.chunk_relative_location
assert obj3.chromosome_location != obj3.chunk_relative_location
assert obj3._chunk_relative_bounded_chromosome_location == obj3.chunk_relative_location
# OTOH, this is not the same
obj4 = self.annot.to_annotation_collection(parent_genome_10_49)
assert obj4.chromosome_location != obj4.chunk_relative_location
def test_lift_to_new_coordinates(self):
obj0 = self.annot.to_annotation_collection(parent_genome)
obj1 = obj0.liftover_to_parent_or_seq_chunk_parent(parent_genome_10_49)
assert str(obj1.get_reference_sequence()) in str(obj0.get_reference_sequence())
assert obj1.start == 2 and obj1.end == 40
for gene in obj1:
orig_gene = next(obj0.query_by_feature_identifiers(gene.identifiers).iter_children())
orig_tx_or_feat = next(orig_gene.iter_children())
tx_or_feat = next(gene.iter_children())
assert str(orig_tx_or_feat.get_spliced_sequence()) == str(tx_or_feat.get_spliced_sequence())
def test_query_by_identifiers_subset(self):
model = {
"feature_collections": [],
"genes": [
{
"transcripts": [
{
"exon_starts": [2971596],
"exon_ends": [2972637],
"strand": "PLUS",
"cds_starts": [2971596],
"cds_ends": [2972637],
"cds_frames": ["ZERO"],
"qualifiers": {
"gene": ["tas"],
"locus_tag": ["b2834"],
"gene_synonym": ["ECK2830; JW2802; ygdS"],
"function": ["putative enzyme; Not classified"],
"codon_start": ["1"],
"transl_table": ["11"],
"product": ["putative NADP(H)-dependent aldo-keto reductase"],
"protein_id": ["NP_417311.1"],
"db_xref": [
"ASAP:ABE-0009298",
"EcoGene:EG13093",
"GeneID:947306",
"UniProtKB/Swiss-Prot:P0A9T4",
],
"translation": [
"MQYHRIPHSSLEVSTLGLGTMTFGEQNSEADAHAQLDYAVAQGINLIDVAEMYPVPPRPETQGLTETYVGNWLAKHGSREKLIIASKVSGPSRNNDKGIRPDQALDRKNIREALHDSLKRLQTDYLDLYQVHWPQRPTNCFGKLGYSWTDSAPAVSLLDTLDALAEYQRAGKIRYIGVSNETAFGVMRYLHLADKHDLPRIVTIQNPYSLLNRSFEVGLAEVSQYEGVELLAYSCLGFGTLTGKYLNGAKPAGARNTLFSRFTRYSGEQTQKAVAAYVDIARRHGLDPAQMALAFVRRQPFVASTLLGATTMDQLKTNIESLHLELSEDVLAEIEAVHQVYTYPAP" # noqa: E501
],
},
"is_primary_tx": False,
"transcript_id": None,
"protein_id": "NP_417311.1",
"product": "putative NADP(H)-dependent aldo-keto reductase",
"transcript_symbol": "tas",
"transcript_type": "protein_coding",
"sequence_name": "NC_000913.3",
"sequence_guid": None,
"transcript_interval_guid": "cdbed83f-cc17-b945-53df-620890e7e867",
"transcript_guid": None,
}
],
"gene_id": None,
"gene_symbol": "tas",
"gene_type": "protein_coding",
"locus_tag": "b2834",
"qualifiers": {
"gene": ["tas"],
"locus_tag": ["b2834"],
"gene_synonym": ["ECK2830; JW2802; ygdS"],
"db_xref": ["EcoGene:EG13093", "GeneID:947306"],
},
"sequence_name": "NC_000913.3",
"sequence_guid": None,
"gene_guid": "6b729a41-3316-6cc8-ad99-db1597e4c68a",
},
{
"transcripts": [
{
"exon_starts": [2972668],
"exon_ends": [2973862],
"strand": "MINUS",
"cds_starts": [2972668],
"cds_ends": [2973862],
"cds_frames": ["ZERO"],
"qualifiers": {
"gene": ["lplT"],
"locus_tag": ["b2835"],
"gene_synonym": ["ECK2831; JW2803; ygeD"],
"function": ["orf; Drug/analog sensitivity"],
"GO_process": ["GO:0042493 - response to drug"],
"note": ["putative resistance proteins"],
"codon_start": ["1"],
"transl_table": ["11"],
"product": ["lysophospholipid transporter"],
"protein_id": ["NP_417312.1"],
"db_xref": [
"ASAP:ABE-0009300",
"EcoGene:EG12455",
"GeneID:947317",
"UniProtKB/Swiss-Prot:P39196",
],
"translation": [
"MSESVHTNTSLWSKGMKAVIVAQFLSAFGDNALLFATLALLKAQFYPEWSQPILQMVFVGAYILFAPFVGQVADSFAKGRVMMFANGLKLLGAASICFGINPFLGYTLVGVGAAAYSPAKYGILGELTTGSKLVKANGLMEASTIAAILLGSVAGGVLADWHVLVALAACALAYGGAVVANIYIPKLAAARPGQSWNLINMTRSFLNACTSLWRNGETRFSLVGTSLFWGAGVTLRFLLVLWVPVALGITDNATPTYLNAMVAIGIVVGAGAAAKLVTLETVSRCMPAGILIGVVVLIFSLQHELLPAYALLMLIGVMGGFFVVPLNALLQERGKKSVGAGNAIAVQNLGENSAMLLMLGIYSLAVMIGIPVVPIGIGFGALFALAITALWIWQRRH" # noqa: E501
],
},
"is_primary_tx": False,
"transcript_id": None,
"protein_id": "NP_417312.1",
"product": "lysophospholipid transporter",
"transcript_symbol": "lplT",
"transcript_type": "protein_coding",
"sequence_name": "NC_000913.3",
"sequence_guid": None,
"transcript_interval_guid": "91e4286f-9757-2bb9-1b50-2a42a9c668f0",
"transcript_guid": None,
}
],
"gene_id": None,
"gene_symbol": "lplT",
"gene_type": "protein_coding",
"locus_tag": "b2835",
"qualifiers": {
"gene": ["lplT"],
"locus_tag": ["b2835"],
"gene_synonym": ["ECK2831; | |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import pickle
import tensorflow as tf
import keras
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, learning_curve, GridSearchCV, StratifiedKFold
from sklearn.metrics import roc_curve, confusion_matrix, balanced_accuracy_score,make_scorer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression, LinearRegression, ElasticNet
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier, RandomForestRegressor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, SVR
from sklearn.gaussian_process.kernels import RBF, DotProduct
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Sequential
import xgboost as xgb
import multiprocessing as mp
import tqdm
import sys, os
import time
from datetime import datetime
sns.set()
def _dropSparseCol(df,thresh):
"""
Return a DataFrame keeping only the columns whose proportion of non-NaN entries to total entries is at least `thresh`
@param thresh: float defining the threshold for the proportion of filled values in a column.
Keep all columns at or above this value
@param df: pandas DataFrame
"""
isNotSparse = (df.describe().loc[ 'count' ,:].values)/len(df.index) >= thresh
df_dropCol = df.iloc[:,isNotSparse]
return df_dropCol
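# Illustrative sketch (added, not part of the original pipeline): a toy frame whose
# column 'b' is only half filled is trimmed down to 'a' at thresh=0.6. The data and
# threshold here are invented for the example.
# (Note this helper relies on df.describe() covering every column, i.e. an all-numeric frame.)
_toy = pd.DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [1.0, np.nan, np.nan, 4.0]})
assert list(_dropSparseCol(_toy, 0.6).columns) == ['a']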
def _getResponseCol(df,cols,m=2):
"""
Return np array with boolean values indicating the class depending on the `cols` param
`options` defines the methods used:
1. 'use2SD': take the element-wise logical or of v1 ... vn with n := len(cols)
Let vi be a boolean array where each element represents whether that observation is outside
`m` standard deviations (default 2) of the mean of that col
@param df: pandas DataFrame
@param cols: list of column names of `df` indicating which columns to create a classification response from. Here, we expect WET cols
"""
options = {'use2SD':True}
for opt in options:
if opt == 'use2SD' and options[opt]:
prev = pd.Series([False]*df.shape[0])
for col in cols:
prev = np.logical_or(prev.values, np.abs(df[col] - np.mean(df[col])) > m * np.std(df[col]))
return prev
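# Illustrative note (added comment): with two hypothetical response columns, the call
#   labels = _getResponseCol(df, ['WET_A', 'WET_B'])   # column names invented
# flags a row True if it is an outlier (beyond `m` standard deviations) in either
# column, giving a boolean classification target.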
def _removeSmallSE(df, thresh):
"""
Return a DataFrame that drops columns whose mean/sd is below threshold (Not exactly SE, but close enough)
@param df: pandas DataFrame
@param thresh: float defining the threshold for the proportion of filled values in a column.
Keep all columns above this values
"""
colsToRemove =[]
counter=0
for col in df.columns.values:
try:
colSE = np.mean(df[col])/np.std(df[col])
except Exception:
# Catch a divide-by-zero or non-numeric column; skip the SE check below,
# otherwise colSE would be undefined (or stale from a previous column)
colsToRemove.append(col)
continue
if np.abs(colSE) <= thresh:
colsToRemove.append(col)
if colsToRemove != []:
return df.drop(columns=colsToRemove)
else:
return df
def runPCA(data):
"""
Return numpy array of transform PCA `data` and the associated sklearn model
The reduced dimension k is the number of leading components whose explained variance (eigenvalue) is at least 1 (the Kaiser rule)
@param data: pandas DataFrame
"""
print('Running PCA')
pcaModel = PCA()
pcaModel.fit(data)
cummalVarRatio = [np.sum(pcaModel.explained_variance_ratio_[:i+1]) for i,_ in enumerate(pcaModel.explained_variance_ratio_)]
eigenvalues = pcaModel.explained_variance_
kProp = np.argmin(pcaModel.explained_variance_ >= 1)
# kProp = np.argmax(np.array(cummalVarRatio) > proportion) MAKE SURE TO SET PROPORTION
print('PCA reduced to {}. Original shape: {}'.format(str(kProp), str(data.shape)))
pcaProportionModel = PCA(kProp)
pcaProportionModel.fit(data)
return pcaProportionModel.transform(data), pcaProportionModel
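# Note on the component count above (added comment): because explained_variance_ is
# sorted in decreasing order, np.argmin(ev >= 1) is the index of the first eigenvalue
# below 1, i.e. the number of components kept under the Kaiser (eigenvalue >= 1) rule.
# A hypothetical spectrum makes this concrete:
_ev = np.array([5.2, 2.1, 1.3, 0.8, 0.2])   # invented eigenvalues
assert np.argmin(_ev >= 1) == 3              # three components would be retained
# Caveat: if every eigenvalue were >= 1, argmin would return 0 and PCA(0) would fail;
# the original code does not guard against that case.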
def writeToLogFile(CURRENT_MODEL_FOLDER,s):
"""
Write str `s` or list `s` to `CURRENT_MODEL_FOLDER` + logs.txt
Also adds timestamp
@param CURRENT_MODEL_FOLDER: file path to current train folder
@param s: string or list of strings
"""
if type(s) == list:
s = map(str, s)
s = ' '.join(s)
else:
s = str(s)
f = open(CURRENT_MODEL_FOLDER + 'logs.txt', 'a+')
f.write('[{}]: {}\n'.format(datetime.now().strftime("%m/%d/%Y %H:%M:%S"), s ))
f.close()
def get_data(train, wet_cols):
"""
Return train-test split on train
@param train: pandas DataFrame with columns `wet_cols` as response variable
@param wet_cols: list of response columns in `wet_cols`
"""
X_train, X_test,y_train,y_test = train_test_split(train.drop(wet_cols, axis=1), train[wet_cols] , test_size = .3, random_state=909)
return X_train, X_test, y_train, y_test
def _scatter_preds(preds, truth, CURRENT_MODEL_FOLDER, col, method=''):
"""
Plot a scatter plot of preds and truth
@param preds: array like
@param truth: array like
@param CURRENT_MODEL_FOLDER: path to save folder to
@param col: name of folder (for wet parm of interest) under images
@param method: string denoting the algorithm used
"""
fig = plt.figure(figsize=(10,8))
sns.regplot(preds, truth, marker="+")
top = max(max(preds),max(truth))
bottom = min(min(preds),min(truth))
x = np.linspace(bottom,top,100)
y102 = x * 1.02
y105 = x * 1.05
y98 = x * 0.98
y95 = x * 0.95
plt.plot(x,x, 'g', linewidth=3)
plt.plot(x,y102, 'y', linewidth=3)
plt.plot(x,y98, 'y', linewidth=3)
plt.plot(x,y105, 'r', linewidth=3)
plt.plot(x,y95, 'r', linewidth=3)
plt.ylim((bottom, top))
plt.xlim((bottom, top))
plt.xlabel("predictions")
plt.ylabel("truth")
plt.title("Pred. vs. Truth "+ method)
plt.savefig(CURRENT_MODEL_FOLDER +"/images/"+col+"/pred_scatter_"+method +".png")
plt.show()
def _plotROC(preds, truth,CURRENT_MODEL_FOLDER,method = ''):
"""
Plot a ROC plot of preds and truth
@param preds: array like
@param truth: array like
@param CURRENT_MODEL_FOLDER: path to save folder to
@param method: string denoting the algorithm used
"""
fpr,tpr, thresh = roc_curve(truth, preds)
fig, ax = plt.subplots(figsize=(6,6))
plt.plot(fpr, tpr)
plt.plot(np.linspace(0,1,30),np.linspace(0,1,30))
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC curve ' + method)
if method != "":
filename = method+'ROC'
else:
filename = 'modelROC'
plt.savefig(CURRENT_MODEL_FOLDER +'images/'+ filename + '.png')
return fpr,tpr,thresh
def _getConfusionMatrix(preds, truth, thresh,fpr, tpr, CURRENT_MODEL_FOLDER,method = '', figsize=(6,6)):
"""
Plot a confusion matrix of preds and truth
@param preds: array like
@param truth: array like
@param thresh: array like
@param fpr: array like
@param tpr: array like
@param CURRENT_MODEL_FOLDER: path to save folder to
@param method: string denoting the algorithm used
@param figsize: tuple
"""
# Get Threshold val
# First filter all thresholds to those with fpr less than `arbitrary_fpr_max`
# Next take all indices with maximal tpr
# Finally take the threshold of those indices with minimal fpr
arbitrary_fpr_max = 0.45
idx_fpr = [i for i,val in enumerate(fpr) if val <= arbitrary_fpr_max]
tpr_with_fpr_max = [tpr[i] for i in idx_fpr]
max_tpr = max(tpr_with_fpr_max)
idx_max_tpr = [(i,fpr[i]) for i, j in enumerate(tpr_with_fpr_max) if j == max_tpr]
threshold_i = idx_max_tpr[0][0]
threshold_v = idx_max_tpr[0][1]
for i,v in idx_max_tpr[1:]:
if v < threshold_v:
threshold_i = i
threshold_v = v
threshold_val = thresh[threshold_i]
# Round and make judgement
preds = [val >= threshold_val for val in preds]
cm = confusion_matrix(truth, preds, labels=np.unique(truth))
cm_sum = np.sum(cm, axis=1, keepdims=True)
cm_perc = cm / cm_sum.astype(float) * 100
annot = np.empty_like(cm).astype(str)
nrows, ncols = cm.shape
for i in range(nrows):
for j in range(ncols):
c = cm[i, j]
p = cm_perc[i, j]
if i == j:
s = cm_sum[i]
annot[i, j] = '%.1f%%\n%d/%d' % (p, c, s)
elif c == 0:
annot[i, j] = ''
else:
annot[i, j] = '%.1f%%\n%d' % (p, c)
cm = pd.DataFrame(cm, index=np.unique(truth), columns=np.unique(truth))
cm.index.name = 'Actual'
cm.columns.name = 'Predicted'
fig, ax = plt.subplots(figsize=figsize)
snsCM = sns.heatmap(cm, cmap= "YlGnBu", annot=annot, fmt='', ax=ax)
if method != "":
filename = method+'CM'
else:
filename = 'modelCM'
fig.savefig(CURRENT_MODEL_FOLDER +'/images/'+ filename + '.png')
return threshold_val
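# Worked toy trace of the threshold rule above (values invented for illustration):
# with fpr = [0.10, 0.30, 0.40, 0.60], tpr = [0.50, 0.90, 0.90, 0.95] and
# thresh = [0.80, 0.35, 0.20, 0.05], only the first three points satisfy fpr <= 0.45;
# their best tpr is 0.90, reached at fpr 0.30 and 0.40, the smaller fpr wins,
# so threshold 0.35 would be returned.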
def _runClassificationModel(X_train, X_test, y_train, y_test, pipe, pipe_params,modelMethod,CURRENT_MODEL_FOLDER):
"""
Taking in data, conduct a gridsearch over `pipe_params` using the `pipe` and return the best model found
@param X_train: array like
@param X_test: array like
@param y_train: vector like
@param y_test: vector like
@param pipe: sklearn Pipeline containing algorithm to use
@param pipe_params: dict for pipe
@param modelMethod: string for algorithm used
@param CURRENT_MODEL_FOLDER: string for path
"""
# create scorer
balanced_accuracy_scorer = make_scorer(balanced_accuracy_score, )
# Fit model. Ravel done to avert warnings in some algorithms
gs = GridSearchCV(pipe, param_grid=pipe_params, cv=StratifiedKFold() ,n_jobs=10, scoring = balanced_accuracy_scorer)
if type(y_train) == pd.Series or type(y_train) == pd.DataFrame:
y_train = y_train.values.ravel()
gs.fit(X_train, y_train)
# Get quick model stats
print('cvs:', gs.best_score_)
writeToLogFile(CURRENT_MODEL_FOLDER, ['cvs:', gs.best_score_])
print('train score:', gs.score(X_train, y_train))
writeToLogFile(CURRENT_MODEL_FOLDER, ['train score:', gs.score(X_train, y_train)])
print('test score:', gs.score(X_test, y_test))
writeToLogFile(CURRENT_MODEL_FOLDER, ['test score:', gs.score(X_test, y_test)])
writeToLogFile(CURRENT_MODEL_FOLDER, gs.best_params_)
writeToLogFile(CURRENT_MODEL_FOLDER, gs.best_estimator_)
print(gs.best_params_)
print(gs.best_estimator_)
# Predict
preds_train = gs.best_estimator_.predict_proba(X_train)[:,1]
preds_test = gs.best_estimator_.predict_proba(X_test)[:,1]
# Convert and create output csvs
convert_to_class = lambda lst, x: [val >= x for val in lst]
# Get ROC and confusion matrix
fpr,tpr,thresh = _plotROC(preds_test, y_test,CURRENT_MODEL_FOLDER,method = modelMethod )
threshold_val = _getConfusionMatrix(preds_test,y_test, thresh, fpr, tpr, CURRENT_MODEL_FOLDER,method = modelMethod)
preds_train_df = pd.DataFrame(convert_to_class(preds_train, threshold_val), columns = ['PRED_TRAIN_CLASS'], index = X_train.index)
preds_train_df['PRED_TRAIN'] = preds_train
preds_train_df['ACT_TRAIN'] = y_train
preds_test_df = pd.DataFrame(convert_to_class(preds_test, threshold_val), columns = ['PRED_TEST_CLASS'], index = X_test.index)
preds_test_df['PRED_TEST'] = preds_test
preds_test_df['ACT_TEST'] = y_test
preds_test_df['THRESH'] = pd.Series([threshold_val]*len(y_test), index=X_test.index)
preds_train_df.to_csv(CURRENT_MODEL_FOLDER + 'models/' + modelMethod + 'TRAINpredVsAct.csv')
preds_test_df.to_csv(CURRENT_MODEL_FOLDER + 'models/' + modelMethod + 'TESTpredVsAct.csv')
roc_df = pd.DataFrame(thresh, columns = ['THRESHOLD'])
roc_df['FPR'] = fpr
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import pickle
# In[2]:
from PIL import Image
# In[3]:
from skimage.color import rgb2hsv, rgb2lab, hsv2rgb, lab2rgb
from matplotlib.colors import hsv_to_rgb
# In[4]:
from LogGabor import LogGabor
from PYramid2 import cropped_pyramid, local_filter, get_K, log_gabor_transform
from PYramid2 import inverse_pyramid, get_K_inv, inverse_gabor
# In[5]:
width = 32
n_levels = 7 #int(np.log(np.max((N_X, N_Y))/width)/np.log(base_levels)) + 1
base_levels = 2
n_color = 3
r_min = width / 8 # width / 16
r_max = width / 2 # 7 * width / 16
n_sublevel = n_eccentricity = 4
n_azimuth = 16
n_theta = 8
n_phase = 1
# In[6]:
phase_shift = False
# In[7]:
pyramid_n_params = width*width*n_color*n_levels
print('pyramids #params :', pyramid_n_params)
logpolar_n_params = n_levels * n_color * n_eccentricity * n_azimuth * n_theta * n_phase
print('logpolar #params :', logpolar_n_params)
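# Sanity check on the two counts printed above, given the settings in this notebook:
# pyramid:   32 * 32 * 3 * 7            = 21504 coefficients
# log-polar:  7 *  3 * 4 * 16 * 8 * 1   = 10752 coefficients, i.e. exactly half.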
# In[9]:
out_chan=1024
gauss = False
do_mask = False
color = True
color_mode= 'lab' # 'hsv' #True
print ('encoder #params :', out_chan)
# In[10]:
if gauss:
script_name = '2021-03-13-log-polar-deep-convolutional-no-max-pool-VAE-gauss-'+color_mode
else:
script_name = '2021-03-13-log-polar-deep-convolutional-no-max-pool-VAE-laplace-'+color_mode
# ### Image utilities
# In[11]:
def tensor_pyramid_display(img_pyr_tens, global_bias = 0):
fig, axs = plt.subplots(1, n_levels, figsize=(20,20))
img_aff = img_pyr_tens.permute(0,1,3,4,2).detach().numpy()
for i_level, ax in enumerate(axs):
if i_level < n_levels-1 and not gauss:
bias = 128
else:
bias = global_bias
ax.imshow((img_aff[0, i_level, ...]+bias).clip(0,255).astype('uint8'))
ax.plot([width/2], [width/2], 'r+', ms=32);
#print('Tensor shape=', img_rec.shape)
return axs
# In[12]:
def tensor_image_cmp(img_tens_ref, img_tens_rec):
fig, ax = plt.subplots(1, 2, figsize=(20,10))
img_aff_ref = img_tens_ref.detach().permute(0,2,3,1).squeeze().detach().numpy().clip(0,255).astype('uint8')
ax[0].imshow(img_aff_ref)
N_X, N_Y, _ = img_aff_ref.shape
ax[0].plot([N_Y//2], [N_X//2], 'r+', ms=16)
ax[0].set_title('LOG GABOR RECONSTRUCTION')
img_aff_rec = img_tens_rec.detach().permute(0,2,3,1).squeeze().detach().numpy().clip(0,255).astype('uint8')
ax[1].imshow(img_aff_rec)
ax[1].plot([N_Y//2], [N_X//2], 'r+', ms=16)
ax[1].set_title('AUTO-ENCODER RECONSTRUCTION')
return ax
# ### Log Gabor filters
# In[13]:
pe = {'N_X': width, 'N_Y': width, 'do_mask': do_mask, 'base_levels':
base_levels, 'n_theta': 0, 'B_sf': np.inf, 'B_theta': np.inf ,
'use_cache': True, 'figpath': 'results', 'edgefigpath':
'results/edges', 'matpath': 'cache_dir', 'edgematpath':
'cache_dir/edges', 'datapath': 'database/', 'ext': '.pdf', 'figsize':
14.0, 'formats': ['pdf', 'png', 'jpg'], 'dpi': 450, 'verbose': 0}
lg = LogGabor(pe)
print('lg shape=', lg.pe.N_X, lg.pe.N_Y)
# In[14]:
lg.pe
# In[15]:
K = get_K(width=width,
n_sublevel = n_sublevel,
n_azimuth = n_azimuth,
n_theta = n_theta,
n_phase = n_phase,
r_min = r_min,
r_max = r_max,
log_density_ratio = 2,
verbose=True,
phase_shift=phase_shift,
lg=lg)
# The statements below, kept from the original notebook, are alternative (disabled)
# configurations: either append the orientation norm as an extra channel, or keep
# only the norm. Enabling them would change n_theta and break the later cells,
# which assume n_theta = 8.
# K_mean = torch.norm(K, dim=4).unsqueeze(4)
# K = torch.cat((K, K_mean), dim=4)
# n_theta = 9
# K = K_mean
# n_theta = 1
plt.plot(K.flatten())
# ### Gabor filters pseudo-inverse
# In[16]:
K_inv = get_K_inv(K, width=width, n_sublevel = n_sublevel, n_azimuth = n_azimuth, n_theta = n_theta, n_phase = n_phase)
plt.plot(K_inv.flatten())
# In[17]:
### regularized inverse gabor
K_ = K.reshape((width**2, n_sublevel*n_azimuth*n_theta*n_phase))
print('Reshaped filter tensor=', K_.shape)
K_inv_rcond = torch.pinverse(K_, rcond=0.1)
print('Tensor shape=', K_inv.shape)
K_inv_rcond =K_inv_rcond.reshape(n_sublevel, n_azimuth, n_theta, n_phase, width, width)
plt.figure()
plt.plot(K_inv_rcond.flatten())
# ### Honeycomb space coverage tests
# In[18]:
plt.figure(figsize=(20,3))
for i_theta in range(n_theta):
coefs = torch.zeros((n_sublevel, n_azimuth, n_theta, n_phase))
coefs[0, n_azimuth//2, i_theta, 0] = 1
img_dis = torch.tensordot(K, coefs, dims=4)
plt.subplot(1,n_theta,i_theta+1)
plt.imshow(img_dis.numpy()[:, :, ...], cmap='gray')
# In[19]:
plt.figure(figsize=(20,5))
plt.subplot(1,2,1)
coefs = torch.zeros((n_sublevel, n_azimuth, n_theta, n_phase))
coefs[0, 4, 0, 0] = 1
img_dis = torch.tensordot(K, coefs, dims=4)
plt.imshow(img_dis.numpy()[:, :, ...], cmap='gray')
if n_phase >1:
plt.subplot(1,2,2)
coefs = torch.zeros((n_sublevel, n_azimuth, n_theta, n_phase))
coefs[0, 4, 0, 1] = 1
img_dis = torch.tensordot(K, coefs, dims=4)
plt.imshow(img_dis.numpy()[:, :, ...], cmap='gray')
# In[20]:
plt.figure(figsize=(20,6))
for i_az in range(n_azimuth):
coefs = torch.zeros((n_sublevel, n_azimuth, n_theta, n_phase))
coefs[:, i_az, 0, 0] = 1
img_dis = torch.tensordot(K, coefs, dims=4)
plt.subplot(2,n_azimuth//2,i_az+1)
plt.imshow(img_dis.numpy()[:, :, ...], cmap='gray')
# In[21]:
coefs = torch.zeros((n_sublevel, n_azimuth, n_theta, n_phase))
coefs[:, :, :1, 0] = torch.ones((n_sublevel, n_azimuth, 1))
img_dis = torch.tensordot(K, coefs, dims=4)
plt.subplot(1,2,1)
plt.imshow(img_dis.numpy(), cmap='gray')
plt.subplot(1,2,2)
_=plt.plot(img_dis.numpy())
# In[22]:
if n_phase>1:
coefs = torch.zeros((n_sublevel, n_azimuth, n_theta, n_phase))
coefs[:, :, :1, 1] = torch.ones((n_sublevel, n_azimuth, 1))
img_dis = torch.tensordot(K, coefs, dims=4)
plt.subplot(1,2,1)
plt.imshow(img_dis.numpy(), cmap='gray')
plt.subplot(1,2,2)
_=plt.plot(img_dis.numpy())
# In[23]:
coefs = torch.zeros((n_sublevel, n_azimuth, n_theta, n_phase))
coefs[:, :, 2:3, 0] = torch.ones((n_sublevel, n_azimuth, 1))
img_dis = torch.tensordot(K, coefs, dims=4)
plt.subplot(1,2,1)
plt.imshow(img_dis.numpy(), cmap='gray')
plt.subplot(1,2,2)
_=plt.plot(img_dis.numpy())
K_test= torch.norm(K, dim=4)
plt.figure(figsize=(20,6))
for i_az in range(n_azimuth):
coefs = torch.zeros((n_sublevel, n_azimuth, n_phase))
coefs[:, i_az, 0] = 1
img_dis = torch.tensordot(K_test, coefs, dims=3)
plt.subplot(2,n_azimuth//2,i_az+1)
plt.imshow(img_dis.numpy()[:, :, ...], cmap='gray')
plt.figure(figsize=(20,6))
for i_az in range(n_azimuth):
coefs = torch.zeros((n_sublevel, n_azimuth, n_phase))
coefs[:, i_az, 1] = 1
img_dis = torch.tensordot(K_test, coefs, dims=3)
plt.subplot(2,n_azimuth//2,i_az+1)
plt.imshow(img_dis.numpy()[:, :, ...], cmap='gray')
plt.figure()
coefs = torch.zeros((n_sublevel, n_azimuth, n_phase))
coefs[:, :, :1] = torch.ones((n_sublevel, n_azimuth, 1))
img_dis = torch.tensordot(K_test, coefs, dims=3)
plt.subplot(1,2,1)
plt.imshow(img_dis.numpy(), cmap='gray')
plt.subplot(1,2,2)
_=plt.plot(img_dis.numpy())
plt.figure()
coefs = torch.zeros((n_sublevel, n_azimuth, n_phase))
coefs[:, :, 1:] = torch.ones((n_sublevel, n_azimuth, 1))
img_dis = torch.tensordot(K_test, coefs, dims=3)
plt.subplot(1,2,1)
plt.imshow(img_dis.numpy(), cmap='gray')
plt.subplot(1,2,2)
_=plt.plot(img_dis.numpy())
liste = [img_dis] * n_levels
crop_levels = (torch.stack(liste)).unsqueeze(0) * 5
full_rosace = inverse_pyramid(crop_levels, color=False, gauss=gauss, n_levels=n_levels)
full_rosace = full_rosace.detach().permute(0,2,3,1).numpy().clip(0,255).astype('uint8')
#ax = tensor_image_cmp(full_img_rec, full_img_rec_rec)
plt.figure(figsize=(20,15))
plt.imshow(full_rosace.squeeze(), cmap='gray')
# In[24]:
log_gabor_rosace = torch.zeros(1, n_levels, n_color, n_eccentricity, n_azimuth, n_theta, n_phase)
log_gabor_rosace[:,:,:,:,:,2,0] = 200
img_crop_rosace=inverse_gabor(log_gabor_rosace, K_inv_rcond)
axs = tensor_pyramid_display(img_crop_rosace)
img_crop_rosace[:,-1,...] = 128
full_rosace = inverse_pyramid(img_crop_rosace, color=color, gauss=gauss, n_levels=n_levels)
full_rosace = full_rosace.detach().permute(0,2,3,1).numpy().clip(0,255).astype('uint8')
#ax = tensor_image_cmp(full_img_rec, full_img_rec_rec)
plt.figure(figsize=(20,15))
plt.imshow(full_rosace[0,:])
# In[25]:
log_gabor_rosace = 100 * torch.ones(1, n_levels, n_color, n_eccentricity, n_azimuth, n_theta, n_phase)
plt.figure()
img_crop_rosace=inverse_gabor(log_gabor_rosace, K_inv_rcond)
axs = tensor_pyramid_display(img_crop_rosace)
img_crop_rosace[:,-1,...] = 128
full_rosace = inverse_pyramid(img_crop_rosace, color=color, gauss=gauss, n_levels=n_levels)
full_rosace = full_rosace.detach().permute(0,2,3,1).numpy().clip(0,255).astype('uint8')
#ax = tensor_image_cmp(full_img_rec, full_img_rec_rec)
plt.figure(figsize=(20,15))
plt.imshow(full_rosace[0,:])
# ## Images dataset + transforms
# In[26]:
if True: #not os.path.exists("image_names.txt"):
names = open("image_names.txt", "w")
img_names = os.listdir('../ALLSTIMULI')
print('EXCLUDED:')
for i in range(len(img_names)):
if 'Data1' in img_names[i] or 'Data2' in img_names[i] or 'Data3' in img_names[i] or 'Data4' in img_names[i] or 'DS_' in img_names[i] or len(img_names[i])<=10 :
#or '2218506905' in img_names[i] or 'i24622350' in img_names[i]:
print(img_names[i])
else:
names.write(img_names[i][:-5]+'\n')
names.close()
names = open("image_names.txt", "r")
img_names = names.readlines()
for i in range(len(img_names)):
img_names[i]=img_names[i][:-1]
# In[27]:
dir_names = os.listdir('../saccades-data')
loc_data_xy={}
for dir_name in dir_names:
loc_data_xy[dir_name]={}
for name in img_names:
locpath = '../saccades-data/' + dir_name + '/' + name
f = open(locpath,'rb')
loc_dict = pickle.load(f)
loc_data_xy[dir_name][name] = np.array(loc_dict['barycenters'])
# In[28]:
def show_landmarks(image, landmarks):
"""Show image with landmarks"""
plt.imshow(image)
plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')
#plt.pause(0.001) # pause a bit so that plots are updated
# # Dataset class
# In[29]:
class SaccadeLandmarksDataset(Dataset):
"""Saccade Landmarks dataset."""
def __init__(self, loc_dict, img_dir, img_names, dir_names, transform=None, color_mode='rgb'):
"""
Args:
loc_dict (dict): Dictionary containing saccade coordinates
img_dir (string): Directory with all the images.
img_names (list): List of image names (without file extension).
dir_names (list): Saccade-data subdirectories to sample fixations from.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.loc_dict = loc_dict
self.img_dir = img_dir
self.img_names = img_names
self.dir_names = dir_names
self.transform = transform
self.color_mode=color_mode
def __len__(self):
return len(self.img_names)
def __getitem__(self, idx, color_mode='rgb'):
#img_name = os.listdir(self.img_dir)[idx+2]
name = self.img_names[idx]
img_path = os.path.join(self.img_dir, name + '.jpeg')
image = io.imread(img_path)
#if self.color_mode == 'hsv':
# image = rgb2hsv(image)
#elif self.color_mode == 'lab':
# image = rgb2lab(image)
#name = img_name[:-5]
dir_name = np.random.choice(self.dir_names)
landmarks = self.loc_dict[dir_name][name]
landmarks = np.array([landmarks])
landmarks = landmarks.reshape(-1, 2) #.astype('float').reshape(-1, 2)
sample = {'image': image, 'landmarks': landmarks, 'name':name}
if self.transform:
sample = self.transform(sample)
return sample
# # Transforms
# In[30]:
class RandomSaccadeTo(object):
# zero_fill=True blanks the wrapped-around margins instead of reusing rolled pixels
def __init__(self, zero_fill = False):
self.zero_fill = zero_fill
def __call__(self, sample):
image, landmarks = sample['image'], sample['landmarks']
N_X, N_Y = image.shape[:2]
try:
nb_sac = len(landmarks)
sac_num = np.random.randint(nb_sac)
sac = landmarks[sac_num]
#sac = np.random.choice(landmarks)
except:
print("landmarks", landmarks, "image", sample['name'])
sac = (N_Y//2, N_X//2)
#img_color_sac = saccade_to(image, (N_X//2, N_Y//2), (sac[1], sac[0]))
image_roll = np.copy(image)
image_roll=np.roll(image_roll, N_X//2 - sac[1], axis=0)
if self.zero_fill:
shift = N_X//2 - sac[1]
if shift > 0:
image_roll[:shift,:,:] = 0
elif shift < 0:
image_roll[shift:,:,:] = 0
image_roll=np.roll(image_roll, N_Y//2 - sac[0], axis=1)
if self.zero_fill:
shift = N_Y//2 - sac[0]
if shift > 0:
image_roll[:,:shift,:] = 0
elif shift < 0:
image_roll[:,shift:,:] = 0
return {'image':image_roll, 'pos':sac, 'name':sample['name']}
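# Note (added comment): RandomSaccadeTo re-centres the image on the sampled saccade
# target by rolling rows by N_X//2 - sac[1] and columns by N_Y//2 - sac[0]; with
# zero_fill=True the margins that would wrap around are blanked instead of reused.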
# In[31]:
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
image_tens = sample['image'].transpose((2, 0, 1))
return {'image': torch.FloatTensor(image_tens), 'pos': sample['pos'], 'name':sample['name']}
# ### Adapted cropped pyramid (squeezed tensor)
# In[32]:
class CroppedPyramid(object):
def __init__(self, width,
base_levels,
color=color,
do_mask=do_mask,
verbose=False,
n_levels=None,
color_mode='rgb'):
self.width = width
self.base_levels = base_levels
self.color = color
self.do_mask = do_mask
self.verbose = verbose
self.n_levels = n_levels
self.color_mode = color_mode
def __call__(self, sample):
img_crop, level_size = cropped_pyramid(sample['image'].unsqueeze(0),
width=self.width,
base_levels=self.base_levels,
color=self.color,
do_mask=self.do_mask,
verbose=self.verbose,
squeeze=True,
gauss=gauss,
n_levels=self.n_levels,
color_mode=self.color_mode)
#img_crop[:,-1,...]-=128 # on residual (!!)
return{'img_crop':img_crop, 'level_size':level_size, 'pos':sample['pos'], 'name':sample['name']}
# ### LogGaborTransform
# In[33]:
class LogGaborTransform(object):
def __init__(self, K=K, color=color, verbose=False):
self.K = K
self.color = color
self.verbose = verbose
def __call__(self, sample):
log_gabor_coeffs = log_gabor_transform(sample['img_crop'].unsqueeze(0), K)
return{'img_gabor':log_gabor_coeffs, 'pos':sample['pos'], 'name':sample['name']}
# ### ComplexModulus
# # Compose transforms
# ### transforms.Compose
# In[34]:
composed_transform = transforms.Compose([RandomSaccadeTo(zero_fill=True),
ToTensor(),
CroppedPyramid(width,
base_levels,
n_levels=n_levels,
color_mode=color_mode)]) #, LogGaborTransform()])
# In[35]:
saccade_dataset = SaccadeLandmarksDataset(loc_dict=loc_data_xy,
img_dir='../ALLSTIMULI/',
img_names=img_names,
dir_names = dir_names,
transform=composed_transform,
color_mode=color_mode)
# # Iterating through the dataset
# In[38]:
# Helper function to show a batch
'''def tensor_hsv_to_rgb(images_batch):
n_batch, n_levels = images_batch.shape[:2]
for batch in range(n_batch):
for level in range(n_levels):
im_hsv =
'''
def show_landmarks_batch(sample_batched, color_mode='rgb'):
"""Show image with landmarks for | |
matrices.
"""
y = np.empty(x.size, common_type(A, x))
_dot_csr_matvec_prange(A.data, A.indptr, A.indices, x.ravel(), y)
y.shape = x.shape
if isinstance(x, qarray):
y = qarray(y)
return y
def dot_sparse(a, b):
"""Dot product for sparse matrix, dispatching to parallel for v large nnz.
"""
out = a @ b
if isdense(out) and (isinstance(b, qarray) or isinstance(a, qarray)):
out = qarray(out)
return out
def dot(a, b):
"""Matrix multiplication, dispatched to dense or sparse functions.
Parameters
----------
a : dense or sparse operator
First array.
b : dense or sparse operator
Second array.
Returns
-------
dense or sparse operator
Dot product of ``a`` and ``b``.
"""
if issparse(a) or issparse(b):
return dot_sparse(a, b)
try:
return a.dot(b)
except AttributeError:
return a @ b
@realify
def vdot(a, b):
"""Accelerated 'Hermitian' inner product of two arrays. In other words,
``b`` here will be conjugated by the function.
"""
return np.vdot(a.ravel(), b.ravel())
@realify
@upcast
@njit
def rdot(a, b): # pragma: no cover
"""Real dot product of two dense vectors.
Here, ``b`` will *not* be conjugated before the inner product.
"""
a, b = a.reshape((1, -1)), b.reshape((-1, 1))
return (a @ b)[0, 0]
@njit(parallel=True)
def _l_diag_dot_dense_par(l, A, out): # pragma: no cover
for i in numba.prange(l.size):
out[i, :] = l[i] * A[i, :]
@ensure_qarray
def l_diag_dot_dense(diag, mat):
"""Dot product of diagonal matrix (with only diagonal supplied) and dense
matrix.
"""
if diag.size <= 128:
return mul_dense(diag.reshape(-1, 1), mat)
else:
out = np.empty_like(mat, dtype=common_type(diag, mat))
_l_diag_dot_dense_par(diag.ravel(), mat, out)
return out
def l_diag_dot_sparse(diag, mat):
"""Dot product of digonal matrix (with only diagonal supplied) and sparse
matrix.
"""
return sp.diags(diag) @ mat
def ldmul(diag, mat):
"""Accelerated left diagonal multiplication. Equivalent to
``numpy.diag(diag) @ mat``, but faster than numpy.
Parameters
----------
diag : vector or 1d-array
Vector representing the diagonal of a matrix.
mat : dense or sparse matrix
A normal (non-diagonal) matrix.
Returns
-------
dense or sparse matrix
Dot product of the matrix whose diagonal is ``diag`` and ``mat``.
"""
if issparse(mat):
return l_diag_dot_sparse(diag, mat)
return l_diag_dot_dense(diag, mat)
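# Minimal sanity sketch (added, not part of the original module): `ldmul` should agree
# with the naive dense form ``np.diag(diag) @ mat``. Sizes and values below are
# arbitrary, and plain numpy arrays are assumed to be acceptable inputs here.
_d = np.random.randn(64) + 1j * np.random.randn(64)
_m = np.random.randn(64, 64) + 1j * np.random.randn(64, 64)
assert np.allclose(ldmul(_d, _m), np.diag(_d) @ _m)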
@njit(parallel=True)
def _r_diag_dot_dense_par(A, l, out): # pragma: no cover
for i in numba.prange(l.size):
out[:, i] = A[:, i] * l[i]
@ensure_qarray
def r_diag_dot_dense(mat, diag):
"""Dot product of dense matrix and digonal matrix (with only diagonal
supplied).
"""
if diag.size <= 128:
return mul_dense(mat, diag.reshape(1, -1))
else:
out = np.empty_like(mat, dtype=common_type(diag, mat))
_r_diag_dot_dense_par(mat, diag.ravel(), out)
return out
def r_diag_dot_sparse(mat, diag):
"""Dot product of sparse matrix and digonal matrix (with only diagonal
supplied).
"""
return mat @ sp.diags(diag)
def rdmul(mat, diag):
"""Accelerated left diagonal multiplication.
Equivalent to ``mat @ numpy.diag(diag)``, but faster.
Parameters
----------
mat : dense or sparse matrix
A normal (non-diagonal) matrix.
diag : vector or 1d-array
Vector representing the diagonal of a matrix.
Returns
-------
dense or sparse matrix
Dot product of ``mat`` and the matrix whose diagonal is ``diag``.
"""
if issparse(mat):
return r_diag_dot_sparse(mat, diag)
return r_diag_dot_dense(mat, diag)
@njit(parallel=True)
def _outer_par(a, b, out, m, n): # pragma: no cover
for i in numba.prange(m):
out[i, :] = a[i] * b[:]
@ensure_qarray
def outer(a, b):
"""Outer product between two vectors (no conjugation).
"""
m, n = a.size, b.size
if m * n < 2**14:
return mul_dense(a.reshape(m, 1), b.reshape(1, n))
out = np.empty((m, n), dtype=common_type(a, b))
_outer_par(a.ravel(), b.ravel(), out, m, n)
return out
@vectorize
def explt(l, t): # pragma: no cover
"""Complex exponenital as used in solution to schrodinger equation.
"""
return cmath.exp((-1.0j * t) * l)
# --------------------------------------------------------------------------- #
# Kronecker (tensor) product #
# --------------------------------------------------------------------------- #
@njit
def _nb_kron_exp_seq(a, b, out, m, n, p, q):
for i in range(m):
for j in range(n):
ii, fi = i * p, (i + 1) * p
ij, fj = j * q, (j + 1) * q
out[ii:fi, ij:fj] = a[i, j] * b
@njit(parallel=True)
def _nb_kron_exp_par(a, b, out, m, n, p, q):
for i in numba.prange(m):
for j in range(n):
ii, fi = i * p, (i + 1) * p
ij, fj = j * q, (j + 1) * q
out[ii:fi, ij:fj] = a[i, j] * b
@ensure_qarray
def kron_dense(a, b, par_thresh=4096):
m, n = a.shape
p, q = b.shape
out = np.empty((m * p, n * q), dtype=common_type(a, b))
if out.size > par_thresh:
_nb_kron_exp_par(a, b, out, m, n, p, q)
else:
_nb_kron_exp_seq(a, b, out, m, n, p, q)
return out
def kron_sparse(a, b, stype=None):
"""Sparse tensor (kronecker) product,
Output format can be specified or will be automatically determined.
"""
if stype is None:
stype = ("bsr" if isinstance(b, np.ndarray) or b.format == 'bsr' else
b.format if isinstance(a, np.ndarray) else
"csc" if a.format == "csc" and b.format == "csc" else
"csr")
return sp.kron(a, b, format=stype)
def kron_dispatch(a, b, stype=None):
"""Kronecker product of two arrays, dispatched based on dense/sparse and
also size of product.
"""
if issparse(a) or issparse(b):
return kron_sparse(a, b, stype=stype)
return kron_dense(a, b)
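# Tiny illustrative check (added, not in the original source): the Kronecker product
# of two 2x2 identities is the 4x4 identity, whichever backend `kron_dispatch` picks;
# mixing a sparse and a dense input goes through the sparse path.
_eye2 = np.eye(2, dtype=complex)
assert np.allclose(kron_dispatch(_eye2, _eye2), np.eye(4))
assert kron_dispatch(sp.identity(2, format='csr'), _eye2).shape == (4, 4)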
# --------------------------------------------------------------------------- #
# Core Functions #
# --------------------------------------------------------------------------- #
_SPARSE_CONSTRUCTORS = {"csr": sp.csr_matrix,
"bsr": sp.bsr_matrix,
"csc": sp.csc_matrix,
"coo": sp.coo_matrix}
def sparse_matrix(data, stype="csr", dtype=complex):
"""Construct a sparse matrix of a particular format.
Parameters
----------
data : array_like
Fed to scipy.sparse constructor.
stype : {'csr', 'csc', 'coo', 'bsr'}, optional
Sparse format.
Returns
-------
scipy sparse matrix
Of format ``stype``.
"""
return _SPARSE_CONSTRUCTORS[stype](data, dtype=dtype)
_EXPEC_METHODS = {
# [isop(a), isop(b), issparse(a) or issparse(b)]
(0, 0, 0): lambda a, b: abs(vdot(a, b))**2,
(0, 1, 0): lambda a, b: vdot(a, b @ a),
(1, 0, 0): lambda a, b: vdot(b, a @ b),
(1, 1, 0): lambda a, b: _trace_dense(a @ b),
(0, 0, 1): lambda a, b: abs(dot(dag(a), b)[0, 0])**2,
(0, 1, 1): realify(lambda a, b: dot(dag(a), dot(b, a))[0, 0]),
(1, 0, 1): realify(lambda a, b: dot(dag(b), dot(a, b))[0, 0]),
(1, 1, 1): lambda a, b: _trace_sparse(dot(a, b)),
}
def expectation(a, b):
"""'Expectation' between a vector/operator and another vector/operator.
The 'operator' inner product between ``a`` and ``b``, but also for vectors.
This means that for consistency:
- for two vectors it will be the absolute expec squared ``|<a|b><b|a>|``,
*not* ``<a|b>``.
- for a vector and an operator it will be ``<a|b|a>``
- for two operators it will be the Hilbert-Schmidt inner product
``tr(A @ B)``
In this way ``expectation(a, b) == expectation(dop(a), b) ==
expectation(dop(a), dop(b))``.
Parameters
----------
a : vector or operator
First state or operator - assumed to be ket if vector.
b : vector or operator
Second state or operator - assumed to be ket if vector.
Returns
-------
x : float
'Expectation' of ``a`` with ``b``.
"""
return _EXPEC_METHODS[isop(a), isop(b), issparse(a) or issparse(b)](a, b)
expec = expectation
"""Alias for :func:`expectation`."""
def normalize(qob, inplace=True):
"""Normalize a quantum object.
Parameters
----------
qob : dense or sparse vector or operator
Quantum object to normalize.
inplace : bool, optional
Whether to act inplace on the given operator.
Returns
-------
dense or sparse vector or operator
Normalized quantum object.
"""
if not inplace:
qob = qob.copy()
if isop(qob):
n_factor = trace(qob)
else:
n_factor = expectation(qob, qob)**0.25
qob[:] /= n_factor
return qob
normalize_ = functools.partial(normalize, inplace=True)
def chop(qob, tol=1.0e-15, inplace=True):
"""Set small values of a dense or sparse array to zero.
Parameters
----------
qob : dense or sparse vector or operator
Quantum object to chop.
tol : float, optional
Fraction of ``max(abs(qob))`` to chop below.
inplace : bool, optional
Whether to act on input array or return copy.
Returns
-------
dense or sparse vector or operator
Chopped quantum object.
"""
minm = np.abs(qob).max() * tol # minimum value tolerated
if not inplace:
qob = qob.copy()
if issparse(qob):
qob.data.real[np.abs(qob.data.real) < minm] = 0.0
qob.data.imag[np.abs(qob.data.imag) < minm] = 0.0
qob.eliminate_zeros()
else:
qob.real[np.abs(qob.real) < minm] = 0.0
qob.imag[np.abs(qob.imag) < minm] = 0.0
return qob
chop_ = functools.partial(chop, inplace=True)
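# Small usage sketch (added, not in the original module): entries below
# tol * max(|qob|) are zeroed, in place by default.
_v = np.array([[1.0, 1e-20, -2.0]], dtype=complex)
chop_(_v)
assert _v[0, 1] == 0.0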
def quimbify(data, qtype=None, normalized=False, chopped=False,
sparse=None, stype=None, dtype=complex):
"""Converts data to 'quantum' i.e. complex matrices, kets being columns.
Parameters
----------
data : dense or sparse array_like
Array describing vector or operator.
qtype : {``'ket'``, ``'bra'`` or ``'dop'``}, optional
Quantum object type output type. Note that if an operator is given
as ``data`` and ``'ket'`` or ``'bra'`` as ``qtype``, the operator
will be unravelled into a column or row vector.
sparse : bool, optional
Whether to convert output to sparse a format.
normalized : bool, optional
Whether to normalise the output.
chopped :
error didn't raise IndexError"
x = ()
x += ()
if x != (): raise TestFailed, 'tuple inplace add from () to () failed'
x += (1,)
if x != (1,): raise TestFailed, 'tuple resize from () failed'
# extended slicing - subscript only for tuples
a = (0,1,2,3,4)
vereq(a[::], a)
vereq(a[::2], (0,2,4))
vereq(a[1::2], (1,3))
vereq(a[::-1], (4,3,2,1,0))
vereq(a[::-2], (4,2,0))
vereq(a[3::-2], (3,1))
vereq(a[-100:100:], a)
vereq(a[100:-100:-1], a[::-1])
vereq(a[-100L:100L:2L], (0,2,4))
# Check that a specific bug in _PyTuple_Resize() is squashed.
def f():
for i in range(1000):
yield i
vereq(list(tuple(f())), range(1000))
# Verify that __getitem__ overrides are not recognized by __iter__
# XXX: this is a problem
#class T(tuple):
# def __getitem__(self, key):
# return str(key) + '!!!'
#vereq(iter(T((1,2))).next(), 1)
print '6.5.3 Lists'
# calling built-in types without argument must return empty
if list() != []: raise TestFailed,'list() does not return []'
if len([]) != 0: raise TestFailed, 'len([])'
if len([1,]) != 1: raise TestFailed, 'len([1,])'
if len([1,2,3,4,5,6]) != 6: raise TestFailed, 'len([1,2,3,4,5,6])'
if [1,2]+[3,4] != [1,2,3,4]: raise TestFailed, 'list concatenation'
if [1,2]*3 != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3'
if [1,2]*3L != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3L'
if 0*[1,2,3] != []: raise TestFailed, 'list repetition 0*'
if 0L*[1,2,3] != []: raise TestFailed, 'list repetition 0L*'
if min([1,2]) != 1 or max([1,2]) != 2: raise TestFailed, 'min/max list'
if 0 in [0,1,2] and 1 in [0,1,2] and 2 in [0,1,2] and 3 not in [0,1,2]: pass
else: raise TestFailed, 'in/not in list'
a = [1, 2, 3, 4, 5]
a[:-1] = a
if a != [1, 2, 3, 4, 5, 5]:
raise TestFailed, "list self-slice-assign (head)"
a = [1, 2, 3, 4, 5]
a[1:] = a
if a != [1, 1, 2, 3, 4, 5]:
raise TestFailed, "list self-slice-assign (tail)"
a = [1, 2, 3, 4, 5]
a[1:-1] = a
if a != [1, 1, 2, 3, 4, 5, 5]:
raise TestFailed, "list self-slice-assign (center)"
try: [][0]
except IndexError: pass
else: raise TestFailed, "list index error didn't raise IndexError"
try: [][0] = 5
except IndexError: pass
else: raise TestFailed, "list assignment index error didn't raise IndexError"
try: [].pop()
except IndexError: pass
else: raise TestFailed, "empty list.pop() didn't raise IndexError"
try: [1].pop(5)
except IndexError: pass
else: raise TestFailed, "[1].pop(5) didn't raise IndexError"
try: [][0:1] = 5
except TypeError: pass
else: raise TestFailed, "bad list slice assignment didn't raise TypeError"
try: [].extend(None)
except TypeError: pass
else: raise TestFailed, "list.extend(None) didn't raise TypeError"
a = [1, 2, 3, 4]
a *= 0
if a != []:
raise TestFailed, "list inplace repeat"
a = []
a[:] = tuple(range(10))
if a != range(10):
raise TestFailed, "assigning tuple to slice"
print '6.5.3a Additional list operations'
a = [0,1,2,3,4]
a[0L] = 1
a[1L] = 2
a[2L] = 3
if a != [1,2,3,3,4]: raise TestFailed, 'list item assignment [0L], [1L], [2L]'
a[0] = 5
a[1] = 6
a[2] = 7
if a != [5,6,7,3,4]: raise TestFailed, 'list item assignment [0], [1], [2]'
a[-2L] = 88
a[-1L] = 99
if a != [5,6,7,88,99]: raise TestFailed, 'list item assignment [-2L], [-1L]'
a[-2] = 8
a[-1] = 9
if a != [5,6,7,8,9]: raise TestFailed, 'list item assignment [-2], [-1]'
a[:2] = [0,4]
a[-3:] = []
a[1:1] = [1,2,3]
if a != [0,1,2,3,4]: raise TestFailed, 'list slice assignment'
a[ 1L : 4L] = [7,8,9]
if a != [0,7,8,9,4]: raise TestFailed, 'list slice assignment using long ints'
del a[1:4]
if a != [0,4]: raise TestFailed, 'list slice deletion'
del a[0]
if a != [4]: raise TestFailed, 'list item deletion [0]'
del a[-1]
if a != []: raise TestFailed, 'list item deletion [-1]'
a=range(0,5)
del a[1L:4L]
if a != [0,4]: raise TestFailed, 'list slice deletion'
del a[0L]
if a != [4]: raise TestFailed, 'list item deletion [0]'
del a[-1L]
if a != []: raise TestFailed, 'list item deletion [-1]'
a.append(0)
a.append(1)
a.append(2)
if a != [0,1,2]: raise TestFailed, 'list append'
a.insert(0, -2)
a.insert(1, -1)
a.insert(2,0)
if a != [-2,-1,0,0,1,2]: raise TestFailed, 'list insert'
b = a[:]
b.insert(-2, "foo")
b.insert(-200, "left")
b.insert(200, "right")
if b != ["left",-2,-1,0,0,"foo",1,2,"right"]: raise TestFailed, 'list insert2'
# a = [-2,-1,0,0,1,2]
if a.count(0) != 2: raise TestFailed, ' list count'
if a.index(0) != 2: raise TestFailed, 'list index'
if a.index(0,2) != 2: raise TestFailed, 'list index, start argument'
if a.index(0,-4) != 2: raise TestFailed, 'list index, -start argument'
if a.index(-2,-10) != 0: raise TestFailed, 'list index, very -start argument'
if a.index(0,3) != 3: raise TestFailed, 'list index, start argument'
if a.index(0,-3) != 3: raise TestFailed, 'list index, -start argument'
if a.index(0,3,4) != 3: raise TestFailed, 'list index, stop argument'
if a.index(0,-3,-2) != 3: raise TestFailed, 'list index, -stop argument'
#XXX index with Long not working yet.
#if a.index(0,-4*sys.maxint,4*sys.maxint) != 2:
# raise TestFailed, 'list index, -maxint, maxint argument'
#try:
# a.index(0, 4*sys.maxint,-4*sys.maxint)
#except ValueError:
# pass
#else:
# raise TestFailed, 'list index, maxint,-maxint argument'
try:
a.index(2,0,-10)
except ValueError:
pass
else:
raise TestFailed, 'list index, very -stop argument'
a.remove(0)
try:
a.index(2,0,4)
except ValueError:
pass
else:
raise TestFailed, 'list index, stop argument.'
if a != [-2,-1,0,1,2]: raise TestFailed, 'list remove'
a.reverse()
if a != [2,1,0,-1,-2]: raise TestFailed, 'list reverse'
a.sort()
if a != [-2,-1,0,1,2]: raise TestFailed, 'list sort'
def revcmp(a, b): return cmp(b, a)
a.sort(revcmp)
if a != [2,1,0,-1,-2]: raise TestFailed, 'list sort with cmp func'
# The following dumps core in unpatched Python 1.5:
def myComparison(x,y):
return cmp(x%3, y%7)
z = range(12)
z.sort(myComparison)
try: z.sort(2)
except TypeError: pass
else: raise TestFailed, 'list sort compare function is not callable'
#XXX need a strategy for locking list during sort.
#def selfmodifyingComparison(x,y):
# z.append(1)
# return cmp(x, y)
#try: z.sort(selfmodifyingComparison)
#except ValueError: pass
#else: raise TestFailed, 'modifying list during sort'
try: z.sort(lambda x, y: 's')
except TypeError: pass
else: raise TestFailed, 'list sort compare function does not return int'
# Test extreme cases with long ints
a = [0,1,2,3,4]
if a[ -pow(2,128L): 3 ] != [0,1,2]:
raise TestFailed, "list slicing with too-small long integer"
if a[ 3: pow(2,145L) ] != [3,4]:
raise TestFailed, "list slicing with too-large long integer"
# extended slicing
# subscript
a = [0,1,2,3,4]
vereq(a[::], a)
vereq(a[::2], [0,2,4])
vereq(a[1::2], [1,3])
vereq(a[::-1], [4,3,2,1,0])
vereq(a[::-2], [4,2,0])
vereq(a[3::-2], [3,1])
vereq(a[-100:100:], a)
vereq(a[100:-100:-1], a[::-1])
vereq(a[-100L:100L:2L], [0,2,4])
vereq(a[1000:2000:2], [])
vereq(a[-1000:-2000:-2], [])
# deletion
del a[::2]
vereq(a, [1,3])
a = range(5)
del a[1::2]
vereq(a, [0,2,4])
a = range(5)
del a[1::-2]
vereq(a, [0,2,3,4])
a = range(10)
del a[::1000]
vereq(a, [1, 2, 3, 4, 5, 6, 7, 8, 9])
# assignment
a = range(10)
a[::2] = [-1]*5
vereq(a, [-1, 1, -1, 3, -1, 5, -1, 7, -1, 9])
a = range(10)
a[::-4] = [10]*3
vereq(a, [0, 10, 2, 3, 4, 10, 6, 7, 8 ,10])
a = range(4)
a[::-1] = a
vereq(a, [3, 2, 1, 0])
a = range(10)
b = a[:]
c = a[:]
a[2:3] = ["two", "elements"]
b[slice(2,3)] = ["two", "elements"]
c[2:3:] = ["two", "elements"]
vereq(a, b)
vereq(a, c)
a = range(10)
a[::2] = tuple(range(5))
vereq(a, [0, 1, 1, 3, 2, 5, 3, 7, 4, 9])
# Verify that __getitem__ overrides are not recognized by __iter__
class L(list):
def __getitem__(self, key):
return str(key) + '!!!'
vereq(iter(L([1,2])).next(), 1)
print '6.6 Mappings == Dictionaries'
# calling built-in types without argument must return empty
if dict() != {}: raise TestFailed,'dict() does not return {}'
d = {}
if d.keys() != []: raise TestFailed, '{}.keys()'
if d.values() != []: raise TestFailed, '{}.values()'
if d.items() != []: raise TestFailed, '{}.items()'
if d.has_key('a') != 0: raise TestFailed, '{}.has_key(\'a\')'
if ('a' in d) != 0: raise TestFailed, "'a' in {}"
if ('a' not in d) != 1: raise TestFailed, "'a' not in {}"
if len(d) != 0: raise TestFailed, 'len({})'
d = {'a': 1, 'b': 2}
if len(d) != 2: raise TestFailed, 'len(dict)'
k = d.keys()
k.sort()
if k != ['a', 'b']: raise TestFailed, 'dict keys()'
if d.has_key('a') and d.has_key('b') and not d.has_key('c'): pass
else: raise TestFailed, 'dict keys()'
if 'a' in d and 'b' in d and 'c' not in d: pass
else: raise TestFailed, 'dict keys() # in/not in version'
if d['a'] != 1 or d['b'] != 2: raise TestFailed, 'dict item'
d['c'] = 3
d['a'] = 4
if d['c'] != 3 or d['a'] != 4: raise TestFailed, 'dict item assignment'
del d['b']
if d != {'a': 4, 'c': 3}: raise TestFailed, 'dict item deletion'
# dict.clear()
d = {1:1, 2:2, 3:3}
d.clear()
if d != {}: raise TestFailed, 'dict clear'
# dict.update()
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
if d != {1:1, 2:2, 3:3}: raise TestFailed, 'dict update'
d.clear()
try: d.update(None)
except AttributeError: pass
else: raise TestFailed, 'dict.update(None), AttributeError expected'
class SimpleUserDict:
def __init__(self):
self.d = {1:1, 2:2, 3:3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.update(SimpleUserDict())
if d != {1:1, 2:2, 3:3}: raise TestFailed, 'dict.update(instance)'
d.clear()
class FailingUserDict:
def keys(self):
raise ValueError
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed, 'dict.keys() expected ValueError'
class FailingUserDict:
def keys(self):
class BogonIter:
def __iter__(self):
raise ValueError
return BogonIter()
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed, 'iter(dict.keys()) expected ValueError'
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def next(self):
if self.i:
self.i = 0
return 'a'
raise ValueError
return BogonIter()
def __getitem__(self, key):
return key
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed, 'iter(dict.keys()).next() expected ValueError'
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def next(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise ValueError
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed, 'dict.update(), __getitem__ expected ValueError'
# dict.fromkeys()
if dict.fromkeys('abc') != {'a':None, 'b':None, 'c':None}:
raise
= list(location_integers[:-1])
new_location_integers.append(location_integers[-1] - 1)
# after a move, location is wrong, so remove from call_data
if 'location' in call_data:
del call_data['location']
if 'part' in call_data:
del call_data['part']
if 'part_top' in call_data:
del call_data['part_top']
if 'part_loc' in call_data:
del call_data['part_loc']
# move the item
if part_tuple.section_name:
# move the part in a section, using skilift.editsection.move_location(project, section_name, schange, from_location, to_location)
call_data['schange'] = editsection.move_location(part_tuple.project, part_tuple.section_name, call_data['schange'], location, (widget_name, container, new_location_integers))
domcontents, dragrows, droprows = _container_domcontents(part_tuple.project, None, part_tuple.section_name, widget_name, container)
else:
# move the part in a page, using skilift.editpage.move_location(project, pagenumber, pchange, from_location, to_location)
call_data['pchange'] = editpage.move_location(part_tuple.project, part_tuple.pagenumber, call_data['pchange'], location, (widget_name, container, new_location_integers))
domcontents, dragrows, droprows = _container_domcontents(part_tuple.project, part_tuple.pagenumber, None, widget_name, container)
except ServerError as e:
raise FailPage(message = e.message)
# redraw the table
sd['domtable', 'dragrows'] = dragrows
sd['domtable', 'droprows'] = droprows
sd['domtable', 'contents'] = domcontents
pd.update(sd)
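# Reader note (added comment, not in the original module): a container location is the
# tuple (widget_name, container_index, location_integers), where the integer path walks
# the part tree; a hypothetical ("mywidget", 0, (2, 1)) would address the second child
# of the third item in container 0 of "mywidget". Each move handler simply computes a
# new integer path and hands it to editpage/editsection.move_location.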
def move_up_right_in_container_dom(skicall):
"Called by domtable to move an item in a container up and to the right"
call_data = skicall.call_data
pd = call_data['pagedata']
sd = SectionData("editdom")
try:
part_tuple = _item_to_move(call_data)
location = part_tuple.location
widget_name = location[0]
container = int(location[1])
location_integers = location[2]
if location_integers[-1] == 0:
# at top of a part, cannot be moved
raise FailPage("Cannot be moved up")
new_parent_integers = list(location_integers[:-1])
new_parent_integers.append(location_integers[-1] - 1)
new_parent_location = (location[0], location[1], new_parent_integers)
new_parent_tuple = skilift.part_info(part_tuple.project, part_tuple.pagenumber, part_tuple.section_name, new_parent_location)
if new_parent_tuple is None:
raise FailPage("Cannot be moved up")
if new_parent_tuple.part_type != "Part":
raise FailPage("Cannot be moved up")
items_in_new_parent = len(skilift.part_contents(part_tuple.project, part_tuple.pagenumber, part_tuple.section_name, new_parent_location))
new_location_integers = tuple(new_parent_integers + [items_in_new_parent])
# after a move, location is wrong, so remove from call_data
if 'location' in call_data:
del call_data['location']
if 'part' in call_data:
del call_data['part']
if 'part_top' in call_data:
del call_data['part_top']
if 'part_loc' in call_data:
del call_data['part_loc']
# move the item
if part_tuple.section_name:
# move the part in a section, using skilift.editsection.move_location(project, section_name, schange, from_location, to_location)
call_data['schange'] = editsection.move_location(part_tuple.project, part_tuple.section_name, call_data['schange'], location, (widget_name, container, new_location_integers))
domcontents, dragrows, droprows = _container_domcontents(part_tuple.project, None, part_tuple.section_name, widget_name, container)
else:
# move the part in a page, using skilift.editpage.move_location(project, pagenumber, pchange, from_location, to_location)
call_data['pchange'] = editpage.move_location(part_tuple.project, part_tuple.pagenumber, call_data['pchange'], location, (widget_name, container, new_location_integers))
domcontents, dragrows, droprows = _container_domcontents(part_tuple.project, part_tuple.pagenumber, None, widget_name, container)
except ServerError as e:
raise FailPage(message = e.message)
# redraw the table
sd['domtable', 'dragrows'] = dragrows
sd['domtable', 'droprows'] = droprows
sd['domtable', 'contents'] = domcontents
pd.update(sd)
def move_down_in_container_dom(skicall):
"Called by domtable to move an item in a container down"
call_data = skicall.call_data
pd = call_data['pagedata']
sd = SectionData("editdom")
try:
part_tuple = _item_to_move(call_data)
location = part_tuple.location
widget_name = location[0]
container = int(location[1])
location_integers = location[2]
if len(location_integers) == 1:
# Just at immediate level below top
parent_location = (widget_name, container, ())
items_in_parent = len(skilift.part_contents(part_tuple.project, part_tuple.pagenumber, part_tuple.section_name, parent_location))
if location_integers[0] == (items_in_parent-1):
# At end, cannot be moved
raise FailPage("Cannot be moved down")
new_location_integers = (location_integers[0]+2,)
else:
parent_integers = tuple(location_integers[:-1])
parent_location = (widget_name, container, parent_integers)
items_in_parent = len(skilift.part_contents(part_tuple.project, part_tuple.pagenumber, part_tuple.section_name, parent_location))
if location_integers[-1] == (items_in_parent-1):
# At end of a part, so move up a level
new_location_integers = list(parent_integers[:-1])
new_location_integers.append(parent_integers[-1] + 1)
else:
# just insert into current level
new_location_integers = list(parent_integers)
new_location_integers.append(location_integers[-1] + 2)
# after a move, location is wrong, so remove from call_data
if 'location' in call_data:
del call_data['location']
if 'part' in call_data:
del call_data['part']
if 'part_top' in call_data:
del call_data['part_top']
if 'part_loc' in call_data:
del call_data['part_loc']
# move the item
if part_tuple.section_name:
# move the part in a section, using skilift.editsection.move_location(project, section_name, schange, from_location, to_location)
call_data['schange'] = editsection.move_location(part_tuple.project, part_tuple.section_name, call_data['schange'], location, (widget_name, container, new_location_integers))
domcontents, dragrows, droprows = _container_domcontents(part_tuple.project, None, part_tuple.section_name, widget_name, container)
else:
# move the part in a page, using skilift.editpage.move_location(project, pagenumber, pchange, from_location, to_location)
call_data['pchange'] = editpage.move_location(part_tuple.project, part_tuple.pagenumber, call_data['pchange'], location, (widget_name, container, new_location_integers))
domcontents, dragrows, droprows = _container_domcontents(part_tuple.project, part_tuple.pagenumber, None, widget_name, container)
except ServerError as e:
raise FailPage(message = e.message)
# redraw the table
sd['domtable', 'dragrows'] = dragrows
sd['domtable', 'droprows'] = droprows
sd['domtable', 'contents'] = domcontents
pd.update(sd)
def move_down_right_in_container_dom(skicall):
"Called by domtable to move an item in a container down and to the right"
call_data = skicall.call_data
pd = call_data['pagedata']
sd = SectionData("editdom")
try:
part_tuple = _item_to_move(call_data)
location = part_tuple.location
widget_name = location[0]
container = int(location[1])
location_integers = location[2]
if len(location_integers) == 1:
parent_location = (widget_name, container, ())
else:
parent_integers = list(location_integers[:-1])
parent_location = (widget_name, container, parent_integers)
items_in_parent = len(skilift.part_contents(part_tuple.project, part_tuple.pagenumber, part_tuple.section_name, parent_location))
if location_integers[-1] == (items_in_parent-1):
# At end of a block, cannot be moved
raise FailPage("Cannot be moved down")
new_parent_integers = list(location_integers[:-1])
new_parent_integers.append(location_integers[-1] + 1)
new_parent_location = (location[0], location[1], new_parent_integers)
new_parent_tuple = skilift.part_info(part_tuple.project, part_tuple.pagenumber, part_tuple.section_name, new_parent_location)
if new_parent_tuple is None:
raise FailPage("Cannot be moved down")
if not (new_parent_tuple.part_type == 'Part' or new_parent_tuple.part_type == 'Section'):
raise FailPage("Cannot be moved down")
new_location_integers = tuple(new_parent_integers+[0])
# after a move, location is wrong, so remove from call_data
if 'location' in call_data:
del call_data['location']
if 'part' in call_data:
del call_data['part']
if 'part_top' in call_data:
del call_data['part_top']
if 'part_loc' in call_data:
del call_data['part_loc']
# move the item
if part_tuple.section_name:
# move the part in a section, using skilift.editsection.move_location(project, section_name, schange, from_location, to_location)
call_data['schange'] = editsection.move_location(part_tuple.project, part_tuple.section_name, call_data['schange'], location, (widget_name, container, new_location_integers))
domcontents, dragrows, droprows = _container_domcontents(part_tuple.project, None, part_tuple.section_name, widget_name, container)
else:
# move the part in a page, using skilift.editpage.move_location(project, pagenumber, pchange, from_location, to_location)
call_data['pchange'] = editpage.move_location(part_tuple.project, part_tuple.pagenumber, call_data['pchange'], location, (widget_name, container, new_location_integers))
domcontents, dragrows, droprows = _container_domcontents(part_tuple.project, part_tuple.pagenumber, None, widget_name, container)
except ServerError as e:
raise FailPage(message = e.message)
# redraw the table
sd['domtable', 'dragrows'] = dragrows
sd['domtable', 'droprows'] = droprows
sd['domtable', 'contents'] = domcontents
pd.update(sd)
def move_in_container_dom(skicall):
"Called by domtable to move an item in a container after a drag and drop"
call_data = skicall.call_data
pd = call_data['pagedata']
sd = SectionData("editdom")
if ('editdom', 'domtable', 'dragrows') not in call_data:
raise FailPage(message = "item to drop missing")
editedprojname = call_data['editedprojname']
pagenumber = None
section_name = None
if "page_number" in call_data:
pagenumber = call_data["page_number"]
elif "section_name" in call_data:
section_name = call_data["section_name"]
else:
raise FailPage(message = "No page or section given")
part_to_move = call_data['editdom', 'domtable', 'dragrows']
# so part_to_move is widget name with container and location string of integers
# create location which is a tuple or list consisting of three items:
# a string of widget name
# a container integer
# a tuple or list of location integers
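    # e.g. a hypothetical value "mywidget-0-3-1" is parsed below into
    # widget_name "mywidget", container 0 and location integers [3, 1]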
location_list = part_to_move.split('-')
# first item should be a string, rest integers
if len(location_list) < 3:
raise FailPage("Item to move has not been recognised")
try:
widget_name = location_list[0]
container = int(location_list[1])
location_to_move_integers = [ int(i) for i in location_list[2:]]
except Exception:
raise FailPage("Item to move has not been recognised")
# location is a tuple of widget_name, container, tuple of location integers
location_to_move = (widget_name, container, location_to_move_integers)
call_data['container'] = container
call_data['widget_name'] = widget_name
# new target location
target_part = call_data['editdom', 'domtable', 'droprows']
# so target_part is widget name with location string of integers
# create location which is a tuple or list consisting of three items:
# a string of widget name
# a container integer
# a tuple or list of location integers
location_list = target_part.split('-')
# first item should be a string, rest integers
if len(location_list) < 2:
raise FailPage("target of move has not been recognised")
if widget_name != location_list[0]:
raise FailPage("Invalid move, widget name differs")
if container != int(location_list[1]):
raise FailPage("Invalid move, container number differs")
if len(location_list) == 2:
# At the container top row
new_location_integers = [0]
else:
try:
target_location_integers = [ int(i) for i in location_list[2:]]
except Exception:
raise FailPage("Invalid move, location not accepted")
# location is a tuple of widget_name, container, tuple of location integers
target_location = (widget_name, container, target_location_integers)
# get target part_tuple from project, pagenumber, section_name, target_location
target_part_tuple = skilift.part_info(editedprojname, pagenumber, section_name, target_location)
if target_part_tuple is None:
raise FailPage("Target has not been recognised")
if (target_part_tuple.part_type == "Part") or (target_part_tuple.part_type == "Section"):
# insert
if target_location_integers:
new_location_integers = list(target_location_integers)
new_location_integers.append(0)
else:
new_location_integers = [0]
else:
# append
new_location_integers = list(target_location_integers)
new_location_integers[-1] = new_location_integers[-1] + 1
    # after a move, location is wrong, so remove from call_data
# Copyright International Business Machines Corp, 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import dill
import getopt
import http.client as httplib
import httplib2
import locale
import os
import re
import sys
import urllib
import urllib.request as urllib2
from xml.dom import minidom
from xml.etree import ElementTree as ET
from xml.parsers.expat import ExpatError
TOKEN_FILE = '.lsfpass'
WORK_DIR_NAME = '.lsf_faas'
MULTIPLE_ACCEPT_TYPE = 'text/plain,application/xml,text/xml,multipart/mixed'
ERROR_STRING = 'errMsg'
ERROR_TAG = '<' + ERROR_STRING + '>'
ACTION_STRING = 'actionMsg'
ACTION_TAG = '<' + ACTION_STRING + '>'
SCRIPT_FILE_NAME = 'lsf_faas.py'
OUTPUT_FILE_NAME = 'output.out'
LSF_OUTPUT_FILE_NAME = 'lsf.output'
LSF_ERRPUT_FILE_NAME = 'lsf.errput'
SESSION_LOGOUT = 'Your current login session was logged out'
CANNOT_CONNECT_SERVER = 'Cannot connect to the server.'
TOKEN_IS_DELETED = 'Your token is empty or was deleted.'
def checkField(field):
if field != None:
if field.text == None :
field = ''
else:
field = field.text
else:
field='-'
return field
def prepareUpload(upload_files):
cwd = os.getcwd()
files = upload_files.split(',')
paths = ''
p = re.compile(r'^[a-zA-Z]:[/\\][\w\W]+')
totalSize = 0
for f in files:
        f = f.strip()  # strip() returns a new string; reassign so the whitespace is actually removed
if len(f) > 0:
if ((os.sep != f[0]) and (p.match(f.lower()) == None)):
f = os.sep.join([cwd ,f])
if not os.path.isfile(f):
return False, 'The specified file does not exist: %s' % f
elif os.access(f, os.R_OK) == 0:
return False, 'No read permission for the file: %s' % f
else:
totalSize += os.path.getsize(f)
paths = paths + f + ','
if len(paths) <= 0:
return False, 'The specified file does not exist: %s '% upload_files
else:
paths = paths[:-1]
if (totalSize > 536870912):
return False, 'Total file size is greater than 500MB. Files cannot be uploaded.'
else:
return True, paths
def getHttp(url, work_dir, timeout=5):
is_https = False
if ( (len(url) != 0) & ('https' in url.lower())):
is_https = True
if is_https == True:
pem_file= os.sep.join([work_dir , 'cacert.pem'])
if os.path.isfile(pem_file):
if timeout is None:
return httplib2.Http(ca_certs = pem_file)
else:
return httplib2.Http(ca_certs = pem_file, timeout = timeout)
else:
raise Exception('The https certificate \'cacert.pem\' is missing. Please copy the \'cacert.pem\' file from the GUI_CONFDIR/https/cacert.pem on the IBM Spectrum Application Center to %s.' % work_dir)
if timeout is None:
return httplib2.Http()
else:
return httplib2.Http(timeout = timeout)
def saveToken(url, token, jtoken, work_dir):
if len(jtoken) > 0:
token = token + ",JSESSIONID=" + jtoken[0].childNodes[0].nodeValue
fpath = os.sep.join([work_dir , TOKEN_FILE])
try:
f = open(fpath, "w")
except IOError as e:
raise Exception('Cannot open file "%s": %s' % (fpath, str(e)))
else:
f.write(url)
f.write('\n')
f.write(token)
f.close()
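# The token file written above therefore holds two lines, for example (hypothetical values):
#   https://ac-host:8443/platform/
#   <token value>,JSESSIONID=<session id>
# getToken() below reads the file back with exactly this layout in mind.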
def getToken(work_dir):
token = ''
url = ''
fpath = os.sep.join([work_dir, TOKEN_FILE])
try:
f = open(fpath, "r")
url_token = f.read().split('\n')
f.close()
url = url_token[0]
token = url_token[1].replace('"', '#quote#')
if len(token) <= 0:
return url, ''
else:
return url, 'platform_token='+token
except IOError:
return url, token
except Exception as e:
return url, token
def removeToken(work_dir):
fpath = os.sep.join([work_dir , TOKEN_FILE])
if (os.path.exists(fpath)):
os.remove(fpath)
def doAction(jobId, action, work_dir):
url, token = getToken(work_dir)
if token == '':
return False, TOKEN_IS_DELETED
try:
http = getHttp(url, work_dir)
except Exception as e:
return False, str(e)
headers = {'Content-Type': 'text/plain', 'Cookie': token, 'Accept': 'application/xml', 'Accept-Language': 'en-us'}
try:
response, content = http.request(url + 'webservice/pacclient/jobOperation/' + action +'/' + jobId, 'GET', headers=headers)
except Exception as e:
return False, CANNOT_CONNECT_SERVER
try:
content = content.decode('utf-8')
except Exception as e:
return False, 'Failed to decode content "%s": %s' % (content, str(e))
try:
if response['status'] == '200':
xdoc = minidom.parseString(content)
if ERROR_TAG in content:
err_tag = xdoc.getElementsByTagName(ERROR_STRING)
return False, err_tag[0].childNodes[0].nodeValue
elif ACTION_TAG in content:
action_tag = xdoc.getElementsByTagName(ACTION_STRING)
return True, action_tag[0].childNodes[0].nodeValue
else:
return False, 'Failed to %s the task' % action
else:
return False, 'Failed to %s the task' % action
except Exception as e:
return False, 'Failed to parse content: %s' % str(e)
def downloadFiles(jobId, destination, files, work_dir, asynchronous = False):
url,token = getToken(work_dir)
if token == '':
return False, TOKEN_IS_DELETED
try:
if asynchronous:
http = getHttp(url, work_dir, timeout = None)
else:
http = getHttp(url, work_dir)
except Exception as e:
return False, str(e)
body = os.path.basename(files)
headers = {'Content-Type': 'text/plain', 'Cookie': token, 'Accept': MULTIPLE_ACCEPT_TYPE, 'Accept-Language': 'en-us'}
try:
response, content = http.request( url + 'webservice/pacclient/file/' + jobId, 'GET', body = body, headers = headers)
except Exception as e:
return False, CANNOT_CONNECT_SERVER
if len(content) <= 0:
if response['status'] == '404':
return False, 'Failed to download the file. The specified file does not exist: ' + body
        # When the session is logged out, AC also returns error code 403,
        # so there is no way to get the real reason here; maybe ask the user to log out and log in again.
        elif response['status'] == '403':
            return False, 'Failed to download the file. Permission denied: ' + body
else:
return False, 'Failed to download the file: ' + body
else:
try:
content = content.decode('utf-8')
except Exception as e:
try:
                # no need to decode here
parseDownloadContentBytes(destination, content)
return True, ''
except Exception as e:
return False, 'Failed to parse downloaded content: %s' % str(e)
try:
parseDownloadContentString(destination, content)
return True, ''
except Exception as e:
return False, 'Failed to parse downloaded content: %s' % str(e)
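# The two parsers below assume a MIME-multipart style body, roughly of this shape
# (hypothetical illustration; the real boundary and headers come from the server):
#   --boundary
#   Content-ID: <output.out>
#   ...file data...
#   --boundary--
# i.e. every section containing "Content-ID: <filename>" is treated as one downloaded file.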
def parseDownloadContentBytes(destination, content):
boundary = content.split(b"\n")[0].strip()
if b'--' not in boundary:
boundary = content.split(b"\n")[1].strip()
file_sections = content.split(boundary)
file_number = len(file_sections) - 1
for sections in file_sections:
# if has Content-ID in this section, it means a file
if b'Content-ID:' in sections:
# get the file name
data_list = sections.split(b"Content-ID: ")
filename = data_list[1][1:data_list[1].index(b">")]
filename = os.path.basename(str(filename,'utf-8'))
fname = os.sep.join([destination , filename])
lengths = len(sections)
start = sections.index(b">") + 5
end = lengths
if file_number > 1:
end = lengths - 2
data = sections[start : end]
try:
                # write as text if the bytes decode as UTF-8; otherwise fall back to raw bytes below
                f = open(fname, 'w')
                f.write(data.decode('utf-8'))
f.close()
except Exception as e:
f = open(fname,'wb')
f.write(data)
f.close()
def parseDownloadContentString(destination, content):
boundary = content.split("\n")[0].strip()
if '--' not in boundary:
boundary = content.split("\n")[1].strip()
file_sections = content.split(boundary)
file_number = len(file_sections) - 1
for sections in file_sections:
# if has Content-ID in this section, it means a file
if 'Content-ID:' in sections:
# get the file name
data_list = sections.split("Content-ID: ")
filename = data_list[1][1:data_list[1].index(">")]
filename = os.path.basename(filename)
fname = os.sep.join([destination , filename])
# get the file content
lengths = len(sections)
start = sections.index(">") + 5
end = lengths
if file_number > 1:
end = lengths - 2
data = sections[start : end]
if OUTPUT_FILE_NAME in fname:
f = open(fname,'wb')
# encode data as it received bytes
orig_bytes = data.encode('utf-8')
f.write(base64.b64decode(orig_bytes))
else:
try:
f = open(fname,'wb')
f.write(data.encode('utf-8'))
except Exception as e:
f = open(fname,'w')
f.write(data)
f.close()
def logonAC(username, password, host, port, isHttps, work_dir):
if isHttps:
url='https://' + host + ':' + str(port) + '/platform/'
else:
url='http://' + host + ':' + str(port) + '/platform/'
password = password.replace("&", "&")
password = password.replace("<", "<")
password = password.replace(">", ">")
try:
http = getHttp(url, work_dir)
except Exception as e:
return False, str(e)
url_check, token = getToken(work_dir)
if ( (url_check != url) | (False == token.startswith("platform_token=" + username + "#quote#")) ):
token = "platform_token="
headers = {'Content-Type': 'application/xml', 'Cookie': token, 'Accept': MULTIPLE_ACCEPT_TYPE, 'Accept-Language': 'en-us'}
body = '<User><name>%s</name> <pass>%s</pass> </User>' % (username, password)
try:
response, content = http.request(url + 'webservice/pacclient/logon/', 'GET', body=body, headers=headers)
except Exception as e:
return False, 'Failed to log on the server "%s": %s' % (host, str(e))
try:
content = content.decode('utf-8')
except Exception as e:
return False, 'Failed to decode the content "%s": %s' % (content, str(e))
if response['status'] == '200':
xdoc = minidom.parseString(content)
tk = xdoc.getElementsByTagName("token")
jtk = xdoc.getElementsByTagName("jtoken")
if len(tk) > 0:
            # You have logged on as: {0} (username)
try:
saveToken(url, tk[0].childNodes[0].nodeValue,jtk, work_dir)
except Exception as e:
return False, str(e)
return True, 'You have logged on.'
else:
err_tag = xdoc.getElementsByTagName("errMsg")
return False, err_tag[0].childNodes[0].nodeValue
else:
return False, 'Failed to logon the server "%s".' % host
def logoutAC(work_dir):
url, token = getToken(work_dir)
if token == '':
return False, TOKEN_IS_DELETED
if (len(token) <= 0):
        return True, 'You are not logged in yet.'
try:
http = getHttp(url, work_dir)
except Exception as e:
return False, str(e)
ang)
a2 = abs(limitingAngle - oldAng)
a = a2 / (a1+a2)
self.viewDir = self.viewDir * a + oldViewDir * (1-a)
self.viewDir = self.viewDir.normalize()
# store and limit history
self._viewDirHistory.insert(0,self.viewDir)
self._viewDirHistory[10:] = []
# Do a step in that direction
self.viewDir = self.viewDir.normalize()
stepDir = self.DoStep(self.viewDir)
# what do we visualize?
self.dir = self.viewDir
#self.dir = self.walkDir (not used anymore)
# test if we are ok here...
# There are two thresholds. th1 says below which intensity we
# should start to worry. th2 says below which intensity we can
# be sure it is background. An error measure is calculated
# which indicate where between th2 and th1 the value is now.
# The square of the value is subtracted from a foodsupply.
# when this supply reaches 0, the walker is killed.
# Each time we encounter a sample above th1, the food supply
# is reset to 1.0.
val = self._manager.data.sample(self.pos)
th1, th2 = self._manager.params.th1, self._manager.params.th2
if val < th1:
portion = (th1 - val ) / (th1-th2)
self._foodsupply -= portion**2
if self._foodsupply <= 0:
self.Kill("Ran in too low intensity pixels")
else:
self._foodsupply = 1.0
def DirCom(self, viewdir, normalize=True):
""" Apply the directional center of mass operator.
The result depends on a general view direction
and the resulting (normalized) direction (comDir)
is returned. """
# get data and its scale vector
data = self._manager.data
sam = self._manager.data.sampling
scale = Point(sam[1],sam[0])
# get Gaussian derivative kernels
sigma = self._manager.params.scale
sigma2size = 2
if True:
ky = -self._manager.GetGaussianKernel(sigma, sigma2size, (1,0) )
kx = -self._manager.GetGaussianKernel(sigma, sigma2size, (0,1) )
else:
g = self._manager.GetGaussianKernel(sigma, sigma2size, (0,0) )
c = [(i-1)/2 for i in g.shape]
kx = np.zeros(g.shape,dtype=np.float32)
ky = np.zeros(g.shape,dtype=np.float32)
kx[:,:c[1]], kx[:,c[1]+1:] = -1, 1
ky[:c[0],:], ky[c[0]+1:,:] = -1, 1
kx, ky = kx*g/scale[1], ky*g/scale[0]
# calculate sze's
szes = [(s-1)/2 for s in kx.shape]
sze_y, sze_x = szes[0], szes[1]
# get patch
patch = self.GetPatch( tuple(szes) )
if patch is None:
self.Kill("Out of bounds in getting patch for dirCom.")
return
        # normalize patch (required because kw is asymmetric)
#patch = patch - self._manager.params.th2
#patch = patch - patch.min()
patch = patch - sp.ndimage.filters.minimum_filter(patch,3)
# get weighting kernel
kw = self._manager.GetWeightKernel(szes, viewdir)
# apply kernels
dx = patch * kx * kw
dy = patch * ky * kw
# get center-of-mass and store direction
# com is initially in voxel coordinates and
# should be scaled to transform to world coordinates.
# But not if the gaussian kernels are scaled..
com = Point(dx.sum(), dy.sum())
if com.norm()==0:
            com = viewdir
dir = (com/scale)
if normalize:
dir = dir.normalize()
# store stuff for debugging...
self._kw = Aarray(kw, self._manager.data.sampling)
self._patch = Aarray(patch, self._manager.data.sampling)
self._dx = Aarray(dx, self._manager.data.sampling)
self._dy = Aarray(dy, self._manager.data.sampling)
self._kx = Aarray(kx, self._manager.data.sampling)
self._ky = Aarray(ky, self._manager.data.sampling)
return dir
def DoStep(self, dir):
""" Do a step in the direction pointed to by dir.
Taking into account pixel values.
Returns the vector representing the direction in which we
stepped.
"""
# get data and its scale vector
data = self._manager.data
sam = self._manager.data.sampling
scale = Point(sam[1],sam[0])
# create list of candidates
candidates = candidates2.copy()
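        # NOTE: candidates2 is assumed to be a module-level list (defined outside this
        # excerpt) of Point(x, y) offsets to the neighbouring pixels; candidates3 below
        # is assumed to be its 3D voxel counterpart.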
if self._manager.params.testAngleLimit:
# select best candidate
th1, th2 = self._manager.params.th1, self._manager.params.th2
iy, ix = data.point_to_index(self.pos)
bestp, bestval = Point(0,0), -99999 # th
for p in candidates:
val = ( data[ iy+p.y, ix+p.x ] - th2 ) / (th1-th2)
val = max(val,0) * np.cos(dir.angle(p))
if val > bestval:
bestval = val
bestp = p
elif False:
# use patch intensities, as they have been normalized
# can jump between two values
bestp, bestval = Point(0,0), -999 # th
patch = self._patch
iy, ix = (patch.shape[0]-1)/2, (patch.shape[1]-1)/2
for p in candidates:
val = patch[ iy+p.y, ix+p.x ]
val = val * (np.cos(dir.angle(p))+0.5)
if val > bestval:
bestval = val
bestp = p
else:
# Select best candidate. To make sure that we cannot go back to
# the previous pixel, we use limitingAngle to determine the
# candidate Angle.
# 2*candidateAng + limitingAngle < 180
# candidateAng < 90 - limitingAngle/2
iy, ix = data.point_to_index(self.pos)
bestp, bestval = Point(0,0), -99999 # th
candidateAng = 89.0 - self._manager.params.limitingAngle/2.0
candidateAng *= np.pi / 180 # make radians
for p in candidates:
if abs( dir.angle(p) ) > candidateAng:
continue
val = data[ iy+p.y, ix+p.x ]
if val > bestval:
bestval = val
bestp = p
# now go there (keep sampling into account)...
bestp = bestp * scale
self.SetPos( self.pos + bestp)
# return step vector (stepdir)
return bestp
def DrawMore(self, f):
# make current
vv.figure(f.nr)
if not hasattr(self, '_patch') or not hasattr(self, '_kw'):
return
vv.subplot(311)
vv.imshow(self._patch)
vv.subplot(312)
vv.imshow(self._kw)
vv.subplot(313)
vv.imshow(self._ky)
class DirComWalker3D(BaseWalker3D):
""" A walker that walks based on a direction. It keeps
walking more or less in that direction. By taking small steps
we prevent it from going of "track" (the stent).
"""
def __init__(self, manager, p):
BaseWalker3D.__init__(self, manager, p)
self._foodsupply = 1.0
def GoToMaxInRegion(self, region):
""" Overloaded version. Calls the original method and
then determines an initial orientation. """
BaseWalker3D.GoToMaxInRegion(self, region)
# init dir
for i in range(5):
comdir = self.DirCom(self.viewDir)
if comdir is not None:
self.viewDir = comdir
# spawn walker in opposite direction
spawn = DirComWalker3D(self._manager, self.pos)
spawn.viewDir = self.viewDir*-1
def Move(self):
# "inherit" from walker2D
DirComWalker2D.Move.im_func(self)
def DirCom(self, viewdir):
# get data and its scale vector
data = self._manager.data
sam = self._manager.data.sampling
scale = Point(sam[2],sam[1],sam[0])
# get (anisotropic) Gaussian derivative kernels
sigma = self._manager.params.scale
sigma2size = 2
kz = -self._manager.GetGaussianKernel(sigma, sigma2size, (1,0,0) )
ky = -self._manager.GetGaussianKernel(sigma, sigma2size, (0,1,0) )
kx = -self._manager.GetGaussianKernel(sigma, sigma2size, (0,0,1) )
# normalize kernels (if not commented remove scaling below)
#kz, ky, kx = kz / kz.max(), ky / ky.max(), kx / kx.max()
# calculate sze's
szes = [(s-1)/2 for s in kx.shape]
sze_z, sze_y, sze_x = szes[0], szes[1], szes[2]
# get patch
patch = self.GetPatch( tuple(szes) )
if patch is None:
self.Kill("Out of bounds in getting patch for dirCom.")
return
        # normalize patch (required because kw is asymmetric)
#patch = patch - patch.min()
patch = patch - sp.ndimage.filters.minimum_filter(patch,3)
# get weighting kernel
kw = self._manager.GetWeightKernel(szes, viewdir)
# apply kernels
dx = patch * kx * kw
dy = patch * ky * kw
dz = patch * kz * kw
# get center-of-mass and store direction
# com is initially in voxel coordinates and
# should be scaled to transform to world coordinates.
# But not if the gaussian kernels are scaled..
com = Point(dx.sum(), dy.sum(), dz.sum())
if com.norm()==0:
            com = viewdir
dir = (com/scale).normalize()
# store stuff for inspection...
self._kw = Aarray(kw, self._manager.data.sampling)
self._patch = Aarray(patch, self._manager.data.sampling)
self._patch2 = Aarray(patch*kw, self._manager.data.sampling)
self._com = com
return dir
def DoStep(self, dir):
# get data and its scale vector
data = self._manager.data
sam = self._manager.data.sampling
scale = Point(sam[2],sam[1],sam[0])
# create list of candidates
# represent position change in voxels
candidates = candidates3.copy()
# Select best candidate. To make sure that we cannot go back to
# the previous pixel, we use limitingAngle to determine the
# candidate Angle.
# 2*candidateAng + limitingAngle < 180
# candidateAng < 90 - limitingAngle/2
iz, iy, ix = data.point_to_index(self.pos)
bestp, bestval = Point(0,0,0), -99999 # th
candidateAng = 89.0 - self._manager.params.limitingAngle/2.0
candidateAng *= np.pi / 180 # make radians
for p in candidates:
if abs( dir.angle(p) ) > candidateAng:
continue
val = data[ iz+p.z, iy+p.y, ix+p.x ]
if val > bestval:
bestval = val
                bestp = p
# messy_pypi/done/main_terminalsnake.py
#!/usr/bin/env python3
import fcntl
import signal
import sys
import threading
import os
import termios
import time
import tty
import types
from select import select
from random import randint
# ["BORDURE", ["Snake directions", ,,,,,], "Apple", "RΓ©actualise snake directions"]
Themes = {
"Normal": ["\u2588", ['β', 'β', 'β', 'β'], "X", "\u00B7"],
"Full": ["=", ["\u2588", "\u2588", "\u2588", "\u2588"], "\u2600", "?"],
"Custom": ["\u2588", ['β', 'β', 'β', 'β'], "X", "\u00B7"],
}
# TODO:
# - Windows Adaptations
# - Cleanup
# --- BUGS
# - Bug: Apple spawn inside snake
# - Fix bug: redraw the snake tail when the menu is closed
# --- NOT URGENT
# - Keyboard Option
# - CustomTheme
# - Theme Head
# - --help menu
# - Save Config
def sigint_quit(s, f):
exit_event.set()
def clean_quit(errcode: int = 0):
exit_event.set()
print("Fin du programme")
Key.stop()
Draw.stop()
raise SystemExit(errcode)
escape = {
"\n": "enter",
("\x7f", "\x08"): "backspace",
("[A", "OA"): "up",
("[B", "OB"): "down",
("[D", "OD"): "left",
("[C", "OC"): "right",
"[2~": "insert",
"[3~": "delete",
"[H": "home",
"[F": "end",
"[5~": "page_up",
"[6~": "page_down",
"\t": "tab",
"[Z": "shift_tab",
"OP": "f1",
"OQ": "f2",
"OR": "f3",
"OS": "f4",
"[15": "f5",
"[17": "f6",
"[18": "f7",
"[19": "f8",
"[20": "f9",
"[21": "f10",
"[23": "f11",
"[24": "f12"
}
mouse_state = {
    # Change the regex if a key goes above \033[<100; : change the {1,2} quantifier to {1,3} or more
# mouse_.._click
"\033[<0;": "mouse_left_click",
"\033[<1;": "mouse_middle_click",
"\033[<2;": "mouse_right_click",
# mouse_..alt_click
"\033[<8;": "mouse_left_alt_click",
"\033[<9;": "mouse_left_alt_click",
"\033[<10;": "mouse_left_alt_click",
# mouse_..ctrl_click
"\033[<16;": "mouse_left_ctrl_click",
"\033[<17;": "mouse_middle_ctrl_click",
"\033[<18;": "mouse_right_ctrl_click",
# mouse_..altctrl_click
"\033[<24;": "mouse_left_ctrlalt_click",
"\033[<25;": "mouse_middle_ctrlalt_click",
"\033[<26;": "mouse_right_ctrlalt_click",
# mouse_drag_.._click
"\033[<32;": "mouse_drag_left_click",
"\033[<33;": "mouse_drag_middle_click",
"\033[<34;": "mouse_drad_right_click",
# mouse_drag_..alt_click
"\033[<40;": "mouse_left_alt_click",
"\033[<41;": "mouse_left_alt_click",
"\033[<42;": "mouse_left_alt_click",
# mouse_drag_..ctrl_click
"\033[<48;": "mouse_left_ctrl_click",
"\033[<49;": "mouse_middle_ctrl_click",
"\033[<50;": "mouse_right_ctrl_click",
# mouse_drag_..ctrlalt_click
"\033[<56;": "mouse_left_ctrlalt_click",
"\033[<57;": "mouse_middle_ctrlalt_click",
"\033[<58;": "mouse_right_ctrlalt_click",
# mouse_scroll..
"\033[<64;": "mouse_scroll_up",
"\033[<65;": "mouse_scroll_down",
# mouse_scroll_alt..
"\033[<72;": "mouse_scroll_alt_up",
"\033[<73;": "mouse_scroll_alt_down",
# mouse_scroll_ctrl..
"\033[<80;": "mouse_scroll_ctrl_up",
"\033[<81;": "mouse_scroll_ctrl_down",
# mouse_scroll_ctrlalt..
"\033[<88;": "mouse_scroll_ctrl_up",
"\033[<89;": "mouse_scroll_ctrl_down",
}
class Actions:
    # Handler kwargs: mouse_pos=mouse_pos, click_state=click_state, clean_key=clean_key, input_save=input_save
    # mouse_pos: (x, y); click_state: up or down; clean_key: a key name such as "escape" or "mouse_..."; input_save: the raw key such as "\033[.."
dico_actions = {}
@classmethod
def set_action(cls):
cls.dico_actions = {
"z": cls.change_direction,
"s": cls.change_direction,
"d": cls.change_direction,
"q": cls.change_direction,
"\x1b[A": cls.change_direction,
"\x1b[B": cls.change_direction,
"\x1b[C": cls.change_direction,
"\x1b[D": cls.change_direction,
"m": Draw.show_menu,
"escape": Draw.show_menu,
"r": Draw.restart,
}
@classmethod
def change_option_dead(cls, **kwargs):
if kwargs["clean_key"] in ["z", "\x1b[A"]:
Draw.dead_option_number = (Draw.dead_option_number - 1)
if kwargs["clean_key"] in ["s", "\x1b[B"]:
Draw.dead_option_number = (Draw.dead_option_number + 1)
        Draw.dead_option_number %= 2  # because there are 2 options: Restart, Quit
Draw.draw_dead_options()
@classmethod
def set_dead_action(cls):
cls.dico_actions = {
"r": Draw.restart, # restart
"escape": Draw.show_menu, # restart
"z": cls.change_option_dead,
"\x1b[A": cls.change_option_dead,
"s": cls.change_option_dead,
"\x1b[B": cls.change_option_dead,
"\n": cls.do_dead_option_action,
}
@classmethod
def do_dead_option_action(cls, **kwargs):
if Draw.dead_option_number == 0:
Draw.restart()
if Draw.dead_option_number == 1:
sigint_quit(0, None)
@classmethod
def set_menu_action(cls):
cls.dico_actions = {
"r": Draw.restart,
"m": Draw.show_menu,
"escape": Draw.show_menu,
"z": cls.change_option_menu,
"\x1b[A": cls.change_option_menu,
"s": cls.change_option_menu,
"\x1b[B": cls.change_option_menu,
"\n": cls.do_option_action,
"q": cls.do_option_action,
"\x1b[D": cls.do_option_action,
"d": cls.do_option_action,
"\x1b[C": cls.do_option_action,
}
@classmethod
def change_direction(cls, **kwargs):
directions = {
"z": 1,
"\x1b[A": 1,
"q": 0,
"\x1b[D": 0,
"d": 2,
"\x1b[C": 2,
"s": 3,
"\x1b[B": 3,
}
if not Draw.lock:
if directions[kwargs["clean_key"]] % 2 != Draw.facing % 2:
Draw.facing = directions[kwargs["clean_key"]]
Draw.lock = True
@classmethod
def change_option_menu(cls, **kwargs):
if kwargs["clean_key"] in ["z", "\x1b[A"]:
Draw.option_number = (Draw.option_number - 1)
if kwargs["clean_key"] in ["s", "\x1b[B"]:
Draw.option_number = (Draw.option_number + 1)
Draw.option_number %= len(Draw.menu_options)
Draw.draw_options()
@classmethod
def do_option_action(cls, **kwargs):
option = tuple(Draw.menu_options.keys())[Draw.option_number]
func = Draw.menu_options[option]
if isinstance(func, types.FunctionType) and kwargs["clean_key"] == "\n":
if option == "Quit":
func(0, None)
else:
func()
elif isinstance(func, list):
if kwargs["clean_key"] in ["q", "\x1b[C", "\n"]:
func[0] += 1
elif kwargs["clean_key"] in ["d", "\x1b[D"]:
func[0] -= 1
func[0] = func[0] % len(func[1])
            # TODO: refresh the displayed value,
            # e.g. with FPS = func[1][func[0]]
Draw.draw_options()
def game_restart(**kwargs):
Draw.menu = False
Draw.snake_pos = [(Draw.size // 2, Draw.size // 2)]
    Draw.facing = 0  # 0: left, 1: up, 2: right, 3: down
Draw.snake_long = 10
# back position -> Head
Draw.points = 0
Draw.draw_box()
Draw.set_a_apple()
Draw.dead = False
Actions.set_action()
def show_shortcuts():  # TODO: implement or remove
pass
def quit_menu():
Draw.show_menu()
class Draw:
    lock = False  # to avoid handling two direction changes in the same frame
menu = False
dead = False
    size = 32 + 2  # 2 for the borders
    facing = 0  # 0: left, 1: up, 2: right, 3: down
snake_long = 10
# back position -> Head
snake_pos = [(size // 2, size // 2)]
random_pos = ()
points = 0
logo_menu = (
"βββ β β ββββ β β ββββ",
"β ββ β β β β β β ",
"βββ β β β ββββ ββ βββ ",
" β β ββ β β β β β ",
"βββ β β β β β β ββββ",
)
@classmethod
def restart(cls, **kwargs):
cls.menu = False
cls.snake_pos = [(cls.size // 2, cls.size // 2)]
        cls.facing = 0  # 0: left, 1: up, 2: right, 3: down
cls.snake_long = 10
# back position -> Head
cls.points = 0
cls.draw_box()
cls.dead = False
Actions.set_action()
cls.set_a_apple()
@classmethod
def show_menu(cls, **kwargs):
if cls.menu:
cls.menu = False
Actions.set_action()
# print("\033[2J\033[1;1H") # CLEAR SCREEN
cls.draw_box()
print(f"\033[{cls.random_pos[1] + 1};{cls.random_pos[0] + 1}H{Themes[cls.current_theme][2]}")
cls.redraw_queue()
else:
cls.menu = True
Actions.set_menu_action()
for i in range(len(cls.logo_menu)):
print(f"\033[{i + 5};5H{cls.logo_menu[i]}")
cls.option_number = 0
cls.draw_options()
menu_options = {
        # OPTION: [current option index (default), [selectable options]]
# "FPS": [0, [10, 15, 24, 30, 60, 120]],
"Speed": [1, [.03, .05, .1, .3, .5, 1]],
"Size": [1, [16 + 2, 32 + 2, 64 + 2]], # NEED TO RESTART
"Themes": [0, ["Normal", "Full"]], #, "Custom"]],
# "Show Shortcut": show_shortcuts,
"Continue": quit_menu,
"Restart": game_restart,
"Quit": sigint_quit,
}
option_number: int = 0
dead_option_number: int = 0
speed: float = menu_options["Speed"][1][menu_options["Speed"][0]]
current_theme: str = str(menu_options["Themes"][1][menu_options["Themes"][0]])
dead_options: tuple = ("Restart", "Quit")
@classmethod
def set_a_apple(cls):
pos_of_point = randint(1, (cls.size - 2) ** 2 - cls.snake_long)
current_point = 0
for i in range(1, cls.size - 1):
for j in range(1, cls.size - 1):
if (i, j) in cls.snake_pos:
pass
else:
current_point += 1
if current_point == pos_of_point:
cls.random_pos = (i, j)
print(f"\033[{cls.random_pos[1] + 1};{cls.random_pos[0] + 1}H{Themes[cls.current_theme][2]}")
@classmethod
def draw_options(cls):
for i in range(len(cls.menu_options.keys())):
# : β {func[1][func[0]]} β
if cls.option_number == i:
message = f"\033[33m\033[{i * 2 + 11};8H{tuple(cls.menu_options.keys())[i]}\033[0m"
else:
message = f"\033[{i * 2 + 11};8H{tuple(cls.menu_options.keys())[i]}"
option = tuple(Draw.menu_options.keys())[i]
func = Draw.menu_options[option]
# replace func[1] by menu_option
if isinstance(func, list):
message += f": β {func[1][func[0]]} β "
print(message)
if option == "Themes":
Draw.current_theme = func[1][func[0]]
if option == "Size":
Draw.size = func[1][func[0]]
# Restart
if option == "Speed":
Draw.speed = func[1][func[0]]
@classmethod
def draw_dead_options(cls):
cls.logo_dead = (
"ββββ ββββ β β βββ ββββ β β βββ βββ ",
"β β β ββ ββ β β β β β β β β ",
"β ββ ββββ β β β ββ β β β β ββ ββ ",
"β β β β β β β β β β β β β β ",
"ββββ β β β β βββ ββββ β βββ β β ",
)
for j in range(len(cls.logo_dead)):
print(f"\033[{j + 5};5H{cls.logo_dead[j]}")
for i in range(2):
# Draw Gameover
# : β {func[1][func[0]]}
if cls.dead_option_number == i:
message = f"\033[33m\033[{i * 2 + 11};8H{cls.dead_options[i]}\033[0m"
else:
message = f"\033[{i * 2 + 11};8H{cls.dead_options[i]}"
print(message)
@classmethod
def draw_box(cls):
print("\033[2J\033[1;1H") # CLEAR SCREEN
print(f"\033[1;1H" + Themes[cls.current_theme][0] * cls.size)
print(f"\033[{cls.size};1H" + Themes[cls.current_theme][0] * cls.size)
for i in range(2, cls.size):
print(f"\033[{i};1H{Themes[cls.current_theme][0]}")
print(f"\033[{i};{cls.size}H{Themes[cls.current_theme][0]}")
@classmethod
def redraw_queue(cls):
for i, j in cls.snake_pos:
print(f"\033[{j};{i}H{Themes[cls.current_theme][3]}")
@classmethod
def set_dead(cls):
# Set dead menu option
# Reset points
cls.dead = True
cls.draw_dead_options()
Actions.set_dead_action()
pass
@classmethod
def _do_draw(cls):
cls.draw_box()
cls.set_a_apple()
while not cls.stopping:
if exit_event.is_set():
break
if cls.menu or cls.dead:
pass
else:
                # SET CODE HERE: do not put blocking code here (i.e. code that requires a user action)
                # Draw the snake's head
print(f"\033[{cls.snake_pos[-1][1]};{cls.snake_pos[-1][0]}H{Themes[cls.current_theme][1][cls.facing]}")
                # Move as a function of facing
if cls.facing == 0:
cls.snake_pos += [(cls.snake_pos[-1][0] - 1, cls.snake_pos[-1][1])]
elif cls.facing == 1:
cls.snake_pos += [(cls.snake_pos[-1][0], cls.snake_pos[-1][1] - 1)]
elif cls.facing == 2:
cls.snake_pos += [(cls.snake_pos[-1][0] + 1, cls.snake_pos[-1][1])]
elif cls.facing == 3:
cls.snake_pos += [(cls.snake_pos[-1][0], cls.snake_pos[-1][1] + 1)]
                if True:  # TODO: remove this always-true wrapper; game-over condition
                    if 1 < cls.snake_pos[-1][0] <= cls.size - 1 and 1 < cls.snake_pos[-1][1] <=
def G4(crystal, i, ep_type, functional='Cosine',
Rc=6.5, eta=2, lamBda=1, zeta=1):
"""
Calculate G4 symmetry function.
    G4 function is an angular function utilizing the cosine function of the
angle theta_ijk centered at atom i.
One can refer to equation 8 in:
<NAME>. (2011). Atom-centered symmetry functions for constructing
high-dimensional neural network potentials.
The Journal of chemical physics, 134(7), 074106.
Parameters
----------
crystal: object
Pymatgen crystal structure object.
i: int
The index of core element.
    ep_type: str
        The allowed element pair present in the symmetry function calculation.
functional: str
Cutoff functional. Default is Cosine functional.
Rc: float
Cutoff radius which the symmetry function will be calculated.
Default value is 6.5 as suggested by Behler.
eta: float
The parameter of G4 symmetry function.
    lamBda: float
        lamBda takes values of -1 or +1, shifting the maximum of the cosine
        function to 0 or 180 degrees.
    zeta: float
        The angular resolution. High zeta values give a narrower range of
        nonzero G4 values. Different zeta values are preferable for describing
        the distribution of angles centered at each reference atom; in that
        sense, zeta plays a role analogous to eta.
Returns
-------
G4: float
G4 symmetry value.
"""
# Cutoff functional
if functional == 'Cosine':
func = Cosine(Rc=Rc)
elif functional == 'Polynomial':
func = Polynomial(Rc=Rc)
elif functional == 'TangentH':
func = TangentH(Rc=Rc)
else:
raise NotImplementedError('Unknown cutoff functional: %s' %functional)
# Get core atoms information
Ri = crystal.cart_coords[i]
# Get neighbors information
neighbors = crystal.get_all_neighbors(Rc)
G4 = 0.0
for j in range(len(neighbors[i])-1):
for k in range(j+1, len(neighbors[i])):
n1 = neighbors[i][j][0].species_string
n2 = neighbors[i][k][0].species_string
if (ep_type[0] == n1 and ep_type[1] == n2) or \
(ep_type[1] == n1 and ep_type[0] == n2):
Rj = neighbors[i][j][0].coords
Rk = neighbors[i][k][0].coords
Rij_vector = Rj - Ri
Rij = np.linalg.norm(Rij_vector)
Rik_vector = Rk - Ri
Rik = np.linalg.norm(Rik_vector)
Rjk_vector = Rk - Rj
Rjk = np.linalg.norm(Rjk_vector)
cos_ijk = np.dot(Rij_vector, Rik_vector)/ Rij / Rik
term = (1. + lamBda * cos_ijk) ** zeta
term *= np.exp(-eta *
(Rij ** 2. + Rik ** 2. + Rjk ** 2.) /
Rc ** 2.)
term *= func(Rij) * func(Rik) * func(Rjk)
G4 += term
G4 *= 2. ** (1. - zeta)
return G4
def G4_prime(crystal, i, ep_type, ni, functional='Cosine',
Rc=6.5, eta=2, lamBda=1, zeta=1, p=1, q=0):
"""
Calculate the derivative of the G4 symmetry function.
Parameters
----------
crystal: object
Pymatgen crystal structure object.
i: int
The index of core element.
    ep_type: str
        The allowed element pair present in the symmetry function calculation.
ni: array of neighbors information
Neighbors information of the core element.
functional: str
Cutoff functional. Default is Cosine functional.
Rc: float
Cutoff radius which the symmetry function will be calculated.
Default value is 6.5 as suggested by Behler.
eta: float
The parameter of G4 symmetry function.
    lamBda: float
        lamBda takes values of -1 or +1, shifting the maximum of the cosine
        function to 0 or 180 degrees.
    zeta: float
        The angular resolution. High zeta values give a narrower range of
        nonzero G4 values. Different zeta values are preferable for describing
        the distribution of angles centered at each reference atom; in that
        sense, zeta plays a role analogous to eta.
p: int
The atom that the force is acting on.
q: int
Direction of force.
Returns
-------
G4p: float
The derivative of G4 symmetry function.
"""
# Cutoff functional
if functional == 'Cosine':
func = Cosine(Rc=Rc)
elif functional == 'Polynomial':
func = Polynomial(Rc=Rc)
elif functional == 'TangentH':
func = TangentH(Rc=Rc)
else:
raise NotImplementedError('Unknown cutoff functional: %s' %functional)
# Get positions of core atoms
Ri = crystal.cart_coords[i]
counts = range(len(ni))
G4p = 0
for j in counts:
for k in counts[(j+1):]:
n1 = ni[j][0].species_string
n2 = ni[k][0].species_string
if (ep_type[0] == n1 and ep_type[1] == n2) or \
(ep_type[1] == n1 and ep_type[0] == n2):
Rj = ni[j][0].coords
Rk = ni[k][0].coords
Rij_vector = Rj - Ri
Rij = np.linalg.norm(Rij_vector)
Rik_vector = Rk - Ri
Rik = np.linalg.norm(Rik_vector)
Rjk_vector = Rk - Rj
Rjk = np.linalg.norm(Rjk_vector)
cos_ijk = np.dot(Rij_vector, Rik_vector)/ Rij / Rik
dcos_ijk = dcos_dRpq(i, ni[j][2], ni[k][2], Ri, Rj, Rk, p, q)
cutoff = func(Rij) * func(Rik) * func(Rjk)
cutoff_Rik_Rjk = func(Rik) * func(Rjk)
cutoff_Rij_Rjk = func(Rij) * func(Rjk)
cutoff_Rij_Rik = func(Rij) * func(Rik)
dRij = dRab_dRpq(i, ni[j][2], Ri, Rj, p, q)
dRik = dRab_dRpq(i, ni[k][2], Ri, Rk, p, q)
dRjk = dRab_dRpq(ni[j][2], ni[k][2], Rj, Rk, p, q)
cutoff_Rij_derivative = func.derivative(Rij) * dRij
cutoff_Rik_derivative = func.derivative(Rik) * dRik
cutoff_Rjk_derivative = func.derivative(Rjk) * dRjk
lamBda_term = 1. + lamBda * cos_ijk
first_term = lamBda * zeta * dcos_ijk
first_term += (-2. * eta * lamBda_term / (Rc ** 2)) * \
(Rij * dRij + Rik * dRik + Rjk * dRjk)
first_term *= cutoff
second_term = cutoff_Rij_derivative * cutoff_Rik_Rjk + \
cutoff_Rik_derivative * cutoff_Rij_Rjk + \
cutoff_Rjk_derivative * cutoff_Rij_Rik
second_term *= lamBda_term
term = first_term + second_term
term *= lamBda_term ** (zeta - 1.)
term *= np.exp(-eta * (Rij ** 2. + Rik ** 2. + Rjk ** 2.) /
Rc ** 2.)
G4p += term
G4p *= 2. ** (1. - zeta)
return G4p
def G5(crystal, i, ep_type, functional='Cosine',
Rc=6.5, eta=2, lamBda=1, zeta=1):
"""
Calculate G5 symmetry function.
    G5 function is also an angular function utilizing the cosine function of the
angle theta_ijk centered at atom i. The difference between G5 and G4 is
that G5 does not depend on the Rjk value. Hence, the G5 will generate a
greater value after the summation compared to G4.
One can refer to equation 9 in:
<NAME>. (2011). Atom-centered symmetry functions for constructing
high-dimensional neural network potentials.
The Journal of chemical physics, 134(7), 074106.
Parameters
----------
crystal: object
Pymatgen crystal structure object.
i: int
The index of core element.
    ep_type: str
        The allowed element pair present in the symmetry function calculation.
functional: str
Cutoff functional. Default is Cosine functional.
Rc: float
Cutoff radius which the symmetry function will be calculated.
Default value is 6.5 as suggested by Behler.
    eta: float
        The parameter of the G5 symmetry function.
    lamBda: float
        lamBda takes values of -1 or +1, shifting the maximum of the cosine
        function to 0 or 180 degrees.
    zeta: float
        The angular resolution. High zeta values give a narrower range of
        nonzero G5 values. Different zeta values are preferable for describing
        the distribution of angles centered at each reference atom; in that
        sense, zeta plays a role analogous to eta.
Returns
-------
G5: float
G5 symmetry value.
"""
# Cutoff functional
if functional == 'Cosine':
func = Cosine(Rc=Rc)
elif functional == 'Polynomial':
func = Polynomial(Rc=Rc)
elif functional == 'TangentH':
func = TangentH(Rc=Rc)
else:
raise NotImplementedError('Unknown cutoff functional: %s' %functional)
# Get core atoms information
Ri = crystal.cart_coords[i]
# Get neighbors information
neighbors = crystal.get_all_neighbors(Rc)
G5 = 0.0
for j in range(len(neighbors[i])-1):
for k in range(j+1, len(neighbors[i])):
n1 = neighbors[i][j][0]
n2 = neighbors[i][k][0]
if (ep_type[0] == n1.species_string \
and ep_type[1] == n2.species_string) or \
(ep_type[1] == n1.species_string and \
ep_type[0] == n2.species_string):
Rij_vector = Ri - n1.coords
Rij = np.linalg.norm(Rij_vector)
Rik_vector = Ri - n2.coords
Rik = np.linalg.norm(Rik_vector)
cos_ijk = np.dot(Rij_vector, Rik_vector)/ Rij / Rik
term = (1. + lamBda * cos_ijk) ** zeta
term *= np.exp(-eta *
(Rij ** 2. + Rik ** 2.) / Rc ** 2.)
term *= func(Rij) * func(Rik)
G5 += term
G5 *= 2. ** (1. - zeta)
return G5
def G5_prime(crystal, i, ep_type, ni, functional='Cosine',
Rc=6.5, eta=2, lamBda=1, zeta=1, p=1, q=0):
"""
Calculate the derivative of the G5 symmetry function.
Parameters
----------
crystal: object
Pymatgen crystal structure object.
i: int
The index of core element.
    ep_type: str
        The allowed element pair present in the symmetry function calculation.
#How to run this:
#Python libraries needed to run this file: Flask, Git Python, SQLAlchemy
#You will need to have Git installed, and it will need to be in your path.
#For example, on Windows you should be able to run a command like 'git pull' from the
#ordinary Windows command prompt and not just from Git Bash.
#You will need a MySQL server with the MSR14 datasource or other GHTorrent database with the same schema.
#Edit the line in this code that says db = sqlalchemy.create_engine to match your username:password@hostname:port/database.
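#For example, a hypothetical connection string (same format as the create_engine call further down):
#  db = sqlalchemy.create_engine('mysql+pymysql://myuser:mypassword@localhost:3306/msr14')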
#This file is hardcoded to download the ghdata repository.
#Since it is a preliminary example, each time it runs,
#it deletes the local ghdata repo and re-downloads it (though this might not be a good option for the future).
#Because of this: if you have a folder named ghdata whose contents you do not want deleted,
#do not place this file in the same folder as your ghdata folder.
#to run this, type "python pythonBlameHistoryTree.py" into the command prompt
#You will see some output about running on 127.0.0.1:5000 in the command prompt
#Open a web browser and navigate to 127.0.0.1:5000.
#This page will load for quite a while. At least several minutes is expected.
#You can see it is still running from the testing output in the command prompt: "Outer loop: <commit>" and "Inner loop: <commit>"
#When the testing output stops running you should see some output in the browser tab.
#the output shows the commit number and date, the total lines of code and other files (for example, the readme)
#and the percentage written by each organization.
#expected output for ghdata should show only the spdx-tools organization (Matt is a member)
#Number of lines corresponds to the lines written by Matt.
#You can see that earlier commits are lower on the page, and chronologically later ones appear higher up.
#An "error" I expect us to encounter when testing other repos:
#The way my sql query works right now, a user can be a member of multiple organizations.
#For a simple case of expected output problems:
#User1 wrote the entire repository (100%)
#User1 is a member of Microsoft and IBM
#Microsoft wrote 100% of the repository. IBM also wrote 100% of the repository for a total of 200%
#Other issues:
#If a user does not have both an email and organization available in GHTorrent database,
#the user will not be counted towards any organization.
#Future changes planned for this file:
#Code cleanup for better readability
#Code commenting for each portion
#Thorough testing for various potential cases we might encounter
#Deciding for certain how to decide whether a user is a member of an organization
#A better method of dealing with local repository rather than deleting each time and re-downloading
#Not having the database password directly in the code
#Look into improving code efficiency where possible for faster runtime
from flask import Flask
from git import *
import sqlalchemy
from sqlalchemy import text
import shutil
import os
import stat
import time
app = Flask(__name__)
@app.route("/")
def pythonBlameHistory():
#path is the hardcoded folder for the last download of ghdata
repo_path = './ghdata'
#We must remove the old ghdata if we want to download a new copy.
#In order to delete it, we must first change the permissions
#To be writable for all files and directories.
#Based on this: http://stackoverflow.com/questions/2853723/whats-the-python-way-for-recursively-setting-file-permissions
if os.path.exists(repo_path):
for root, directories, files in os.walk(repo_path):
for directory in directories:
os.chmod(os.path.join(root, directory), stat.S_IWRITE)
for file in files:
os.chmod(os.path.join(root, file), stat.S_IWRITE)
os.chmod(repo_path, stat.S_IWRITE)
#delete the old ghdata
shutil.rmtree(repo_path)
#connect to the database username:password@hostname:port/databasename
db = sqlalchemy.create_engine('mysql+pymysql://root:password@localhost:3306/msr14')
schema = sqlalchemy.MetaData()
schema.reflect(bind=db)
#Get the ghdata repository from GitHub
repo = Repo.init('ghdata')
origin = repo.create_remote('origin','https://github.com/OSSHealth/ghdata.git')
origin.fetch()
origin.pull(origin.refs[0].remote_head)
#Dictionary to store results of sql queries
#associating emails with organizations.
#Without this dictionary, we would have to repeat
#the same query over and over, which on my local machine
#meant a runtime of over 24 hours (as opposed to several minutes using the dictionary)
orgs_associated_with_user = {}
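    #e.g. a hypothetical mapping: {'dev@example.com': ['spdx-tools'], 'other@example.com': []}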
#This dictionary keeps track of the lines written per organization for a single file.
lines_per_organization_per_file = {}
#This is the total number of lines in a single file
total_lines_in_file = 0
#this is used later to hold percentage results for output
percentage = 0
#This is the total number of lines in an entire repo
total_lines_in_repo = 0
#This dictionary keeps track of the lines written per organization for the entire repo.
lines_per_organization_entire_repo = {}
#The output string will be displayed to the screen once everything is done running.
outputString = ""
#Outer loop: loop through each commit in the master branch.
#This corresponds to the history of commits over time.
for history_commit in repo.iter_commits('master'):
#Since we want to see the change over time in repo percentage by organization,
#clear the variables for total lines and organization lines for each new commit
#we examine.
lines_per_organization_entire_repo = {}
total_lines_in_repo = 0
#Testing output: only purpose is to show you it's still running :)
print("Outer loop: " + str(history_commit))
#Now loop through every file in the repo.
#You cannot use the os library file/directory loop for this part.
#(as was used above to change file permissions)
#That is because some files do not exist in every commit.
#You must loop through the commit tree, not the ghdata directory.
for file_in_repo in history_commit.tree.traverse():
#For each file, we want to clear out the total lines and organization totals per file.
#That's because we're starting over with a new file.
lines_per_organization_per_file = {}
total_lines_in_file = 0
#Files are of the blob type. This if statement prevents us from trying
#to examine 'lines' in a directory.
if file_in_repo.type == 'blob':
#Now for each file, perform git blame. This will traverse
#the lines in the file.
#You can see there are now two variables of type commit:
#history_commit and blame_commit (will improve variable naming in a future update)
#history_commit is the commit with respect to the overall repo history.
#blame_commit is the commit in which this line was most recently changed
#as obtained through git blame. We use the "blame_commit" variable
#to obtain the author of the commit for when the lines were last changed.
for blame_commit, lines in repo.blame(history_commit, file_in_repo.path):
#Git blame does not always return one line at a time.
#Sometimes we are returned several lines committed by the same author.
#In that case, we must count how many lines there are or our
#total will not match the actual file.
blameLineCount = 0
for line in lines:
#increment lines to later attribute to an organization.
blameLineCount += 1
#increment lines in the file as a whole
total_lines_in_file += 1
#Testing output: only shows that things are still running.
print("Inner loop: " + str(blame_commit))
#Get the email address of the author of this commit.
#If we already have it in our dictionary, increase the total
#lines for the associated organization by blameLineCount
if blame_commit.author.email in orgs_associated_with_user:
for organization in orgs_associated_with_user[blame_commit.author.email]:
if organization not in lines_per_organization_per_file:
lines_per_organization_per_file[organization] = blameLineCount
else:
lines_per_organization_per_file[organization] += blameLineCount
#If the email address is not in our dictionary, we must query
#the database to get any associated organizations.
else:
sql = text('select orgUser.login as org_name '
'from users as thisUser join organization_members '
'on organization_members.user_id = thisUser.id '
'join users as orgUser on organization_members.org_id = orgUser.id '
'where thisUser.email = "' + blame_commit.author.email + '"')
result = db.engine.execute(sql)
#add the email to the dictionary
orgs_associated_with_user[blame_commit.author.email] = []
#if there are organizations in the result, associate those organizations with the
#user email in the dictionary.
#Then, set or add blameLineCount to the organization total.
for organization_row in result:
orgs_associated_with_user[blame_commit.author.email] = orgs_associated_with_user[blame_commit.author.email] + [organization_row[0]]
if organization_row[0] not in lines_per_organization_per_file:
lines_per_organization_per_file[organization_row[0]] = blameLineCount
else:
lines_per_organization_per_file[organization_row[0]] += blameLineCount
#If there is at least one line in this file
if total_lines_in_file > 0:
#Add the total lines in this file to the total lines in the repo.
total_lines_in_repo += total_lines_in_file
#Loop through the organization total lines for this file.
                #Add each organization's total for this file to the repo's organization totals.
value is not None and not isinstance(value, bool):
raise BadValueError('Property %s must be a boolean, not a %s'
% (self.name, type(value).__name__))
return value
def convert(self, value):
if not value:
return False
if value in ['1', 'True', 'true', True]:
return True
else:
return False
class ReferenceProperty(Property):
"""A property that represents a many-to-one reference to another model.
"""
data_type = int
field_class = PKCLASS()
type_name = 'Reference'
def __init__(self, reference_class=None, label=None, collection_name=None,
reference_fieldname=None, required=False, engine_name=None, **attrs):
"""Construct ReferenceProperty.
Args:
reference_class: Which model class this property references.
verbose_name or label: User friendly name of property.
collection_name: If provided, alternate name of collection on
reference_class to store back references. Use this to allow
a Model to have multiple fields which refer to the same class.
reference_fieldname used to specify which fieldname of reference_class
should be referenced
"""
super(ReferenceProperty, self).__init__(label, **attrs)
self._collection_name = collection_name
if reference_class and isinstance(reference_class, type) and issubclass(reference_class, Model):
self.reference_fieldname = reference_fieldname or reference_class._primary_field
else:
self.reference_fieldname = reference_fieldname
self.required = required
self.engine_name = engine_name
self.reference_class = reference_class
if __lazy_model_init__:
if inspect.isclass(self.reference_class) and issubclass(self.reference_class, Model):
warnings.simplefilter('default')
warnings.warn("Reference Model should be a string type, but [%s] model class found." % self.reference_class.__name__, DeprecationWarning)
def create(self, cls):
global __nullable__
args = self.kwargs.copy()
args['key'] = self.name
# if not callable(self.default):
# args['default'] = self.default
args['primary_key'] = self.kwargs.get('primary_key', False)
args['autoincrement'] = self.kwargs.get('autoincrement', False)
args['index'] = self.kwargs.get('index', False)
args['unique'] = self.kwargs.get('unique', False)
args['nullable'] = self.kwargs.get('nullable', __nullable__)
f_type = self._create_type()
if __server_default__:
#for int or long data_type, it'll automatically set text('0')
if self.data_type in integer_types:
args['server_default'] = text('0')
else:
v = self.reference_field.kwargs.get('server_default')
args['server_default'] = v
return Column(self.fieldname, f_type, **args)
def _create_type(self):
if not hasattr(self.reference_class, self.reference_fieldname):
            raise KindError('reference_fieldname does not exist')
self.reference_field = getattr(self.reference_class, self.reference_fieldname)
#process data_type
self.data_type = self.reference_field.data_type
field_class = self.reference_field.field_class
if self.reference_field.max_length:
f_type = field_class(self.reference_field.max_length)
else:
f_type = field_class
return f_type
def __property_config__(self, model_class, property_name):
"""Loads all of the references that point to this model.
"""
super(ReferenceProperty, self).__property_config__(model_class, property_name)
if not (
(isinstance(self.reference_class, type) and issubclass(self.reference_class, Model)) or
self.reference_class is _SELF_REFERENCE or
valid_model(self.reference_class, self.engine_name)):
raise KindError('reference_class %r must be Model or _SELF_REFERENCE or available table name' % self.reference_class)
if self.reference_class is _SELF_REFERENCE or self.reference_class is None:
self.reference_class = model_class
else:
self.reference_class = get_model(self.reference_class, self.engine_name,
signal=False)
self.reference_fieldname = self.reference_fieldname or self.reference_class._primary_field
self.collection_name = self.reference_class.get_collection_name(model_class.tablename, self._collection_name, model_class.tablename)
setattr(self.reference_class, self.collection_name,
_ReverseReferenceProperty(model_class, property_name, self._id_attr_name(), self.collection_name))
def __get__(self, model_instance, model_class):
"""Get reference object.
This method will fetch unresolved entities from the datastore if
they are not already loaded.
Returns:
ReferenceProperty to Model object if property is set, else None.
"""
if model_instance is None:
return self
if hasattr(model_instance, self._attr_name()):
# reference_id = getattr(model_instance, self._attr_name())
reference_id = self.get_lazy(model_instance, self._attr_name(), None)
else:
reference_id = None
if reference_id:
#this will cache the reference object
resolved = getattr(model_instance, self._resolved_attr_name())
if resolved is not None:
return resolved
else:
#change id_field to reference_fieldname
# id_field = self._id_attr_name()
# d = self.reference_class.c[id_field]
d = self.reference_class.c[self.reference_fieldname]
instance = self.reference_class.get(d==reference_id)
if instance is None:
raise NotFound('ReferenceProperty %s failed to be resolved' % self.reference_fieldname, self.reference_class, reference_id)
setattr(model_instance, self._resolved_attr_name(), instance)
return instance
else:
return None
def get_value_for_datastore(self, model_instance):
if not model_instance:
return None
else:
return getattr(model_instance, self._attr_name(), None)
def __set__(self, model_instance, value):
"""Set reference."""
value = self.validate(value)
if value is not None:
if not isinstance(value, Model):
setattr(model_instance, self._attr_name(), value)
setattr(model_instance, self._resolved_attr_name(), None)
else:
setattr(model_instance, self._attr_name(), getattr(value, self.reference_fieldname))
setattr(model_instance, self._resolved_attr_name(), value)
else:
setattr(model_instance, self._attr_name(), None)
setattr(model_instance, self._resolved_attr_name(), None)
def validate(self, value):
"""Validate reference.
Returns:
A valid value.
Raises:
BadValueError for the following reasons:
- Value is not saved.
- Object not of correct model type for reference.
"""
if value == '':
if self.kwargs.get('nullable', __nullable__):
value = None
else:
value = 0
if not isinstance(value, Model):
return super(ReferenceProperty, self).validate(value)
if not value.is_saved():
raise BadValueError(
'%s instance must be saved before it can be stored as a '
'reference' % self.reference_class.__name__)
if not isinstance(value, self.reference_class):
raise KindError('Property %s must be an instance of %s' %
(self.name, self.reference_class.__name__))
return value
validate_dump = validate
def _id_attr_name(self):
"""Get attribute of referenced id.
"""
return self.reference_fieldname
def _resolved_attr_name(self):
"""Get attribute of resolved attribute.
The resolved attribute is where the actual loaded reference instance is
stored on the referring model instance.
Returns:
Attribute name of where to store resolved reference model instance.
"""
return '_RESOLVED_' + self._attr_name()
def convert(self, value):
if value == '':
return 0
if value is None:
return value
try:
return self.data_type(value)
except Exception:
print('Error converting {!r} to {} for property {}'.format(value,
self.data_type.__name__, self.property_name))
raise
def get_column_type_name(self):
return self.reference_field.get_column_type_name()
Reference = ReferenceProperty
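# Illustrative usage sketch (not part of the original source; the model and
# field names below are hypothetical and assume the usual declarative Model
# API of this ORM):
#
#     class Group(Model):
#         name = Field(str)
#
#     class User(Model):
#         name = Field(str)
#         group = Reference('group', collection_name='members')
#
#     # user.group lazily resolves and caches the referenced Group row via
#     # __get__; __set__ accepts either a Group instance or a raw key and
#     # stores the reference_fieldname value plus the resolved object.
#     # group.members is the reverse collection installed by
#     # __property_config__ through _ReverseReferenceProperty.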
class OneToOne(ReferenceProperty):
type_name = 'OneToOne'
def create(self, cls):
global __nullable__
args = self.kwargs.copy()
args['key'] = self.name
# if not callable(self.default):
# args['default'] = self.default
args['primary_key'] = self.kwargs.get('primary_key', False)
args['autoincrement'] = self.kwargs.get('autoincrement', False)
args['index'] = self.kwargs.get('index', True)
args['unique'] = self.kwargs.get('unique', True)
args['nullable'] = self.kwargs.get('nullable', __nullable__)
f_type = self._create_type()
if __server_default__:
if self.data_type in integer_types:
args['server_default'] = text('0')
else:
args['server_default'] = self.reference_field.kwargs.get('server_default')
return Column(self.fieldname, f_type, **args)
def __property_config__(self, model_class, property_name):
"""Loads all of the references that point to this model.
"""
#Invoke super() with ReferenceProperty directly so that ReferenceProperty's
#own __property_config__ is skipped and the implementation of its parent
#class is called instead
super(ReferenceProperty, self).__property_config__(model_class, property_name)
if not (
(isinstance(self.reference_class, type) and issubclass(self.reference_class, Model)) or
self.reference_class is _SELF_REFERENCE or
valid_model(self.reference_class, self.engine_name)):
raise KindError('reference_class %r must be a Model, _SELF_REFERENCE or an available table name' % self.reference_class)
if self.reference_class is _SELF_REFERENCE:
self.reference_class = self.data_type = model_class
else:
self.reference_class = get_model(self.reference_class, self.engine_name,
signal=False)
self.reference_fieldname = self.reference_fieldname or self.reference_class._primary_field
self.collection_name = self._collection_name
if self.collection_name is None:
self.collection_name = '%s' % (model_class.tablename)
#re-entry is allowed (2015/10/29); the duplicate-property check below was disabled
# if hasattr(self.reference_class, self.collection_name):
# raise DuplicatePropertyError('Class %s already has property %s'
# % (self.reference_class.__name__, self.collection_name))
setattr(self.reference_class, self.collection_name,
_OneToOneReverseReferenceProperty(model_class, property_name,
self._id_attr_name(), self.collection_name))
#append to reference_class._onetoone
self.reference_class._onetoone[self.collection_name] = model_class
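# Illustrative sketch (not part of the original source; names are
# hypothetical): OneToOne behaves like Reference, but create() defaults the
# column to index=True/unique=True, and the reverse attribute resolves to a
# single object instead of a collection.
#
#     class Profile(Model):
#         user = OneToOne('user')
#
#     # profile.user -> the referenced User instance
#     # user.profile -> the single Profile that points back at this User
#     #                 (the reverse name defaults to the referring tablename)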
def get_objs_columns(objs, field=None, model=None):
keys = []
new_objs = []
if isinstance(objs, string_types):
objs = objs.split(',')
for x in objs:
if not x:
continue
if isinstance(x, (tuple, list)):
new_objs.extend(x)
else:
new_objs.append(x)
if model and field:
prop = getattr(model, field)
else:
prop = None
for o in new_objs:
if not isinstance(o, Model):
if prop:
key = prop.validate(o)
else:
key = o
else:
key = o.get_datastore_value(field or o._primary_field)
if key not in keys:
keys.append(key)
return keys
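# Illustrative sketch (not part of the original source; the inputs below are
# hypothetical): get_objs_columns normalizes a comma-separated string, a list
# of keys, or a list of Model instances into a de-duplicated list of key
# values, validating raw keys through the field property when model/field
# are supplied.
#
#     get_objs_columns('1,2,2')              # -> ['1', '2']
#     get_objs_columns([user1, user2, 3])    # -> [user1_key, user2_key, 3]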
class Result(object):
def __init__(self, model=None, condition=None, *args, **kwargs):
self.model = model
self.condition = condition
self.columns = list(self.model.table.c)
self.funcs = []
self.args = args
self.kwargs = kwargs
self.result = None
self.default_query_flag = True
self._group_by = None
self._having = None
self.distinct_field = None
self._values_flag = False
self._join = []
self._limit = None
self._offset = None
self.connection = model.get_session()
def do_(self, query):
global do_
return do_(query, self.connection)
def get_column(self, model, fieldname):
if isinstance(fieldname, string_types):
if issubclass(model, Model):
v = fieldname.split('.')
if len(v) > 1:
field = get_model(v[0], engine_name=self.model.get_engine_name(),
signal=False).table.c[v[1]]
else:
field = model.table.c[fieldname]
else:
field = model.c[fieldname]
else:
field = fieldname
return field
def get_columns(self, model=None, columns=None):
columns = columns or self.columns
model = model or self.model
fields = []
field = None
if self.distinct_field is not None:
field = self.get_column(model, self.distinct_field)
fields.append(func.distinct(field).label(field.name))
for col in columns:
if col is not field:
fields.append(col)
return fields
def get_fields(self):
"""
get property instance according self.columns
"""
columns = self.columns
model = self.model
fields = []
for col in columns:
if isinstance(col, string_types):
v = col.split('.')
if len(v) > 1:
field = get_model(v[0], engine_name=self.model.get_engine_name(),
signal=False).properties[v[1]]
else:
field = model.properties[col]
elif isinstance(col, Column):
field = get_model(col.table.name, engine_name=self.model.get_engine_name(),
signal=False).properties[col.name]
else:
field = col
fields.append(field)
return fields
def connect(self, connection):
if connection:
self.connection = connection
return self
use = connect
def all(self):
return self
def empty(self):
"""
return empty query set
"""
return self.filter(false())
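# Illustrative sketch (not part of the original source; the model and
# connection objects are hypothetical, and empty() relies on the filter()
# method defined further down on Result):
#
#     result = Result(model=User, condition=User.c.name == 'foo')
#     result.connect(some_connection)   # or .use(...), the alias above
#     nothing = result.empty()          # a query guaranteed to match no rows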
def join(self, model, cond, isouter=False):
_join = None
model = get_model(model, engine_name=self.model.get_engine_name(),
signal=False)
if issubclass(model, Model):
# if cond is None:
# for prop in Model.properties:
# if isinstance(prop, ReferenceProperty) and prop.reference_class is self.model:
# _right = prop.reference_class
# _join = self.model.table.join(_right.table,
# _right.c[prop.reference_fieldname])
# break
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module used by gcloud to communicate with appengine services."""
from __future__ import with_statement
import urllib2
from googlecloudsdk.api_lib.app import logs_requestor
from googlecloudsdk.api_lib.app import util
from googlecloudsdk.api_lib.app import yaml_parsing
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.credentials import devshell as c_devshell
from googlecloudsdk.core.credentials import http
from googlecloudsdk.core.credentials import service_account as c_service_account
from googlecloudsdk.core.credentials import store as c_store
from googlecloudsdk.third_party.appengine.datastore import datastore_index
from googlecloudsdk.third_party.appengine.tools import appengine_rpc_httplib2
from oauth2client.contrib import gce as oauth2client_gce
import yaml
APPCFG_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
# Parameters for reading from the GCE metadata service.
METADATA_BASE = 'http://metadata.google.internal'
SERVICE_ACCOUNT_BASE = ('computeMetadata/v1/instance/service-accounts/default')
RpcServerClass = appengine_rpc_httplib2.HttpRpcServerOAuth2 # pylint: disable=invalid-name
class Error(exceptions.Error):
"""Base exception for the module."""
pass
class UnknownConfigType(Error):
"""An exception for when trying to update a config type we don't know."""
pass
class AppengineClient(object):
"""Client used by gcloud to communicate with appengine services.
Attributes:
server: The appengine server to which requests are sent.
project: The appengine application in use.
oauth2_access_token: An existing OAuth2 access token to use.
oauth2_refresh_token: An existing OAuth2 refresh token to use.
authenticate_service_account: Authenticate using the default service account
for the Google Compute Engine VM in which gcloud is being called.
ignore_bad_certs: Whether to ignore certificate errors when talking to the
server.
"""
def __init__(self, server=None, ignore_bad_certs=False):
self.server = server or 'appengine.google.com'
self.project = properties.VALUES.core.project.Get(required=True)
self.ignore_bad_certs = ignore_bad_certs
# Auth related options
self.oauth2_access_token = None
self.oauth2_refresh_token = None
self.oauth_scopes = APPCFG_SCOPES
self.authenticate_service_account = False
self.client_id = None
self.client_secret = None
account = properties.VALUES.core.account.Get()
# This statement will raise a c_store.Error if there is a problem
# fetching credentials.
credentials = c_store.Load(account=account)
if isinstance(credentials, c_service_account.ServiceAccountCredentials):
self.oauth2_access_token = credentials.access_token
self.client_id = credentials.client_id
self.client_secret = credentials.client_secret
elif isinstance(credentials, c_devshell.DevshellCredentials):
# TODO(user): This passes the access token to use for API calls to
# appcfg which means that commands that are longer than the lifetime
# of the access token may fail - e.g. some long deployments. The proper
# solution is to integrate appcfg closer with the Cloud SDK libraries,
# this code will go away then and the standard credentials flow will be
# used.
self.oauth2_access_token = credentials.access_token
self.client_id = None
self.client_secret = None
elif isinstance(credentials, oauth2client_gce.AppAssertionCredentials):
# If we are on GCE, use the service account
self.authenticate_service_account = True
self.client_id = None
self.client_secret = None
else:
# Otherwise use a stored refresh token
self.oauth2_refresh_token = credentials.refresh_token
self.client_id = credentials.client_id
self.client_secret = credentials.client_secret
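# Illustrative usage sketch (not part of the original source; the index_yaml
# object below is hypothetical and would normally come from yaml_parsing):
#
#     client = AppengineClient(ignore_bad_certs=False)
#     client.PrepareVmRuntime()
#     client.CleanupIndexes(index_yaml)   # prompts before deleting unused indexes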
def CleanupIndexes(self, index_yaml):
"""Removes unused datastore indexes.
Args:
index_yaml: The parsed yaml file with index data.
"""
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/datastore/index/diff',
app_id=self.project, payload=index_yaml.ToYAML())
unused_new_indexes, notused_indexes = (
datastore_index.ParseMultipleIndexDefinitions(response))
# Get confirmation from user which indexes should be deleted.
deletions = datastore_index.IndexDefinitions(indexes=[])
if notused_indexes.indexes:
for index in notused_indexes.indexes:
msg = ('This index is no longer defined in your index.yaml file.\n{0}'
.format(str(index.ToYAML())))
prompt = 'Do you want to delete this index'
if console_io.PromptContinue(msg, prompt, default=True):
deletions.indexes.append(index)
# Do deletions of confirmed indexes.
if deletions.indexes:
response = rpcserver.Send('/api/datastore/index/delete',
app_id=self.project, payload=deletions.ToYAML())
not_deleted = datastore_index.ParseIndexDefinitions(response)
# Notify the user when indexes are not deleted.
if not_deleted.indexes:
not_deleted_count = len(not_deleted.indexes)
if not_deleted_count == 1:
warning_message = ('An index was not deleted. Most likely this is '
'because it no longer exists.\n\n')
else:
warning_message = ('%d indexes were not deleted. Most likely this '
'is because they no longer exist.\n\n'
% not_deleted_count)
for index in not_deleted.indexes:
warning_message += index.ToYAML()
log.warning(warning_message)
def GetLogs(self, service, version, severity, vhost, include_vhost,
include_all, num_days, end_date, output_file):
"""Get application logs for the given version of the service.
Args:
service: str, The service of the app to fetch logs from.
version: str, The version of the app to fetch logs for.
severity: int, App log severity to request (0-4); None for request logs
only.
vhost: str, The virtual host of log messages to get. None for all hosts.
include_vhost: bool, If true, the virtual host is included in log
messages.
include_all: bool, If true, we add to the log message everything we know
about the request.
num_days: int, Number of days worth of logs to export; 0 for all
available.
end_date: datetime.date, Date object representing last day of logs to
return. If None, today is used.
output_file: Output file name or '-' for standard output.
"""
rpcserver = self._GetRpcServer()
requestor = logs_requestor.LogsRequester(
rpcserver, self.project, service, version, severity, vhost,
include_vhost, include_all)
requestor.DownloadLogs(num_days, end_date, output_file)
def GetLogsAppend(self, service, version, severity, vhost, include_vhost,
include_all, end_date, output_file):
"""Get application logs and append them to an existing file.
Args:
service: str, The service of the app to fetch logs from.
version: str, The version of the app to fetch logs for.
severity: int, App log severity to request (0-4); None for request logs
only.
vhost: str, The virtual host of log messages to get. None for all hosts.
include_vhost: bool, If true, the virtual host is included in log
messages.
include_all: bool, If true, we add to the log message everything we know
about the request.
end_date: datetime.date, Date object representing last day of logs to
return. If None, today is used.
output_file: Output file name or '-' for standard output.
"""
rpcserver = self._GetRpcServer()
requestor = logs_requestor.LogsRequester(
rpcserver, self.project, service, version, severity, vhost,
include_vhost, include_all)
requestor.DownloadLogsAppend(end_date, output_file)
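# Illustrative sketch (not part of the original source): both log helpers
# delegate to logs_requestor.LogsRequester; GetLogs writes a fresh file (or
# stdout when output_file is '-'), while GetLogsAppend appends to an existing
# file. The service/version values below are hypothetical.
#
#     client.GetLogs('default', 'v1', severity=None, vhost=None,
#                    include_vhost=False, include_all=False,
#                    num_days=1, end_date=None, output_file='app.log')
#     client.GetLogsAppend('default', 'v1', None, None, False, False,
#                          None, 'app.log')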
def PrepareVmRuntime(self):
"""Prepare the application for vm runtimes and return state."""
rpcserver = self._GetRpcServer(timeout_max_errors=5)
rpcserver.Send('/api/vms/prepare', app_id=self.project)
def SetManagedByGoogle(self, service, version, instance=None, wait=True):
"""Sets a service version (and optionally an instance) to Google managed.
This will reboot the machine and restore the instance with a fresh runtime.
Args:
service: str, The service to update.
version: str, The version of the service to update.
instance: str, The instance id of a single instance to update.
wait: bool, True to wait until it takes effect.
Returns:
None, if not waiting. If waiting, returns (bool, message) for the last
attempt at checking state.
"""
return self._SetManagedBy(service, version, instance, '/api/vms/lock', wait)
def SetManagedBySelf(self, service, version, instance=None, wait=True):
"""Sets a service version (optionally a single instance) as self managed.
This is the 'break the glass' mode that lets you ssh into the machine and
debug.
Args:
service: str, The service to update.
version: str, The version of the service to update.
instance: str, The instance id of a single instance to update.
wait: bool, True to wait until it takes effect.
Returns:
None, if not waiting. If waiting, returns (bool, message) for the last
attempt at checking state.
"""
return self._SetManagedBy(service, version, instance, '/api/vms/debug',
wait)
def _SetManagedBy(self, service, version, instance, url, wait):
"""Switches a service version between management modes.
Args:
service: str, The service to update.
version: str, The version of the service to update.
instance: str, The instance id of a single instance to update.
url: str, The URL of the API to call to make the update.
wait: bool, True to wait until it takes effect.
Returns:
None, if not waiting. If waiting, returns (bool, message) for the last
attempt at checking state.
"""
rpcserver = self._GetRpcServer()
kwargs = {'app_id': self.project,
'version_match': version,
'module': service}
if instance:
kwargs['instance'] = instance
rpcserver.Send(url, **kwargs)
if wait:
def GetState():
yaml_data = rpcserver.Send(
'/api/vms/debugstate', app_id=self.project, version_match=version,
module=service)
state = yaml.safe_load(yaml_data)
done = state['state'] != 'PENDING'
return (done, state['message'])
def PrintRetryMessage(msg, delay):
log.status.Print('{0}. Will try again in {1} seconds.'
.format(msg, delay))
return util.RetryWithBackoff(GetState, PrintRetryMessage, initial_delay=1,
backoff_factor=2, max_delay=5, max_tries=20)
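# Illustrative sketch (not part of the original source; the service, version
# and instance id below are hypothetical): the public wrappers above poll
# /api/vms/debugstate through util.RetryWithBackoff until the reported state
# leaves PENDING.
#
#     done, message = client.SetManagedBySelf('default', 'v1',
#                                             instance='some-instance-id',
#                                             wait=True)
#     # wait=False sends the request and returns None immediately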
def StartService(self, service, version):
"""Starts serving a the given version of the service.
This only works if scaling is set to manual.
Args:
service: str, The service to start.
version: str, The version of the service to start.
"""
"""