Dataset schema (one record per Python function; for string columns, min and max are character lengths, for `repository_stars` they are value bounds, and `lang` has a single distinct value, python):

| column | dtype | min | max |
|---|---|---|---|
| body_hash | string | 64 | 64 |
| body | string | 23 | 109k |
| docstring | string | 1 | 57k |
| path | string | 4 | 198 |
| name | string | 1 | 115 |
| repository_name | string | 7 | 111 |
| repository_stars | float64 | 0 | 191k |
| lang | string (1 class) | n/a | n/a |
| body_without_docstring | string | 14 | 108k |
| unified | string | 45 | 133k |
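Each record below lists these fields one per line, in schema order: body_hash, body, docstring, path, name, repository_name, repository_stars, lang, body_without_docstring, unified. A minimal loading sketch, assuming the dump comes from a Hugging Face dataset (the dataset identifier below is a hypothetical placeholder; it is not given anywhere in this dump):

```python
# Minimal sketch, assuming a Hugging Face dataset with the schema above.
# "someuser/python-docstrings" is a hypothetical placeholder identifier.
from datasets import load_dataset

ds = load_dataset("someuser/python-docstrings", split="train")
row = ds[0]
# Each record pairs a Python function body with its extracted docstring.
print(row["path"], row["name"], int(row["repository_stars"]))
print(row["docstring"])
```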
482480447b9214d8db97e7e53c42a8c83696bd7cc62190e06ccb61207fc7a5ed
@property @pulumi.getter(name='eligibilityResult') def eligibility_result(self) -> Optional['outputs.FailoverSetEligibilityResultResponseResult']: '\n The eligibility result of the failover set, for failover.\n ' return pulumi.get(self, 'eligibility_result')
The eligibility result of the failover set, for failover.
sdk/python/pulumi_azure_native/storsimple/outputs.py
eligibility_result
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='eligibilityResult') def eligibility_result(self) -> Optional['outputs.FailoverSetEligibilityResultResponseResult']: '\n \n ' return pulumi.get(self, 'eligibility_result')
@property @pulumi.getter(name='eligibilityResult') def eligibility_result(self) -> Optional['outputs.FailoverSetEligibilityResultResponseResult']: '\n \n ' return pulumi.get(self, 'eligibility_result')<|docstring|>The eligibility result of the failover set, for failover.<|endoftext|>
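In every record, the `unified` field is the `body_without_docstring` text followed by a literal `<|docstring|>` marker, the docstring, and a closing `<|endoftext|>` token, as in the row above; the stripped body keeps an empty `'\n \n '` placeholder where the docstring was. A minimal sketch of splitting the field back into its parts (the helper name `split_unified` is illustrative, not part of the dataset):

```python
# Minimal sketch: split a `unified` field back into code and docstring.
# Both marker tokens are taken verbatim from the records in this dump.
DOC_TOKEN = "<|docstring|>"
END_TOKEN = "<|endoftext|>"

def split_unified(unified: str) -> tuple[str, str]:
    """Return (body_without_docstring, docstring) for one record."""
    code, _, rest = unified.partition(DOC_TOKEN)
    return code, rest.removesuffix(END_TOKEN)

# Tiny example in the same shape as the first record above (shortened):
unified = "@property def x(self): '\\n \\n ' return 1<|docstring|>The x value.<|endoftext|>"
code, doc = split_unified(unified)
assert doc == "The x value."
```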
8865b3417a4dd26d9b84bf20f06fb6c2a4e74ab670e0f4d5a660a1e164e04c2d
@property @pulumi.getter(name='volumeContainers') def volume_containers(self) -> Optional[Sequence['outputs.VolumeContainerFailoverMetadataResponseResult']]: '\n The list of meta data of volume containers, which are part of the failover set.\n ' return pulumi.get(self, 'volume_containers')
The list of meta data of volume containers, which are part of the failover set.
sdk/python/pulumi_azure_native/storsimple/outputs.py
volume_containers
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='volumeContainers') def volume_containers(self) -> Optional[Sequence['outputs.VolumeContainerFailoverMetadataResponseResult']]: '\n \n ' return pulumi.get(self, 'volume_containers')
@property @pulumi.getter(name='volumeContainers') def volume_containers(self) -> Optional[Sequence['outputs.VolumeContainerFailoverMetadataResponseResult']]: '\n \n ' return pulumi.get(self, 'volume_containers')<|docstring|>The list of meta data of volume containers, which are part of the failover set.<|endoftext|>
9b05f2b7e498acae1d814ca7b52607a11d58dcca3fe19acd24a62e7eae3cc9c7
def __init__(__self__, *, available_local_storage_in_bytes: Optional[float]=None, available_tiered_storage_in_bytes: Optional[float]=None, data_containers_count: Optional[int]=None, device_id: Optional[str]=None, device_location: Optional[str]=None, device_software_version: Optional[str]=None, device_status: Optional[str]=None, eligibility_result: Optional['outputs.TargetEligibilityResultResponseResult']=None, friendly_device_software_version: Optional[str]=None, model_description: Optional[str]=None, volumes_count: Optional[int]=None): "\n Represents the eligibility of a device as a failover target device.\n :param float available_local_storage_in_bytes: The amount of free local storage available on the device in bytes.\n :param float available_tiered_storage_in_bytes: The amount of free tiered storage available for the device in bytes.\n :param int data_containers_count: The count of data containers on the device.\n :param str device_id: The path ID of the device.\n :param str device_location: The geo location (applicable only for cloud appliances) of the device.\n :param str device_software_version: The software version of the device.\n :param str device_status: The status of the device.\n :param 'TargetEligibilityResultResponseArgs' eligibility_result: The eligibility result of the device, as a failover target device.\n :param str friendly_device_software_version: The friendly name for the current version of software on the device.\n :param str model_description: The model number of the device.\n :param int volumes_count: The count of volumes on the device.\n " if (available_local_storage_in_bytes is not None): pulumi.set(__self__, 'available_local_storage_in_bytes', available_local_storage_in_bytes) if (available_tiered_storage_in_bytes is not None): pulumi.set(__self__, 'available_tiered_storage_in_bytes', available_tiered_storage_in_bytes) if (data_containers_count is not None): pulumi.set(__self__, 'data_containers_count', data_containers_count) if (device_id is not None): pulumi.set(__self__, 'device_id', device_id) if (device_location is not None): pulumi.set(__self__, 'device_location', device_location) if (device_software_version is not None): pulumi.set(__self__, 'device_software_version', device_software_version) if (device_status is not None): pulumi.set(__self__, 'device_status', device_status) if (eligibility_result is not None): pulumi.set(__self__, 'eligibility_result', eligibility_result) if (friendly_device_software_version is not None): pulumi.set(__self__, 'friendly_device_software_version', friendly_device_software_version) if (model_description is not None): pulumi.set(__self__, 'model_description', model_description) if (volumes_count is not None): pulumi.set(__self__, 'volumes_count', volumes_count)
Represents the eligibility of a device as a failover target device. :param float available_local_storage_in_bytes: The amount of free local storage available on the device in bytes. :param float available_tiered_storage_in_bytes: The amount of free tiered storage available for the device in bytes. :param int data_containers_count: The count of data containers on the device. :param str device_id: The path ID of the device. :param str device_location: The geo location (applicable only for cloud appliances) of the device. :param str device_software_version: The software version of the device. :param str device_status: The status of the device. :param 'TargetEligibilityResultResponseArgs' eligibility_result: The eligibility result of the device, as a failover target device. :param str friendly_device_software_version: The friendly name for the current version of software on the device. :param str model_description: The model number of the device. :param int volumes_count: The count of volumes on the device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
__init__
pulumi-bot/pulumi-azure-native
31
python
def __init__(__self__, *, available_local_storage_in_bytes: Optional[float]=None, available_tiered_storage_in_bytes: Optional[float]=None, data_containers_count: Optional[int]=None, device_id: Optional[str]=None, device_location: Optional[str]=None, device_software_version: Optional[str]=None, device_status: Optional[str]=None, eligibility_result: Optional['outputs.TargetEligibilityResultResponseResult']=None, friendly_device_software_version: Optional[str]=None, model_description: Optional[str]=None, volumes_count: Optional[int]=None): "\n Represents the eligibility of a device as a failover target device.\n :param float available_local_storage_in_bytes: The amount of free local storage available on the device in bytes.\n :param float available_tiered_storage_in_bytes: The amount of free tiered storage available for the device in bytes.\n :param int data_containers_count: The count of data containers on the device.\n :param str device_id: The path ID of the device.\n :param str device_location: The geo location (applicable only for cloud appliances) of the device.\n :param str device_software_version: The software version of the device.\n :param str device_status: The status of the device.\n :param 'TargetEligibilityResultResponseArgs' eligibility_result: The eligibility result of the device, as a failover target device.\n :param str friendly_device_software_version: The friendly name for the current version of software on the device.\n :param str model_description: The model number of the device.\n :param int volumes_count: The count of volumes on the device.\n " if (available_local_storage_in_bytes is not None): pulumi.set(__self__, 'available_local_storage_in_bytes', available_local_storage_in_bytes) if (available_tiered_storage_in_bytes is not None): pulumi.set(__self__, 'available_tiered_storage_in_bytes', available_tiered_storage_in_bytes) if (data_containers_count is not None): pulumi.set(__self__, 'data_containers_count', data_containers_count) if (device_id is not None): pulumi.set(__self__, 'device_id', device_id) if (device_location is not None): pulumi.set(__self__, 'device_location', device_location) if (device_software_version is not None): pulumi.set(__self__, 'device_software_version', device_software_version) if (device_status is not None): pulumi.set(__self__, 'device_status', device_status) if (eligibility_result is not None): pulumi.set(__self__, 'eligibility_result', eligibility_result) if (friendly_device_software_version is not None): pulumi.set(__self__, 'friendly_device_software_version', friendly_device_software_version) if (model_description is not None): pulumi.set(__self__, 'model_description', model_description) if (volumes_count is not None): pulumi.set(__self__, 'volumes_count', volumes_count)
def __init__(__self__, *, available_local_storage_in_bytes: Optional[float]=None, available_tiered_storage_in_bytes: Optional[float]=None, data_containers_count: Optional[int]=None, device_id: Optional[str]=None, device_location: Optional[str]=None, device_software_version: Optional[str]=None, device_status: Optional[str]=None, eligibility_result: Optional['outputs.TargetEligibilityResultResponseResult']=None, friendly_device_software_version: Optional[str]=None, model_description: Optional[str]=None, volumes_count: Optional[int]=None): "\n Represents the eligibility of a device as a failover target device.\n :param float available_local_storage_in_bytes: The amount of free local storage available on the device in bytes.\n :param float available_tiered_storage_in_bytes: The amount of free tiered storage available for the device in bytes.\n :param int data_containers_count: The count of data containers on the device.\n :param str device_id: The path ID of the device.\n :param str device_location: The geo location (applicable only for cloud appliances) of the device.\n :param str device_software_version: The software version of the device.\n :param str device_status: The status of the device.\n :param 'TargetEligibilityResultResponseArgs' eligibility_result: The eligibility result of the device, as a failover target device.\n :param str friendly_device_software_version: The friendly name for the current version of software on the device.\n :param str model_description: The model number of the device.\n :param int volumes_count: The count of volumes on the device.\n " if (available_local_storage_in_bytes is not None): pulumi.set(__self__, 'available_local_storage_in_bytes', available_local_storage_in_bytes) if (available_tiered_storage_in_bytes is not None): pulumi.set(__self__, 'available_tiered_storage_in_bytes', available_tiered_storage_in_bytes) if (data_containers_count is not None): pulumi.set(__self__, 'data_containers_count', data_containers_count) if (device_id is not None): pulumi.set(__self__, 'device_id', device_id) if (device_location is not None): pulumi.set(__self__, 'device_location', device_location) if (device_software_version is not None): pulumi.set(__self__, 'device_software_version', device_software_version) if (device_status is not None): pulumi.set(__self__, 'device_status', device_status) if (eligibility_result is not None): pulumi.set(__self__, 'eligibility_result', eligibility_result) if (friendly_device_software_version is not None): pulumi.set(__self__, 'friendly_device_software_version', friendly_device_software_version) if (model_description is not None): pulumi.set(__self__, 'model_description', model_description) if (volumes_count is not None): pulumi.set(__self__, 'volumes_count', volumes_count)<|docstring|>Represents the eligibility of a device as a failover target device. :param float available_local_storage_in_bytes: The amount of free local storage available on the device in bytes. :param float available_tiered_storage_in_bytes: The amount of free tiered storage available for the device in bytes. :param int data_containers_count: The count of data containers on the device. :param str device_id: The path ID of the device. :param str device_location: The geo location (applicable only for cloud appliances) of the device. :param str device_software_version: The software version of the device. :param str device_status: The status of the device. :param 'TargetEligibilityResultResponseArgs' eligibility_result: The eligibility result of the device, as a failover target device. :param str friendly_device_software_version: The friendly name for the current version of software on the device. :param str model_description: The model number of the device. :param int volumes_count: The count of volumes on the device.<|endoftext|>
ddb49e387fdaefe720a2a2c62e85bf20854efaa195bb0dcf0be070544532738c
@property @pulumi.getter(name='availableLocalStorageInBytes') def available_local_storage_in_bytes(self) -> Optional[float]: '\n The amount of free local storage available on the device in bytes.\n ' return pulumi.get(self, 'available_local_storage_in_bytes')
The amount of free local storage available on the device in bytes.
sdk/python/pulumi_azure_native/storsimple/outputs.py
available_local_storage_in_bytes
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='availableLocalStorageInBytes') def available_local_storage_in_bytes(self) -> Optional[float]: '\n \n ' return pulumi.get(self, 'available_local_storage_in_bytes')
@property @pulumi.getter(name='availableLocalStorageInBytes') def available_local_storage_in_bytes(self) -> Optional[float]: '\n \n ' return pulumi.get(self, 'available_local_storage_in_bytes')<|docstring|>The amount of free local storage available on the device in bytes.<|endoftext|>
88cfd79fb8139b0b4d9b95100f1b3054653f6e6eda086ca32cc10a4bd0ad2bda
@property @pulumi.getter(name='availableTieredStorageInBytes') def available_tiered_storage_in_bytes(self) -> Optional[float]: '\n The amount of free tiered storage available for the device in bytes.\n ' return pulumi.get(self, 'available_tiered_storage_in_bytes')
The amount of free tiered storage available for the device in bytes.
sdk/python/pulumi_azure_native/storsimple/outputs.py
available_tiered_storage_in_bytes
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='availableTieredStorageInBytes') def available_tiered_storage_in_bytes(self) -> Optional[float]: '\n \n ' return pulumi.get(self, 'available_tiered_storage_in_bytes')
@property @pulumi.getter(name='availableTieredStorageInBytes') def available_tiered_storage_in_bytes(self) -> Optional[float]: '\n \n ' return pulumi.get(self, 'available_tiered_storage_in_bytes')<|docstring|>The amount of free tiered storage available for the device in bytes.<|endoftext|>
af9fdae618b14b90cd2a0d94d4114c4d7ce5a64660ef8207902138c20299f3af
@property @pulumi.getter(name='dataContainersCount') def data_containers_count(self) -> Optional[int]: '\n The count of data containers on the device.\n ' return pulumi.get(self, 'data_containers_count')
The count of data containers on the device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
data_containers_count
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='dataContainersCount') def data_containers_count(self) -> Optional[int]: '\n \n ' return pulumi.get(self, 'data_containers_count')
@property @pulumi.getter(name='dataContainersCount') def data_containers_count(self) -> Optional[int]: '\n \n ' return pulumi.get(self, 'data_containers_count')<|docstring|>The count of data containers on the device.<|endoftext|>
b7508c81e10dc41d6a392650ae059afe4b82b6f4c19da3141c526859b4843109
@property @pulumi.getter(name='deviceId') def device_id(self) -> Optional[str]: '\n The path ID of the device.\n ' return pulumi.get(self, 'device_id')
The path ID of the device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
device_id
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='deviceId') def device_id(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'device_id')
@property @pulumi.getter(name='deviceId') def device_id(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'device_id')<|docstring|>The path ID of the device.<|endoftext|>
e3db6a05564ebd60f44ebc08fe15f95758574e4988d33b775ae744bcc88c731a
@property @pulumi.getter(name='deviceLocation') def device_location(self) -> Optional[str]: '\n The geo location (applicable only for cloud appliances) of the device.\n ' return pulumi.get(self, 'device_location')
The geo location (applicable only for cloud appliances) of the device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
device_location
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='deviceLocation') def device_location(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'device_location')
@property @pulumi.getter(name='deviceLocation') def device_location(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'device_location')<|docstring|>The geo location (applicable only for cloud appliances) of the device.<|endoftext|>
84cd7b2a61d361d2c8fb942c4216b27da1f8f5d9b78a50d8dac0af838451dc6e
@property @pulumi.getter(name='deviceSoftwareVersion') def device_software_version(self) -> Optional[str]: '\n The software version of the device.\n ' return pulumi.get(self, 'device_software_version')
The software version of the device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
device_software_version
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='deviceSoftwareVersion') def device_software_version(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'device_software_version')
@property @pulumi.getter(name='deviceSoftwareVersion') def device_software_version(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'device_software_version')<|docstring|>The software version of the device.<|endoftext|>
8b7a6f47046c147a03a2396e0c30a14f4e2306016357d8f6adc411209ca85d23
@property @pulumi.getter(name='deviceStatus') def device_status(self) -> Optional[str]: '\n The status of the device.\n ' return pulumi.get(self, 'device_status')
The status of the device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
device_status
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='deviceStatus') def device_status(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'device_status')
@property @pulumi.getter(name='deviceStatus') def device_status(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'device_status')<|docstring|>The status of the device.<|endoftext|>
8590b48e2259baa7531b18fd155b0968e0be577e5ec789ffc4c84aa2cd59a178
@property @pulumi.getter(name='eligibilityResult') def eligibility_result(self) -> Optional['outputs.TargetEligibilityResultResponseResult']: '\n The eligibility result of the device, as a failover target device.\n ' return pulumi.get(self, 'eligibility_result')
The eligibility result of the device, as a failover target device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
eligibility_result
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='eligibilityResult') def eligibility_result(self) -> Optional['outputs.TargetEligibilityResultResponseResult']: '\n \n ' return pulumi.get(self, 'eligibility_result')
@property @pulumi.getter(name='eligibilityResult') def eligibility_result(self) -> Optional['outputs.TargetEligibilityResultResponseResult']: '\n \n ' return pulumi.get(self, 'eligibility_result')<|docstring|>The eligibility result of the device, as a failover target device.<|endoftext|>
fb42597739749d3c4bd3a455d3d118690f8a46062503521f0252b0535aae5ca5
@property @pulumi.getter(name='friendlyDeviceSoftwareVersion') def friendly_device_software_version(self) -> Optional[str]: '\n The friendly name for the current version of software on the device.\n ' return pulumi.get(self, 'friendly_device_software_version')
The friendly name for the current version of software on the device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
friendly_device_software_version
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='friendlyDeviceSoftwareVersion') def friendly_device_software_version(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'friendly_device_software_version')
@property @pulumi.getter(name='friendlyDeviceSoftwareVersion') def friendly_device_software_version(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'friendly_device_software_version')<|docstring|>The friendly name for the current version of software on the device.<|endoftext|>
5c30c245c1d891539a0bc2189d340ef1048ffe670978de3d325c8ba8332d17e1
@property @pulumi.getter(name='modelDescription') def model_description(self) -> Optional[str]: '\n The model number of the device.\n ' return pulumi.get(self, 'model_description')
The model number of the device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
model_description
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='modelDescription') def model_description(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'model_description')
@property @pulumi.getter(name='modelDescription') def model_description(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'model_description')<|docstring|>The model number of the device.<|endoftext|>
61671061f2f593fb55aa81af76d3a521e6ab12892cde8b9762b6b3ab9f270b83
@property @pulumi.getter(name='volumesCount') def volumes_count(self) -> Optional[int]: '\n The count of volumes on the device.\n ' return pulumi.get(self, 'volumes_count')
The count of volumes on the device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
volumes_count
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='volumesCount') def volumes_count(self) -> Optional[int]: '\n \n ' return pulumi.get(self, 'volumes_count')
@property @pulumi.getter(name='volumesCount') def volumes_count(self) -> Optional[int]: '\n \n ' return pulumi.get(self, 'volumes_count')<|docstring|>The count of volumes on the device.<|endoftext|>
26d729da301911bacee1ec123c9f6f0e2435307f7a42f381e551e5ff7cc4bcbe
def __init__(__self__, *, type: str): '\n Intrinsic settings which refers to the type of the StorSimple Manager.\n :param str type: The type of StorSimple Manager.\n ' pulumi.set(__self__, 'type', type)
Intrinsic settings which refers to the type of the StorSimple Manager. :param str type: The type of StorSimple Manager.
sdk/python/pulumi_azure_native/storsimple/outputs.py
__init__
pulumi-bot/pulumi-azure-native
31
python
def __init__(__self__, *, type: str): '\n Intrinsic settings which refers to the type of the StorSimple Manager.\n :param str type: The type of StorSimple Manager.\n ' pulumi.set(__self__, 'type', type)
def __init__(__self__, *, type: str): '\n Intrinsic settings which refers to the type of the StorSimple Manager.\n :param str type: The type of StorSimple Manager.\n ' pulumi.set(__self__, 'type', type)<|docstring|>Intrinsic settings which refers to the type of the StorSimple Manager. :param str type: The type of StorSimple Manager.<|endoftext|>
8ddce351640962a11263bd69844e0d19c8d78410d693ce83ecd0e6d4306c5f26
@property @pulumi.getter def type(self) -> str: '\n The type of StorSimple Manager.\n ' return pulumi.get(self, 'type')
The type of StorSimple Manager.
sdk/python/pulumi_azure_native/storsimple/outputs.py
type
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter def type(self) -> str: '\n \n ' return pulumi.get(self, 'type')
@property @pulumi.getter def type(self) -> str: '\n \n ' return pulumi.get(self, 'type')<|docstring|>The type of StorSimple Manager.<|endoftext|>
d49ea98dd215bd0f22083ff2a6d0294cda965381e844932ed4f1ac93335f7cc1
def __init__(__self__, *, name: str): '\n The Sku.\n :param str name: Refers to the sku name which should be "Standard"\n ' pulumi.set(__self__, 'name', name)
The Sku. :param str name: Refers to the sku name which should be "Standard"
sdk/python/pulumi_azure_native/storsimple/outputs.py
__init__
pulumi-bot/pulumi-azure-native
31
python
def __init__(__self__, *, name: str): '\n The Sku.\n :param str name: Refers to the sku name which should be "Standard"\n ' pulumi.set(__self__, 'name', name)
def __init__(__self__, *, name: str): '\n The Sku.\n :param str name: Refers to the sku name which should be "Standard"\n ' pulumi.set(__self__, 'name', name)<|docstring|>The Sku. :param str name: Refers to the sku name which should be "Standard"<|endoftext|>
25ce39a3ae622b3923a94b8d7b09fe6e1d349fec9405d7a69d196aa68b104b83
@property @pulumi.getter def name(self) -> str: '\n Refers to the sku name which should be "Standard"\n ' return pulumi.get(self, 'name')
Refers to the sku name which should be "Standard"
sdk/python/pulumi_azure_native/storsimple/outputs.py
name
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter def name(self) -> str: '\n \n ' return pulumi.get(self, 'name')
@property @pulumi.getter def name(self) -> str: '\n \n ' return pulumi.get(self, 'name')<|docstring|>Refers to the sku name which should be "Standard"<|endoftext|>
c783474abc07d05961e730248721927cf134a0078adf5b1b9a698fc961695c51
def __init__(__self__, *, recurrence_type: str, recurrence_value: int, weekly_days_list: Optional[Sequence[str]]=None): "\n The schedule recurrence.\n :param str recurrence_type: The recurrence type.\n :param int recurrence_value: The recurrence value.\n :param Sequence[str] weekly_days_list: The week days list. Applicable only for schedules of recurrence type 'weekly'.\n " pulumi.set(__self__, 'recurrence_type', recurrence_type) pulumi.set(__self__, 'recurrence_value', recurrence_value) if (weekly_days_list is not None): pulumi.set(__self__, 'weekly_days_list', weekly_days_list)
The schedule recurrence. :param str recurrence_type: The recurrence type. :param int recurrence_value: The recurrence value. :param Sequence[str] weekly_days_list: The week days list. Applicable only for schedules of recurrence type 'weekly'.
sdk/python/pulumi_azure_native/storsimple/outputs.py
__init__
pulumi-bot/pulumi-azure-native
31
python
def __init__(__self__, *, recurrence_type: str, recurrence_value: int, weekly_days_list: Optional[Sequence[str]]=None): "\n The schedule recurrence.\n :param str recurrence_type: The recurrence type.\n :param int recurrence_value: The recurrence value.\n :param Sequence[str] weekly_days_list: The week days list. Applicable only for schedules of recurrence type 'weekly'.\n " pulumi.set(__self__, 'recurrence_type', recurrence_type) pulumi.set(__self__, 'recurrence_value', recurrence_value) if (weekly_days_list is not None): pulumi.set(__self__, 'weekly_days_list', weekly_days_list)
def __init__(__self__, *, recurrence_type: str, recurrence_value: int, weekly_days_list: Optional[Sequence[str]]=None): "\n The schedule recurrence.\n :param str recurrence_type: The recurrence type.\n :param int recurrence_value: The recurrence value.\n :param Sequence[str] weekly_days_list: The week days list. Applicable only for schedules of recurrence type 'weekly'.\n " pulumi.set(__self__, 'recurrence_type', recurrence_type) pulumi.set(__self__, 'recurrence_value', recurrence_value) if (weekly_days_list is not None): pulumi.set(__self__, 'weekly_days_list', weekly_days_list)<|docstring|>The schedule recurrence. :param str recurrence_type: The recurrence type. :param int recurrence_value: The recurrence value. :param Sequence[str] weekly_days_list: The week days list. Applicable only for schedules of recurrence type 'weekly'.<|endoftext|>
6036703e09b723831cf27c6ccfa16362dd127938ba8e5f95c55636555559ea2c
@property @pulumi.getter(name='recurrenceType') def recurrence_type(self) -> str: '\n The recurrence type.\n ' return pulumi.get(self, 'recurrence_type')
The recurrence type.
sdk/python/pulumi_azure_native/storsimple/outputs.py
recurrence_type
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='recurrenceType') def recurrence_type(self) -> str: '\n \n ' return pulumi.get(self, 'recurrence_type')
@property @pulumi.getter(name='recurrenceType') def recurrence_type(self) -> str: '\n \n ' return pulumi.get(self, 'recurrence_type')<|docstring|>The recurrence type.<|endoftext|>
b69b5d666b214dc222a9eb422f08790e6bd156835a41b0210b4407ea3dc18202
@property @pulumi.getter(name='recurrenceValue') def recurrence_value(self) -> int: '\n The recurrence value.\n ' return pulumi.get(self, 'recurrence_value')
The recurrence value.
sdk/python/pulumi_azure_native/storsimple/outputs.py
recurrence_value
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='recurrenceValue') def recurrence_value(self) -> int: '\n \n ' return pulumi.get(self, 'recurrence_value')
@property @pulumi.getter(name='recurrenceValue') def recurrence_value(self) -> int: '\n \n ' return pulumi.get(self, 'recurrence_value')<|docstring|>The recurrence value.<|endoftext|>
55e1e21743a35f5d1de486c52d779d5529c021aa9051b0151e35bfdc5e4663e8
@property @pulumi.getter(name='weeklyDaysList') def weekly_days_list(self) -> Optional[Sequence[str]]: "\n The week days list. Applicable only for schedules of recurrence type 'weekly'.\n " return pulumi.get(self, 'weekly_days_list')
The week days list. Applicable only for schedules of recurrence type 'weekly'.
sdk/python/pulumi_azure_native/storsimple/outputs.py
weekly_days_list
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='weeklyDaysList') def weekly_days_list(self) -> Optional[Sequence[str]]: "\n \n " return pulumi.get(self, 'weekly_days_list')
@property @pulumi.getter(name='weeklyDaysList') def weekly_days_list(self) -> Optional[Sequence[str]]: "\n \n " return pulumi.get(self, 'weekly_days_list')<|docstring|>The week days list. Applicable only for schedules of recurrence type 'weekly'.<|endoftext|>
dd4f3d4932de9e527a1a99a337e4ecfccda8235b277434fe60db35dcdb5b3d15
def __init__(__self__, *, message: Optional[str]=None, resolution: Optional[str]=None, result_code: Optional[str]=None): '\n The error/warning message due to which the device is ineligible as a failover target device.\n :param str message: The localized error message stating the reason why the device is not eligible as a target device.\n :param str resolution: The localized resolution message for the error.\n :param str result_code: The result code for the error, due to which the device does not qualify as a failover target device.\n ' if (message is not None): pulumi.set(__self__, 'message', message) if (resolution is not None): pulumi.set(__self__, 'resolution', resolution) if (result_code is not None): pulumi.set(__self__, 'result_code', result_code)
The error/warning message due to which the device is ineligible as a failover target device. :param str message: The localized error message stating the reason why the device is not eligible as a target device. :param str resolution: The localized resolution message for the error. :param str result_code: The result code for the error, due to which the device does not qualify as a failover target device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
__init__
pulumi-bot/pulumi-azure-native
31
python
def __init__(__self__, *, message: Optional[str]=None, resolution: Optional[str]=None, result_code: Optional[str]=None): '\n The error/warning message due to which the device is ineligible as a failover target device.\n :param str message: The localized error message stating the reason why the device is not eligible as a target device.\n :param str resolution: The localized resolution message for the error.\n :param str result_code: The result code for the error, due to which the device does not qualify as a failover target device.\n ' if (message is not None): pulumi.set(__self__, 'message', message) if (resolution is not None): pulumi.set(__self__, 'resolution', resolution) if (result_code is not None): pulumi.set(__self__, 'result_code', result_code)
def __init__(__self__, *, message: Optional[str]=None, resolution: Optional[str]=None, result_code: Optional[str]=None): '\n The error/warning message due to which the device is ineligible as a failover target device.\n :param str message: The localized error message stating the reason why the device is not eligible as a target device.\n :param str resolution: The localized resolution message for the error.\n :param str result_code: The result code for the error, due to which the device does not qualify as a failover target device.\n ' if (message is not None): pulumi.set(__self__, 'message', message) if (resolution is not None): pulumi.set(__self__, 'resolution', resolution) if (result_code is not None): pulumi.set(__self__, 'result_code', result_code)<|docstring|>The error/warning message due to which the device is ineligible as a failover target device. :param str message: The localized error message stating the reason why the device is not eligible as a target device. :param str resolution: The localized resolution message for the error. :param str result_code: The result code for the error, due to which the device does not qualify as a failover target device.<|endoftext|>
60fd4bc6be3effda2ad9d23c18c7d8bd4e05e3f59dcdd7ff9b0da2d9cd42f46a
@property @pulumi.getter def message(self) -> Optional[str]: '\n The localized error message stating the reason why the device is not eligible as a target device.\n ' return pulumi.get(self, 'message')
The localized error message stating the reason why the device is not eligible as a target device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
message
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter def message(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'message')
@property @pulumi.getter def message(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'message')<|docstring|>The localized error message stating the reason why the device is not eligible as a target device.<|endoftext|>
ffb4f0d30a79c96d9a554158e0060a9d8ab4ae78fa07c2557f5d7aa3c6c328dc
@property @pulumi.getter def resolution(self) -> Optional[str]: '\n The localized resolution message for the error.\n ' return pulumi.get(self, 'resolution')
The localized resolution message for the error.
sdk/python/pulumi_azure_native/storsimple/outputs.py
resolution
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter def resolution(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'resolution')
@property @pulumi.getter def resolution(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'resolution')<|docstring|>The localized resolution message for the error.<|endoftext|>
16319a2e4b5121f28a28ad566c3b27928d6de7c04f60df01a14c9483caec1785
@property @pulumi.getter(name='resultCode') def result_code(self) -> Optional[str]: '\n The result code for the error, due to which the device does not qualify as a failover target device.\n ' return pulumi.get(self, 'result_code')
The result code for the error, due to which the device does not qualify as a failover target device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
result_code
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='resultCode') def result_code(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'result_code')
@property @pulumi.getter(name='resultCode') def result_code(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'result_code')<|docstring|>The result code for the error, due to which the device does not qualify as a failover target device.<|endoftext|>
b0ecd09100f3cc6a7bcedb77310fab469ef10d6c85b6a76a8b41c9e1653f2a00
def __init__(__self__, *, eligibility_status: Optional[str]=None, messages: Optional[Sequence['outputs.TargetEligibilityErrorMessageResponseResult']]=None): "\n The eligibility result of device, as a failover target device.\n :param str eligibility_status: The eligibility status of device, as a failover target device.\n :param Sequence['TargetEligibilityErrorMessageResponseArgs'] messages: The list of error messages, if a device does not qualify as a failover target device.\n " if (eligibility_status is not None): pulumi.set(__self__, 'eligibility_status', eligibility_status) if (messages is not None): pulumi.set(__self__, 'messages', messages)
The eligibility result of device, as a failover target device. :param str eligibility_status: The eligibility status of device, as a failover target device. :param Sequence['TargetEligibilityErrorMessageResponseArgs'] messages: The list of error messages, if a device does not qualify as a failover target device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
__init__
pulumi-bot/pulumi-azure-native
31
python
def __init__(__self__, *, eligibility_status: Optional[str]=None, messages: Optional[Sequence['outputs.TargetEligibilityErrorMessageResponseResult']]=None): "\n The eligibility result of device, as a failover target device.\n :param str eligibility_status: The eligibility status of device, as a failover target device.\n :param Sequence['TargetEligibilityErrorMessageResponseArgs'] messages: The list of error messages, if a device does not qualify as a failover target device.\n " if (eligibility_status is not None): pulumi.set(__self__, 'eligibility_status', eligibility_status) if (messages is not None): pulumi.set(__self__, 'messages', messages)
def __init__(__self__, *, eligibility_status: Optional[str]=None, messages: Optional[Sequence['outputs.TargetEligibilityErrorMessageResponseResult']]=None): "\n The eligibility result of device, as a failover target device.\n :param str eligibility_status: The eligibility status of device, as a failover target device.\n :param Sequence['TargetEligibilityErrorMessageResponseArgs'] messages: The list of error messages, if a device does not qualify as a failover target device.\n " if (eligibility_status is not None): pulumi.set(__self__, 'eligibility_status', eligibility_status) if (messages is not None): pulumi.set(__self__, 'messages', messages)<|docstring|>The eligibility result of device, as a failover target device. :param str eligibility_status: The eligibility status of device, as a failover target device. :param Sequence['TargetEligibilityErrorMessageResponseArgs'] messages: The list of error messages, if a device does not qualify as a failover target device.<|endoftext|>
2515dda1b1a358d7d106316e43230165b34d2bdd02a54d9af09cf18f5734cdea
@property @pulumi.getter(name='eligibilityStatus') def eligibility_status(self) -> Optional[str]: '\n The eligibility status of device, as a failover target device.\n ' return pulumi.get(self, 'eligibility_status')
The eligibility status of device, as a failover target device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
eligibility_status
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='eligibilityStatus') def eligibility_status(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'eligibility_status')
@property @pulumi.getter(name='eligibilityStatus') def eligibility_status(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'eligibility_status')<|docstring|>The eligibility status of device, as a failover target device.<|endoftext|>
a8c1cbc2e70b32176a43831de2db52fecedfbcf7ec412c4d587743ae25444f55
@property @pulumi.getter def messages(self) -> Optional[Sequence['outputs.TargetEligibilityErrorMessageResponseResult']]: '\n The list of error messages, if a device does not qualify as a failover target device.\n ' return pulumi.get(self, 'messages')
The list of error messages, if a device does not qualify as a failover target device.
sdk/python/pulumi_azure_native/storsimple/outputs.py
messages
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter def messages(self) -> Optional[Sequence['outputs.TargetEligibilityErrorMessageResponseResult']]: '\n \n ' return pulumi.get(self, 'messages')
@property @pulumi.getter def messages(self) -> Optional[Sequence['outputs.TargetEligibilityErrorMessageResponseResult']]: '\n \n ' return pulumi.get(self, 'messages')<|docstring|>The list of error messages, if a device does not qualify as a failover target device.<|endoftext|>
8ec6ed5a3a7ae677c5adcae4e1c014a4465e90dda3a34d3d1063f0a5744d166e
def __init__(__self__, *, hours: int, minutes: int, seconds: int): '\n The time.\n :param int hours: The hour.\n :param int minutes: The minute.\n :param int seconds: The second.\n ' pulumi.set(__self__, 'hours', hours) pulumi.set(__self__, 'minutes', minutes) pulumi.set(__self__, 'seconds', seconds)
The time. :param int hours: The hour. :param int minutes: The minute. :param int seconds: The second.
sdk/python/pulumi_azure_native/storsimple/outputs.py
__init__
pulumi-bot/pulumi-azure-native
31
python
def __init__(__self__, *, hours: int, minutes: int, seconds: int): '\n The time.\n :param int hours: The hour.\n :param int minutes: The minute.\n :param int seconds: The second.\n ' pulumi.set(__self__, 'hours', hours) pulumi.set(__self__, 'minutes', minutes) pulumi.set(__self__, 'seconds', seconds)
def __init__(__self__, *, hours: int, minutes: int, seconds: int): '\n The time.\n :param int hours: The hour.\n :param int minutes: The minute.\n :param int seconds: The second.\n ' pulumi.set(__self__, 'hours', hours) pulumi.set(__self__, 'minutes', minutes) pulumi.set(__self__, 'seconds', seconds)<|docstring|>The time. :param int hours: The hour. :param int minutes: The minute. :param int seconds: The second.<|endoftext|>
826e5cd1f00d819afb9978827d5f2883f654eaeacdb6686b6a25351e9dd46870
@property @pulumi.getter def hours(self) -> int: '\n The hour.\n ' return pulumi.get(self, 'hours')
The hour.
sdk/python/pulumi_azure_native/storsimple/outputs.py
hours
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter def hours(self) -> int: '\n \n ' return pulumi.get(self, 'hours')
@property @pulumi.getter def hours(self) -> int: '\n \n ' return pulumi.get(self, 'hours')<|docstring|>The hour.<|endoftext|>
6cc687e3ab7ae03a84dcb0acac9cc4b796bcbb13268fe65a029ea98200f03812
@property @pulumi.getter def minutes(self) -> int: '\n The minute.\n ' return pulumi.get(self, 'minutes')
The minute.
sdk/python/pulumi_azure_native/storsimple/outputs.py
minutes
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter def minutes(self) -> int: '\n \n ' return pulumi.get(self, 'minutes')
@property @pulumi.getter def minutes(self) -> int: '\n \n ' return pulumi.get(self, 'minutes')<|docstring|>The minute.<|endoftext|>
5ccf3331f4dc272ac27bc0166438e4ef7f987c037061e0e1ab98b288676d405d
@property @pulumi.getter def seconds(self) -> int: '\n The second.\n ' return pulumi.get(self, 'seconds')
The second.
sdk/python/pulumi_azure_native/storsimple/outputs.py
seconds
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter def seconds(self) -> int: '\n \n ' return pulumi.get(self, 'seconds')
@property @pulumi.getter def seconds(self) -> int: '\n \n ' return pulumi.get(self, 'seconds')<|docstring|>The second.<|endoftext|>
74e7dbd9fe75ad18d92080df9f6911f02db0e65b6381b017f243a74c3814cfa0
def __init__(__self__, *, volume_container_id: Optional[str]=None, volumes: Optional[Sequence['outputs.VolumeFailoverMetadataResponseResult']]=None): "\n The metadata of the volume container, that is being considered as part of a failover set.\n :param str volume_container_id: The path ID of the volume container.\n :param Sequence['VolumeFailoverMetadataResponseArgs'] volumes: The list of metadata of volumes inside the volume container, which contains valid cloud snapshots.\n " if (volume_container_id is not None): pulumi.set(__self__, 'volume_container_id', volume_container_id) if (volumes is not None): pulumi.set(__self__, 'volumes', volumes)
The metadata of the volume container, that is being considered as part of a failover set. :param str volume_container_id: The path ID of the volume container. :param Sequence['VolumeFailoverMetadataResponseArgs'] volumes: The list of metadata of volumes inside the volume container, which contains valid cloud snapshots.
sdk/python/pulumi_azure_native/storsimple/outputs.py
__init__
pulumi-bot/pulumi-azure-native
31
python
def __init__(__self__, *, volume_container_id: Optional[str]=None, volumes: Optional[Sequence['outputs.VolumeFailoverMetadataResponseResult']]=None): "\n The metadata of the volume container, that is being considered as part of a failover set.\n :param str volume_container_id: The path ID of the volume container.\n :param Sequence['VolumeFailoverMetadataResponseArgs'] volumes: The list of metadata of volumes inside the volume container, which contains valid cloud snapshots.\n " if (volume_container_id is not None): pulumi.set(__self__, 'volume_container_id', volume_container_id) if (volumes is not None): pulumi.set(__self__, 'volumes', volumes)
def __init__(__self__, *, volume_container_id: Optional[str]=None, volumes: Optional[Sequence['outputs.VolumeFailoverMetadataResponseResult']]=None): "\n The metadata of the volume container, that is being considered as part of a failover set.\n :param str volume_container_id: The path ID of the volume container.\n :param Sequence['VolumeFailoverMetadataResponseArgs'] volumes: The list of metadata of volumes inside the volume container, which contains valid cloud snapshots.\n " if (volume_container_id is not None): pulumi.set(__self__, 'volume_container_id', volume_container_id) if (volumes is not None): pulumi.set(__self__, 'volumes', volumes)<|docstring|>The metadata of the volume container, that is being considered as part of a failover set. :param str volume_container_id: The path ID of the volume container. :param Sequence['VolumeFailoverMetadataResponseArgs'] volumes: The list of metadata of volumes inside the volume container, which contains valid cloud snapshots.<|endoftext|>
9130d607fbbe60287e94de52882d2b6619e68d36060ea4e9b637bc244acad12c
@property @pulumi.getter(name='volumeContainerId') def volume_container_id(self) -> Optional[str]: '\n The path ID of the volume container.\n ' return pulumi.get(self, 'volume_container_id')
The path ID of the volume container.
sdk/python/pulumi_azure_native/storsimple/outputs.py
volume_container_id
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='volumeContainerId') def volume_container_id(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'volume_container_id')
@property @pulumi.getter(name='volumeContainerId') def volume_container_id(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'volume_container_id')<|docstring|>The path ID of the volume container.<|endoftext|>
2a08ed2278df5ae5d8a84a670d610452194e6861dea745645f65210aed4a6cfb
@property @pulumi.getter def volumes(self) -> Optional[Sequence['outputs.VolumeFailoverMetadataResponseResult']]: '\n The list of metadata of volumes inside the volume container, which contains valid cloud snapshots.\n ' return pulumi.get(self, 'volumes')
The list of metadata of volumes inside the volume container, which contains valid cloud snapshots.
sdk/python/pulumi_azure_native/storsimple/outputs.py
volumes
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter def volumes(self) -> Optional[Sequence['outputs.VolumeFailoverMetadataResponseResult']]: '\n \n ' return pulumi.get(self, 'volumes')
@property @pulumi.getter def volumes(self) -> Optional[Sequence['outputs.VolumeFailoverMetadataResponseResult']]: '\n \n ' return pulumi.get(self, 'volumes')<|docstring|>The list of metadata of volumes inside the volume container, which contains valid cloud snapshots.<|endoftext|>
4149cc3c4e8df3f1dce229cb5353bc6542d00c5099f4fb435f5179d06854ed6b
def __init__(__self__, *, backup_created_date: Optional[str]=None, backup_element_id: Optional[str]=None, backup_id: Optional[str]=None, backup_policy_id: Optional[str]=None, size_in_bytes: Optional[float]=None, volume_id: Optional[str]=None, volume_type: Optional[str]=None): '\n The metadata of a volume that has valid cloud snapshot.\n :param str backup_created_date: The date at which the snapshot was taken.\n :param str backup_element_id: The path ID of the backup-element for this volume, inside the backup set.\n :param str backup_id: The path ID of the backup set.\n :param str backup_policy_id: The path ID of the backup policy using which the snapshot was taken.\n :param float size_in_bytes: The size of the volume in bytes at the time the snapshot was taken.\n :param str volume_id: The path ID of the volume.\n :param str volume_type: The type of the volume.\n ' if (backup_created_date is not None): pulumi.set(__self__, 'backup_created_date', backup_created_date) if (backup_element_id is not None): pulumi.set(__self__, 'backup_element_id', backup_element_id) if (backup_id is not None): pulumi.set(__self__, 'backup_id', backup_id) if (backup_policy_id is not None): pulumi.set(__self__, 'backup_policy_id', backup_policy_id) if (size_in_bytes is not None): pulumi.set(__self__, 'size_in_bytes', size_in_bytes) if (volume_id is not None): pulumi.set(__self__, 'volume_id', volume_id) if (volume_type is not None): pulumi.set(__self__, 'volume_type', volume_type)
The metadata of a volume that has valid cloud snapshot. :param str backup_created_date: The date at which the snapshot was taken. :param str backup_element_id: The path ID of the backup-element for this volume, inside the backup set. :param str backup_id: The path ID of the backup set. :param str backup_policy_id: The path ID of the backup policy using which the snapshot was taken. :param float size_in_bytes: The size of the volume in bytes at the time the snapshot was taken. :param str volume_id: The path ID of the volume. :param str volume_type: The type of the volume.
sdk/python/pulumi_azure_native/storsimple/outputs.py
__init__
pulumi-bot/pulumi-azure-native
31
python
def __init__(__self__, *, backup_created_date: Optional[str]=None, backup_element_id: Optional[str]=None, backup_id: Optional[str]=None, backup_policy_id: Optional[str]=None, size_in_bytes: Optional[float]=None, volume_id: Optional[str]=None, volume_type: Optional[str]=None): '\n The metadata of a volume that has valid cloud snapshot.\n :param str backup_created_date: The date at which the snapshot was taken.\n :param str backup_element_id: The path ID of the backup-element for this volume, inside the backup set.\n :param str backup_id: The path ID of the backup set.\n :param str backup_policy_id: The path ID of the backup policy using which the snapshot was taken.\n :param float size_in_bytes: The size of the volume in bytes at the time the snapshot was taken.\n :param str volume_id: The path ID of the volume.\n :param str volume_type: The type of the volume.\n ' if (backup_created_date is not None): pulumi.set(__self__, 'backup_created_date', backup_created_date) if (backup_element_id is not None): pulumi.set(__self__, 'backup_element_id', backup_element_id) if (backup_id is not None): pulumi.set(__self__, 'backup_id', backup_id) if (backup_policy_id is not None): pulumi.set(__self__, 'backup_policy_id', backup_policy_id) if (size_in_bytes is not None): pulumi.set(__self__, 'size_in_bytes', size_in_bytes) if (volume_id is not None): pulumi.set(__self__, 'volume_id', volume_id) if (volume_type is not None): pulumi.set(__self__, 'volume_type', volume_type)
def __init__(__self__, *, backup_created_date: Optional[str]=None, backup_element_id: Optional[str]=None, backup_id: Optional[str]=None, backup_policy_id: Optional[str]=None, size_in_bytes: Optional[float]=None, volume_id: Optional[str]=None, volume_type: Optional[str]=None): '\n The metadata of a volume that has valid cloud snapshot.\n :param str backup_created_date: The date at which the snapshot was taken.\n :param str backup_element_id: The path ID of the backup-element for this volume, inside the backup set.\n :param str backup_id: The path ID of the backup set.\n :param str backup_policy_id: The path ID of the backup policy using which the snapshot was taken.\n :param float size_in_bytes: The size of the volume in bytes at the time the snapshot was taken.\n :param str volume_id: The path ID of the volume.\n :param str volume_type: The type of the volume.\n ' if (backup_created_date is not None): pulumi.set(__self__, 'backup_created_date', backup_created_date) if (backup_element_id is not None): pulumi.set(__self__, 'backup_element_id', backup_element_id) if (backup_id is not None): pulumi.set(__self__, 'backup_id', backup_id) if (backup_policy_id is not None): pulumi.set(__self__, 'backup_policy_id', backup_policy_id) if (size_in_bytes is not None): pulumi.set(__self__, 'size_in_bytes', size_in_bytes) if (volume_id is not None): pulumi.set(__self__, 'volume_id', volume_id) if (volume_type is not None): pulumi.set(__self__, 'volume_type', volume_type)<|docstring|>The metadata of a volume that has valid cloud snapshot. :param str backup_created_date: The date at which the snapshot was taken. :param str backup_element_id: The path ID of the backup-element for this volume, inside the backup set. :param str backup_id: The path ID of the backup set. :param str backup_policy_id: The path ID of the backup policy using which the snapshot was taken. :param float size_in_bytes: The size of the volume in bytes at the time the snapshot was taken. :param str volume_id: The path ID of the volume. :param str volume_type: The type of the volume.<|endoftext|>
e8cc04be91b22502a264e98120854f9a5c79d0259338ffce081cfcdfd4078e82
@property @pulumi.getter(name='backupCreatedDate') def backup_created_date(self) -> Optional[str]: '\n The date at which the snapshot was taken.\n ' return pulumi.get(self, 'backup_created_date')
The date at which the snapshot was taken.
sdk/python/pulumi_azure_native/storsimple/outputs.py
backup_created_date
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='backupCreatedDate') def backup_created_date(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'backup_created_date')
@property @pulumi.getter(name='backupCreatedDate') def backup_created_date(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'backup_created_date')<|docstring|>The date at which the snapshot was taken.<|endoftext|>
dd2c9be7f04e21034808741696c7a34d45600e1d6f6aebba762d4ad95f7a961f
@property @pulumi.getter(name='backupElementId') def backup_element_id(self) -> Optional[str]: '\n The path ID of the backup-element for this volume, inside the backup set.\n ' return pulumi.get(self, 'backup_element_id')
The path ID of the backup-element for this volume, inside the backup set.
sdk/python/pulumi_azure_native/storsimple/outputs.py
backup_element_id
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='backupElementId') def backup_element_id(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'backup_element_id')
@property @pulumi.getter(name='backupElementId') def backup_element_id(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'backup_element_id')<|docstring|>The path ID of the backup-element for this volume, inside the backup set.<|endoftext|>
f77689799457335a20979be6e6edf075cc20d005fb246794499c201abb2855b5
@property @pulumi.getter(name='backupId') def backup_id(self) -> Optional[str]: '\n The path ID of the backup set.\n ' return pulumi.get(self, 'backup_id')
The path ID of the backup set.
sdk/python/pulumi_azure_native/storsimple/outputs.py
backup_id
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='backupId') def backup_id(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'backup_id')
@property @pulumi.getter(name='backupId') def backup_id(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'backup_id')<|docstring|>The path ID of the backup set.<|endoftext|>
6a7e388241223b2269f4e47720523a99fde98a26f2cabc29230886d1b053df3a
@property @pulumi.getter(name='backupPolicyId') def backup_policy_id(self) -> Optional[str]: '\n The path ID of the backup policy using which the snapshot was taken.\n ' return pulumi.get(self, 'backup_policy_id')
The path ID of the backup policy using which the snapshot was taken.
sdk/python/pulumi_azure_native/storsimple/outputs.py
backup_policy_id
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='backupPolicyId') def backup_policy_id(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'backup_policy_id')
@property @pulumi.getter(name='backupPolicyId') def backup_policy_id(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'backup_policy_id')<|docstring|>The path ID of the backup policy using which the snapshot was taken.<|endoftext|>
e4c4c4c4a32432e0463d6bd623acc63066527fdd8e7ece6c218862f647e9869a
@property @pulumi.getter(name='sizeInBytes') def size_in_bytes(self) -> Optional[float]: '\n The size of the volume in bytes at the time the snapshot was taken.\n ' return pulumi.get(self, 'size_in_bytes')
The size of the volume in bytes at the time the snapshot was taken.
sdk/python/pulumi_azure_native/storsimple/outputs.py
size_in_bytes
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='sizeInBytes') def size_in_bytes(self) -> Optional[float]: '\n \n ' return pulumi.get(self, 'size_in_bytes')
@property @pulumi.getter(name='sizeInBytes') def size_in_bytes(self) -> Optional[float]: '\n \n ' return pulumi.get(self, 'size_in_bytes')<|docstring|>The size of the volume in bytes at the time the snapshot was taken.<|endoftext|>
7ae538e2db7e49e0d8cd6e6fbc5fa5903fbfe103706748aa774eb4713d50f596
@property @pulumi.getter(name='volumeId') def volume_id(self) -> Optional[str]: '\n The path ID of the volume.\n ' return pulumi.get(self, 'volume_id')
The path ID of the volume.
sdk/python/pulumi_azure_native/storsimple/outputs.py
volume_id
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='volumeId') def volume_id(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'volume_id')
@property @pulumi.getter(name='volumeId') def volume_id(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'volume_id')<|docstring|>The path ID of the volume.<|endoftext|>
d6bac9f4f602b881919b550596d96c52bb445c8cc24f7904f1089f64db5a6660
@property @pulumi.getter(name='volumeType') def volume_type(self) -> Optional[str]: '\n The type of the volume.\n ' return pulumi.get(self, 'volume_type')
The type of the volume.
sdk/python/pulumi_azure_native/storsimple/outputs.py
volume_type
pulumi-bot/pulumi-azure-native
31
python
@property @pulumi.getter(name='volumeType') def volume_type(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'volume_type')
@property @pulumi.getter(name='volumeType') def volume_type(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'volume_type')<|docstring|>The type of the volume.<|endoftext|>
8f85b7c3ee1a54e480dffa1879cf1802066680aa345216e42faec065c8533007
def __init__(self, logpdf: Callable[([Tensor, Any], Tensor)], grad_logpdf: Callable[([Tensor, Any], Tensor)], device: torch.device, xi: Tensor, lb: float=(- infty), ub: float=infty, use_lower: bool=False, ns: int=50, **fargs: Any) -> None: '\n        Initialize the upper (and if needed lower) hulls with the specified params\n\n        Parameters\n        ==========\n        logpdf: function that computes log(f(u,...)), for given u, where f(u) is proportional to the\n            density we want to sample from\n        grad_logpdf: d/du log(f(u,...))\n        xi: ordered vector of starting points in which log(f(u,...)) is defined\n            to initialize the hulls\n        use_lower: True means the lower squeezing will be used, which is more efficient\n            for drawing large numbers of samples\n        lb: lower bound of the domain\n        ub: upper bound of the domain\n        ns: maximum number of points defining the hulls\n        fargs: arguments for logpdf and grad_logpdf\n        ' self.device = device self.lb = lb self.ub = ub self.logpdf = logpdf self.grad_logpdf = grad_logpdf self.fargs = fargs self.ns = ns self.xi = xi.to(self.device) (self.B, self.K) = self.xi.size() self.h = torch.zeros(self.B, ns).to(self.device) self.hprime = torch.zeros(self.B, ns).to(self.device) self.x = torch.zeros(self.B, ns).to(self.device) self.h[(:, :self.K)] = self.logpdf(self.xi, **self.fargs) self.hprime[(:, :self.K)] = self.grad_logpdf(self.xi, **self.fargs) assert torch.isfinite(self.hprime).all() self.x[(:, :self.K)] = self.xi self.offset = self.h.max((- 1))[0].view((- 1), 1) self.h = (self.h - self.offset) if (not (self.hprime[(:, 0)] > 0).all()): raise IOError('Initial anchor points must span mode of PDF (left).') if (not (self.hprime[(:, (self.K - 1))] < 0).all()): raise IOError('Initial anchor points must span mode of PDF (right).') self.recalculate_hull()
Initialize the upper (and if needed lower) hulls with the specified params Parameters ========== logpdf: function that computes log(f(u,...)), for given u, where f(u) is proportional to the density we want to sample from grad_logpdf: d/du log(f(u,...)) xi: ordered vector of starting points in which log(f(u,...)) is defined to initialize the hulls use_lower: True means the lower squeezing will be used, which is more efficient for drawing large numbers of samples lb: lower bound of the domain ub: upper bound of the domain ns: maximum number of points defining the hulls fargs: arguments for logpdf and grad_logpdf
mt/mvae/distributions/ars.py
__init__
macio232/mvae
53
python
def __init__(self, logpdf: Callable[([Tensor, Any], Tensor)], grad_logpdf: Callable[([Tensor, Any], Tensor)], device: torch.device, xi: Tensor, lb: float=(- infty), ub: float=infty, use_lower: bool=False, ns: int=50, **fargs: Any) -> None: '\n        Initialize the upper (and if needed lower) hulls with the specified params\n\n        Parameters\n        ==========\n        logpdf: function that computes log(f(u,...)), for given u, where f(u) is proportional to the\n            density we want to sample from\n        grad_logpdf: d/du log(f(u,...))\n        xi: ordered vector of starting points in which log(f(u,...)) is defined\n            to initialize the hulls\n        use_lower: True means the lower squeezing will be used, which is more efficient\n            for drawing large numbers of samples\n        lb: lower bound of the domain\n        ub: upper bound of the domain\n        ns: maximum number of points defining the hulls\n        fargs: arguments for logpdf and grad_logpdf\n        ' self.device = device self.lb = lb self.ub = ub self.logpdf = logpdf self.grad_logpdf = grad_logpdf self.fargs = fargs self.ns = ns self.xi = xi.to(self.device) (self.B, self.K) = self.xi.size() self.h = torch.zeros(self.B, ns).to(self.device) self.hprime = torch.zeros(self.B, ns).to(self.device) self.x = torch.zeros(self.B, ns).to(self.device) self.h[(:, :self.K)] = self.logpdf(self.xi, **self.fargs) self.hprime[(:, :self.K)] = self.grad_logpdf(self.xi, **self.fargs) assert torch.isfinite(self.hprime).all() self.x[(:, :self.K)] = self.xi self.offset = self.h.max((- 1))[0].view((- 1), 1) self.h = (self.h - self.offset) if (not (self.hprime[(:, 0)] > 0).all()): raise IOError('Initial anchor points must span mode of PDF (left).') if (not (self.hprime[(:, (self.K - 1))] < 0).all()): raise IOError('Initial anchor points must span mode of PDF (right).') self.recalculate_hull()
def __init__(self, logpdf: Callable[([Tensor, Any], Tensor)], grad_logpdf: Callable[([Tensor, Any], Tensor)], device: torch.device, xi: Tensor, lb: float=(- infty), ub: float=infty, use_lower: bool=False, ns: int=50, **fargs: Any) -> None: '\n        Initialize the upper (and if needed lower) hulls with the specified params\n\n        Parameters\n        ==========\n        logpdf: function that computes log(f(u,...)), for given u, where f(u) is proportional to the\n            density we want to sample from\n        grad_logpdf: d/du log(f(u,...))\n        xi: ordered vector of starting points in which log(f(u,...)) is defined\n            to initialize the hulls\n        use_lower: True means the lower squeezing will be used, which is more efficient\n            for drawing large numbers of samples\n        lb: lower bound of the domain\n        ub: upper bound of the domain\n        ns: maximum number of points defining the hulls\n        fargs: arguments for logpdf and grad_logpdf\n        ' self.device = device self.lb = lb self.ub = ub self.logpdf = logpdf self.grad_logpdf = grad_logpdf self.fargs = fargs self.ns = ns self.xi = xi.to(self.device) (self.B, self.K) = self.xi.size() self.h = torch.zeros(self.B, ns).to(self.device) self.hprime = torch.zeros(self.B, ns).to(self.device) self.x = torch.zeros(self.B, ns).to(self.device) self.h[(:, :self.K)] = self.logpdf(self.xi, **self.fargs) self.hprime[(:, :self.K)] = self.grad_logpdf(self.xi, **self.fargs) assert torch.isfinite(self.hprime).all() self.x[(:, :self.K)] = self.xi self.offset = self.h.max((- 1))[0].view((- 1), 1) self.h = (self.h - self.offset) if (not (self.hprime[(:, 0)] > 0).all()): raise IOError('Initial anchor points must span mode of PDF (left).') if (not (self.hprime[(:, (self.K - 1))] < 0).all()): raise IOError('Initial anchor points must span mode of PDF (right).') self.recalculate_hull()<|docstring|>Initialize the upper (and if needed lower) hulls with the specified params Parameters ========== logpdf: function that computes log(f(u,...)), for given u, where f(u) is proportional to the density we want to sample from grad_logpdf: d/du log(f(u,...)) xi: ordered vector of starting points in which log(f(u,...)) is defined to initialize the hulls use_lower: True means the lower squeezing will be used, which is more efficient for drawing large numbers of samples lb: lower bound of the domain ub: upper bound of the domain ns: maximum number of points defining the hulls fargs: arguments for logpdf and grad_logpdf<|endoftext|>
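For illustration, a minimal usage sketch of the sampler described by the record above, with a standard-normal target. The class name ARS and its import are assumptions: the records only show the module path mt/mvae/distributions/ars.py, not the class name.

import torch

# Hypothetical import; the class name is not shown in the records above.
# from mt.mvae.distributions.ars import ARS

def logpdf(x, **fargs):
    return -0.5 * x ** 2  # log N(x; 0, 1) up to an additive constant

def grad_logpdf(x, **fargs):
    return -x  # d/dx of the log-density above

# Anchor points must straddle the mode at 0 (positive slope on the left,
# negative slope on the right), matching the IOError checks in __init__.
xi = torch.tensor([[-2.0, -1.0, 1.0, 2.0]])  # shape (B=1, K=4)
# sampler = ARS(logpdf, grad_logpdf, torch.device('cpu'), xi)
# samples = sampler.sample(torch.Size([100]))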
ab096b1d33f841a9fd383dcd6d0d057f4b6364ebe2e658045162228b97aeb217
def sample(self, shape: torch.Size=torch.Size(), max_steps: int=1000) -> Tensor: 'Draw N samples and update upper and lower hulls accordingly.' shape = (shape if isinstance(shape, torch.Size) else torch.Size([shape])) samples = torch.ones(self.B, *shape).to(self.device) bool_mask = (torch.ones(self.B, *shape) == 1).to(self.device) count = 0 while (bool_mask.sum() != 0): count += 1 (xt, i) = self.sample_upper(shape) ht = self.logpdf(xt, **self.fargs) hprimet = self.grad_logpdf(xt, **self.fargs) ht = (ht - self.offset) ut = (self.h.gather(1, i) + ((xt - self.x.gather(1, i)) * self.hprime.gather(1, i))) u = torch.rand(shape).to(self.device) accept = (u < torch.exp((ht - ut))) reject = (1 - accept) samples[(bool_mask * accept)] = xt[(bool_mask * accept)] bool_mask[(bool_mask * accept)] = reject[(bool_mask * accept)] if (self.K < self.ns): nb_insert = (self.ns - self.K) self.recalculate_hull(nb_insert, xt[(:, :nb_insert)], ht[(:, :nb_insert)], hprimet[(:, :nb_insert)]) if (count > max_steps): raise ValueError(f'ARS did not converge in {max_steps} steps ({bool_mask.sum()}/{bool_mask.shape}).') return samples.t().unsqueeze((- 1))
Draw N samples and update upper and lower hulls accordingly.
mt/mvae/distributions/ars.py
sample
macio232/mvae
53
python
def sample(self, shape: torch.Size=torch.Size(), max_steps: int=1000) -> Tensor: shape = (shape if isinstance(shape, torch.Size) else torch.Size([shape])) samples = torch.ones(self.B, *shape).to(self.device) bool_mask = (torch.ones(self.B, *shape) == 1).to(self.device) count = 0 while (bool_mask.sum() != 0): count += 1 (xt, i) = self.sample_upper(shape) ht = self.logpdf(xt, **self.fargs) hprimet = self.grad_logpdf(xt, **self.fargs) ht = (ht - self.offset) ut = (self.h.gather(1, i) + ((xt - self.x.gather(1, i)) * self.hprime.gather(1, i))) u = torch.rand(shape).to(self.device) accept = (u < torch.exp((ht - ut))) reject = (1 - accept) samples[(bool_mask * accept)] = xt[(bool_mask * accept)] bool_mask[(bool_mask * accept)] = reject[(bool_mask * accept)] if (self.K < self.ns): nb_insert = (self.ns - self.K) self.recalculate_hull(nb_insert, xt[(:, :nb_insert)], ht[(:, :nb_insert)], hprimet[(:, :nb_insert)]) if (count > max_steps): raise ValueError(f'ARS did not converge in {max_steps} steps ({bool_mask.sum()}/{bool_mask.shape}).') return samples.t().unsqueeze((- 1))
def sample(self, shape: torch.Size=torch.Size(), max_steps: int=1000) -> Tensor: shape = (shape if isinstance(shape, torch.Size) else torch.Size([shape])) samples = torch.ones(self.B, *shape).to(self.device) bool_mask = (torch.ones(self.B, *shape) == 1).to(self.device) count = 0 while (bool_mask.sum() != 0): count += 1 (xt, i) = self.sample_upper(shape) ht = self.logpdf(xt, **self.fargs) hprimet = self.grad_logpdf(xt, **self.fargs) ht = (ht - self.offset) ut = (self.h.gather(1, i) + ((xt - self.x.gather(1, i)) * self.hprime.gather(1, i))) u = torch.rand(shape).to(self.device) accept = (u < torch.exp((ht - ut))) reject = (1 - accept) samples[(bool_mask * accept)] = xt[(bool_mask * accept)] bool_mask[(bool_mask * accept)] = reject[(bool_mask * accept)] if (self.K < self.ns): nb_insert = (self.ns - self.K) self.recalculate_hull(nb_insert, xt[(:, :nb_insert)], ht[(:, :nb_insert)], hprimet[(:, :nb_insert)]) if (count > max_steps): raise ValueError(f'ARS did not converge in {max_steps} steps ({bool_mask.sum()}/{bool_mask.shape}).') return samples.t().unsqueeze((- 1))<|docstring|>Draw N samples and update upper and lower hulls accordingly.<|endoftext|>
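For reference, the accept test in the record above is the standard rejection step against the exponentiated upper hull. With h = log f - offset and u_hull the piecewise-linear upper bound of h evaluated by sample_upper:

u \sim \mathcal{U}(0, 1), \qquad \text{accept } x_t \iff u < \exp\big(h(x_t) - u_{\text{hull}}(x_t)\big) = \frac{f(x_t)}{e^{\text{offset}}\, e^{u_{\text{hull}}(x_t)}}

The offset cancels from the target-to-envelope ratio; subtracting it only improves numerical stability.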
cdf7ec15a2a984590c0c4586feb1cb0846a2f66189d4b2d9bda67d4e4acfee07
def recalculate_hull(self, nbnew: int=0, xnew: Optional[Tensor]=None, hnew: Optional[Tensor]=None, hprimenew: Optional[Tensor]=None) -> None: 'Recalculate hull from existing x, h, hprime.' if (xnew is not None): self.x[(:, self.K:(self.K + nbnew))] = xnew (self.x, idx) = self.x.sort() self.h[(:, self.K:(self.K + nbnew))] = hnew self.h = self.h.gather(1, idx) self.hprime[(:, self.K:(self.K + nbnew))] = hprimenew self.hprime = self.hprime.gather(1, idx) self.K += xnew.size((- 1)) self.z = torch.zeros(self.B, (self.K + 1)).to(self.device) self.z[(:, 0)] = self.lb self.z[(:, self.K)] = self.ub self.z[(:, 1:self.K)] = ((diff(self.h[(:, :self.K)]) - diff((self.x[(:, :self.K)] * self.hprime[(:, :self.K)]))) / (- diff(self.hprime[(:, :self.K)]))) idx = ([0] + list(range(self.K))) self.u = (self.h[(:, idx)] + (self.hprime[(:, idx)] * (self.z - self.x[(:, idx)]))) exp_u = torch.exp(self.u) self.s = (diff(exp_u) / self.hprime[(:, :self.K)]) self.s[(self.hprime[(:, :self.K)] == 0.0)] = 0.0 assert torch.isfinite(self.s).all() self.cs = torch.cat((torch.zeros(self.B, 1).to(self.device), torch.cumsum(self.s, dim=(- 1))), dim=(- 1)) self.cu = self.cs[(:, (- 1))] assert torch.isfinite(self.cu).all()
Recalculate hull from existing x, h, hprime.
mt/mvae/distributions/ars.py
recalculate_hull
macio232/mvae
53
python
def recalculate_hull(self, nbnew: int=0, xnew: Optional[Tensor]=None, hnew: Optional[Tensor]=None, hprimenew: Optional[Tensor]=None) -> None: if (xnew is not None): self.x[(:, self.K:(self.K + nbnew))] = xnew (self.x, idx) = self.x.sort() self.h[(:, self.K:(self.K + nbnew))] = hnew self.h = self.h.gather(1, idx) self.hprime[(:, self.K:(self.K + nbnew))] = hprimenew self.hprime = self.hprime.gather(1, idx) self.K += xnew.size((- 1)) self.z = torch.zeros(self.B, (self.K + 1)).to(self.device) self.z[(:, 0)] = self.lb self.z[(:, self.K)] = self.ub self.z[(:, 1:self.K)] = ((diff(self.h[(:, :self.K)]) - diff((self.x[(:, :self.K)] * self.hprime[(:, :self.K)]))) / (- diff(self.hprime[(:, :self.K)]))) idx = ([0] + list(range(self.K))) self.u = (self.h[(:, idx)] + (self.hprime[(:, idx)] * (self.z - self.x[(:, idx)]))) exp_u = torch.exp(self.u) self.s = (diff(exp_u) / self.hprime[(:, :self.K)]) self.s[(self.hprime[(:, :self.K)] == 0.0)] = 0.0 assert torch.isfinite(self.s).all() self.cs = torch.cat((torch.zeros(self.B, 1).to(self.device), torch.cumsum(self.s, dim=(- 1))), dim=(- 1)) self.cu = self.cs[(:, (- 1))] assert torch.isfinite(self.cu).all()
def recalculate_hull(self, nbnew: int=0, xnew: Optional[Tensor]=None, hnew: Optional[Tensor]=None, hprimenew: Optional[Tensor]=None) -> None: if (xnew is not None): self.x[(:, self.K:(self.K + nbnew))] = xnew (self.x, idx) = self.x.sort() self.h[(:, self.K:(self.K + nbnew))] = hnew self.h = self.h.gather(1, idx) self.hprime[(:, self.K:(self.K + nbnew))] = hprimenew self.hprime = self.hprime.gather(1, idx) self.K += xnew.size((- 1)) self.z = torch.zeros(self.B, (self.K + 1)).to(self.device) self.z[(:, 0)] = self.lb self.z[(:, self.K)] = self.ub self.z[(:, 1:self.K)] = ((diff(self.h[(:, :self.K)]) - diff((self.x[(:, :self.K)] * self.hprime[(:, :self.K)]))) / (- diff(self.hprime[(:, :self.K)]))) idx = ([0] + list(range(self.K))) self.u = (self.h[(:, idx)] + (self.hprime[(:, idx)] * (self.z - self.x[(:, idx)]))) exp_u = torch.exp(self.u) self.s = (diff(exp_u) / self.hprime[(:, :self.K)]) self.s[(self.hprime[(:, :self.K)] == 0.0)] = 0.0 assert torch.isfinite(self.s).all() self.cs = torch.cat((torch.zeros(self.B, 1).to(self.device), torch.cumsum(self.s, dim=(- 1))), dim=(- 1)) self.cu = self.cs[(:, (- 1))] assert torch.isfinite(self.cu).all()<|docstring|>Recalculate hull from existing x, h, hprime.<|endoftext|>
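The z assignment in the record above computes where consecutive tangents of h intersect; equating the tangents at x_i and x_{i+1} gives

h_i + h_i' (z_i - x_i) = h_{i+1} + h_{i+1}' (z_i - x_{i+1}) \quad\Longrightarrow\quad z_i = \frac{h_{i+1} - h_i - x_{i+1} h_{i+1}' + x_i h_i'}{h_i' - h_{i+1}'}

which is exactly (diff(h) - diff(x * hprime)) / (-diff(hprime)) as coded.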
f9c838d7c0915a7bd570764a21b2ad9cbc16513fea244f260101f5d1ec071440
def sample_upper(self, shape: torch.Size=torch.Size()) -> Tuple[(Tensor, Tensor)]: 'Return a single value randomly sampled from the upper hull and index of segment.' u = torch.rand(self.B, *shape, device=self.device) i = ((self.cs / self.cu.unsqueeze((- 1))).unsqueeze((- 1)) <= u.unsqueeze(1).expand(*self.cs.shape, *shape)) idx = (i.sum(1) - 1) xt = (self.x.gather(1, idx) + (((- self.h.gather(1, idx)) + torch.log(((self.hprime.gather(1, idx) * ((self.cu.unsqueeze((- 1)) * u) - self.cs.gather(1, idx))) + torch.exp(self.u.gather(1, idx))))) / self.hprime.gather(1, idx))) assert torch.isfinite(xt).all() return (xt, idx)
Return a single value randomly sampled from the upper hull and index of segment.
mt/mvae/distributions/ars.py
sample_upper
macio232/mvae
53
python
def sample_upper(self, shape: torch.Size=torch.Size()) -> Tuple[(Tensor, Tensor)]: u = torch.rand(self.B, *shape, device=self.device) i = ((self.cs / self.cu.unsqueeze((- 1))).unsqueeze((- 1)) <= u.unsqueeze(1).expand(*self.cs.shape, *shape)) idx = (i.sum(1) - 1) xt = (self.x.gather(1, idx) + (((- self.h.gather(1, idx)) + torch.log(((self.hprime.gather(1, idx) * ((self.cu.unsqueeze((- 1)) * u) - self.cs.gather(1, idx))) + torch.exp(self.u.gather(1, idx))))) / self.hprime.gather(1, idx))) assert torch.isfinite(xt).all() return (xt, idx)
def sample_upper(self, shape: torch.Size=torch.Size()) -> Tuple[(Tensor, Tensor)]: u = torch.rand(self.B, *shape, device=self.device) i = ((self.cs / self.cu.unsqueeze((- 1))).unsqueeze((- 1)) <= u.unsqueeze(1).expand(*self.cs.shape, *shape)) idx = (i.sum(1) - 1) xt = (self.x.gather(1, idx) + (((- self.h.gather(1, idx)) + torch.log(((self.hprime.gather(1, idx) * ((self.cu.unsqueeze((- 1)) * u) - self.cs.gather(1, idx))) + torch.exp(self.u.gather(1, idx))))) / self.hprime.gather(1, idx))) assert torch.isfinite(xt).all() return (xt, idx)<|docstring|>Return a single value randomly sampled from the upper hull and index of segment.<|endoftext|>
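sample_upper inverts the CDF of the piecewise-exponential hull segment by segment. A self-contained numpy sketch of the same inversion for a single segment (variable names mirror the record; this is an illustration, not the repository's code):

import numpy as np

rng = np.random.default_rng(0)

# One linear hull segment u(t) = h + hprime * (t - x0) on [z0, z1].
h, hprime, x0 = 0.0, -1.0, 0.0
z0, z1 = 0.0, 5.0

# Area under exp(u) over the segment, as in recalculate_hull's s.
s = (np.exp(h + hprime * (z1 - x0)) - np.exp(h + hprime * (z0 - x0))) / hprime

# Invert the segment CDF: solve the integral of exp(u) from z0 to x for x.
v = rng.random()
x = x0 + (-h + np.log(hprime * v * s + np.exp(h + hprime * (z0 - x0)))) / hprime
assert z0 <= x <= z1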
a51087f1621cb778b054e14857c83e3d46b443a304ede8e6194577f3a422a62b
def minimumMoves(self, grid): '\n :type grid: List[List[int]]\n :rtype: int\n ' (level, q, lookup) = (0, [(0, 0, False)], set()) while q: next_q = [] for (r, c, is_vertical) in q: if ((r, c, is_vertical) in lookup): continue if ((r, c, is_vertical) == ((len(grid) - 1), (len(grid) - 2), False)): return level lookup.add((r, c, is_vertical)) if (not is_vertical): if (((c + 2) != len(grid[0])) and (grid[r][(c + 2)] == 0)): next_q.append((r, (c + 1), is_vertical)) if (((r + 1) != len(grid)) and (grid[(r + 1)][c] == 0) and (grid[(r + 1)][(c + 1)] == 0)): next_q.append(((r + 1), c, is_vertical)) next_q.append((r, c, (not is_vertical))) else: if (((r + 2) != len(grid)) and (grid[(r + 2)][c] == 0)): next_q.append(((r + 1), c, is_vertical)) if (((c + 1) != len(grid)) and (grid[r][(c + 1)] == 0) and (grid[(r + 1)][(c + 1)] == 0)): next_q.append((r, (c + 1), is_vertical)) next_q.append((r, c, (not is_vertical))) q = next_q level += 1 return (- 1)
:type grid: List[List[int]] :rtype: int
Python/minimum-moves-to-reach-target-with-rotations.py
minimumMoves
donaldcao/LeetCode-Solutions
3,269
python
def minimumMoves(self, grid): '\n :type grid: List[List[int]]\n :rtype: int\n ' (level, q, lookup) = (0, [(0, 0, False)], set()) while q: next_q = [] for (r, c, is_vertical) in q: if ((r, c, is_vertical) in lookup): continue if ((r, c, is_vertical) == ((len(grid) - 1), (len(grid) - 2), False)): return level lookup.add((r, c, is_vertical)) if (not is_vertical): if (((c + 2) != len(grid[0])) and (grid[r][(c + 2)] == 0)): next_q.append((r, (c + 1), is_vertical)) if (((r + 1) != len(grid)) and (grid[(r + 1)][c] == 0) and (grid[(r + 1)][(c + 1)] == 0)): next_q.append(((r + 1), c, is_vertical)) next_q.append((r, c, (not is_vertical))) else: if (((r + 2) != len(grid)) and (grid[(r + 2)][c] == 0)): next_q.append(((r + 1), c, is_vertical)) if (((c + 1) != len(grid)) and (grid[r][(c + 1)] == 0) and (grid[(r + 1)][(c + 1)] == 0)): next_q.append((r, (c + 1), is_vertical)) next_q.append((r, c, (not is_vertical))) q = next_q level += 1 return (- 1)
def minimumMoves(self, grid): '\n :type grid: List[List[int]]\n :rtype: int\n ' (level, q, lookup) = (0, [(0, 0, False)], set()) while q: next_q = [] for (r, c, is_vertical) in q: if ((r, c, is_vertical) in lookup): continue if ((r, c, is_vertical) == ((len(grid) - 1), (len(grid) - 2), False)): return level lookup.add((r, c, is_vertical)) if (not is_vertical): if (((c + 2) != len(grid[0])) and (grid[r][(c + 2)] == 0)): next_q.append((r, (c + 1), is_vertical)) if (((r + 1) != len(grid)) and (grid[(r + 1)][c] == 0) and (grid[(r + 1)][(c + 1)] == 0)): next_q.append(((r + 1), c, is_vertical)) next_q.append((r, c, (not is_vertical))) else: if (((r + 2) != len(grid)) and (grid[(r + 2)][c] == 0)): next_q.append(((r + 1), c, is_vertical)) if (((c + 1) != len(grid)) and (grid[r][(c + 1)] == 0) and (grid[(r + 1)][(c + 1)] == 0)): next_q.append((r, (c + 1), is_vertical)) next_q.append((r, c, (not is_vertical))) q = next_q level += 1 return (- 1)<|docstring|>:type grid: List[List[int]] :rtype: int<|endoftext|>
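A usage sketch for the record above, assuming the function minimumMoves is in scope and attached to a LeetCode-style Solution class; the grid is the first example of LeetCode 1210 (Minimum Moves to Reach Target with Rotations):

class Solution(object):
    pass

# Attach the record's method for illustration.
Solution.minimumMoves = minimumMoves

grid = [[0, 0, 0, 0, 0, 1],
        [1, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 1],
        [0, 0, 1, 0, 1, 0],
        [0, 1, 1, 0, 0, 0],
        [0, 1, 1, 0, 0, 0]]
print(Solution().minimumMoves(grid))  # expected output: 11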
317bbef5b8fbb21c50fae6159e24d1cd5919b0a661b86f4e0ba46e011686dfcb
def bayesopt(key: Any, model: gp.GP, sub_dataset_key: Union[(int, str)], query_oracle: Callable[([Any], Any)], ac_func: Callable[(..., jnp.array)], iters: int, input_sampler: Callable[(..., jnp.array)]) -> SubDataset: 'Running bayesopt with a query oracle over a continuous search space.\n\n  Args:\n    key: Jax random state.\n    model: gp.GP.\n    sub_dataset_key: key of the sub_dataset for testing in dataset.\n    query_oracle: evaluation function.\n    ac_func: acquisition function handle (see acfun.py).\n    iters: number of iterations in BayesOpt sequential queries.\n    input_sampler: function for sampling inputs among which the initial point is\n      chosen for acquisition function optimization.\n\n  Returns:\n    All observations after bayesopt in the form of an (x_observed, y_observed)\n    tuple. These observations include those made before bayesopt.\n  ' input_dim = model.input_dim for i in range(iters): start_time = time.time() x_samples = input_sampler(key, input_dim) evals = ac_func(model=model, sub_dataset_key=sub_dataset_key, x_queries=x_samples) select_idx = evals.argmax() x_init = x_samples[select_idx] def f(x): return (- ac_func(model=model, sub_dataset_key=sub_dataset_key, x_queries=jnp.array([x])).flatten()[0]) opt = jaxopt.ScipyBoundedMinimize(method='L-BFGS-B', fun=f) opt_ret = opt.run(x_init, bounds=[jnp.zeros(input_dim), jnp.ones(input_dim)]) eval_datapoint = (opt_ret.params, query_oracle(opt_ret.params[(None, :)])) logging.info(msg=f'{i}-th iter, x_init={x_init}, eval_datapoint={eval_datapoint}, elapsed_time={(time.time() - start_time)}') model.update_sub_dataset(eval_datapoint, sub_dataset_key=sub_dataset_key, is_append=True) if (('retrain' in model.params.config) and (model.params.config['retrain'] > 0)): if (model.params.config['objective'] in [obj.regkl, obj.regeuc]): raise ValueError('Objective must include NLL to retrain.') else: maxiter = model.params.config['retrain'] logging.info(msg=f'Retraining with maxiter = {maxiter}.') model.params.config['maxiter'] = maxiter model.train() return model.dataset.get(sub_dataset_key, SubDataset(jnp.empty(0), jnp.empty(0)))
Running bayesopt with a query oracle over a continuous search space. Args: key: Jax random state. model: gp.GP. sub_dataset_key: key of the sub_dataset for testing in dataset. query_oracle: evaluation function. ac_func: acquisition function handle (see acfun.py). iters: number of iterations in BayesOpt sequential queries. input_sampler: function for sampling inputs among which the initial point is chosen for acquisition function optimization. Returns: All observations after bayesopt in the form of an (x_observed, y_observed) tuple. These observations include those made before bayesopt.
hyperbo/bo_utils/bayesopt.py
bayesopt
google-research/hyperbo
3
python
def bayesopt(key: Any, model: gp.GP, sub_dataset_key: Union[(int, str)], query_oracle: Callable[([Any], Any)], ac_func: Callable[(..., jnp.array)], iters: int, input_sampler: Callable[(..., jnp.array)]) -> SubDataset: 'Running bayesopt with a query oracle over a continuous search space.\n\n  Args:\n    key: Jax random state.\n    model: gp.GP.\n    sub_dataset_key: key of the sub_dataset for testing in dataset.\n    query_oracle: evaluation function.\n    ac_func: acquisition function handle (see acfun.py).\n    iters: number of iterations in BayesOpt sequential queries.\n    input_sampler: function for sampling inputs among which the initial point is\n      chosen for acquisition function optimization.\n\n  Returns:\n    All observations after bayesopt in the form of an (x_observed, y_observed)\n    tuple. These observations include those made before bayesopt.\n  ' input_dim = model.input_dim for i in range(iters): start_time = time.time() x_samples = input_sampler(key, input_dim) evals = ac_func(model=model, sub_dataset_key=sub_dataset_key, x_queries=x_samples) select_idx = evals.argmax() x_init = x_samples[select_idx] def f(x): return (- ac_func(model=model, sub_dataset_key=sub_dataset_key, x_queries=jnp.array([x])).flatten()[0]) opt = jaxopt.ScipyBoundedMinimize(method='L-BFGS-B', fun=f) opt_ret = opt.run(x_init, bounds=[jnp.zeros(input_dim), jnp.ones(input_dim)]) eval_datapoint = (opt_ret.params, query_oracle(opt_ret.params[(None, :)])) logging.info(msg=f'{i}-th iter, x_init={x_init}, eval_datapoint={eval_datapoint}, elapsed_time={(time.time() - start_time)}') model.update_sub_dataset(eval_datapoint, sub_dataset_key=sub_dataset_key, is_append=True) if (('retrain' in model.params.config) and (model.params.config['retrain'] > 0)): if (model.params.config['objective'] in [obj.regkl, obj.regeuc]): raise ValueError('Objective must include NLL to retrain.') else: maxiter = model.params.config['retrain'] logging.info(msg=f'Retraining with maxiter = {maxiter}.') model.params.config['maxiter'] = maxiter model.train() return model.dataset.get(sub_dataset_key, SubDataset(jnp.empty(0), jnp.empty(0)))
def bayesopt(key: Any, model: gp.GP, sub_dataset_key: Union[(int, str)], query_oracle: Callable[([Any], Any)], ac_func: Callable[(..., jnp.array)], iters: int, input_sampler: Callable[(..., jnp.array)]) -> SubDataset: 'Running bayesopt with a query oracle over a continuous search space.\n\n  Args:\n    key: Jax random state.\n    model: gp.GP.\n    sub_dataset_key: key of the sub_dataset for testing in dataset.\n    query_oracle: evaluation function.\n    ac_func: acquisition function handle (see acfun.py).\n    iters: number of iterations in BayesOpt sequential queries.\n    input_sampler: function for sampling inputs among which the initial point is\n      chosen for acquisition function optimization.\n\n  Returns:\n    All observations after bayesopt in the form of an (x_observed, y_observed)\n    tuple. These observations include those made before bayesopt.\n  ' input_dim = model.input_dim for i in range(iters): start_time = time.time() x_samples = input_sampler(key, input_dim) evals = ac_func(model=model, sub_dataset_key=sub_dataset_key, x_queries=x_samples) select_idx = evals.argmax() x_init = x_samples[select_idx] def f(x): return (- ac_func(model=model, sub_dataset_key=sub_dataset_key, x_queries=jnp.array([x])).flatten()[0]) opt = jaxopt.ScipyBoundedMinimize(method='L-BFGS-B', fun=f) opt_ret = opt.run(x_init, bounds=[jnp.zeros(input_dim), jnp.ones(input_dim)]) eval_datapoint = (opt_ret.params, query_oracle(opt_ret.params[(None, :)])) logging.info(msg=f'{i}-th iter, x_init={x_init}, eval_datapoint={eval_datapoint}, elapsed_time={(time.time() - start_time)}') model.update_sub_dataset(eval_datapoint, sub_dataset_key=sub_dataset_key, is_append=True) if (('retrain' in model.params.config) and (model.params.config['retrain'] > 0)): if (model.params.config['objective'] in [obj.regkl, obj.regeuc]): raise ValueError('Objective must include NLL to retrain.') else: maxiter = model.params.config['retrain'] logging.info(msg=f'Retraining with maxiter = {maxiter}.') model.params.config['maxiter'] = maxiter model.train() return model.dataset.get(sub_dataset_key, SubDataset(jnp.empty(0), jnp.empty(0)))<|docstring|>Running bayesopt with a query oracle over a continuous search space. Args: key: Jax random state. model: gp.GP. sub_dataset_key: key of the sub_dataset for testing in dataset. query_oracle: evaluation function. ac_func: acquisition function handle (see acfun.py). iters: number of iterations in BayesOpt sequential queries. input_sampler: function for sampling inputs among which the initial point is chosen for acquisition function optimization. Returns: All observations after bayesopt in the form of an (x_observed, y_observed) tuple. These observations include those made before bayesopt.<|endoftext|>
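The inner acquisition maximization above negates ac_func and runs bounded L-BFGS-B via jaxopt. A minimal standalone sketch of that step, with a made-up smooth objective standing in for the acquisition:

import jax.numpy as jnp
import jaxopt

def neg_acquisition(x):
    # Toy stand-in for -ac_func(...): minimized at x = 0.3 in every dimension.
    return jnp.sum((x - 0.3) ** 2)

input_dim = 2
x_init = jnp.full((input_dim,), 0.9)
opt = jaxopt.ScipyBoundedMinimize(method='L-BFGS-B', fun=neg_acquisition)
opt_ret = opt.run(x_init, bounds=[jnp.zeros(input_dim), jnp.ones(input_dim)])
print(opt_ret.params)  # close to [0.3, 0.3]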
c39b0febf4ab9486fa13f8f9d19800195f7787e61a583b00878de696bd087a4e
def simulated_bayesopt(model: gp.GP, sub_dataset_key: Union[(int, str)], queried_sub_dataset: SubDataset, ac_func: Callable[(..., jnp.array)], iters: int) -> SubDataset: 'Running simulated bayesopt on a set of pre-evaluated inputs x_queries.\n\n Args:\n model: gp.GP.\n sub_dataset_key: key of the sub_dataset for testing in dataset.\n queried_sub_dataset: sub_dataset that can be queried.\n ac_func: acquisition function handle (see acfun.py).\n iters: number of iterations in BayesOpt sequential queries.\n\n Returns:\n All observations after bayesopt in the form of an (x_observed, y_observed)\n tuple. These observations include those made before bayesopt.\n ' for _ in range(iters): evals = ac_func(model=model, sub_dataset_key=sub_dataset_key, x_queries=queried_sub_dataset.x) select_idx = evals.argmax() eval_datapoint = (queried_sub_dataset.x[select_idx], queried_sub_dataset.y[select_idx]) model.update_sub_dataset(eval_datapoint, sub_dataset_key=sub_dataset_key, is_append=True) if (('retrain' in model.params.config) and (model.params.config['retrain'] > 0)): if (model.params.config['objective'] in [obj.regkl, obj.regeuc]): raise ValueError('Objective must include NLL to retrain.') else: maxiter = model.params.config['retrain'] logging.info(msg=f'Retraining with maxiter = {maxiter}.') model.params.config['maxiter'] = maxiter model.train() return model.dataset.get(sub_dataset_key, SubDataset(jnp.empty(0), jnp.empty(0)))
Running simulated bayesopt on a set of pre-evaluated inputs x_queries. Args: model: gp.GP. sub_dataset_key: key of the sub_dataset for testing in dataset. queried_sub_dataset: sub_dataset that can be queried. ac_func: acquisition function handle (see acfun.py). iters: number of iterations in BayesOpt sequential queries. Returns: All observations after bayesopt in the form of an (x_observed, y_observed) tuple. These observations include those made before bayesopt.
hyperbo/bo_utils/bayesopt.py
simulated_bayesopt
google-research/hyperbo
3
python
def simulated_bayesopt(model: gp.GP, sub_dataset_key: Union[(int, str)], queried_sub_dataset: SubDataset, ac_func: Callable[(..., jnp.array)], iters: int) -> SubDataset: 'Running simulated bayesopt on a set of pre-evaluated inputs x_queries.\n\n Args:\n model: gp.GP.\n sub_dataset_key: key of the sub_dataset for testing in dataset.\n queried_sub_dataset: sub_dataset that can be queried.\n ac_func: acquisition function handle (see acfun.py).\n iters: number of iterations in BayesOpt sequential queries.\n\n Returns:\n All observations after bayesopt in the form of an (x_observed, y_observed)\n tuple. These observations include those made before bayesopt.\n ' for _ in range(iters): evals = ac_func(model=model, sub_dataset_key=sub_dataset_key, x_queries=queried_sub_dataset.x) select_idx = evals.argmax() eval_datapoint = (queried_sub_dataset.x[select_idx], queried_sub_dataset.y[select_idx]) model.update_sub_dataset(eval_datapoint, sub_dataset_key=sub_dataset_key, is_append=True) if (('retrain' in model.params.config) and (model.params.config['retrain'] > 0)): if (model.params.config['objective'] in [obj.regkl, obj.regeuc]): raise ValueError('Objective must include NLL to retrain.') else: maxiter = model.params.config['retrain'] logging.info(msg=f'Retraining with maxiter = {maxiter}.') model.params.config['maxiter'] = maxiter model.train() return model.dataset.get(sub_dataset_key, SubDataset(jnp.empty(0), jnp.empty(0)))
def simulated_bayesopt(model: gp.GP, sub_dataset_key: Union[(int, str)], queried_sub_dataset: SubDataset, ac_func: Callable[(..., jnp.array)], iters: int) -> SubDataset: 'Running simulated bayesopt on a set of pre-evaluated inputs x_queries.\n\n Args:\n model: gp.GP.\n sub_dataset_key: key of the sub_dataset for testing in dataset.\n queried_sub_dataset: sub_dataset that can be queried.\n ac_func: acquisition function handle (see acfun.py).\n iters: number of iterations in BayesOpt sequential queries.\n\n Returns:\n All observations after bayesopt in the form of an (x_observed, y_observed)\n tuple. These observations include those made before bayesopt.\n ' for _ in range(iters): evals = ac_func(model=model, sub_dataset_key=sub_dataset_key, x_queries=queried_sub_dataset.x) select_idx = evals.argmax() eval_datapoint = (queried_sub_dataset.x[select_idx], queried_sub_dataset.y[select_idx]) model.update_sub_dataset(eval_datapoint, sub_dataset_key=sub_dataset_key, is_append=True) if (('retrain' in model.params.config) and (model.params.config['retrain'] > 0)): if (model.params.config['objective'] in [obj.regkl, obj.regeuc]): raise ValueError('Objective must include NLL to retrain.') else: maxiter = model.params.config['retrain'] logging.info(msg=f'Retraining with maxiter = {maxiter}.') model.params.config['maxiter'] = maxiter model.train() return model.dataset.get(sub_dataset_key, SubDataset(jnp.empty(0), jnp.empty(0)))<|docstring|>Running simulated bayesopt on a set of pre-evaluated inputs x_queries. Args: model: gp.GP. sub_dataset_key: key of the sub_dataset for testing in dataset. queried_sub_dataset: sub_dataset that can be queried. ac_func: acquisition function handle (see acfun.py). iters: number of iterations in BayesOpt sequential queries. Returns: All observations after bayesopt in the form of an (x_observed, y_observed) tuple. These observations include those made before bayesopt.<|endoftext|>
4234338bf259b6006783f32cd699128bee6b9d05b3daa1da52ab1c1fe3f495d7
def run_synthetic(dataset, sub_dataset_key, queried_sub_dataset, mean_func, cov_func, init_params, ac_func, iters, warp_func=None, init_random_key=None, method='hyperbo', init_model=False, finite_search_space=True, data_loader_name='', params_save_file=None): 'Running bayesopt experiment with synthetic data.\n\n  Args:\n    dataset: a list of vx, vy pairs, i.e. [(vx, vy)_i], where vx is\n      m_points_historical x d and vy is m_points_historical x 1.\n    sub_dataset_key: key of the sub_dataset for testing in dataset.\n    queried_sub_dataset: sub_dataset that can be queried.\n    mean_func: mean function handle that maps from (params, n x d input,\n      warp_func) to an n dimensional mean vector. (see vector_map in mean.py for\n      more details).\n    cov_func: covariance function handle that maps from (params, n1 x d input1,\n      n2 x d input2, warp_func) to a n1 x n2 covariance matrix (see matrix_map\n      in kernel.py for more details).\n    init_params: initial GP parameters for inference.\n    ac_func: acquisition function handle (see acfun.py).\n    iters: Number of iterations in sequential bayesopt queries.\n    warp_func: optional dictionary that specifies the warping function for each\n      parameter.\n    init_random_key: random state for jax.random, to be used to initialize\n      required parts of GPParams.\n    method: BO method.\n    init_model: to initialize model if True; otherwise False.\n    finite_search_space: use a finite search space if True; otherwise False.\n    data_loader_name: data loader name, e.g. pd1, hpob.\n    params_save_file: optional file name to save params.\n\n  Returns:\n    All observations in (x, y) pairs returned by the bayesopt strategy and all\n    the query points in (x, y) pairs. Model params as a dict.\n  ' logging.info(msg=f'run_synthetic is using method {method}.') if (method in const.USE_HGP): model_class = gp.HGP init_params.config.update({'objective': 'nll', 'method': 'slice_sample', 'burnin': 50, 'nsamples': 50, 'priors': priors.DEFAULT_PRIORS}) else: model_class = gp.GP model = model_class(dataset=dataset, mean_func=mean_func, cov_func=cov_func, params=init_params, warp_func=warp_func) key = init_random_key if init_model: (key, subkey) = jax.random.split(key) model.initialize_params(subkey) (key, subkey) = jax.random.split(key) model.train(subkey, params_save_file) if finite_search_space: sub_dataset = simulated_bayesopt(model=model, sub_dataset_key=sub_dataset_key, queried_sub_dataset=queried_sub_dataset, ac_func=ac_func, iters=iters) return ((sub_dataset.x, sub_dataset.y), (queried_sub_dataset.x, queried_sub_dataset.y), model.params.__dict__) else: (_, sample_key) = jax.random.split(key) sub_dataset = bayesopt(key=sample_key, model=model, sub_dataset_key=sub_dataset_key, query_oracle=queried_sub_dataset, ac_func=ac_func, iters=iters, input_sampler=INPUT_SAMPLERS[data_loader_name]) return ((sub_dataset.x, sub_dataset.y), None, model.params.__dict__)
Running bayesopt experiment with synthetic data. Args: dataset: a list of vx, vy pairs, i.e. [(vx, vy)_i], where vx is m_points_historical x d and vy is m_points_historical x 1. sub_dataset_key: key of the sub_dataset for testing in dataset. queried_sub_dataset: sub_dataset that can be queried. mean_func: mean function handle that maps from (params, n x d input, warp_func) to an n dimensional mean vector. (see vector_map in mean.py for more details). cov_func: covariance function handle that maps from (params, n1 x d input1, n2 x d input2, warp_func) to a n1 x n2 covariance matrix (see matrix_map in kernel.py for more details). init_params: initial GP parameters for inference. ac_func: acquisition function handle (see acfun.py). iters: Number of iterations in sequential bayesopt queries. warp_func: optional dictionary that specifies the warping function for each parameter. init_random_key: random state for jax.random, to be used to initialize required parts of GPParams. method: BO method. init_model: to initialize model if True; otherwise False. finite_search_space: use a finite search space if True; otherwise False. data_loader_name: data loader name, e.g. pd1, hpob. params_save_file: optional file name to save params. Returns: All observations in (x, y) pairs returned by the bayesopt strategy and all the query points in (x, y) pairs. Model params as a dict.
hyperbo/bo_utils/bayesopt.py
run_synthetic
google-research/hyperbo
3
python
def run_synthetic(dataset, sub_dataset_key, queried_sub_dataset, mean_func, cov_func, init_params, ac_func, iters, warp_func=None, init_random_key=None, method='hyperbo', init_model=False, finite_search_space=True, data_loader_name='', params_save_file=None): 'Running bayesopt experiment with synthetic data.\n\n  Args:\n    dataset: a list of vx, vy pairs, i.e. [(vx, vy)_i], where vx is\n      m_points_historical x d and vy is m_points_historical x 1.\n    sub_dataset_key: key of the sub_dataset for testing in dataset.\n    queried_sub_dataset: sub_dataset that can be queried.\n    mean_func: mean function handle that maps from (params, n x d input,\n      warp_func) to an n dimensional mean vector. (see vector_map in mean.py for\n      more details).\n    cov_func: covariance function handle that maps from (params, n1 x d input1,\n      n2 x d input2, warp_func) to a n1 x n2 covariance matrix (see matrix_map\n      in kernel.py for more details).\n    init_params: initial GP parameters for inference.\n    ac_func: acquisition function handle (see acfun.py).\n    iters: Number of iterations in sequential bayesopt queries.\n    warp_func: optional dictionary that specifies the warping function for each\n      parameter.\n    init_random_key: random state for jax.random, to be used to initialize\n      required parts of GPParams.\n    method: BO method.\n    init_model: to initialize model if True; otherwise False.\n    finite_search_space: use a finite search space if True; otherwise False.\n    data_loader_name: data loader name, e.g. pd1, hpob.\n    params_save_file: optional file name to save params.\n\n  Returns:\n    All observations in (x, y) pairs returned by the bayesopt strategy and all\n    the query points in (x, y) pairs. Model params as a dict.\n  ' logging.info(msg=f'run_synthetic is using method {method}.') if (method in const.USE_HGP): model_class = gp.HGP init_params.config.update({'objective': 'nll', 'method': 'slice_sample', 'burnin': 50, 'nsamples': 50, 'priors': priors.DEFAULT_PRIORS}) else: model_class = gp.GP model = model_class(dataset=dataset, mean_func=mean_func, cov_func=cov_func, params=init_params, warp_func=warp_func) key = init_random_key if init_model: (key, subkey) = jax.random.split(key) model.initialize_params(subkey) (key, subkey) = jax.random.split(key) model.train(subkey, params_save_file) if finite_search_space: sub_dataset = simulated_bayesopt(model=model, sub_dataset_key=sub_dataset_key, queried_sub_dataset=queried_sub_dataset, ac_func=ac_func, iters=iters) return ((sub_dataset.x, sub_dataset.y), (queried_sub_dataset.x, queried_sub_dataset.y), model.params.__dict__) else: (_, sample_key) = jax.random.split(key) sub_dataset = bayesopt(key=sample_key, model=model, sub_dataset_key=sub_dataset_key, query_oracle=queried_sub_dataset, ac_func=ac_func, iters=iters, input_sampler=INPUT_SAMPLERS[data_loader_name]) return ((sub_dataset.x, sub_dataset.y), None, model.params.__dict__)
def run_synthetic(dataset, sub_dataset_key, queried_sub_dataset, mean_func, cov_func, init_params, ac_func, iters, warp_func=None, init_random_key=None, method='hyperbo', init_model=False, finite_search_space=True, data_loader_name='', params_save_file=None): 'Running bayesopt experiment with synthetic data.\n\n  Args:\n    dataset: a list of vx, vy pairs, i.e. [(vx, vy)_i], where vx is\n      m_points_historical x d and vy is m_points_historical x 1.\n    sub_dataset_key: key of the sub_dataset for testing in dataset.\n    queried_sub_dataset: sub_dataset that can be queried.\n    mean_func: mean function handle that maps from (params, n x d input,\n      warp_func) to an n dimensional mean vector. (see vector_map in mean.py for\n      more details).\n    cov_func: covariance function handle that maps from (params, n1 x d input1,\n      n2 x d input2, warp_func) to a n1 x n2 covariance matrix (see matrix_map\n      in kernel.py for more details).\n    init_params: initial GP parameters for inference.\n    ac_func: acquisition function handle (see acfun.py).\n    iters: Number of iterations in sequential bayesopt queries.\n    warp_func: optional dictionary that specifies the warping function for each\n      parameter.\n    init_random_key: random state for jax.random, to be used to initialize\n      required parts of GPParams.\n    method: BO method.\n    init_model: to initialize model if True; otherwise False.\n    finite_search_space: use a finite search space if True; otherwise False.\n    data_loader_name: data loader name, e.g. pd1, hpob.\n    params_save_file: optional file name to save params.\n\n  Returns:\n    All observations in (x, y) pairs returned by the bayesopt strategy and all\n    the query points in (x, y) pairs. Model params as a dict.\n  ' logging.info(msg=f'run_synthetic is using method {method}.') if (method in const.USE_HGP): model_class = gp.HGP init_params.config.update({'objective': 'nll', 'method': 'slice_sample', 'burnin': 50, 'nsamples': 50, 'priors': priors.DEFAULT_PRIORS}) else: model_class = gp.GP model = model_class(dataset=dataset, mean_func=mean_func, cov_func=cov_func, params=init_params, warp_func=warp_func) key = init_random_key if init_model: (key, subkey) = jax.random.split(key) model.initialize_params(subkey) (key, subkey) = jax.random.split(key) model.train(subkey, params_save_file) if finite_search_space: sub_dataset = simulated_bayesopt(model=model, sub_dataset_key=sub_dataset_key, queried_sub_dataset=queried_sub_dataset, ac_func=ac_func, iters=iters) return ((sub_dataset.x, sub_dataset.y), (queried_sub_dataset.x, queried_sub_dataset.y), model.params.__dict__) else: (_, sample_key) = jax.random.split(key) sub_dataset = bayesopt(key=sample_key, model=model, sub_dataset_key=sub_dataset_key, query_oracle=queried_sub_dataset, ac_func=ac_func, iters=iters, input_sampler=INPUT_SAMPLERS[data_loader_name]) return ((sub_dataset.x, sub_dataset.y), None, model.params.__dict__)<|docstring|>Running bayesopt experiment with synthetic data. Args: dataset: a list of vx, vy pairs, i.e. [(vx, vy)_i], where vx is m_points_historical x d and vy is m_points_historical x 1. sub_dataset_key: key of the sub_dataset for testing in dataset. queried_sub_dataset: sub_dataset that can be queried. mean_func: mean function handle that maps from (params, n x d input, warp_func) to an n dimensional mean vector. (see vector_map in mean.py for more details). cov_func: covariance function handle that maps from (params, n1 x d input1, n2 x d input2, warp_func) to a n1 x n2 covariance matrix (see matrix_map in kernel.py for more details). init_params: initial GP parameters for inference. ac_func: acquisition function handle (see acfun.py). iters: Number of iterations in sequential bayesopt queries. warp_func: optional dictionary that specifies the warping function for each parameter. init_random_key: random state for jax.random, to be used to initialize required parts of GPParams. method: BO method. init_model: to initialize model if True; otherwise False. finite_search_space: use a finite search space if True; otherwise False. data_loader_name: data loader name, e.g. pd1, hpob. params_save_file: optional file name to save params. Returns: All observations in (x, y) pairs returned by the bayesopt strategy and all the query points in (x, y) pairs. Model params as a dict.<|endoftext|>
2032e433f5d151da5c7e244c41f916e8134ad3c492f576afe06c76141922fdbb
def _onehot_matrix(shape, idx) -> np.ndarray: 'Each row is a one-hot vector with idx-th element equal to 1.' zeros = np.zeros(shape) zeros[(:, idx)] = 1 return zeros
Each row is a one-hot vector with idx-th element equal to 1.
hyperbo/bo_utils/bayesopt.py
_onehot_matrix
google-research/hyperbo
3
python
def _onehot_matrix(shape, idx) -> np.ndarray: zeros = np.zeros(shape) zeros[(:, idx)] = 1 return zeros
def _onehot_matrix(shape, idx) -> np.ndarray: zeros = np.zeros(shape) zeros[(:, idx)] = 1 return zeros<|docstring|>Each row is a one-hot vector with idx-th element equal to 1.<|endoftext|>
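A quick demonstration of the helper's behavior (standard slice syntax used in this sketch):

import numpy as np

def _onehot_matrix(shape, idx) -> np.ndarray:
    zeros = np.zeros(shape)
    zeros[:, idx] = 1
    return zeros

print(_onehot_matrix((3, 4), 2))
# [[0. 0. 1. 0.]
#  [0. 0. 1. 0.]
#  [0. 0. 1. 0.]]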
9e446bbfdeb1cc3a5b8e630b18d3c9d5df04b0ec9e68fcb094c5b7a7984b49b1
def step(self): 'Step by the inner optimizer' self.optimizer.step()
Step by the inner optimizer
spk_class.py
step
tomato1mule/Contrastive-Predictive-Coding-PyTorch
339
python
def step(self): self.optimizer.step()
def step(self): self.optimizer.step()<|docstring|>Step by the inner optimizer<|endoftext|>
409f48052f1f118f2f55a2589246ac191cdecbdd31d11ae00558d8b6d622c269
def zero_grad(self): 'Zero out the gradients by the inner optimizer' self.optimizer.zero_grad()
Zero out the gradients by the inner optimizer
spk_class.py
zero_grad
tomato1mule/Contrastive-Predictive-Coding-PyTorch
339
python
def zero_grad(self): self.optimizer.zero_grad()
def zero_grad(self): self.optimizer.zero_grad()<|docstring|>Zero out the gradients by the inner optimizer<|endoftext|>
52991d251be0b9295192095978b5008be1f9c16b752e21bec0e4543afebf8e4e
def update_learning_rate(self): 'Learning rate scheduling per step' self.n_current_steps += self.delta new_lr = (np.power(self.d_model, (- 0.5)) * np.min([np.power(self.n_current_steps, (- 0.5)), (np.power(self.n_warmup_steps, (- 1.5)) * self.n_current_steps)])) for param_group in self.optimizer.param_groups: param_group['lr'] = new_lr return new_lr
Learning rate scheduling per step
spk_class.py
update_learning_rate
tomato1mule/Contrastive-Predictive-Coding-PyTorch
339
python
def update_learning_rate(self): self.n_current_steps += self.delta new_lr = (np.power(self.d_model, (- 0.5)) * np.min([np.power(self.n_current_steps, (- 0.5)), (np.power(self.n_warmup_steps, (- 1.5)) * self.n_current_steps)])) for param_group in self.optimizer.param_groups: param_group['lr'] = new_lr return new_lr
def update_learning_rate(self): self.n_current_steps += self.delta new_lr = (np.power(self.d_model, (- 0.5)) * np.min([np.power(self.n_current_steps, (- 0.5)), (np.power(self.n_warmup_steps, (- 1.5)) * self.n_current_steps)])) for param_group in self.optimizer.param_groups: param_group['lr'] = new_lr return new_lr<|docstring|>Learning rate scheduling per step<|endoftext|>
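This is the inverse-square-root warmup schedule from the Transformer paper; a standalone sketch of the same formula (the d_model and warmup values here are illustrative):

import numpy as np

def noam_lr(step, d_model=512, warmup=4000):
    # lr = d_model**-0.5 * min(step**-0.5, step * warmup**-1.5)
    return np.power(d_model, -0.5) * min(np.power(step, -0.5), step * np.power(warmup, -1.5))

for step in (1, 4000, 40000):
    print(step, noam_lr(step))
# The rate rises linearly until step == warmup, then decays as 1/sqrt(step).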
430eba8976177af3bfa92dff04bf6c0b9c81b984b3f3c019699dbe052d757916
def save_section(self): 'Save the current custom section to a file.' if (len(self.section.middle_line_points) > 1): initial_pose = Pose(self.section.middle_line.interpolate_pose(0)) tf = Transform(initial_pose.position, initial_pose.orientation).inverse self.section.middle_line_points = [(tf * p) for p in self.section.middle_line_points] self.section.save_as_yaml(self.param.file_path)
Save the current custom section to a file.
simulation/src/simulation_groundtruth/src/scan/node.py
save_section
KITcar-Team/kitcar-gazebo-simulation
13
python
def save_section(self): if (len(self.section.middle_line_points) > 1): initial_pose = Pose(self.section.middle_line.interpolate_pose(0)) tf = Transform(initial_pose.position, initial_pose.orientation).inverse self.section.middle_line_points = [(tf * p) for p in self.section.middle_line_points] self.section.save_as_yaml(self.param.file_path)
def save_section(self): if (len(self.section.middle_line_points) > 1): initial_pose = Pose(self.section.middle_line.interpolate_pose(0)) tf = Transform(initial_pose.position, initial_pose.orientation).inverse self.section.middle_line_points = [(tf * p) for p in self.section.middle_line_points] self.section.save_as_yaml(self.param.file_path)<|docstring|>Save the current custom section to a file.<|endoftext|>
429b4ba8361d525d209fda6fd854ac9042807737c8b99754a809a3f1652ae0be
def get_current_pose(self): 'Try to get the current pose from /tf.' try: tf_transform = self.listener.lookup_transform('odom', 'vehicle', rospy.Time.now(), timeout=rospy.Duration(0.01)) return Pose(tf_transform.transform) except Exception as e: rospy.logerr(f'Could not lookup transform {e}') return
Try to get the current pose from /tf.
simulation/src/simulation_groundtruth/src/scan/node.py
get_current_pose
KITcar-Team/kitcar-gazebo-simulation
13
python
def get_current_pose(self): try: tf_transform = self.listener.lookup_transform('odom', 'vehicle', rospy.Time.now(), timeout=rospy.Duration(0.01)) return Pose(tf_transform.transform) except Exception as e: rospy.logerr(f'Could not lookup transform {e}') return
def get_current_pose(self): try: tf_transform = self.listener.lookup_transform('odom', 'vehicle', rospy.Time.now(), timeout=rospy.Duration(0.01)) return Pose(tf_transform.transform) except Exception as e: rospy.logerr(f'Could not lookup transform {e}') return<|docstring|>Try to get the current pose from /tf.<|endoftext|>
5a34c3c5238bfd949955715744e90404e0f6543edce9a015c9565abc7326fb4b
def update_middle_line(self): 'Update the middle line using the current position.' current_pose = self.get_current_pose() if (current_pose is None): return middle_line_position = (current_pose.position + Vector(0, 0.2).rotated(current_pose.orientation).rotated((math.pi / 2))) if ((len(self.section.middle_line_points) == 0) or (middle_line_position.distance(self.section.middle_line_points[(- 1)]) > self.param.min_point_distance)): self.section.middle_line_points.append(middle_line_position) rospy.logdebug(f'Add new point: {middle_line_position}')
Update the middle line using the current position.
simulation/src/simulation_groundtruth/src/scan/node.py
update_middle_line
KITcar-Team/kitcar-gazebo-simulation
13
python
def update_middle_line(self): current_pose = self.get_current_pose() if (current_pose is None): return middle_line_position = (current_pose.position + Vector(0, 0.2).rotated(current_pose.orientation).rotated((math.pi / 2))) if ((len(self.section.middle_line_points) == 0) or (middle_line_position.distance(self.section.middle_line_points[(- 1)]) > self.param.min_point_distance)): self.section.middle_line_points.append(middle_line_position) rospy.logdebug(f'Add new point: {middle_line_position}')
def update_middle_line(self): current_pose = self.get_current_pose() if (current_pose is None): return middle_line_position = (current_pose.position + Vector(0, 0.2).rotated(current_pose.orientation).rotated((math.pi / 2))) if ((len(self.section.middle_line_points) == 0) or (middle_line_position.distance(self.section.middle_line_points[(- 1)]) > self.param.min_point_distance)): self.section.middle_line_points.append(middle_line_position) rospy.logdebug(f'Add new point: {middle_line_position}')<|docstring|>Update the middle line using the current position.<|endoftext|>
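The offset above places each middle-line point 0.2 m to the side of the vehicle by rotating a fixed vector through the vehicle yaw plus pi/2. Vector and Pose come from the repository's geometry package, so this numpy sketch only mirrors the arithmetic:

import math
import numpy as np

def rotated(v, angle):
    # Standard 2-D rotation of a vector by the given angle.
    c, s = math.cos(angle), math.sin(angle)
    return np.array([c * v[0] - s * v[1], s * v[0] + c * v[1]])

yaw = 0.0                        # vehicle orientation, illustrative
position = np.array([1.0, 2.0])  # vehicle position, illustrative
offset = rotated(rotated(np.array([0.0, 0.2]), yaw), math.pi / 2)
middle_line_position = position + offset  # [0.8, 2.0] for yaw = 0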
038235c451155f082ada35e2575eda1a60fb5876fa9cad40511a01577414996c
def test_classification(classification_overview): 'Test aroma.utils.classification and ensure classifications come out the same.' clf_overview_df = pd.read_table(classification_overview) test_df = clf_overview_df[['edge_fract', 'csf_fract', 'max_RP_corr', 'HFC']] (test_df, metadata) = utils.classification(test_df, {}) true_classifications = clf_overview_df['classification'].tolist() test_classifications = test_df['classification'].tolist() assert (true_classifications == test_classifications)
Test aroma.utils.classification and ensure classifications come out the same.
aroma/tests/test_utils.py
test_classification
CesarCaballeroGaudes/aroma
5
python
def test_classification(classification_overview): clf_overview_df = pd.read_table(classification_overview) test_df = clf_overview_df[['edge_fract', 'csf_fract', 'max_RP_corr', 'HFC']] (test_df, metadata) = utils.classification(test_df, {}) true_classifications = clf_overview_df['classification'].tolist() test_classifications = test_df['classification'].tolist() assert (true_classifications == test_classifications)
def test_classification(classification_overview): clf_overview_df = pd.read_table(classification_overview) test_df = clf_overview_df[['edge_fract', 'csf_fract', 'max_RP_corr', 'HFC']] (test_df, metadata) = utils.classification(test_df, {}) true_classifications = clf_overview_df['classification'].tolist() test_classifications = test_df['classification'].tolist() assert (true_classifications == test_classifications)<|docstring|>Test aroma.utils.classification and ensure classifications come out the same.<|endoftext|>
496b2ad97aa249b79f1ec8d2aabcbf22b77dd7651a938456004ffd730f66aaa0
def test_load_motpars_manual(motion_parameters): 'Test aroma.utils.load_motpars with manual source determination.' fsl = utils.load_motpars(motion_parameters['FSL'], source='fsl') afni = utils.load_motpars(motion_parameters['AfNI'], source='afni') spm = utils.load_motpars(motion_parameters['SPM'], source='spm') fmriprep = utils.load_motpars(motion_parameters['fMRIPrep'], source='fmriprep') assert np.allclose(fsl, afni) assert np.allclose(fsl, spm) assert np.allclose(fsl, fmriprep)
Test aroma.utils.load_motpars with manual source determination.
aroma/tests/test_utils.py
test_load_motpars_manual
CesarCaballeroGaudes/aroma
5
python
def test_load_motpars_manual(motion_parameters): fsl = utils.load_motpars(motion_parameters['FSL'], source='fsl') afni = utils.load_motpars(motion_parameters['AfNI'], source='afni') spm = utils.load_motpars(motion_parameters['SPM'], source='spm') fmriprep = utils.load_motpars(motion_parameters['fMRIPrep'], source='fmriprep') assert np.allclose(fsl, afni) assert np.allclose(fsl, spm) assert np.allclose(fsl, fmriprep)
def test_load_motpars_manual(motion_parameters): fsl = utils.load_motpars(motion_parameters['FSL'], source='fsl') afni = utils.load_motpars(motion_parameters['AfNI'], source='afni') spm = utils.load_motpars(motion_parameters['SPM'], source='spm') fmriprep = utils.load_motpars(motion_parameters['fMRIPrep'], source='fmriprep') assert np.allclose(fsl, afni) assert np.allclose(fsl, spm) assert np.allclose(fsl, fmriprep)<|docstring|>Test aroma.utils.load_motpars with manual source determination.<|endoftext|>
3125093c0714e16da3a6b75638b93643c59ee7da7e784d19c100e9842032d6ad
def test_load_motpars_auto(motion_parameters): 'Test aroma.utils.load_motpars with automatic source determination.' fsl = utils.load_motpars(motion_parameters['FSL'], source='auto') afni = utils.load_motpars(motion_parameters['AfNI'], source='auto') spm = utils.load_motpars(motion_parameters['SPM'], source='auto') fmriprep = utils.load_motpars(motion_parameters['fMRIPrep'], source='auto') assert np.allclose(fsl, afni) assert np.allclose(fsl, spm) assert np.allclose(fsl, fmriprep)
Test aroma.utils.load_motpars with automatic source determination.
aroma/tests/test_utils.py
test_load_motpars_auto
CesarCaballeroGaudes/aroma
5
python
def test_load_motpars_auto(motion_parameters): fsl = utils.load_motpars(motion_parameters['FSL'], source='auto') afni = utils.load_motpars(motion_parameters['AfNI'], source='auto') spm = utils.load_motpars(motion_parameters['SPM'], source='auto') fmriprep = utils.load_motpars(motion_parameters['fMRIPrep'], source='auto') assert np.allclose(fsl, afni) assert np.allclose(fsl, spm) assert np.allclose(fsl, fmriprep)
def test_load_motpars_auto(motion_parameters): fsl = utils.load_motpars(motion_parameters['FSL'], source='auto') afni = utils.load_motpars(motion_parameters['AfNI'], source='auto') spm = utils.load_motpars(motion_parameters['SPM'], source='auto') fmriprep = utils.load_motpars(motion_parameters['fMRIPrep'], source='auto') assert np.allclose(fsl, afni) assert np.allclose(fsl, spm) assert np.allclose(fsl, fmriprep)<|docstring|>Test aroma.utils.load_motpars with automatic source determination.<|endoftext|>
8a8f94d639fa2635ac8e09b2730895487d2dfefba5ad28fa9c2d33c9823a457d
def test_load_motpars_break(motion_parameters): 'Break aroma.utils.load_motpars.' with pytest.raises(Exception): utils.load_motpars('dog.dog', source='auto') with pytest.raises(ValueError): utils.load_motpars(motion_parameters['FSL'], source='dog')
Break aroma.utils.load_motpars.
aroma/tests/test_utils.py
test_load_motpars_break
CesarCaballeroGaudes/aroma
5
python
def test_load_motpars_break(motion_parameters): with pytest.raises(Exception): utils.load_motpars('dog.dog', source='auto') with pytest.raises(ValueError): utils.load_motpars(motion_parameters['FSL'], source='dog')
def test_load_motpars_break(motion_parameters): with pytest.raises(Exception): utils.load_motpars('dog.dog', source='auto') with pytest.raises(ValueError): utils.load_motpars(motion_parameters['FSL'], source='dog')<|docstring|>Break aroma.utils.load_motpars.<|endoftext|>
5c88ffaafcfc67cca4eceb5987a10b8bc8753b2d859c1c36c100cd23824a2112
def test_motpars_fmriprep2fsl(motion_parameters): 'Test aroma.utils.motpars_fmriprep2fsl.' fsl = utils.load_motpars(motion_parameters['FSL'], source='fsl') fmriprep = utils.motpars_fmriprep2fsl(motion_parameters['fMRIPrep']) assert np.allclose(fsl, fmriprep) with pytest.raises(ValueError): utils.motpars_fmriprep2fsl(5) bad_data = np.random.random((200, 7)) with pytest.raises(ValueError): utils.motpars_fmriprep2fsl(bad_data)
Test aroma.utils.motpars_fmriprep2fsl.
aroma/tests/test_utils.py
test_motpars_fmriprep2fsl
CesarCaballeroGaudes/aroma
5
python
def test_motpars_fmriprep2fsl(motion_parameters): fsl = utils.load_motpars(motion_parameters['FSL'], source='fsl') fmriprep = utils.motpars_fmriprep2fsl(motion_parameters['fMRIPrep']) assert np.allclose(fsl, fmriprep) with pytest.raises(ValueError): utils.motpars_fmriprep2fsl(5) bad_data = np.random.random((200, 7)) with pytest.raises(ValueError): utils.motpars_fmriprep2fsl(bad_data)
def test_motpars_fmriprep2fsl(motion_parameters): fsl = utils.load_motpars(motion_parameters['FSL'], source='fsl') fmriprep = utils.motpars_fmriprep2fsl(motion_parameters['fMRIPrep']) assert np.allclose(fsl, fmriprep) with pytest.raises(ValueError): utils.motpars_fmriprep2fsl(5) bad_data = np.random.random((200, 7)) with pytest.raises(ValueError): utils.motpars_fmriprep2fsl(bad_data)<|docstring|>Test aroma.utils.motpars_fmriprep2fsl.<|endoftext|>
48a08f5bf6c4e6bed23c1343f3a75d62f8c5d74a48e2446cbdf41266cff4f2f8
def test_motpars_spm2fsl(motion_parameters): 'Test aroma.utils.motpars_spm2fsl.' fsl = utils.load_motpars(motion_parameters['FSL'], source='fsl') spm = utils.motpars_spm2fsl(motion_parameters['SPM']) assert np.allclose(fsl, spm) with pytest.raises(ValueError): utils.motpars_spm2fsl(5) bad_data = np.random.random((200, 7)) with pytest.raises(ValueError): utils.motpars_spm2fsl(bad_data)
Test aroma.utils.motpars_spm2fsl.
aroma/tests/test_utils.py
test_motpars_spm2fsl
CesarCaballeroGaudes/aroma
5
python
def test_motpars_spm2fsl(motion_parameters): fsl = utils.load_motpars(motion_parameters['FSL'], source='fsl') spm = utils.motpars_spm2fsl(motion_parameters['SPM']) assert np.allclose(fsl, spm) with pytest.raises(ValueError): utils.motpars_spm2fsl(5) bad_data = np.random.random((200, 7)) with pytest.raises(ValueError): utils.motpars_spm2fsl(bad_data)
def test_motpars_spm2fsl(motion_parameters): fsl = utils.load_motpars(motion_parameters['FSL'], source='fsl') spm = utils.motpars_spm2fsl(motion_parameters['SPM']) assert np.allclose(fsl, spm) with pytest.raises(ValueError): utils.motpars_spm2fsl(5) bad_data = np.random.random((200, 7)) with pytest.raises(ValueError): utils.motpars_spm2fsl(bad_data)<|docstring|>Test aroma.utils.motpars_spm2fsl.<|endoftext|>
46470f9d13d42d0a3f8ece1548223fc9c988c5389fb11961796c47553a94edd7
def test_motpars_afni2fsl(motion_parameters): 'Test aroma.utils.motpars_afni2fsl.' fsl = utils.load_motpars(motion_parameters['FSL'], source='fsl') afni = utils.motpars_afni2fsl(motion_parameters['AfNI']) assert np.allclose(fsl, afni) with pytest.raises(ValueError): utils.motpars_afni2fsl(5) bad_data = np.random.random((200, 7)) with pytest.raises(ValueError): utils.motpars_afni2fsl(bad_data)
Test aroma.utils.motpars_afni2fsl.
aroma/tests/test_utils.py
test_motpars_afni2fsl
CesarCaballeroGaudes/aroma
5
python
def test_motpars_afni2fsl(motion_parameters): fsl = utils.load_motpars(motion_parameters['FSL'], source='fsl') afni = utils.motpars_afni2fsl(motion_parameters['AfNI']) assert np.allclose(fsl, afni) with pytest.raises(ValueError): utils.motpars_afni2fsl(5) bad_data = np.random.random((200, 7)) with pytest.raises(ValueError): utils.motpars_afni2fsl(bad_data)
def test_motpars_afni2fsl(motion_parameters): fsl = utils.load_motpars(motion_parameters['FSL'], source='fsl') afni = utils.motpars_afni2fsl(motion_parameters['AfNI']) assert np.allclose(fsl, afni) with pytest.raises(ValueError): utils.motpars_afni2fsl(5) bad_data = np.random.random((200, 7)) with pytest.raises(ValueError): utils.motpars_afni2fsl(bad_data)<|docstring|>Test aroma.utils.motpars_afni2fsl.<|endoftext|>
c7dafaa66f1e8a22fc714a0278b8c2b981b8f0e34886a9536d528d2ec4190887
def test_cross_correlation(): 'Test aroma.utils.cross_correlation.' np.random.seed(5) a = np.random.rand(4, 4) b = np.random.rand(2, 4) true_cross_corr = np.array([[(- 0.28624708), (- 0.62178458)], [0.37905408, (- 0.51091252)], [0.24162976, (- 0.13454275)], [0.69255319, 0.07156853]]) cross_corr = utils.cross_correlation(a.T, b.T) assert np.allclose(cross_corr, true_cross_corr)
Test aroma.utils.cross_correlation.
aroma/tests/test_utils.py
test_cross_correlation
CesarCaballeroGaudes/aroma
5
python
def test_cross_correlation(): np.random.seed(5) a = np.random.rand(4, 4) b = np.random.rand(2, 4) true_cross_corr = np.array([[(- 0.28624708), (- 0.62178458)], [0.37905408, (- 0.51091252)], [0.24162976, (- 0.13454275)], [0.69255319, 0.07156853]]) cross_corr = utils.cross_correlation(a.T, b.T) assert np.allclose(cross_corr, true_cross_corr)
def test_cross_correlation(): np.random.seed(5) a = np.random.rand(4, 4) b = np.random.rand(2, 4) true_cross_corr = np.array([[(- 0.28624708), (- 0.62178458)], [0.37905408, (- 0.51091252)], [0.24162976, (- 0.13454275)], [0.69255319, 0.07156853]]) cross_corr = utils.cross_correlation(a.T, b.T) assert np.allclose(cross_corr, true_cross_corr)<|docstring|>Test aroma.utils.cross_correlation.<|endoftext|>
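aroma.utils.cross_correlation itself is not included in this dump; below is a minimal sketch consistent with the test above, assuming the function returns the Pearson correlation of every column of its first argument against every column of its second (rows are observations). The name cross_correlation_sketch and the implementation are a guess, not the repo's code.

import numpy as np

def cross_correlation_sketch(a, b):
    # a: (n_obs, n_a), b: (n_obs, n_b) -> (n_a, n_b) matrix of Pearson correlations.
    n_a = a.shape[1]
    # With rowvar=False, np.corrcoef treats columns as variables; the
    # off-diagonal block of the joint matrix holds the a-vs-b correlations.
    return np.corrcoef(a, b, rowvar=False)[:n_a, n_a:]

The test values above pin down only the Pearson convention; any extra handling in the real implementation (e.g., constant columns) is not captured here.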
9ce87cddca23db4a17457542a8982369e4840323b450b28fb2584a08991e78ba
def get_search_space_updates(): '\n Search space updates to the task can be added using HyperparameterSearchSpaceUpdates\n Returns:\n HyperparameterSearchSpaceUpdates\n ' updates = HyperparameterSearchSpaceUpdates() updates.append(node_name='data_loader', hyperparameter='batch_size', value_range=[16, 512], default_value=32) updates.append(node_name='lr_scheduler', hyperparameter='CosineAnnealingLR:T_max', value_range=[50, 60], default_value=55) updates.append(node_name='network_backbone', hyperparameter='ResNetBackbone:dropout', value_range=[0, 0.5], default_value=0.2) return updates
Search space updates to the task can be added using HyperparameterSearchSpaceUpdates Returns: HyperparameterSearchSpaceUpdates
examples/40_advanced/example_custom_configuration_space.py
get_search_space_updates
maxpark/Auto-PyTorch
1
python
def get_search_space_updates(): '\n Search space updates to the task can be added using HyperparameterSearchSpaceUpdates\n Returns:\n HyperparameterSearchSpaceUpdates\n ' updates = HyperparameterSearchSpaceUpdates() updates.append(node_name='data_loader', hyperparameter='batch_size', value_range=[16, 512], default_value=32) updates.append(node_name='lr_scheduler', hyperparameter='CosineAnnealingLR:T_max', value_range=[50, 60], default_value=55) updates.append(node_name='network_backbone', hyperparameter='ResNetBackbone:dropout', value_range=[0, 0.5], default_value=0.2) return updates
def get_search_space_updates(): '\n Search space updates to the task can be added using HyperparameterSearchSpaceUpdates\n Returns:\n HyperparameterSearchSpaceUpdates\n ' updates = HyperparameterSearchSpaceUpdates() updates.append(node_name='data_loader', hyperparameter='batch_size', value_range=[16, 512], default_value=32) updates.append(node_name='lr_scheduler', hyperparameter='CosineAnnealingLR:T_max', value_range=[50, 60], default_value=55) updates.append(node_name='network_backbone', hyperparameter='ResNetBackbone:dropout', value_range=[0, 0.5], default_value=0.2) return updates<|docstring|>Search space updates to the task can be added using HyperparameterSearchSpaceUpdates Returns: HyperparameterSearchSpaceUpdates<|endoftext|>
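A sketch of how the returned updates object is typically consumed in the Auto-PyTorch examples this file comes from; the exact TabularClassificationTask signature can vary across versions, so treat the keyword as an assumption:

from autoPyTorch.api.tabular_classification import TabularClassificationTask

# The task then searches batch_size, T_max and the ResNet dropout over the
# narrowed ranges defined above instead of the library defaults.
api = TabularClassificationTask(search_space_updates=get_search_space_updates())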
d647fc34502a087a6ee62ac0055e963e107ddd7d4604245755568bc3ceca94f7
def isNan(tensor): 'Take a tensor and return True if it includes NaN' return (tensor != tensor).any()
Take a tensor and return True if it includes NaN
misc.py
isNan
minkyu-choi04/misc
0
python
def isNan(tensor): return (tensor != tensor).any()
def isNan(tensor): return (tensor != tensor).any()<|docstring|>Take a tensor and return True if it includes NaN<|endoftext|>
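The check exploits the fact that NaN is the only value for which x != x holds; a quick sanity check against the built-in torch.isnan:

import torch

t = torch.tensor([1.0, float('nan'), 3.0])
assert isNan(t)                                      # NaN != NaN flags the tensor
assert bool(torch.isnan(t).any()) == bool(isNan(t))  # equivalent built-in check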
c3765d94ee19b5a72c6f4740062bac44ee5bf9e70196e0ee15d2e416e2ff2218
def load_salicon(batch_size, server_type): "In order to use this function, you need to move all the images in ./test/ into ./test/1/. \n This is because PyTorch's ImageFolder and DataLoader work this way. \n " if (server_type == 'libigpu1'): path_dataset = os.path.expanduser('~/datasets/salicon_original') elif (server_type == 'libigpu5'): path_dataset = os.path.expanduser('~/datasets/salicon_original') else: print('[ERROR]: Server type not implemented') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) data_train = ds.SALICON(path_dataset, mode='train') data_val = ds.SALICON(path_dataset, mode='val') data_test = datasets.ImageFolder(root=os.path.expanduser(os.path.join(path_dataset, 'image', 'images', 'test')), transform=transforms.Compose([transforms.ToTensor(), normalize])) train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True, drop_last=True) val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size, shuffle=True, num_workers=2, pin_memory=True, drop_last=True) test_loader = torch.utils.data.DataLoader(data_test, batch_size=batch_size, shuffle=False, num_workers=2, pin_memory=True, drop_last=False) return (train_loader, val_loader, test_loader)
In order to use this function, you need to move all the images in ./test/ into ./test/1/. This is because PyTorch's ImageFolder and DataLoader work this way.
misc.py
load_salicon
minkyu-choi04/misc
0
python
def load_salicon(batch_size, server_type): "In order to use this function, you need to move all the images in ./test/ into ./test/1/. \n This is because PyTorch's ImageFolder and DataLoader work this way. \n " if (server_type == 'libigpu1'): path_dataset = os.path.expanduser('~/datasets/salicon_original') elif (server_type == 'libigpu5'): path_dataset = os.path.expanduser('~/datasets/salicon_original') else: print('[ERROR]: Server type not implemented') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) data_train = ds.SALICON(path_dataset, mode='train') data_val = ds.SALICON(path_dataset, mode='val') data_test = datasets.ImageFolder(root=os.path.expanduser(os.path.join(path_dataset, 'image', 'images', 'test')), transform=transforms.Compose([transforms.ToTensor(), normalize])) train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True, drop_last=True) val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size, shuffle=True, num_workers=2, pin_memory=True, drop_last=True) test_loader = torch.utils.data.DataLoader(data_test, batch_size=batch_size, shuffle=False, num_workers=2, pin_memory=True, drop_last=False) return (train_loader, val_loader, test_loader)
def load_salicon(batch_size, server_type): "In order to use this function, you need to move all the images in ./test/ into ./test/1/. \n This is because PyTorch's ImageFolder and DataLoader work this way. \n " if (server_type == 'libigpu1'): path_dataset = os.path.expanduser('~/datasets/salicon_original') elif (server_type == 'libigpu5'): path_dataset = os.path.expanduser('~/datasets/salicon_original') else: print('[ERROR]: Server type not implemented') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) data_train = ds.SALICON(path_dataset, mode='train') data_val = ds.SALICON(path_dataset, mode='val') data_test = datasets.ImageFolder(root=os.path.expanduser(os.path.join(path_dataset, 'image', 'images', 'test')), transform=transforms.Compose([transforms.ToTensor(), normalize])) train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True, drop_last=True) val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size, shuffle=True, num_workers=2, pin_memory=True, drop_last=True) test_loader = torch.utils.data.DataLoader(data_test, batch_size=batch_size, shuffle=False, num_workers=2, pin_memory=True, drop_last=False) return (train_loader, val_loader, test_loader)<|docstring|>In order to use this function, you need to move all the images in ./test/ into ./test/1/. This is because PyTorch's ImageFolder and DataLoader work this way.<|endoftext|>
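The manual move described in the docstring, as a sketch; the SALICON test path is taken from the code above, but treating every file in the directory as an image to be moved is an assumption:

import os, shutil

test_dir = os.path.expanduser('~/datasets/salicon_original/image/images/test')
os.makedirs(os.path.join(test_dir, '1'), exist_ok=True)
for name in os.listdir(test_dir):
    src = os.path.join(test_dir, name)
    if os.path.isfile(src):  # move each image into the dummy class folder '1'
        shutil.move(src, os.path.join(test_dir, '1', name))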
5d6a62fe8f8219b1f09c194eb701cb72c1a0335fc67e4152f9ea39fd3589f576
def load_MIE(batch_size, server_type, data_type): "\n When I downloaded the corresponding dataset from https://www-percept.irisa.fr/asperger_to_kanner/, \n I unzipped the file and changed the file names from one digit to two digits by hand. >> 1.png --> 01.png \n And I also put a ./0 dir inside the MIE_Fo and MIE_No and moved all images into it. \n\n data_type: 'fo' or 'no'\n " if (server_type == 'libigpu1'): if (data_type == 'fo'): path_dataset = os.path.expanduser('/home/libiadm/HDD1/libigpu1/minkyu/datasets/ASD/MIE_Fo/stimuli/') elif (data_type == 'no'): path_dataset = os.path.expanduser('/home/libiadm/HDD1/libigpu1/minkyu/datasets/ASD/MIE_No/stimuli/') else: print('[ERROR]: something is wrong') else: print('[ERROR]: Server type not implemented') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) test_data = datasets.ImageFolder(root=os.path.expanduser(path_dataset), transform=transforms.Compose([transforms.Resize((360, 480)), transforms.ToTensor(), normalize])) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=2, pin_memory=True, drop_last=False) return (test_loader, test_loader, test_loader)
When I downloaded the corresponding dataset from https://www-percept.irisa.fr/asperger_to_kanner/, I unzipped the file and changed the file names from one digit to two digits by hand. >> 1.png --> 01.png And I also put a ./0 dir inside the MIE_Fo and MIE_No and moved all images into it. data_type: 'fo' or 'no'
misc.py
load_MIE
minkyu-choi04/misc
0
python
def load_MIE(batch_size, server_type, data_type): "\n When I downloaded the corresponding dataset from https://www-percept.irisa.fr/asperger_to_kanner/, \n I unzipped the file and changed the file names from one digit to two digits by hand. >> 1.png --> 01.png \n And I also put a ./0 dir inside the MIE_Fo and MIE_No and moved all images into it. \n\n data_type: 'fo' or 'no'\n " if (server_type == 'libigpu1'): if (data_type == 'fo'): path_dataset = os.path.expanduser('/home/libiadm/HDD1/libigpu1/minkyu/datasets/ASD/MIE_Fo/stimuli/') elif (data_type == 'no'): path_dataset = os.path.expanduser('/home/libiadm/HDD1/libigpu1/minkyu/datasets/ASD/MIE_No/stimuli/') else: print('[ERROR]: something is wrong') else: print('[ERROR]: Server type not implemented') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) test_data = datasets.ImageFolder(root=os.path.expanduser(path_dataset), transform=transforms.Compose([transforms.Resize((360, 480)), transforms.ToTensor(), normalize])) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=2, pin_memory=True, drop_last=False) return (test_loader, test_loader, test_loader)
def load_MIE(batch_size, server_type, data_type): "\n When I downloaded the corresponding dataset from https://www-percept.irisa.fr/asperger_to_kanner/, \n I unzipped the file and changed the file names from one digit to two digits by hand. >> 1.png --> 01.png \n And I also put a ./0 dir inside the MIE_Fo and MIE_No and moved all images into it. \n\n data_type: 'fo' or 'no'\n " if (server_type == 'libigpu1'): if (data_type == 'fo'): path_dataset = os.path.expanduser('/home/libiadm/HDD1/libigpu1/minkyu/datasets/ASD/MIE_Fo/stimuli/') elif (data_type == 'no'): path_dataset = os.path.expanduser('/home/libiadm/HDD1/libigpu1/minkyu/datasets/ASD/MIE_No/stimuli/') else: print('[ERROR]: something is wrong') else: print('[ERROR]: Server type not implemented') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) test_data = datasets.ImageFolder(root=os.path.expanduser(path_dataset), transform=transforms.Compose([transforms.Resize((360, 480)), transforms.ToTensor(), normalize])) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=2, pin_memory=True, drop_last=False) return (test_loader, test_loader, test_loader)<|docstring|>When I downloaded the corresponding dataset from https://www-percept.irisa.fr/asperger_to_kanner/, I unzipped the file and changed the file names from one digit to two digits by hand. >> 1.png --> 01.png And I also put a ./0 dir inside the MIE_Fo and MIE_No and moved all images into it. data_type: 'fo' or 'no'<|endoftext|>
68f2e817ed439e831c45eb66bf4fec8420ce1bcbead9b7bd29ee5a13ecb42307
def plot_samples_from_images(images, batch_size, plot_path, filename, isRange01=False): ' Plot images\n Changed 2020.11.23\n isRange01 was added to normalize the image in a different way. \n\n Args: \n images: (b, c, h, w), tensor in any range. (c=3 or 1)\n batch_size: int\n plot_path: string\n filename: string\n isRange01: True/False, Normalization will be different. \n ' if isRange01: max_pix = torch.max(torch.abs(images)) images = (images / max_pix) else: max_pix = torch.max(torch.abs(images)) if (max_pix != 0.0): images = (((images / max_pix) + 1.0) / 2.0) else: images = ((images + 1.0) / 2.0) if (images.size()[1] == 1): images = torch.cat((images, images, images), 1) images = np.swapaxes(np.swapaxes(images.cpu().numpy(), 1, 2), 2, 3) fig = plt.figure(figsize=(((batch_size / 4) + 5), ((batch_size / 4) + 5))) for idx in np.arange(batch_size): ax = fig.add_subplot((batch_size // 8), 8, (idx + 1), xticks=[], yticks=[]) ax.imshow(images[idx]) plt.tight_layout(pad=1, w_pad=0, h_pad=0) if plot_path: plt.savefig(os.path.join(plot_path, filename)) else: plt.show() plt.close() pass
Plot images Changed 2020.11.23 isRange01 was added to normalize the image in a different way. Args: images: (b, c, h, w), tensor in any range. (c=3 or 1) batch_size: int plot_path: string filename: string isRange01: True/False, Normalization will be different.
misc.py
plot_samples_from_images
minkyu-choi04/misc
0
python
def plot_samples_from_images(images, batch_size, plot_path, filename, isRange01=False): ' Plot images\n Changed 2020.11.23\n isRange01 was added to normalize the image in a different way. \n\n Args: \n images: (b, c, h, w), tensor in any range. (c=3 or 1)\n batch_size: int\n plot_path: string\n filename: string\n isRange01: True/False, Normalization will be different. \n ' if isRange01: max_pix = torch.max(torch.abs(images)) images = (images / max_pix) else: max_pix = torch.max(torch.abs(images)) if (max_pix != 0.0): images = (((images / max_pix) + 1.0) / 2.0) else: images = ((images + 1.0) / 2.0) if (images.size()[1] == 1): images = torch.cat((images, images, images), 1) images = np.swapaxes(np.swapaxes(images.cpu().numpy(), 1, 2), 2, 3) fig = plt.figure(figsize=(((batch_size / 4) + 5), ((batch_size / 4) + 5))) for idx in np.arange(batch_size): ax = fig.add_subplot((batch_size // 8), 8, (idx + 1), xticks=[], yticks=[]) ax.imshow(images[idx]) plt.tight_layout(pad=1, w_pad=0, h_pad=0) if plot_path: plt.savefig(os.path.join(plot_path, filename)) else: plt.show() plt.close() pass
def plot_samples_from_images(images, batch_size, plot_path, filename, isRange01=False): ' Plot images\n Changed 2020.11.23\n isRange01 was added to normalize the image in a different way. \n\n Args: \n images: (b, c, h, w), tensor in any range. (c=3 or 1)\n batch_size: int\n plot_path: string\n filename: string\n isRange01: True/False, Normalization will be different. \n ' if isRange01: max_pix = torch.max(torch.abs(images)) images = (images / max_pix) else: max_pix = torch.max(torch.abs(images)) if (max_pix != 0.0): images = (((images / max_pix) + 1.0) / 2.0) else: images = ((images + 1.0) / 2.0) if (images.size()[1] == 1): images = torch.cat((images, images, images), 1) images = np.swapaxes(np.swapaxes(images.cpu().numpy(), 1, 2), 2, 3) fig = plt.figure(figsize=(((batch_size / 4) + 5), ((batch_size / 4) + 5))) for idx in np.arange(batch_size): ax = fig.add_subplot((batch_size // 8), 8, (idx + 1), xticks=[], yticks=[]) ax.imshow(images[idx]) plt.tight_layout(pad=1, w_pad=0, h_pad=0) if plot_path: plt.savefig(os.path.join(plot_path, filename)) else: plt.show() plt.close() pass<|docstring|>Plot images Changed 2020.11.23 isRange01 was added to normalize the image in a different way. Args: images: (b, c, h, w), tensor in any range. (c=3 or 1) batch_size: int plot_path: string filename: string isRange01: True/False, Normalization will be different.<|endoftext|>
9ac58a8c4ae439d17a2757c8892bef929aedaf58ff23f1ab2a5e08670d55f6f2
def accuracy(output, target, topk=(1,)): 'Computes the accuracy over the k top predictions for the specified values of k' with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) (_, pred) = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, (- 1)).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view((- 1)).float().sum(0, keepdim=True) res.append(correct_k.mul_((100.0 / batch_size))) return res
Computes the accuracy over the k top predictions for the specified values of k
misc.py
accuracy
minkyu-choi04/misc
0
python
def accuracy(output, target, topk=(1,)): with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) (_, pred) = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, (- 1)).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view((- 1)).float().sum(0, keepdim=True) res.append(correct_k.mul_((100.0 / batch_size))) return res
def accuracy(output, target, topk=(1,)): with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) (_, pred) = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, (- 1)).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view((- 1)).float().sum(0, keepdim=True) res.append(correct_k.mul_((100.0 / batch_size))) return res<|docstring|>Computes the accuracy over the k top predictions for the specified values of k<|endoftext|>
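A hand-checked example of the top-k logic (the numbers below are worked out by hand, not taken from the original repo):

import torch

logits = torch.tensor([[0.1, 0.7, 0.2],   # top-1 = class 1, top-2 = {1, 2}
                       [0.8, 0.1, 0.1]])  # top-1 = class 0
target = torch.tensor([2, 0])
top1, top2 = accuracy(logits, target, topk=(1, 2))
# sample 0 is correct only within the top 2, sample 1 already at the top 1,
# so top1 == tensor([50.]) and top2 == tensor([100.])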
b665702a8c9cd79e77b2707affe1374371cdbe6e5249e67ef8dc145e77f1d71c
def get_PE_cart(batch_s, pe_s): '\n\tGenerates cartesian Positional Encoding. \n\tArgs: \n\t\tbatch_s: int, batch size. \n\t\tpe_s: (int h, int w), size of Positional encoding feature \n\tReturn:\n\t\tpe: (batch_s, 2, pe_s[0], pe_s[1])\n\t' lin_x = torch.unsqueeze(torch.linspace((- 1.0), 1.0, steps=pe_s[1], device='cuda'), 0).repeat(pe_s[0], 1) lin_y = torch.unsqueeze(torch.linspace((- 1.0), 1.0, steps=pe_s[0], device='cuda'), 0).repeat(pe_s[1], 1) lin_y = lin_y.t() lin_x = torch.unsqueeze(lin_x, 0).repeat(batch_s, 1, 1) lin_y = torch.unsqueeze(lin_y, 0).repeat(batch_s, 1, 1) pe = torch.cat((lin_x.unsqueeze(1), lin_y.unsqueeze(1)), 1) return pe
Generates cartesian Positional Encoding. Args: batch_s: int, batch size. pe_s: (int h, int w), size of Positional encoding feature Return: pe: (batch_s, 2, pe_s[0], pe_s[1])
misc.py
get_PE_cart
minkyu-choi04/misc
0
python
def get_PE_cart(batch_s, pe_s): '\n\tGenerates cartesian Positional Encoding. \n\tArgs: \n\t\tbatch_s: int, batch size. \n\t\tpe_s: (int h, int w), size of Positional encoding feature \n\tReturn:\n\t\tpe: (batch_s, 2, pe_s[0], pe_s[1])\n\t' lin_x = torch.unsqueeze(torch.linspace((- 1.0), 1.0, steps=pe_s[1], device='cuda'), 0).repeat(pe_s[0], 1) lin_y = torch.unsqueeze(torch.linspace((- 1.0), 1.0, steps=pe_s[0], device='cuda'), 0).repeat(pe_s[1], 1) lin_y = lin_y.t() lin_x = torch.unsqueeze(lin_x, 0).repeat(batch_s, 1, 1) lin_y = torch.unsqueeze(lin_y, 0).repeat(batch_s, 1, 1) pe = torch.cat((lin_x.unsqueeze(1), lin_y.unsqueeze(1)), 1) return pe
def get_PE_cart(batch_s, pe_s): '\n\tGenerates cartesian Positional Encoding. \n\tArgs: \n\t\tbatch_s: int, batch size. \n\t\tpe_s: (int h, int w), size of Positional encoding feature \n\tReturn:\n\t\tpe: (batch_s, 2, pe_s[0], pe_s[1])\n\t' lin_x = torch.unsqueeze(torch.linspace((- 1.0), 1.0, steps=pe_s[1], device='cuda'), 0).repeat(pe_s[0], 1) lin_y = torch.unsqueeze(torch.linspace((- 1.0), 1.0, steps=pe_s[0], device='cuda'), 0).repeat(pe_s[1], 1) lin_y = lin_y.t() lin_x = torch.unsqueeze(lin_x, 0).repeat(batch_s, 1, 1) lin_y = torch.unsqueeze(lin_y, 0).repeat(batch_s, 1, 1) pe = torch.cat((lin_x.unsqueeze(1), lin_y.unsqueeze(1)), 1) return pe<|docstring|>Generates cartesian Positional Encoding. Args: batch_s: int, batch size. pe_s: (int h, int w), size of Positional encoding feature Return: pe: (batch_s, 2, pe_s[0], pe_s[1])<|endoftext|>
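A shape check for the helper above; note that it hardcodes device='cuda', so a GPU build of PyTorch is required as written:

pe = get_PE_cart(batch_s=4, pe_s=(8, 16))
assert pe.shape == (4, 2, 8, 16)
# pe[:, 0] ramps from -1 to 1 along the width (x); pe[:, 1] along the height (y)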
ffca44e7b3f9b1917a0ace38f05bc86925c1d7bb585fd2e62bd10880fa508d2c
def get_coord_feature(batch_s, input_s, pe_s, polar_grid=None): "\n 20201124\n Changed to have an option to return Cart PE. \n\n code from (base) min@libigpu1:~/research_mk/attention_model_biliniear_localGlobal_6_reinforce/detach_recurrency_group/rl_base_recon_corHM_absHM_corrREINFORCE_corInitRNN_detachTD_hlr_lowResD_detachVD_removeBottleNeckV2I_conventionalResInc_contVer110\n\n Generate Positional encoding\n PE feature will first be generated in cartesian space with the same size as the input. \n And then it will be transformed to polar space, if polar_grid is not None. \n After that, it will be resized to pe_s. \n\n Args:\n batch_s: int. Batch size\n input_s: (int h, int w), Size of loaded input image in cartesian space. \n pe_s: (int h', int w'), Size of positional encoding feature in polar space. This size of feature will be returned. \n polar_grid: Grid for polar transformation. It must be the same grid used in polar CNN. Fixation points must be already added to this grid.\n return:\n polar_pe_resized: (b, 2, h', w')\n " with torch.no_grad(): lin_x = torch.unsqueeze(torch.linspace((- 1.0), 1.0, steps=input_s[1], device='cuda'), 0).repeat(input_s[0], 1) lin_y = torch.unsqueeze(torch.linspace((- 1.0), 1.0, steps=input_s[0], device='cuda'), 0).repeat(input_s[1], 1) lin_y = lin_y.t() lin_x = torch.unsqueeze(lin_x, 0).repeat(batch_s, 1, 1) lin_y = torch.unsqueeze(lin_y, 0).repeat(batch_s, 1, 1) cart_pe = torch.cat((lin_x.unsqueeze(1), lin_y.unsqueeze(1)), 1) if (polar_grid is not None): pe = torch.nn.functional.grid_sample(cart_pe, polar_grid, align_corners=False) else: pe = cart_pe pe_resized = torch.nn.functional.interpolate(pe, pe_s) return pe_resized
20201124 Changed to have an option to return Cart PE. code from (base) min@libigpu1:~/research_mk/attention_model_biliniear_localGlobal_6_reinforce/detach_recurrency_group/rl_base_recon_corHM_absHM_corrREINFORCE_corInitRNN_detachTD_hlr_lowResD_detachVD_removeBottleNeckV2I_conventionalResInc_contVer110 Generate Positional encoding PE feature will first be generated in cartesian space with the same size as the input. And then it will be transformed to polar space, if polar_grid is not None. After that, it will be resized to pe_s. Args: batch_s: int. Batch size input_s: (int h, int w), Size of loaded input image in cartesian space. pe_s: (int h', int w'), Size of positional encoding feature in polar space. This size of feature will be returned. polar_grid: Grid for polar transformation. It must be the same grid used in polar CNN. Fixation points must be already added to this grid. return: polar_pe_resized: (b, 2, h', w')
misc.py
get_coord_feature
minkyu-choi04/misc
0
python
def get_coord_feature(batch_s, input_s, pe_s, polar_grid=None): "\n 20201124\n Changed to have an option to return Cart PE. \n\n code from (base) min@libigpu1:~/research_mk/attention_model_biliniear_localGlobal_6_reinforce/detach_recurrency_group/rl_base_recon_corHM_absHM_corrREINFORCE_corInitRNN_detachTD_hlr_lowResD_detachVD_removeBottleNeckV2I_conventionalResInc_contVer110\n\n Generate Positional encoding\n PE feature will first be generated in cartesian space with the same size as the input. \n And then it will be transformed to polar space, if polar_grid is not None. \n After that, it will be resized to pe_s. \n\n Args:\n batch_s: int. Batch size\n input_s: (int h, int w), Size of loaded input image in cartesian space. \n pe_s: (int h', int w'), Size of positional encoding feature in polar space. This size of feature will be returned. \n polar_grid: Grid for polar transformation. It must be the same grid used in polar CNN. Fixation points must be already added to this grid.\n return:\n polar_pe_resized: (b, 2, h', w')\n " with torch.no_grad(): lin_x = torch.unsqueeze(torch.linspace((- 1.0), 1.0, steps=input_s[1], device='cuda'), 0).repeat(input_s[0], 1) lin_y = torch.unsqueeze(torch.linspace((- 1.0), 1.0, steps=input_s[0], device='cuda'), 0).repeat(input_s[1], 1) lin_y = lin_y.t() lin_x = torch.unsqueeze(lin_x, 0).repeat(batch_s, 1, 1) lin_y = torch.unsqueeze(lin_y, 0).repeat(batch_s, 1, 1) cart_pe = torch.cat((lin_x.unsqueeze(1), lin_y.unsqueeze(1)), 1) if (polar_grid is not None): pe = torch.nn.functional.grid_sample(cart_pe, polar_grid, align_corners=False) else: pe = cart_pe pe_resized = torch.nn.functional.interpolate(pe, pe_s) return pe_resized
def get_coord_feature(batch_s, input_s, pe_s, polar_grid=None): "\n 20201124\n Changed to have an option to return Cart PE. \n\n code from (base) min@libigpu1:~/research_mk/attention_model_biliniear_localGlobal_6_reinforce/detach_recurrency_group/rl_base_recon_corHM_absHM_corrREINFORCE_corInitRNN_detachTD_hlr_lowResD_detachVD_removeBottleNeckV2I_conventionalResInc_contVer110\n\n Generate Positional encoding\n PE feature will first be generated in cartesian space with the same size as the input. \n And then it will be transformed to polar space, if polar_grid is not None. \n After that, it will be resized to pe_s. \n\n Args:\n batch_s: int. Batch size\n input_s: (int h, int w), Size of loaded input image in cartesian space. \n pe_s: (int h', int w'), Size of positional encoding feature in polar space. This size of feature will be returned. \n polar_grid: Grid for polar transformation. It must be the same grid used in polar CNN. Fixation points must be already added to this grid.\n return:\n polar_pe_resized: (b, 2, h', w')\n " with torch.no_grad(): lin_x = torch.unsqueeze(torch.linspace((- 1.0), 1.0, steps=input_s[1], device='cuda'), 0).repeat(input_s[0], 1) lin_y = torch.unsqueeze(torch.linspace((- 1.0), 1.0, steps=input_s[0], device='cuda'), 0).repeat(input_s[1], 1) lin_y = lin_y.t() lin_x = torch.unsqueeze(lin_x, 0).repeat(batch_s, 1, 1) lin_y = torch.unsqueeze(lin_y, 0).repeat(batch_s, 1, 1) cart_pe = torch.cat((lin_x.unsqueeze(1), lin_y.unsqueeze(1)), 1) if (polar_grid is not None): pe = torch.nn.functional.grid_sample(cart_pe, polar_grid, align_corners=False) else: pe = cart_pe pe_resized = torch.nn.functional.interpolate(pe, pe_s) return pe_resized<|docstring|>20201124 Changed to have an option to return Cart PE. code from (base) min@libigpu1:~/research_mk/attention_model_biliniear_localGlobal_6_reinforce/detach_recurrency_group/rl_base_recon_corHM_absHM_corrREINFORCE_corInitRNN_detachTD_hlr_lowResD_detachVD_removeBottleNeckV2I_conventionalResInc_contVer110 Generate Positional encoding PE feature will first be generated in cartesian space with the same size as the input. And then it will be transformed to polar space, if polar_grid is not None. After that, it will be resized to pe_s. Args: batch_s: int. Batch size input_s: (int h, int w), Size of loaded input image in cartesian space. pe_s: (int h', int w'), Size of positional encoding feature in polar space. This size of feature will be returned. polar_grid: Grid for polar transformation. It must be the same grid used in polar CNN. Fixation points must be already added to this grid. return: polar_pe_resized: (b, 2, h', w')<|endoftext|>
08078417495c24f6c1e4d224f244ae2d0a5efa09f5f80350159e1313753039f9
def noralize_min_max(fms): ' Normalize input fms to the range 0 to 1. \n Args: \n fms: (b, c, h, w)\n return: \n fms_norm: (b, c, h, w)\n ' fms_s = fms.size() if (len(fms_s) == 3): fms = fms.unsqueeze(1) fms_s = fms.size() min_val = torch.min(fms.view(fms_s[0], (- 1)), 1)[0].unsqueeze(1).unsqueeze(1).unsqueeze(1) max_val = torch.max(fms.view(fms_s[0], (- 1)), 1)[0].unsqueeze(1).unsqueeze(1).unsqueeze(1) fms_norm = ((fms - min_val) / (max_val - min_val)) return fms_norm
Normalize input fms to the range 0 to 1. Args: fms: (b, c, h, w) return: fms_norm: (b, c, h, w)
misc.py
noralize_min_max
minkyu-choi04/misc
0
python
def noralize_min_max(fms): ' Normalize input fms to the range 0 to 1. \n Args: \n fms: (b, c, h, w)\n return: \n fms_norm: (b, c, h, w)\n ' fms_s = fms.size() if (len(fms_s) == 3): fms = fms.unsqueeze(1) fms_s = fms.size() min_val = torch.min(fms.view(fms_s[0], (- 1)), 1)[0].unsqueeze(1).unsqueeze(1).unsqueeze(1) max_val = torch.max(fms.view(fms_s[0], (- 1)), 1)[0].unsqueeze(1).unsqueeze(1).unsqueeze(1) fms_norm = ((fms - min_val) / (max_val - min_val)) return fms_norm
def noralize_min_max(fms): ' Normalize input fms to the range 0 to 1. \n Args: \n fms: (b, c, h, w)\n return: \n fms_norm: (b, c, h, w)\n ' fms_s = fms.size() if (len(fms_s) == 3): fms = fms.unsqueeze(1) fms_s = fms.size() min_val = torch.min(fms.view(fms_s[0], (- 1)), 1)[0].unsqueeze(1).unsqueeze(1).unsqueeze(1) max_val = torch.max(fms.view(fms_s[0], (- 1)), 1)[0].unsqueeze(1).unsqueeze(1).unsqueeze(1) fms_norm = ((fms - min_val) / (max_val - min_val)) return fms_norm<|docstring|>Normalize input fms to the range 0 to 1. Args: fms: (b, c, h, w) return: fms_norm: (b, c, h, w)<|endoftext|>
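A hand-checked example of the per-sample min-max scaling; note the division is unguarded, so a constant input produces NaN:

import torch

x = torch.tensor([2.0, 4.0, 6.0, 10.0]).reshape(1, 1, 2, 2)
y = noralize_min_max(x)  # (x - min) / (max - min) = (x - 2) / 8
# y -> [[0.00, 0.25], [0.50, 1.00]]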
a78f056cc08fde3716adc43c5ff33cd4d797f5001171e1af777804b625e2304a
def mark_point(imgs, fixs, ds=7, isRed=True): '\n Mark a point in the given image. \n Args:\n imgs: (b, 3, h, w), tensor, any range\n fixs: (b, 2), (float x, float y), tensor, -1~1\n return:\n img_marked: (b, 3, h, w)\n ' img_s = imgs.size() fixs = ((fixs + 1) / 2.0) fixs[:, 0] = (fixs[:, 0] * img_s[(- 1)]) fixs[:, 1] = (fixs[:, 1] * img_s[(- 2)]) fixs = fixs.to(torch.int) 'Edited 20200811. \n Code below had wrong order of x, y coordinates. I simply changed 0-->1 and 1-->0 of fixs[b,n]. ' for b in range(img_s[0]): if isRed: imgs[b, :, (fixs[b, 1] - ds):(fixs[b, 1] + ds), (fixs[b, 0] - ds):(fixs[b, 0] + ds)] = 0.0 imgs[b, 0, (fixs[b, 1] - ds):(fixs[b, 1] + ds), (fixs[b, 0] - ds):(fixs[b, 0] + ds)] = 2.0 else: imgs[b, :, (fixs[b, 1] - ds):(fixs[b, 1] + ds), (fixs[b, 0] - ds):(fixs[b, 0] + ds)] = 0.0 imgs[b, 2, (fixs[b, 1] - ds):(fixs[b, 1] + ds), (fixs[b, 0] - ds):(fixs[b, 0] + ds)] = 2.0 return imgs
Mark a point in the given image. Args: imgs: (b, 3, h, w), tensor, any range fixs: (b, 2), (float x, float y), tensor, -1~1 return: img_marked: (b, 3, h, w)
misc.py
mark_point
minkyu-choi04/misc
0
python
def mark_point(imgs, fixs, ds=7, isRed=True): '\n Mark a point in the given image. \n Args:\n imgs: (b, 3, h, w), tensor, any range\n fixs: (b, 2), (float x, float y), tensor, -1~1\n return:\n img_marked: (b, 3, h, w)\n ' img_s = imgs.size() fixs = ((fixs + 1) / 2.0) fixs[:, 0] = (fixs[:, 0] * img_s[(- 1)]) fixs[:, 1] = (fixs[:, 1] * img_s[(- 2)]) fixs = fixs.to(torch.int) 'Edited 20200811. \n Code below had wrong order of x, y coordinates. I simply changed 0-->1 and 1-->0 of fixs[b,n]. ' for b in range(img_s[0]): if isRed: imgs[b, :, (fixs[b, 1] - ds):(fixs[b, 1] + ds), (fixs[b, 0] - ds):(fixs[b, 0] + ds)] = 0.0 imgs[b, 0, (fixs[b, 1] - ds):(fixs[b, 1] + ds), (fixs[b, 0] - ds):(fixs[b, 0] + ds)] = 2.0 else: imgs[b, :, (fixs[b, 1] - ds):(fixs[b, 1] + ds), (fixs[b, 0] - ds):(fixs[b, 0] + ds)] = 0.0 imgs[b, 2, (fixs[b, 1] - ds):(fixs[b, 1] + ds), (fixs[b, 0] - ds):(fixs[b, 0] + ds)] = 2.0 return imgs
def mark_point(imgs, fixs, ds=7, isRed=True): '\n Mark a point in the given image. \n Args:\n imgs: (b, 3, h, w), tensor, any range\n fixs: (b, 2), (float x, float y), tensor, -1~1\n return:\n img_marked: (b, 3, h, w)\n ' img_s = imgs.size() fixs = ((fixs + 1) / 2.0) fixs[:, 0] = (fixs[:, 0] * img_s[(- 1)]) fixs[:, 1] = (fixs[:, 1] * img_s[(- 2)]) fixs = fixs.to(torch.int) 'Edited 20200811. \n Code below had wrong order of x, y coordinates. I simply changed 0-->1 and 1-->0 of fixs[b,n]. ' for b in range(img_s[0]): if isRed: imgs[b, :, (fixs[b, 1] - ds):(fixs[b, 1] + ds), (fixs[b, 0] - ds):(fixs[b, 0] + ds)] = 0.0 imgs[b, 0, (fixs[b, 1] - ds):(fixs[b, 1] + ds), (fixs[b, 0] - ds):(fixs[b, 0] + ds)] = 2.0 else: imgs[b, :, (fixs[b, 1] - ds):(fixs[b, 1] + ds), (fixs[b, 0] - ds):(fixs[b, 0] + ds)] = 0.0 imgs[b, 2, (fixs[b, 1] - ds):(fixs[b, 1] + ds), (fixs[b, 0] - ds):(fixs[b, 0] + ds)] = 2.0 return imgs<|docstring|>Mark a point in the given image. Args: imgs: (b, 3, h, w), tensor, any range fixs: (b, 2), (float x, float y), tensor, -1~1 return: img_marked: (b, 3, h, w)<|endoftext|>
00398ff8b1397d10257676f4978d44d451d00f3b21c86d8b26e58e853f6bbf09
def mark_fixations(imgs, fixs, ds=7, isRed=True): '\n Mark fixation points in the given images. This function is used to mark a fixation. \n Args:\n imgs: (b, 3, h, w), tensor, any range\n fixs: (b, 2), (float x, float y), tensor, -1~1\n return:\n img_marked: (b, 3, h, w)\n ' imgs = noralize_min_max(imgs) imgs = mark_point(imgs, fixs, ds=ds, isRed=isRed) return ((imgs - 0.5) * 2.0)
Mark fixation points in the given images. This function is used to mark a fixation. Args: imgs: (b, 3, h, w), tensor, any range fixs: (b, 2), (float x, float y), tensor, -1~1 return: img_marked: (b, 3, h, w)
misc.py
mark_fixations
minkyu-choi04/misc
0
python
def mark_fixations(imgs, fixs, ds=7, isRed=True): '\n Mark fixation points in the given images. This function is used to mark a fixation. \n Args:\n imgs: (b, 3, h, w), tensor, any range\n fixs: (b, 2), (float x, float y), tensor, -1~1\n return:\n img_marked: (b, 3, h, w)\n ' imgs = noralize_min_max(imgs) imgs = mark_point(imgs, fixs, ds=ds, isRed=isRed) return ((imgs - 0.5) * 2.0)
def mark_fixations(imgs, fixs, ds=7, isRed=True): '\n Mark fixation points in the given images. This function is used to mark a fixation. \n Args:\n imgs: (b, 3, h, w), tensor, any range\n fixs: (b, 2), (float x, float y), tensor, -1~1\n return:\n img_marked: (b, 3, h, w)\n ' imgs = noralize_min_max(imgs) imgs = mark_point(imgs, fixs, ds=ds, isRed=isRed) return ((imgs - 0.5) * 2.0)<|docstring|>Mark fixation points in the given images. This function is used to mark a fixation. Args: imgs: (b, 3, h, w), tensor, any range fixs: (b, 2), (float x, float y), tensor, -1~1 return: img_marked: (b, 3, h, w)<|endoftext|>
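A usage sketch; mark_point writes into imgs in place, so clone first if the originals are still needed (the sizes below are arbitrary):

import torch

imgs = torch.rand(2, 3, 64, 64)  # any range; normalized internally
fixs = torch.zeros(2, 2)         # (x, y) in -1~1, so (0, 0) is the image center
marked = mark_fixations(imgs.clone(), fixs, ds=3)  # red patch at the center, output in -1~1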
ee65e70a24d6f05a1033c1b7419b1afc990d4b2eb0ee4f42ad0446a21849c07b
def mark_fixations_history(imgs, fixs_h, ds=7): '\n Mark fixation history in the given images. This function is used to mark fixation history. \n Args:\n imgs: (b, 3, h, w), tensor, any range\n fixs: (b, step, 2), (float x, float y), tensor, -1~1\n return:\n img_marked: (b, 3, h, w)\n ' n_steps = fixs_h.size(1) imgs = noralize_min_max(imgs) img_m = imgs for step in range(n_steps): if (step == (n_steps - 1)): imgs = mark_point(imgs, fixs_h[:, step, :], ds=ds, isRed=True) else: imgs = mark_point(imgs, fixs_h[:, step, :], ds=ds, isRed=False) return ((imgs - 0.5) * 2.0)
Mark fixation history in the given images. This function is used to mark fixation history. Args: imgs: (b, 3, h, w), tensor, any range fixs: (b, step, 2), (float x, float y), tensor, -1~1 return: img_marked: (b, 3, h, w)
misc.py
mark_fixations_history
minkyu-choi04/misc
0
python
def mark_fixations_history(imgs, fixs_h, ds=7): '\n Mark fixation history in the given images. This function is used to mark fixation history. \n Args:\n imgs: (b, 3, h, w), tensor, any range\n fixs: (b, step, 2), (float x, float y), tensor, -1~1\n return:\n img_marked: (b, 3, h, w)\n ' n_steps = fixs_h.size(1) imgs = noralize_min_max(imgs) img_m = imgs for step in range(n_steps): if (step == (n_steps - 1)): imgs = mark_point(imgs, fixs_h[:, step, :], ds=ds, isRed=True) else: imgs = mark_point(imgs, fixs_h[:, step, :], ds=ds, isRed=False) return ((imgs - 0.5) * 2.0)
def mark_fixations_history(imgs, fixs_h, ds=7): '\n Mark fixation history in the given images. This function is used to mark fixation history. \n Args:\n imgs: (b, 3, h, w), tensor, any range\n fixs: (b, step, 2), (float x, float y), tensor, -1~1\n return:\n img_marked: (b, 3, h, w)\n ' n_steps = fixs_h.size(1) imgs = noralize_min_max(imgs) img_m = imgs for step in range(n_steps): if (step == (n_steps - 1)): imgs = mark_point(imgs, fixs_h[:, step, :], ds=ds, isRed=True) else: imgs = mark_point(imgs, fixs_h[:, step, :], ds=ds, isRed=False) return ((imgs - 0.5) * 2.0)<|docstring|>Mark fixation history in the given images. This function is used to mark fixation history. Args: imgs: (b, 3, h, w), tensor, any range fixs: (b, step, 2), (float x, float y), tensor, -1~1 return: img_marked: (b, 3, h, w)<|endoftext|>
7f5d04c9534a87649fdc70f25ad11621e18cf53126fdeef9eb067bb6614b2ea3
def save_caption(captions, ref, epoch, step, vocab, isTrain=True): ' \n Convert predicted captions in idx to words\n ref: https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/03-advanced/image_captioning/sample.py\n Args:\n captions: (batch, max_seq_length), list of list, in word idx, predicted caption\n ref: (batch, max_seq_length), list of list, in word idx, reference caption\n epoch: int\n step: int\n vocab: vocab\n return:\n None\n ' if isTrain: fp = open(((('caption_train_e' + str(epoch)) + 's') + str(step)), 'w') else: fp = open(((('caption_test_e' + str(epoch)) + 's') + str(step)), 'w') bs = len(captions) for b in range(bs): s_pred = convert_idx2word(captions[b], vocab) s_ref = convert_idx2word(ref[b][0], vocab) fp.write((('batch: ' + str(b)) + '\n')) fp.write(((' Pred: ' + s_pred) + '\n')) fp.write(((' Ref: ' + s_ref) + '\n')) fp.close()
Convert predicted captions in idx to words ref: https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/03-advanced/image_captioning/sample.py Args: captions: (batch, max_seq_length), list of list, in word idx, predicted caption ref: (batch, max_seq_length), list of list, in word idx, reference caption epoch: int step: int vocab: vocab return: None
misc.py
save_caption
minkyu-choi04/misc
0
python
def save_caption(captions, ref, epoch, step, vocab, isTrain=True): ' \n Convert predicted captions in idx to words\n ref: https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/03-advanced/image_captioning/sample.py\n Args:\n captions: (batch, max_seq_length), list of list, in word idx, predicted caption\n ref: (batch, max_seq_length), list of list, in word idx, reference caption\n epoch: int\n step: int\n vocab: vocab\n return:\n None\n ' if isTrain: fp = open(((('caption_train_e' + str(epoch)) + 's') + str(step)), 'w') else: fp = open(((('caption_test_e' + str(epoch)) + 's') + str(step)), 'w') bs = len(captions) for b in range(bs): s_pred = convert_idx2word(captions[b], vocab) s_ref = convert_idx2word(ref[b][0], vocab) fp.write((('batch: ' + str(b)) + '\n')) fp.write(((' Pred: ' + s_pred) + '\n')) fp.write(((' Ref: ' + s_ref) + '\n')) fp.close()
def save_caption(captions, ref, epoch, step, vocab, isTrain=True): ' \n Convert predicted captions in idx to words\n ref: https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/03-advanced/image_captioning/sample.py\n Args:\n captions: (batch, max_seq_length), list of list, in word idx, predicted caption\n ref: (batch, max_seq_length), list of list, in word idx, reference caption\n epoch: int\n step: int\n vocab: vocab\n return:\n None\n ' if isTrain: fp = open(((('caption_train_e' + str(epoch)) + 's') + str(step)), 'w') else: fp = open(((('caption_test_e' + str(epoch)) + 's') + str(step)), 'w') bs = len(captions) for b in range(bs): s_pred = convert_idx2word(captions[b], vocab) s_ref = convert_idx2word(ref[b][0], vocab) fp.write((('batch: ' + str(b)) + '\n')) fp.write(((' Pred: ' + s_pred) + '\n')) fp.write(((' Ref: ' + s_ref) + '\n')) fp.close()<|docstring|>Convert predicted captions in idx to words ref: https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/03-advanced/image_captioning/sample.py Args: captions: (batch, max_seq_length), list of list, in word idx, predicted caption ref: (batch, max_seq_length), list of list, in word idx, reference caption epoch: int step: int vocab: vocab return: None<|endoftext|>
cb2accee9534e5f40d6ce4cfb78741b7236a20ce411a9cb51e83da0ae97fba11
def convert_idx2word(caption, vocab): '\n convert given sentence in idx to words.\n Args:\n caption: (length, ), numpy\n vocab: vocab\n return:\n words: string\n ' sentence = [] for word_id in caption: word = vocab.idx2word[word_id] sentence.append(word) if (word == '<end>'): break sentence_j = ' '.join(sentence) return sentence_j
convert given sentence in idx to words. Args: caption: (length, ), numpy vocab: vocab return: words: string
misc.py
convert_idx2word
minkyu-choi04/misc
0
python
def convert_idx2word(caption, vocab): '\n convert given sentence in idx to words.\n Args:\n caption: (length, ), numpy\n vocab: vocab\n return:\n words: string\n ' sentence = [] for word_id in caption: word = vocab.idx2word[word_id] sentence.append(word) if (word == '<end>'): break sentence_j = ' '.join(sentence) return sentence_j
def convert_idx2word(caption, vocab): '\n convert given sentence in idx to words.\n Args:\n caption: (length, ), numpy\n vocab: vocab\n return:\n words: string\n ' sentence = [] for word_id in caption: word = vocab.idx2word[word_id] sentence.append(word) if (word == '<end>'): break sentence_j = ' '.join(sentence) return sentence_j<|docstring|>convert given sentence in idx to words. Args: caption: (length, ), numpy vocab: vocab return: words: string<|endoftext|>
705fe6699937904041ffe7ceb271c8c7da31b290d31560ad3cdbfb03dd2c9cd2
def remove_pads_sentence(caption, vocab): 'remove pads in a given sentence\n Args: \n captions: (max_seq_length), list, including word idx\n return:\n caption_clear: (seq_length_without_pad), list\n ' caption_clear = [w for w in caption if (w not in [0, 1, 2, 3])] "for i, word_id in enumerate(caption):\n word = vocab.idx2word[word_id]\n if word == '<end>':\n break" return caption_clear
remove pads in a given sentence Args: captions: (max_seq_length), list, including word idx return: caption_clear: (seq_length_without_pad), list
misc.py
remove_pads_sentence
minkyu-choi04/misc
0
python
def remove_pads_sentence(caption, vocab): 'remove pads in a given sentence\n Args: \n captions: (max_seq_length), list, including word idx\n return:\n caption_clear: (seq_length_without_pad), list\n ' caption_clear = [w for w in caption if (w not in [0, 1, 2, 3])] "for i, word_id in enumerate(caption):\n word = vocab.idx2word[word_id]\n if word == '<end>':\n break" return caption_clear
def remove_pads_sentence(caption, vocab): 'remove pads in a given sentence\n Args: \n captions: (max_seq_length), list, including word idx\n return:\n caption_clear: (seq_length_without_pad), list\n ' caption_clear = [w for w in caption if (w not in [0, 1, 2, 3])] "for i, word_id in enumerate(caption):\n word = vocab.idx2word[word_id]\n if word == '<end>':\n break" return caption_clear<|docstring|>remove pads in a given sentence Args: captions: (max_seq_length), list, including word idx return: caption_clear: (seq_length_without_pad), list<|endoftext|>
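A hand-checked example; the mapping of ids 0-3 to <pad>, <start>, <end>, <unk> is an assumption implied by the hard-coded filter, and vocab is unused by the active code, so None is fine:

cap = [1, 9, 42, 7, 2, 0, 0]  # e.g. <start> ... <end> <pad> <pad>
assert remove_pads_sentence(cap, vocab=None) == [9, 42, 7]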
fe35e2ae05f54bf41a03214f2ff116f4d1cbcc3ed0fb75f79bd00329816a8ca9
def remove_pads_batch(captions, vocab, isTarget=False): 'remove pads in a given sentence batch\n Args: \n captions: (batch, max_seq_length), numpy, including word idx\n return:\n captions_clear: (batch, varying seq_length_without_pad), list of list\n ' bs = len(captions) captions_clear = list() for b in range(bs): cap_clear = remove_pads_sentence(captions[b], vocab) if isTarget: captions_clear.append([cap_clear]) else: captions_clear.append(cap_clear) return captions_clear
remove pads in a given sentence batch Args: captions: (batch, max_seq_length), numpy, including word idx return: captions_clear: (batch, varying seq_length_without_pad), list of list
misc.py
remove_pads_batch
minkyu-choi04/misc
0
python
def remove_pads_batch(captions, vocab, isTarget=False): 'remove pads in a given sentence batch\n Args: \n captions: (batch, max_seq_length), numpy, including word idx\n return:\n captions_clear: (batch, varying seq_length_without_pad), list of list\n ' bs = len(captions) captions_clear = list() for b in range(bs): cap_clear = remove_pads_sentence(captions[b], vocab) if isTarget: captions_clear.append([cap_clear]) else: captions_clear.append(cap_clear) return captions_clear
def remove_pads_batch(captions, vocab, isTarget=False): 'remove pads in a given sentence batch\n Args: \n captions: (batch, max_seq_length), numpy, including word idx\n return:\n captions_clear: (batch, varying seq_length_without_pad), list of list\n ' bs = len(captions) captions_clear = list() for b in range(bs): cap_clear = remove_pads_sentence(captions[b], vocab) if isTarget: captions_clear.append([cap_clear]) else: captions_clear.append(cap_clear) return captions_clear<|docstring|>remove pads in a given sentence batch Args: captions: (batch, max_seq_length), numpy, including word idx return: captions_clear: (batch, varying seq_length_without_pad), list of list<|endoftext|>
d4bb107939b62de295d2c764c83202cfd09d9d18cd964de5c273bd0cd7ea627c
def make_sequential_mask(lengths, device='cuda'): ' make sequential mask\n see http://juditacs.github.io/2018/12/27/masked-attention.html\n Args:\n lengths: (batch, ), including length of sequence\n return \n mask: (batch, max_seq_length)\n ' maxlen = np.max(lengths) lengths = torch.tensor(lengths, device=device) mask = (torch.arange(maxlen, device=device)[None, :] < lengths[:, None]) return mask
make sequential mask see http://juditacs.github.io/2018/12/27/masked-attention.html Args: lengths: (batch, ), including length of sequence return mask: (batch, max_seq_length)
misc.py
make_sequential_mask
minkyu-choi04/misc
0
python
def make_sequential_mask(lengths, device='cuda'): ' make sequential mask\n see http://juditacs.github.io/2018/12/27/masked-attention.html\n Args:\n lengths: (batch, ), including length of sequence\n return \n mask: (batch, max_seq_length)\n ' maxlen = np.max(lengths) lengths = torch.tensor(lengths, device=device) mask = (torch.arange(maxlen, device=device)[None, :] < lengths[:, None]) return mask
def make_sequential_mask(lengths, device='cuda'): ' make sequential mask\n see http://juditacs.github.io/2018/12/27/masked-attention.html\n Args:\n lengths: (batch, ), including length of sequence\n return \n mask: (batch, max_seq_length)\n ' maxlen = np.max(lengths) lengths = torch.tensor(lengths, device=device) mask = (torch.arange(maxlen, device=device)[None, :] < lengths[:, None]) return mask<|docstring|>make sequential mask see http://juditacs.github.io/2018/12/27/masked-attention.html Args: lengths: (batch, ), including length of sequence return mask: (batch, max_seq_length)<|endoftext|>
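A hand-checked example of the mask layout:

mask = make_sequential_mask([3, 1, 2], device='cpu')
# tensor([[ True,  True,  True],
#         [ True, False, False],
#         [ True,  True, False]])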
c335680ea6c1e642aaefa0aab87f7938606266698f842a224e327c62dff04448
def add_heatmap_on_image(heatmap, image): 'Visualize heatmap on image. This function does not operate on batches.\n Args:\n heatmap: (h, w), 0~1 ranged numpy array\n image: (3, h, w), 0~1 ranged numpy array\n heatmap and image must be the same size. \n return:\n hm_img: (h, w, 3), 0~255 ranged numpy array\n ' heatmap_cv = (heatmap * 255) heatmap_cv = cv2.applyColorMap(heatmap_cv.astype(np.uint8), cv2.COLORMAP_JET) image_cv = (np.swapaxes(np.swapaxes(image, 0, 1), 1, 2) * 255) hm_img = cv2.addWeighted(heatmap_cv, 0.7, image_cv.astype(np.uint8), 0.3, 0) return hm_img
Visualize heatmap on image. This function does not operate on batches. Args: heatmap: (h, w), 0~1 ranged numpy array image: (3, h, w), 0~1 ranged numpy array heatmap and image must be the same size. return: hm_img: (h, w, 3), 0~255 ranged numpy array
misc.py
add_heatmap_on_image
minkyu-choi04/misc
0
python
def add_heatmap_on_image(heatmap, image): 'Visualize heatmap on image. This function does not operate on batches.\n Args:\n heatmap: (h, w), 0~1 ranged numpy array\n image: (3, h, w), 0~1 ranged numpy array\n heatmap and image must be the same size. \n return:\n hm_img: (h, w, 3), 0~255 ranged numpy array\n ' heatmap_cv = (heatmap * 255) heatmap_cv = cv2.applyColorMap(heatmap_cv.astype(np.uint8), cv2.COLORMAP_JET) image_cv = (np.swapaxes(np.swapaxes(image, 0, 1), 1, 2) * 255) hm_img = cv2.addWeighted(heatmap_cv, 0.7, image_cv.astype(np.uint8), 0.3, 0) return hm_img
def add_heatmap_on_image(heatmap, image): 'Visualize heatmap on image. This function does not operate on batches.\n Args:\n heatmap: (h, w), 0~1 ranged numpy array\n image: (3, h, w), 0~1 ranged numpy array\n heatmap and image must be the same size. \n return:\n hm_img: (h, w, 3), 0~255 ranged numpy array\n ' heatmap_cv = (heatmap * 255) heatmap_cv = cv2.applyColorMap(heatmap_cv.astype(np.uint8), cv2.COLORMAP_JET) image_cv = (np.swapaxes(np.swapaxes(image, 0, 1), 1, 2) * 255) hm_img = cv2.addWeighted(heatmap_cv, 0.7, image_cv.astype(np.uint8), 0.3, 0) return hm_img<|docstring|>Visualize heatmap on image. This function does not operate on batches. Args: heatmap: (h, w), 0~1 ranged numpy array image: (3, h, w), 0~1 ranged numpy array heatmap and image must be the same size. return: hm_img: (h, w, 3), 0~255 ranged numpy array<|endoftext|>
c89cc5c9a654ede2908dc4a9c51930f1f0256135b65a1894817277af451ed852
def add_heatmap_on_image_tensor(heatmap, image, resize_s=(112, 112), isNormHM=True, device='cpu'): 'Visualize heatmap on image. This function operates on batched tensors\n Args:\n heatmap: (b, h, w), any ranged tensor\n image: (b, 3, h, w), any ranged tensor\n resize_s: (int, int), heatmap and image will be resized to this size\n isNormHM: True/False, if True, heatmap will be normalized to 0~1\n return:\n hm_img: (b, 3, h, w), 0~1 ranged tensor\n ' ret = [] bs = image.size(0) heatmap = torch.squeeze(heatmap) heatmap = heatmap.unsqueeze(1) heatmap = torch.nn.functional.interpolate(heatmap, resize_s, mode='bilinear') image = torch.nn.functional.interpolate(image, resize_s, mode='bilinear') if isNormHM: heatmap = noralize_min_max(heatmap) image = noralize_min_max(image) for b in range(bs): hm_i = torch.squeeze(heatmap[b]).cpu().numpy() image_i = image[b].cpu().numpy() hmimg = add_heatmap_on_image(hm_i, image_i) ret.append(hmimg) ret = np.stack(ret, axis=0) ret = np.swapaxes(np.swapaxes(ret.astype(np.float32), 2, 3), 1, 2) ret = torch.tensor((ret / 255.0), device=device) return ret
Visualize heatmap on image. This function operates on batched tensors Args: heatmap: (b, h, w), any ranged tensor image: (b, 3, h, w), any ranged tensor resize_s: (int, int), heatmap and image will be resized to this size isNormHM: True/False, if True, heatmap will be normalized to 0~1 return: hm_img: (b, 3, h, w), 0~1 ranged tensor
misc.py
add_heatmap_on_image_tensor
minkyu-choi04/misc
0
python
def add_heatmap_on_image_tensor(heatmap, image, resize_s=(112, 112), isNormHM=True, device='cpu'): 'Visualize heatmap on image. This function operates on batched tensors\n    Args: \n        heatmap: (b, h, w), any ranged tensor\n        image: (b, 3, h, w), any ranged tensor\n        resize_s: (int, int), heatmap and image will be resized to this size\n        isNormHM: True/False, if True, heatmap will be normalized to 0~1\n    return: \n        hm_img: (b, 3, h, w), 0~1 ranged tensor\n    ' ret = [] bs = image.size(0) heatmap = torch.squeeze(heatmap) heatmap = heatmap.unsqueeze(1) heatmap = torch.nn.functional.interpolate(heatmap, resize_s, mode='bilinear') image = torch.nn.functional.interpolate(image, resize_s, mode='bilinear') if isNormHM: heatmap = noralize_min_max(heatmap) image = noralize_min_max(image) for b in range(bs): hm_i = torch.squeeze(heatmap[b]).cpu().numpy() image_i = image[b].cpu().numpy() hmimg = add_heatmap_on_image(hm_i, image_i) ret.append(hmimg) ret = np.stack(ret, axis=0) ret = np.swapaxes(np.swapaxes(ret.astype(np.float32), 2, 3), 1, 2) ret = torch.tensor((ret / 255.0), device=device) return ret
def add_heatmap_on_image_tensor(heatmap, image, resize_s=(112, 112), isNormHM=True, device='cpu'): 'Visualize heatmap on image. This function operates on batched tensors\n    Args: \n        heatmap: (b, h, w), any ranged tensor\n        image: (b, 3, h, w), any ranged tensor\n        resize_s: (int, int), heatmap and image will be resized to this size\n        isNormHM: True/False, if True, heatmap will be normalized to 0~1\n    return: \n        hm_img: (b, 3, h, w), 0~1 ranged tensor\n    ' ret = [] bs = image.size(0) heatmap = torch.squeeze(heatmap) heatmap = heatmap.unsqueeze(1) heatmap = torch.nn.functional.interpolate(heatmap, resize_s, mode='bilinear') image = torch.nn.functional.interpolate(image, resize_s, mode='bilinear') if isNormHM: heatmap = noralize_min_max(heatmap) image = noralize_min_max(image) for b in range(bs): hm_i = torch.squeeze(heatmap[b]).cpu().numpy() image_i = image[b].cpu().numpy() hmimg = add_heatmap_on_image(hm_i, image_i) ret.append(hmimg) ret = np.stack(ret, axis=0) ret = np.swapaxes(np.swapaxes(ret.astype(np.float32), 2, 3), 1, 2) ret = torch.tensor((ret / 255.0), device=device) return ret<|docstring|>Visualize heatmap on image. This function operates on batched tensors
    Args: 
        heatmap: (b, h, w), any ranged tensor
        image: (b, 3, h, w), any ranged tensor
        resize_s: (int, int), heatmap and image will be resized to this size
        isNormHM: True/False, if True, heatmap will be normalized to 0~1
    return: 
        hm_img: (b, 3, h, w), 0~1 ranged tensor<|endoftext|>
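A hedged sketch of calling the batched variant above; the shapes are arbitrary, torch is assumed importable, and noralize_min_max is assumed to live elsewhere in misc.py.

import torch
from misc import add_heatmap_on_image_tensor  # hypothetical import path

hm = torch.rand(4, 14, 14)         # (b, h, w), any range; e.g. raw attention maps
img = torch.rand(4, 3, 224, 224)   # (b, 3, h, w), any range
out = add_heatmap_on_image_tensor(hm, img, resize_s=(112, 112), isNormHM=True)
print(out.shape)                   # torch.Size([4, 3, 112, 112]), values in 0~1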
7d764a18fbb0fd5fd970ccefa1dcd19e169255adff08852033ac395cce4e497c
def check_gradients(model): "20201124\n return model's gradient stats\n https://discuss.pytorch.org/t/get-the-gradient-of-the-network-parameters/50575\n " max_max = 0 max_mean = 0 max_val = 0 for p in model.parameters(): mx = torch.max(torch.abs(p.grad)) if (max_max < mx): max_max = mx mn = torch.mean(torch.abs(p.grad)) if (max_mean < mn): max_mean = mn return (max_max, max_mean, max_val)
20201124 return model's gradient stats https://discuss.pytorch.org/t/get-the-gradient-of-the-network-parameters/50575
misc.py
check_gradients
minkyu-choi04/misc
0
python
def check_gradients(model): "20201124\n return model's gradient stats\n https://discuss.pytorch.org/t/get-the-gradient-of-the-network-parameters/50575\n " max_max = 0 max_mean = 0 max_val = 0 for p in model.parameters(): mx = torch.max(torch.abs(p.grad)) if (max_max < mx): max_max = mx mn = torch.mean(torch.abs(p.grad)) if (max_mean < mn): max_mean = mn return (max_max, max_mean, max_val)
def check_gradients(model): "20201124\n return model's gradient stats\n https://discuss.pytorch.org/t/get-the-gradient-of-the-network-parameters/50575\n " max_max = 0 max_mean = 0 max_val = 0 for p in model.parameters(): mx = torch.max(torch.abs(p.grad)) if (max_max < mx): max_max = mx mn = torch.mean(torch.abs(p.grad)) if (max_mean < mn): max_mean = mn return (max_max, max_mean, max_val)<|docstring|>20201124 return model's gradient stats https://discuss.pytorch.org/t/get-the-gradient-of-the-network-parameters/50575<|endoftext|>
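A hedged usage sketch with a toy model (the nn.Linear and the dummy batch are assumptions). Note that as written above, max_val is initialized to 0 and never updated, so only the first two return values carry information.

import torch
import torch.nn as nn

model = nn.Linear(10, 2)
loss = model(torch.randn(8, 10)).sum()
loss.backward()
max_abs, mean_abs, _ = check_gradients(model)  # assumes the function above is in scope
print(float(max_abs), float(mean_abs))         # largest |grad| entry, largest per-tensor mean |grad|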
e4a8b20ca7fb2835656742d21053c41d1ee030d5b56a06ce4f926dceca70ead5
@router.get('/all', response_model=List[schemas.Survey]) def read_all_surveys(db: Session=Depends(deps.get_db), skip: int=0, limit: int=100, current_user: models.User=Depends(deps.get_current_active_user)) -> Any: '\n Retrieve surveys.\n ' surveys = crud.survey.get_multi(db, skip=skip, limit=limit) return surveys
Retrieve surveys.
Sistema/sisprel/backend/app/app/api/api_v1/endpoints/survey.py
read_all_surveys
JosueHernandezR/TTR
0
python
@router.get('/all', response_model=List[schemas.Survey]) def read_all_surveys(db: Session=Depends(deps.get_db), skip: int=0, limit: int=100, current_user: models.User=Depends(deps.get_current_active_user)) -> Any: '\n \n ' surveys = crud.survey.get_multi(db, skip=skip, limit=limit) return surveys
@router.get('/all', response_model=List[schemas.Survey]) def read_all_surveys(db: Session=Depends(deps.get_db), skip: int=0, limit: int=100, current_user: models.User=Depends(deps.get_current_active_user)) -> Any: '\n \n ' surveys = crud.survey.get_multi(db, skip=skip, limit=limit) return surveys<|docstring|>Retrieve surveys.<|endoftext|>
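A hedged client sketch for the endpoint above; the base URL, the '/surveys' router prefix and the bearer token are assumptions, since the record only shows the '/all' route.

import requests

resp = requests.get(
    'http://localhost:8000/api/v1/surveys/all',  # hypothetical host and prefix
    params={'skip': 0, 'limit': 100},
    headers={'Authorization': 'Bearer <token>'},
)
resp.raise_for_status()
print(resp.json())  # list of Survey objects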
33a7f71b4aa88fe8801a18af3ae7f3c52efa610bf5af00eb2c50a5e33ff378bb
@router.put('/{id}', response_model=schemas.Survey) def update_survey(*, db: Session=Depends(deps.get_db), id: int, survey_in: schemas.SurveyUpdate, current_user: models.User=Depends(deps.get_current_active_user)) -> Any: '\n    Update a survey.\n    ' weight_total = (len(crud.question.get_questions_by_survey(db=db, survey_id=id)) * 5) survey = crud.survey.get(db=db, id=id) if (not survey): raise HTTPException(status_code=404, detail='Survey not found') if (survey.owner_id != current_user.id): raise HTTPException(status_code=400, detail='Not enough permissions') survey = crud.survey.update_weight_survey(db=db, db_obj=survey, obj_in=survey_in, weight_total=weight_total) return survey
Update a survey.
Sistema/sisprel/backend/app/app/api/api_v1/endpoints/survey.py
update_survey
JosueHernandezR/TTR
0
python
@router.put('/{id}', response_model=schemas.Survey) def update_survey(*, db: Session=Depends(deps.get_db), id: int, survey_in: schemas.SurveyUpdate, current_user: models.User=Depends(deps.get_current_active_user)) -> Any: '\n \n ' weight_total = (len(crud.question.get_questions_by_survey(db=db, survey_id=id)) * 5) survey = crud.survey.get(db=db, id=id) if (not survey): raise HTTPException(status_code=404, detail='Survey not found') if (survey.owner_id != current_user.id): raise HTTPException(status_code=400, detail='Not enough permissions') survey = crud.survey.update_weight_survey(db=db, db_obj=survey, obj_in=survey_in, weight_total=weight_total) return survey
@router.put('/{id}', response_model=schemas.Survey) def update_survey(*, db: Session=Depends(deps.get_db), id: int, survey_in: schemas.SurveyUpdate, current_user: models.User=Depends(deps.get_current_active_user)) -> Any: '\n    \n    ' weight_total = (len(crud.question.get_questions_by_survey(db=db, survey_id=id)) * 5) survey = crud.survey.get(db=db, id=id) if (not survey): raise HTTPException(status_code=404, detail='Survey not found') if (survey.owner_id != current_user.id): raise HTTPException(status_code=400, detail='Not enough permissions') survey = crud.survey.update_weight_survey(db=db, db_obj=survey, obj_in=survey_in, weight_total=weight_total) return survey<|docstring|>Update a survey.<|endoftext|>
0a01a3b065de434e3285488e377092dafa64d255f34f783f059ae91ab2483b83
@router.get('/{id}', response_model=schemas.Survey) def read_survey(*, db: Session=Depends(deps.get_db), id: int, current_user: models.User=Depends(deps.get_current_active_user)) -> Any: '\n Get survey by ID.\n ' survey = crud.survey.get(db=db, id=id) if (not survey): raise HTTPException(status_code=404, detail='Survey not found') if (survey.owner_id != current_user.id): raise HTTPException(status_code=400, detail='Not enough permissions') return survey
Get survey by ID.
Sistema/sisprel/backend/app/app/api/api_v1/endpoints/survey.py
read_survey
JosueHernandezR/TTR
0
python
@router.get('/{id}', response_model=schemas.Survey) def read_survey(*, db: Session=Depends(deps.get_db), id: int, current_user: models.User=Depends(deps.get_current_active_user)) -> Any: '\n \n ' survey = crud.survey.get(db=db, id=id) if (not survey): raise HTTPException(status_code=404, detail='Survey not found') if (survey.owner_id != current_user.id): raise HTTPException(status_code=400, detail='Not enough permissions') return survey
@router.get('/{id}', response_model=schemas.Survey) def read_survey(*, db: Session=Depends(deps.get_db), id: int, current_user: models.User=Depends(deps.get_current_active_user)) -> Any: '\n \n ' survey = crud.survey.get(db=db, id=id) if (not survey): raise HTTPException(status_code=404, detail='Survey not found') if (survey.owner_id != current_user.id): raise HTTPException(status_code=400, detail='Not enough permissions') return survey<|docstring|>Get survey by ID.<|endoftext|>
002359a9c66d110d08f3453bfd5f6ee1c8df14c9ad956aa6ba4a712f670a0929
@router.delete('/{id}', response_model=schemas.Survey) def delete_survey(*, db: Session=Depends(deps.get_db), id: int, current_user: models.User=Depends(deps.get_current_active_user)) -> Any: '\n    Delete a survey.\n    ' survey = crud.survey.get(db=db, id=id) if (not survey): raise HTTPException(status_code=404, detail='Survey not found') if (survey.owner_id != current_user.id): raise HTTPException(status_code=400, detail='Not enough permissions') survey = crud.survey.remove(db=db, id=id) return survey
Delete a survey.
Sistema/sisprel/backend/app/app/api/api_v1/endpoints/survey.py
delete_survey
JosueHernandezR/TTR
0
python
@router.delete('/{id}', response_model=schemas.Survey) def delete_survey(*, db: Session=Depends(deps.get_db), id: int, current_user: models.User=Depends(deps.get_current_active_user)) -> Any: '\n \n ' survey = crud.survey.get(db=db, id=id) if (not survey): raise HTTPException(status_code=404, detail='Survey not found') if (survey.owner_id != current_user.id): raise HTTPException(status_code=400, detail='Not enough permissions') survey = crud.survey.remove(db=db, id=id) return survey
@router.delete('/{id}', response_model=schemas.Survey) def delete_survey(*, db: Session=Depends(deps.get_db), id: int, current_user: models.User=Depends(deps.get_current_active_user)) -> Any: '\n    \n    ' survey = crud.survey.get(db=db, id=id) if (not survey): raise HTTPException(status_code=404, detail='Survey not found') if (survey.owner_id != current_user.id): raise HTTPException(status_code=400, detail='Not enough permissions') survey = crud.survey.remove(db=db, id=id) return survey<|docstring|>Delete a survey.<|endoftext|>
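A hedged sketch tying together the three '/{id}' survey endpoints above (read, update, delete); the host, prefix, id and payload fields are assumptions.

import requests

base = 'http://localhost:8000/api/v1/surveys'  # hypothetical host and prefix
auth = {'Authorization': 'Bearer <token>'}

survey = requests.get(f'{base}/7', headers=auth).json()                   # read_survey
requests.put(f'{base}/7', json={'title': 'Updated title'}, headers=auth)  # update_survey
requests.delete(f'{base}/7', headers=auth)                                # delete_survey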
8ea2952bf4788e3e7953e26a7c32e45c9e34dcf5db32ae1ed41a3efd80938ccd
@cached(cache) def _sentence_embedding(sentences: tuple) -> np.ndarray: 'sentences is passed as a tuple to be consistent with tf.hub' sen_embedding = [_single_sentence(st) for st in sentences] sen_embedding = np.array(sen_embedding) return sen_embedding
sentences is passed as a tuple to be consistent with tf.hub
gensim2/sentence_sim_w2v/sen_sim_spacy_punc.py
_sentence_embedding
arfu2016/DuReader
0
python
@cached(cache) def _sentence_embedding(sentences: tuple) -> np.ndarray: sen_embedding = [_single_sentence(st) for st in sentences] sen_embedding = np.array(sen_embedding) return sen_embedding
@cached(cache) def _sentence_embedding(sentences: tuple) -> np.ndarray: sen_embedding = [_single_sentence(st) for st in sentences] sen_embedding = np.array(sen_embedding) return sen_embedding<|docstring|>sentences is passed as a tuple to be consistent with tf.hub<|endoftext|>
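The tuple argument matters because cachetools' @cached hashes the call arguments, and tuples (unlike lists) are hashable. A minimal self-contained sketch, with a stub standing in for _single_sentence and cache, which live elsewhere in the module:

import numpy as np
from cachetools import cached, LRUCache

cache = LRUCache(maxsize=128)

@cached(cache)
def embed(sentences: tuple) -> np.ndarray:  # stub mirroring _sentence_embedding's shape contract
    return np.array([np.full(3, len(s), dtype=float) for s in sentences])

first = embed(('a sentence', 'another one'))   # computed
second = embed(('a sentence', 'another one'))  # served from the cache
assert (first == second).all()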
8ee21bcba4bf41d345c2fd7ca62167dd70d65ebb4db70a3af37addd6893023f0
def _vector_similarity(encode1: list, encode2: list) -> float: 'assume the length of encode1 and encode2 are n, time complexity is\n O(n), space complexity is O(n)\n ' sim_score = sum([(x * y) for (x, y) in zip(encode1, encode2)]) return sim_score
assume the length of encode1 and encode2 are n, time complexity is O(n), space complexity is O(n)
gensim2/sentence_sim_w2v/sen_sim_spacy_punc.py
_vector_similarity
arfu2016/DuReader
0
python
def _vector_similarity(encode1: list, encode2: list) -> float: 'assume the length of encode1 and encode2 are n, time complexity is\n O(n), space complexity is O(n)\n ' sim_score = sum([(x * y) for (x, y) in zip(encode1, encode2)]) return sim_score
def _vector_similarity(encode1: list, encode2: list) -> float: 'assume the length of encode1 and encode2 are n, time complexity is\n O(n), space complexity is O(n)\n ' sim_score = sum([(x * y) for (x, y) in zip(encode1, encode2)]) return sim_score<|docstring|>assume the length of encode1 and encode2 are n, time complexity is O(n), space complexity is O(n)<|endoftext|>
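A quick check that _vector_similarity is a plain dot product (assuming the function above is in scope):

import numpy as np

a, b = [1.0, 2.0, 3.0], [4.0, 5.0, 6.0]
assert _vector_similarity(a, b) == float(np.dot(a, b))  # 32.0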
615194b5a7e53cb968922d2e72f8a01bf6e30905fbabef8c736407f13ce9e436
def _similarity_scores(training_vectors: np.ndarray, test_vector: list) -> list: 'Assume for training vectors, the number of vectors is m, and the\n length of each vector is n, then time complexity is O(mn) for single\n thread. But in numpy, this could be optimized. For multiprocessing, time\n is also reduced.\n ' training_vectors = training_vectors.tolist() test_vector = test_vector sim_scores = [_vector_similarity(vector, test_vector) for vector in training_vectors] return sim_scores
Assume for training vectors, the number of vectors is m, and the length of each vector is n, then time complexity is O(mn) for single thread. But in numpy, this could be optimized. For multiprocessing, time is also reduced.
gensim2/sentence_sim_w2v/sen_sim_spacy_punc.py
_similarity_scores
arfu2016/DuReader
0
python
def _similarity_scores(training_vectors: np.ndarray, test_vector: list) -> list: 'Assume for training vectors, the number of vectors is m, and the\n length of each vector is n, then time complexity is O(mn) for single\n thread. But in numpy, this could be optimized. For multiprocessing, time\n is also reduced.\n ' training_vectors = training_vectors.tolist() test_vector = test_vector sim_scores = [_vector_similarity(vector, test_vector) for vector in training_vectors] return sim_scores
def _similarity_scores(training_vectors: np.ndarray, test_vector: list) -> list: 'Assume for training vectors, the number of vectors is m, and the\n length of each vector is n, then time complexity is O(mn) for single\n thread. But in numpy, this could be optimized. For multiprocessing, time\n is also reduced.\n ' training_vectors = training_vectors.tolist() test_vector = test_vector sim_scores = [_vector_similarity(vector, test_vector) for vector in training_vectors] return sim_scores<|docstring|>Assume for training vectors, the number of vectors is m, and the length of each vector is n, then time complexity is O(mn) for single thread. But in numpy, this could be optimized. For multiprocessing, time is also reduced.<|endoftext|>
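A hedged sketch of ranking with _similarity_scores; the toy vectors are assumptions. The largest dot product marks the closest match:

import numpy as np

training = np.array([[1.0, 0.0], [0.0, 1.0], [0.7, 0.7]])
scores = _similarity_scores(training, [0.6, 0.8])  # ≈ [0.6, 0.8, 0.98]
best = int(np.argmax(scores))                      # 2: [0.7, 0.7] scores highest
print(scores, best)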
0595fbc1fb05fc928dc232ef3382debe8a6eb04e7bc0f9307fc68e7dbe789674
def union(au, bu, area_intersection): '\n\tThe union area is much easier to compute: the total area of the two regions minus the intersection area\n\t' area_a = ((au[2] - au[0]) * (au[3] - au[1])) area_b = ((bu[2] - bu[0]) * (bu[3] - bu[1])) area_union = ((area_a + area_b) - area_intersection) return area_union
The union area is much easier to compute: the total area of the two regions minus the intersection area
keras_frcnn/data_generators.py
union
zouzhen/simple-faster-rcnn
0
python
def union(au, bu, area_intersection): '\n\t\n\t' area_a = ((au[2] - au[0]) * (au[3] - au[1])) area_b = ((bu[2] - bu[0]) * (bu[3] - bu[1])) area_union = ((area_a + area_b) - area_intersection) return area_union
def union(au, bu, area_intersection): '\n\t\n\t' area_a = ((au[2] - au[0]) * (au[3] - au[1])) area_b = ((bu[2] - bu[0]) * (bu[3] - bu[1])) area_union = ((area_a + area_b) - area_intersection) return area_union<|docstring|>The union area is much easier to compute: the total area of the two regions minus the intersection area<|endoftext|>
8e559d2d8e7e6db96ed815409efbf1c73191388f943f4c3a3d727636dfba8469
def intersection(ai, bi): '\n\tGet the top-left and bottom-right coordinates of the intersection region. If the bottom-right coordinate is greater than the top-left one, compute the intersection area\n\t' x = max(ai[0], bi[0]) y = max(ai[1], bi[1]) w = (min(ai[2], bi[2]) - x) h = (min(ai[3], bi[3]) - y) if ((w < 0) or (h < 0)): return 0 return (w * h)
Get the top-left and bottom-right coordinates of the intersection region. If the bottom-right coordinate is greater than the top-left one, compute the intersection area
keras_frcnn/data_generators.py
intersection
zouzhen/simple-faster-rcnn
0
python
def intersection(ai, bi): '\n\t\n\t' x = max(ai[0], bi[0]) y = max(ai[1], bi[1]) w = (min(ai[2], bi[2]) - x) h = (min(ai[3], bi[3]) - y) if ((w < 0) or (h < 0)): return 0 return (w * h)
def intersection(ai, bi): '\n\t\n\t' x = max(ai[0], bi[0]) y = max(ai[1], bi[1]) w = (min(ai[2], bi[2]) - x) h = (min(ai[3], bi[3]) - y) if ((w < 0) or (h < 0)): return 0 return (w * h)<|docstring|>Get the top-left and bottom-right coordinates of the intersection region. If the bottom-right coordinate is greater than the top-left one, compute the intersection area<|endoftext|>
5506c7e746d6102408847e7c2d530d258e5c83c21bca6936d73d3bb280126172
def iou(a, b): '\n\tThe if statement requires the bottom-right point to be greater than the top-left point; it is a sanity check\n\tintersection: computes the intersection of the two areas\n\tunion: computes the union of the two areas\n\tfinally the intersection-over-union ratio is returned (1e-6 is added to the denominator to avoid division by zero)\n\t' if ((a[0] >= a[2]) or (a[1] >= a[3]) or (b[0] >= b[2]) or (b[1] >= b[3])): return 0.0 area_i = intersection(a, b) area_u = union(a, b, area_i) return (float(area_i) / float((area_u + 1e-06)))
The if statement requires the bottom-right point to be greater than the top-left point; it is a sanity check intersection: computes the intersection of the two areas union: computes the union of the two areas finally the intersection-over-union ratio is returned (1e-6 is added to the denominator to avoid division by zero)
keras_frcnn/data_generators.py
iou
zouzhen/simple-faster-rcnn
0
python
def iou(a, b): '\n\tThe if statement requires the bottom-right point to be greater than the top-left point; it is a sanity check\n\tintersection: computes the intersection of the two areas\n\tunion: computes the union of the two areas\n\tfinally the intersection-over-union ratio is returned (1e-6 is added to the denominator to avoid division by zero)\n\t' if ((a[0] >= a[2]) or (a[1] >= a[3]) or (b[0] >= b[2]) or (b[1] >= b[3])): return 0.0 area_i = intersection(a, b) area_u = union(a, b, area_i) return (float(area_i) / float((area_u + 1e-06)))
def iou(a, b): '\n\tThe if statement requires the bottom-right point to be greater than the top-left point; it is a sanity check\n\tintersection: computes the intersection of the two areas\n\tunion: computes the union of the two areas\n\tfinally the intersection-over-union ratio is returned (1e-6 is added to the denominator to avoid division by zero)\n\t' if ((a[0] >= a[2]) or (a[1] >= a[3]) or (b[0] >= b[2]) or (b[1] >= b[3])): return 0.0 area_i = intersection(a, b) area_u = union(a, b, area_i) return (float(area_i) / float((area_u + 1e-06)))<|docstring|>The if statement requires the bottom-right point to be greater than the top-left point; it is a sanity check intersection: computes the intersection of the two areas union: computes the union of the two areas finally the intersection-over-union ratio is returned (1e-6 is added to the denominator to avoid division by zero)<|endoftext|>
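A worked example for the three helpers above, with boxes given as [x1, y1, x2, y2] (the numbers are illustrative):

a = [0, 0, 4, 4]            # area 16
b = [2, 2, 6, 6]            # area 16
inter = intersection(a, b)  # 2 * 2 = 4
print(union(a, b, inter))   # 16 + 16 - 4 = 28
print(iou(a, b))            # 4 / (28 + 1e-6) ≈ 0.1429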
1e54cfc7180dd94bd4db99a79c4c49e9c86d1374ffc8727209cbeefd9286248e
def get_new_img_size(width, height, img_min_side=600): '\n\tGet the shortest side; the other side is scaled proportionally.\n\t' if (width <= height): f = (float(img_min_side) / width) resized_height = int((f * height)) resized_width = img_min_side else: f = (float(img_min_side) / height) resized_width = int((f * width)) resized_height = img_min_side return (resized_width, resized_height)
Get the shortest side; the other side is scaled proportionally.
keras_frcnn/data_generators.py
get_new_img_size
zouzhen/simple-faster-rcnn
0
python
def get_new_img_size(width, height, img_min_side=600): '\n\t\n\t' if (width <= height): f = (float(img_min_side) / width) resized_height = int((f * height)) resized_width = img_min_side else: f = (float(img_min_side) / height) resized_width = int((f * width)) resized_height = img_min_side return (resized_width, resized_height)
def get_new_img_size(width, height, img_min_side=600): '\n\t\n\t' if (width <= height): f = (float(img_min_side) / width) resized_height = int((f * height)) resized_width = img_min_side else: f = (float(img_min_side) / height) resized_width = int((f * width)) resized_height = img_min_side return (resized_width, resized_height)<|docstring|>Get the shortest side; the other side is scaled proportionally.<|endoftext|>
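Two worked calls (assuming the function above is in scope); the shortest side is pinned to img_min_side=600 and the other side keeps the aspect ratio:

print(get_new_img_size(800, 600))   # (800, 600): height is shortest, f = 600/600
print(get_new_img_size(400, 1000))  # (600, 1500): width is shortest, f = 600/400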
00b9f6e7cd1ed0ab5d3eed6f8861b93bc474a8862c1b34260018dcd7a61af8a1
def calc_rpn(C, img_data, width, height, resized_width, resized_height, img_length_calc_function): '\n\tFunction inputs:\n\ttraining info class (contains an image path, bbox coordinates and the corresponding classes)\n\timage info\n\timage width\n\timage height (needed to recompute the bboxes)\n\tresized image width\n\tresized image height\n\tfunction that computes the feature-map size\n\n\tFunction outputs:\n\twhether a class is contained\n\tthe corresponding regression gradients\n\t' downscale = float(C.rpn_stride) anchor_sizes = C.anchor_box_scales anchor_ratios = C.anchor_box_ratios num_anchors = (len(anchor_sizes) * len(anchor_ratios)) (output_width, output_height) = img_length_calc_function(resized_width, resized_height) n_anchratios = len(anchor_ratios) y_rpn_overlap = np.zeros((output_height, output_width, num_anchors)) y_is_box_valid = np.zeros((output_height, output_width, num_anchors)) y_rpn_regr = np.zeros((output_height, output_width, (num_anchors * 4))) num_bboxes = len(img_data['bboxes']) num_anchors_for_bbox = np.zeros(num_bboxes).astype(int) best_anchor_for_bbox = ((- 1) * np.ones((num_bboxes, 4)).astype(int)) best_iou_for_bbox = np.zeros(num_bboxes).astype(np.float32) best_x_for_bbox = np.zeros((num_bboxes, 4)).astype(int) best_dx_for_bbox = np.zeros((num_bboxes, 4)).astype(np.float32) gta = np.zeros((num_bboxes, 4)) for (bbox_num, bbox) in enumerate(img_data['bboxes']): gta[(bbox_num, 0)] = (bbox['x1'] * (resized_width / float(width))) gta[(bbox_num, 1)] = (bbox['x2'] * (resized_width / float(width))) gta[(bbox_num, 2)] = (bbox['y1'] * (resized_height / float(height))) gta[(bbox_num, 3)] = (bbox['y2'] * (resized_height / float(height))) for anchor_size_idx in range(len(anchor_sizes)): for anchor_ratio_idx in range(n_anchratios): anchor_x = (anchor_sizes[anchor_size_idx] * anchor_ratios[anchor_ratio_idx][0]) anchor_y = (anchor_sizes[anchor_size_idx] * anchor_ratios[anchor_ratio_idx][1]) for ix in range(output_width): x1_anc = ((downscale * (ix + 0.5)) - (anchor_x / 2)) x2_anc = ((downscale * (ix + 0.5)) + (anchor_x / 2)) if ((x1_anc < 0) or (x2_anc > resized_width)): continue for jy in range(output_height): y1_anc = ((downscale * (jy + 0.5)) - (anchor_y / 2)) y2_anc = ((downscale * (jy + 0.5)) + (anchor_y / 2)) if ((y1_anc < 0) or (y2_anc > resized_height)): continue bbox_type = 'neg' best_iou_for_loc = 0.0 for bbox_num in range(num_bboxes): curr_iou = iou([gta[(bbox_num, 0)], gta[(bbox_num, 2)], gta[(bbox_num, 1)], gta[(bbox_num, 3)]], [x1_anc, y1_anc, x2_anc, y2_anc]) if ((curr_iou > best_iou_for_bbox[bbox_num]) or (curr_iou > C.rpn_max_overlap)): cx = ((gta[(bbox_num, 0)] + gta[(bbox_num, 1)]) / 2.0) cy = ((gta[(bbox_num, 2)] + gta[(bbox_num, 3)]) / 2.0) cxa = ((x1_anc + x2_anc) / 2.0) cya = ((y1_anc + y2_anc) / 2.0) tx = ((cx - cxa) / (x2_anc - x1_anc)) ty = ((cy - cya) / (y2_anc - y1_anc)) tw = np.log(((gta[(bbox_num, 1)] - gta[(bbox_num, 0)]) / (x2_anc - x1_anc))) th = np.log(((gta[(bbox_num, 3)] - gta[(bbox_num, 2)]) / (y2_anc - y1_anc))) if (img_data['bboxes'][bbox_num]['class'] != 'bg'): if (curr_iou > best_iou_for_bbox[bbox_num]): best_anchor_for_bbox[bbox_num] = [jy, ix, anchor_ratio_idx, anchor_size_idx] best_iou_for_bbox[bbox_num] = curr_iou best_x_for_bbox[(bbox_num, :)] = [x1_anc, x2_anc, y1_anc, y2_anc] best_dx_for_bbox[(bbox_num, :)] = [tx, ty, tw, th] if (curr_iou > C.rpn_max_overlap): bbox_type = 'pos' num_anchors_for_bbox[bbox_num] += 1 if (curr_iou > best_iou_for_loc): best_iou_for_loc = curr_iou best_regr = (tx, ty, tw, th) if (C.rpn_min_overlap < curr_iou < C.rpn_max_overlap): if (bbox_type != 'pos'): bbox_type = 'neutral' if (bbox_type == 'neg'): y_is_box_valid[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 1 y_rpn_overlap[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 0 elif (bbox_type == 'neutral'): y_is_box_valid[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 0 y_rpn_overlap[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 0 elif (bbox_type == 'pos'): y_is_box_valid[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 1 y_rpn_overlap[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 1 start = (4 * (anchor_ratio_idx + (n_anchratios * anchor_size_idx))) y_rpn_regr[(jy, ix, start:(start + 4))] = best_regr for idx in range(num_anchors_for_bbox.shape[0]): if (num_anchors_for_bbox[idx] == 0): if (best_anchor_for_bbox[(idx, 0)] == (- 1)): continue y_is_box_valid[(best_anchor_for_bbox[(idx, 0)], best_anchor_for_bbox[(idx, 1)], (best_anchor_for_bbox[(idx, 2)] + (n_anchratios * best_anchor_for_bbox[(idx, 3)])))] = 1 y_rpn_overlap[(best_anchor_for_bbox[(idx, 0)], best_anchor_for_bbox[(idx, 1)], (best_anchor_for_bbox[(idx, 2)] + (n_anchratios * best_anchor_for_bbox[(idx, 3)])))] = 1 start = (4 * (best_anchor_for_bbox[(idx, 2)] + (n_anchratios * best_anchor_for_bbox[(idx, 3)]))) y_rpn_regr[(best_anchor_for_bbox[(idx, 0)], best_anchor_for_bbox[(idx, 1)], start:(start + 4))] = best_dx_for_bbox[(idx, :)] y_rpn_overlap = np.transpose(y_rpn_overlap, (2, 0, 1)) y_rpn_overlap = np.expand_dims(y_rpn_overlap, axis=0) y_is_box_valid = np.transpose(y_is_box_valid, (2, 0, 1)) y_is_box_valid = np.expand_dims(y_is_box_valid, axis=0) y_rpn_regr = np.transpose(y_rpn_regr, (2, 0, 1)) y_rpn_regr = np.expand_dims(y_rpn_regr, axis=0) pos_locs = np.where(np.logical_and((y_rpn_overlap[(0, :, :, :)] == 1), (y_is_box_valid[(0, :, :, :)] == 1))) neg_locs = np.where(np.logical_and((y_rpn_overlap[(0, :, :, :)] == 0), (y_is_box_valid[(0, :, :, :)] == 1))) num_pos = len(pos_locs[0]) num_regions = 256 if (len(pos_locs[0]) > (num_regions / 2)): val_locs = random.sample(range(len(pos_locs[0])), (len(pos_locs[0]) - (num_regions / 2))) y_is_box_valid[(0, pos_locs[0][val_locs], pos_locs[1][val_locs], pos_locs[2][val_locs])] = 0 num_pos = (num_regions / 2) if ((len(neg_locs[0]) + num_pos) > num_regions): val_locs = random.sample(range(len(neg_locs[0])), (len(neg_locs[0]) - num_pos)) y_is_box_valid[(0, neg_locs[0][val_locs], neg_locs[1][val_locs], neg_locs[2][val_locs])] = 0 y_rpn_cls = np.concatenate([y_is_box_valid, y_rpn_overlap], axis=1) y_rpn_regr = np.concatenate([np.repeat(y_rpn_overlap, 4, axis=1), y_rpn_regr], axis=1) return (np.copy(y_rpn_cls), np.copy(y_rpn_regr))
Function inputs: training info class (contains an image path, bbox coordinates and the corresponding classes) image info image width image height (needed to recompute the bboxes) resized image width resized image height function that computes the feature-map size Function outputs: whether a class is contained the corresponding regression gradients
keras_frcnn/data_generators.py
calc_rpn
zouzhen/simple-faster-rcnn
0
python
def calc_rpn(C, img_data, width, height, resized_width, resized_height, img_length_calc_function): '\n\tFunction inputs:\n\ttraining info class (contains an image path, bbox coordinates and the corresponding classes)\n\timage info\n\timage width\n\timage height (needed to recompute the bboxes)\n\tresized image width\n\tresized image height\n\tfunction that computes the feature-map size\n\n\tFunction outputs:\n\twhether a class is contained\n\tthe corresponding regression gradients\n\t' downscale = float(C.rpn_stride) anchor_sizes = C.anchor_box_scales anchor_ratios = C.anchor_box_ratios num_anchors = (len(anchor_sizes) * len(anchor_ratios)) (output_width, output_height) = img_length_calc_function(resized_width, resized_height) n_anchratios = len(anchor_ratios) y_rpn_overlap = np.zeros((output_height, output_width, num_anchors)) y_is_box_valid = np.zeros((output_height, output_width, num_anchors)) y_rpn_regr = np.zeros((output_height, output_width, (num_anchors * 4))) num_bboxes = len(img_data['bboxes']) num_anchors_for_bbox = np.zeros(num_bboxes).astype(int) best_anchor_for_bbox = ((- 1) * np.ones((num_bboxes, 4)).astype(int)) best_iou_for_bbox = np.zeros(num_bboxes).astype(np.float32) best_x_for_bbox = np.zeros((num_bboxes, 4)).astype(int) best_dx_for_bbox = np.zeros((num_bboxes, 4)).astype(np.float32) gta = np.zeros((num_bboxes, 4)) for (bbox_num, bbox) in enumerate(img_data['bboxes']): gta[(bbox_num, 0)] = (bbox['x1'] * (resized_width / float(width))) gta[(bbox_num, 1)] = (bbox['x2'] * (resized_width / float(width))) gta[(bbox_num, 2)] = (bbox['y1'] * (resized_height / float(height))) gta[(bbox_num, 3)] = (bbox['y2'] * (resized_height / float(height))) for anchor_size_idx in range(len(anchor_sizes)): for anchor_ratio_idx in range(n_anchratios): anchor_x = (anchor_sizes[anchor_size_idx] * anchor_ratios[anchor_ratio_idx][0]) anchor_y = (anchor_sizes[anchor_size_idx] * anchor_ratios[anchor_ratio_idx][1]) for ix in range(output_width): x1_anc = ((downscale * (ix + 0.5)) - (anchor_x / 2)) x2_anc = ((downscale * (ix + 0.5)) + (anchor_x / 2)) if ((x1_anc < 0) or (x2_anc > resized_width)): continue for jy in range(output_height): y1_anc = ((downscale * (jy + 0.5)) - (anchor_y / 2)) y2_anc = ((downscale * (jy + 0.5)) + (anchor_y / 2)) if ((y1_anc < 0) or (y2_anc > resized_height)): continue bbox_type = 'neg' best_iou_for_loc = 0.0 for bbox_num in range(num_bboxes): curr_iou = iou([gta[(bbox_num, 0)], gta[(bbox_num, 2)], gta[(bbox_num, 1)], gta[(bbox_num, 3)]], [x1_anc, y1_anc, x2_anc, y2_anc]) if ((curr_iou > best_iou_for_bbox[bbox_num]) or (curr_iou > C.rpn_max_overlap)): cx = ((gta[(bbox_num, 0)] + gta[(bbox_num, 1)]) / 2.0) cy = ((gta[(bbox_num, 2)] + gta[(bbox_num, 3)]) / 2.0) cxa = ((x1_anc + x2_anc) / 2.0) cya = ((y1_anc + y2_anc) / 2.0) tx = ((cx - cxa) / (x2_anc - x1_anc)) ty = ((cy - cya) / (y2_anc - y1_anc)) tw = np.log(((gta[(bbox_num, 1)] - gta[(bbox_num, 0)]) / (x2_anc - x1_anc))) th = np.log(((gta[(bbox_num, 3)] - gta[(bbox_num, 2)]) / (y2_anc - y1_anc))) if (img_data['bboxes'][bbox_num]['class'] != 'bg'): if (curr_iou > best_iou_for_bbox[bbox_num]): best_anchor_for_bbox[bbox_num] = [jy, ix, anchor_ratio_idx, anchor_size_idx] best_iou_for_bbox[bbox_num] = curr_iou best_x_for_bbox[(bbox_num, :)] = [x1_anc, x2_anc, y1_anc, y2_anc] best_dx_for_bbox[(bbox_num, :)] = [tx, ty, tw, th] if (curr_iou > C.rpn_max_overlap): bbox_type = 'pos' num_anchors_for_bbox[bbox_num] += 1 if (curr_iou > best_iou_for_loc): best_iou_for_loc = curr_iou best_regr = (tx, ty, tw, th) if (C.rpn_min_overlap < curr_iou < C.rpn_max_overlap): if (bbox_type != 'pos'): bbox_type = 'neutral' if (bbox_type == 'neg'): y_is_box_valid[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 1 y_rpn_overlap[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 0 elif (bbox_type == 'neutral'): y_is_box_valid[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 0 y_rpn_overlap[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 0 elif (bbox_type == 'pos'): y_is_box_valid[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 1 y_rpn_overlap[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 1 start = (4 * (anchor_ratio_idx + (n_anchratios * anchor_size_idx))) y_rpn_regr[(jy, ix, start:(start + 4))] = best_regr for idx in range(num_anchors_for_bbox.shape[0]): if (num_anchors_for_bbox[idx] == 0): if (best_anchor_for_bbox[(idx, 0)] == (- 1)): continue y_is_box_valid[(best_anchor_for_bbox[(idx, 0)], best_anchor_for_bbox[(idx, 1)], (best_anchor_for_bbox[(idx, 2)] + (n_anchratios * best_anchor_for_bbox[(idx, 3)])))] = 1 y_rpn_overlap[(best_anchor_for_bbox[(idx, 0)], best_anchor_for_bbox[(idx, 1)], (best_anchor_for_bbox[(idx, 2)] + (n_anchratios * best_anchor_for_bbox[(idx, 3)])))] = 1 start = (4 * (best_anchor_for_bbox[(idx, 2)] + (n_anchratios * best_anchor_for_bbox[(idx, 3)]))) y_rpn_regr[(best_anchor_for_bbox[(idx, 0)], best_anchor_for_bbox[(idx, 1)], start:(start + 4))] = best_dx_for_bbox[(idx, :)] y_rpn_overlap = np.transpose(y_rpn_overlap, (2, 0, 1)) y_rpn_overlap = np.expand_dims(y_rpn_overlap, axis=0) y_is_box_valid = np.transpose(y_is_box_valid, (2, 0, 1)) y_is_box_valid = np.expand_dims(y_is_box_valid, axis=0) y_rpn_regr = np.transpose(y_rpn_regr, (2, 0, 1)) y_rpn_regr = np.expand_dims(y_rpn_regr, axis=0) pos_locs = np.where(np.logical_and((y_rpn_overlap[(0, :, :, :)] == 1), (y_is_box_valid[(0, :, :, :)] == 1))) neg_locs = np.where(np.logical_and((y_rpn_overlap[(0, :, :, :)] == 0), (y_is_box_valid[(0, :, :, :)] == 1))) num_pos = len(pos_locs[0]) num_regions = 256 if (len(pos_locs[0]) > (num_regions / 2)): val_locs = random.sample(range(len(pos_locs[0])), (len(pos_locs[0]) - (num_regions / 2))) y_is_box_valid[(0, pos_locs[0][val_locs], pos_locs[1][val_locs], pos_locs[2][val_locs])] = 0 num_pos = (num_regions / 2) if ((len(neg_locs[0]) + num_pos) > num_regions): val_locs = random.sample(range(len(neg_locs[0])), (len(neg_locs[0]) - num_pos)) y_is_box_valid[(0, neg_locs[0][val_locs], neg_locs[1][val_locs], neg_locs[2][val_locs])] = 0 y_rpn_cls = np.concatenate([y_is_box_valid, y_rpn_overlap], axis=1) y_rpn_regr = np.concatenate([np.repeat(y_rpn_overlap, 4, axis=1), y_rpn_regr], axis=1) return (np.copy(y_rpn_cls), np.copy(y_rpn_regr))
def calc_rpn(C, img_data, width, height, resized_width, resized_height, img_length_calc_function): '\n\tFunction inputs:\n\ttraining info class (contains an image path, bbox coordinates and the corresponding classes)\n\timage info\n\timage width\n\timage height (needed to recompute the bboxes)\n\tresized image width\n\tresized image height\n\tfunction that computes the feature-map size\n\n\tFunction outputs:\n\twhether a class is contained\n\tthe corresponding regression gradients\n\t' downscale = float(C.rpn_stride) anchor_sizes = C.anchor_box_scales anchor_ratios = C.anchor_box_ratios num_anchors = (len(anchor_sizes) * len(anchor_ratios)) (output_width, output_height) = img_length_calc_function(resized_width, resized_height) n_anchratios = len(anchor_ratios) y_rpn_overlap = np.zeros((output_height, output_width, num_anchors)) y_is_box_valid = np.zeros((output_height, output_width, num_anchors)) y_rpn_regr = np.zeros((output_height, output_width, (num_anchors * 4))) num_bboxes = len(img_data['bboxes']) num_anchors_for_bbox = np.zeros(num_bboxes).astype(int) best_anchor_for_bbox = ((- 1) * np.ones((num_bboxes, 4)).astype(int)) best_iou_for_bbox = np.zeros(num_bboxes).astype(np.float32) best_x_for_bbox = np.zeros((num_bboxes, 4)).astype(int) best_dx_for_bbox = np.zeros((num_bboxes, 4)).astype(np.float32) gta = np.zeros((num_bboxes, 4)) for (bbox_num, bbox) in enumerate(img_data['bboxes']): gta[(bbox_num, 0)] = (bbox['x1'] * (resized_width / float(width))) gta[(bbox_num, 1)] = (bbox['x2'] * (resized_width / float(width))) gta[(bbox_num, 2)] = (bbox['y1'] * (resized_height / float(height))) gta[(bbox_num, 3)] = (bbox['y2'] * (resized_height / float(height))) for anchor_size_idx in range(len(anchor_sizes)): for anchor_ratio_idx in range(n_anchratios): anchor_x = (anchor_sizes[anchor_size_idx] * anchor_ratios[anchor_ratio_idx][0]) anchor_y = (anchor_sizes[anchor_size_idx] * anchor_ratios[anchor_ratio_idx][1]) for ix in range(output_width): x1_anc = ((downscale * (ix + 0.5)) - (anchor_x / 2)) x2_anc = ((downscale * (ix + 0.5)) + (anchor_x / 2)) if ((x1_anc < 0) or (x2_anc > resized_width)): continue for jy in range(output_height): y1_anc = ((downscale * (jy + 0.5)) - (anchor_y / 2)) y2_anc = ((downscale * (jy + 0.5)) + (anchor_y / 2)) if ((y1_anc < 0) or (y2_anc > resized_height)): continue bbox_type = 'neg' best_iou_for_loc = 0.0 for bbox_num in range(num_bboxes): curr_iou = iou([gta[(bbox_num, 0)], gta[(bbox_num, 2)], gta[(bbox_num, 1)], gta[(bbox_num, 3)]], [x1_anc, y1_anc, x2_anc, y2_anc]) if ((curr_iou > best_iou_for_bbox[bbox_num]) or (curr_iou > C.rpn_max_overlap)): cx = ((gta[(bbox_num, 0)] + gta[(bbox_num, 1)]) / 2.0) cy = ((gta[(bbox_num, 2)] + gta[(bbox_num, 3)]) / 2.0) cxa = ((x1_anc + x2_anc) / 2.0) cya = ((y1_anc + y2_anc) / 2.0) tx = ((cx - cxa) / (x2_anc - x1_anc)) ty = ((cy - cya) / (y2_anc - y1_anc)) tw = np.log(((gta[(bbox_num, 1)] - gta[(bbox_num, 0)]) / (x2_anc - x1_anc))) th = np.log(((gta[(bbox_num, 3)] - gta[(bbox_num, 2)]) / (y2_anc - y1_anc))) if (img_data['bboxes'][bbox_num]['class'] != 'bg'): if (curr_iou > best_iou_for_bbox[bbox_num]): best_anchor_for_bbox[bbox_num] = [jy, ix, anchor_ratio_idx, anchor_size_idx] best_iou_for_bbox[bbox_num] = curr_iou best_x_for_bbox[(bbox_num, :)] = [x1_anc, x2_anc, y1_anc, y2_anc] best_dx_for_bbox[(bbox_num, :)] = [tx, ty, tw, th] if (curr_iou > C.rpn_max_overlap): bbox_type = 'pos' num_anchors_for_bbox[bbox_num] += 1 if (curr_iou > best_iou_for_loc): best_iou_for_loc = curr_iou best_regr = (tx, ty, tw, th) if (C.rpn_min_overlap < curr_iou < C.rpn_max_overlap): if (bbox_type != 'pos'): bbox_type = 'neutral' if (bbox_type == 'neg'): y_is_box_valid[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 1 y_rpn_overlap[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 0 elif (bbox_type == 'neutral'): y_is_box_valid[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 0 y_rpn_overlap[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 0 elif (bbox_type == 'pos'): y_is_box_valid[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 1 y_rpn_overlap[(jy, ix, (anchor_ratio_idx + (n_anchratios * anchor_size_idx)))] = 1 start = (4 * (anchor_ratio_idx + (n_anchratios * anchor_size_idx))) y_rpn_regr[(jy, ix, start:(start + 4))] = best_regr for idx in range(num_anchors_for_bbox.shape[0]): if (num_anchors_for_bbox[idx] == 0): if (best_anchor_for_bbox[(idx, 0)] == (- 1)): continue y_is_box_valid[(best_anchor_for_bbox[(idx, 0)], best_anchor_for_bbox[(idx, 1)], (best_anchor_for_bbox[(idx, 2)] + (n_anchratios * best_anchor_for_bbox[(idx, 3)])))] = 1 y_rpn_overlap[(best_anchor_for_bbox[(idx, 0)], best_anchor_for_bbox[(idx, 1)], (best_anchor_for_bbox[(idx, 2)] + (n_anchratios * best_anchor_for_bbox[(idx, 3)])))] = 1 start = (4 * (best_anchor_for_bbox[(idx, 2)] + (n_anchratios * best_anchor_for_bbox[(idx, 3)]))) y_rpn_regr[(best_anchor_for_bbox[(idx, 0)], best_anchor_for_bbox[(idx, 1)], start:(start + 4))] = best_dx_for_bbox[(idx, :)] y_rpn_overlap = np.transpose(y_rpn_overlap, (2, 0, 1)) y_rpn_overlap = np.expand_dims(y_rpn_overlap, axis=0) y_is_box_valid = np.transpose(y_is_box_valid, (2, 0, 1)) y_is_box_valid = np.expand_dims(y_is_box_valid, axis=0) y_rpn_regr = np.transpose(y_rpn_regr, (2, 0, 1)) y_rpn_regr = np.expand_dims(y_rpn_regr, axis=0) pos_locs = np.where(np.logical_and((y_rpn_overlap[(0, :, :, :)] == 1), (y_is_box_valid[(0, :, :, :)] == 1))) neg_locs = np.where(np.logical_and((y_rpn_overlap[(0, :, :, :)] == 0), (y_is_box_valid[(0, :, :, :)] == 1))) num_pos = len(pos_locs[0]) num_regions = 256 if (len(pos_locs[0]) > (num_regions / 2)): val_locs = random.sample(range(len(pos_locs[0])), (len(pos_locs[0]) - (num_regions / 2))) y_is_box_valid[(0, pos_locs[0][val_locs], pos_locs[1][val_locs], pos_locs[2][val_locs])] = 0 num_pos = (num_regions / 2) if ((len(neg_locs[0]) + num_pos) > num_regions): val_locs = random.sample(range(len(neg_locs[0])), (len(neg_locs[0]) - num_pos)) y_is_box_valid[(0, neg_locs[0][val_locs], neg_locs[1][val_locs], neg_locs[2][val_locs])] = 0 y_rpn_cls = np.concatenate([y_is_box_valid, y_rpn_overlap], axis=1) y_rpn_regr = np.concatenate([np.repeat(y_rpn_overlap, 4, axis=1), y_rpn_regr], axis=1) return (np.copy(y_rpn_cls), np.copy(y_rpn_regr))<|docstring|>Function inputs: training info class (contains an image path, bbox coordinates and the corresponding classes) image info image width image height (needed to recompute the bboxes) resized image width resized image height function that computes the feature-map size Function outputs: whether a class is contained the corresponding regression gradients<|endoftext|>
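A hedged numeric sketch of the (tx, ty, tw, th) regression targets computed inside calc_rpn for one anchor / ground-truth pair, using the same formulas as the body; the coordinates are made up:

import numpy as np

x1_anc, y1_anc, x2_anc, y2_anc = 100.0, 100.0, 228.0, 228.0  # a 128x128 anchor
gx1, gy1, gx2, gy2 = 110.0, 90.0, 240.0, 230.0               # resized ground-truth box

cx, cy = (gx1 + gx2) / 2.0, (gy1 + gy2) / 2.0                # ground-truth center
cxa, cya = (x1_anc + x2_anc) / 2.0, (y1_anc + y2_anc) / 2.0  # anchor center
tx = (cx - cxa) / (x2_anc - x1_anc)                          # 0.0859: normalized x offset
ty = (cy - cya) / (y2_anc - y1_anc)                          # -0.0313: normalized y offset
tw = np.log((gx2 - gx1) / (x2_anc - x1_anc))                 # 0.0155: log width ratio
th = np.log((gy2 - gy1) / (y2_anc - y1_anc))                 # 0.0896: log height ratio
print(tx, ty, tw, th)                                        # the four values written into y_rpn_regr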
464c2d6b3ca5144f2266657521c6582119ae3c1e08bd726b0ec0248e7e674f1e
def threadsafe_generator(f): 'A decorator that takes a generator function and makes it thread-safe.\n\t' def g(*a, **kw): return threadsafe_iter(f(*a, **kw)) return g
A decorator that takes a generator function and makes it thread-safe.
keras_frcnn/data_generators.py
threadsafe_generator
zouzhen/simple-faster-rcnn
0
python
def threadsafe_generator(f): '\n\t' def g(*a, **kw): return threadsafe_iter(f(*a, **kw)) return g
def threadsafe_generator(f): '\n\t' def g(*a, **kw): return threadsafe_iter(f(*a, **kw)) return g<|docstring|>A decorator that takes a generator function and makes it thread-safe.<|endoftext|>
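threadsafe_iter is referenced but not shown in this record; below is a minimal sketch of a compatible implementation (a lock around next()), purely as an assumption, followed by the decorator in use:

import threading

class threadsafe_iter:
    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()
    def __iter__(self):
        return self
    def __next__(self):
        with self.lock:
            return next(self.it)

@threadsafe_generator  # assumes the decorator above is in scope
def count_up(n):
    for i in range(n):
        yield i

print(list(count_up(5)))  # [0, 1, 2, 3, 4], safe to consume from multiple threads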