body_hash (string, 64 chars) | body (string, 23-109k chars) | docstring (string, 1-57k chars) | path (string, 4-198 chars) | name (string, 1-115 chars) | repository_name (string, 7-111 chars) | repository_stars (float64, 0-191k) | lang (1 class: python) | body_without_docstring (string, 14-108k chars) | unified (string, 45-133k chars)
---|---|---|---|---|---|---|---|---|---
c5083085d3a01d6614e45e3bf4db3a645976f8b16d471ba996bad562a9cc8e89
|
def get_db_versions(compartment_id: Optional[str]=None, db_system_id: Optional[str]=None, db_system_shape: Optional[str]=None, filters: Optional[Sequence[pulumi.InputType['GetDbVersionsFilterArgs']]]=None, is_database_software_image_supported: Optional[bool]=None, is_upgrade_supported: Optional[bool]=None, storage_management: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetDbVersionsResult:
'\n This data source provides the list of Db Versions in Oracle Cloud Infrastructure Database service.\n\n Gets a list of supported Oracle Database versions.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_oci as oci\n\n test_db_versions = oci.database.get_db_versions(compartment_id=var["compartment_id"],\n db_system_id=oci_database_db_system["test_db_system"]["id"],\n db_system_shape=var["db_version_db_system_shape"],\n is_database_software_image_supported=var["db_version_is_database_software_image_supported"],\n is_upgrade_supported=var["db_version_is_upgrade_supported"],\n storage_management=var["db_version_storage_management"])\n ```\n\n\n :param str compartment_id: The compartment [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).\n :param str db_system_id: The DB system [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm). If provided, filters the results to the set of database versions which are supported for the DB system.\n :param str db_system_shape: If provided, filters the results to the set of database versions which are supported for the given shape.\n :param bool is_database_software_image_supported: If true, filters the results to the set of Oracle Database versions that are supported for Oracle Cloud Infrastructure database software images.\n :param bool is_upgrade_supported: If provided, filters the results to the set of database versions which are supported for Upgrade.\n :param str storage_management: The DB system storage management option. Used to list database versions available for that storage manager. Valid values are:\n * ASM - Automatic storage management\n * LVM - Logical volume management\n '
__args__ = dict()
__args__['compartmentId'] = compartment_id
__args__['dbSystemId'] = db_system_id
__args__['dbSystemShape'] = db_system_shape
__args__['filters'] = filters
__args__['isDatabaseSoftwareImageSupported'] = is_database_software_image_supported
__args__['isUpgradeSupported'] = is_upgrade_supported
__args__['storageManagement'] = storage_management
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:database/getDbVersions:getDbVersions', __args__, opts=opts, typ=GetDbVersionsResult).value
return AwaitableGetDbVersionsResult(compartment_id=__ret__.compartment_id, db_system_id=__ret__.db_system_id, db_system_shape=__ret__.db_system_shape, db_versions=__ret__.db_versions, filters=__ret__.filters, id=__ret__.id, is_database_software_image_supported=__ret__.is_database_software_image_supported, is_upgrade_supported=__ret__.is_upgrade_supported, storage_management=__ret__.storage_management)
|
This data source provides the list of Db Versions in Oracle Cloud Infrastructure Database service.
Gets a list of supported Oracle Database versions.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_db_versions = oci.database.get_db_versions(compartment_id=var["compartment_id"],
db_system_id=oci_database_db_system["test_db_system"]["id"],
db_system_shape=var["db_version_db_system_shape"],
is_database_software_image_supported=var["db_version_is_database_software_image_supported"],
is_upgrade_supported=var["db_version_is_upgrade_supported"],
storage_management=var["db_version_storage_management"])
```
:param str compartment_id: The compartment [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
:param str db_system_id: The DB system [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm). If provided, filters the results to the set of database versions which are supported for the DB system.
:param str db_system_shape: If provided, filters the results to the set of database versions which are supported for the given shape.
:param bool is_database_software_image_supported: If true, filters the results to the set of Oracle Database versions that are supported for Oracle Cloud Infrastructure database software images.
:param bool is_upgrade_supported: If provided, filters the results to the set of database versions which are supported for Upgrade.
:param str storage_management: The DB system storage management option. Used to list database versions available for that storage manager. Valid values are:
* ASM - Automatic storage management
* LVM - Logical volume management
|
sdk/python/pulumi_oci/database/get_db_versions.py
|
get_db_versions
|
pellizzetti/pulumi-oci-dev
| 5 |
python
|
def get_db_versions(compartment_id: Optional[str]=None, db_system_id: Optional[str]=None, db_system_shape: Optional[str]=None, filters: Optional[Sequence[pulumi.InputType['GetDbVersionsFilterArgs']]]=None, is_database_software_image_supported: Optional[bool]=None, is_upgrade_supported: Optional[bool]=None, storage_management: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetDbVersionsResult:
'\n This data source provides the list of Db Versions in Oracle Cloud Infrastructure Database service.\n\n Gets a list of supported Oracle Database versions.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_oci as oci\n\n test_db_versions = oci.database.get_db_versions(compartment_id=var["compartment_id"],\n db_system_id=oci_database_db_system["test_db_system"]["id"],\n db_system_shape=var["db_version_db_system_shape"],\n is_database_software_image_supported=var["db_version_is_database_software_image_supported"],\n is_upgrade_supported=var["db_version_is_upgrade_supported"],\n storage_management=var["db_version_storage_management"])\n ```\n\n\n :param str compartment_id: The compartment [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).\n :param str db_system_id: The DB system [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm). If provided, filters the results to the set of database versions which are supported for the DB system.\n :param str db_system_shape: If provided, filters the results to the set of database versions which are supported for the given shape.\n :param bool is_database_software_image_supported: If true, filters the results to the set of Oracle Database versions that are supported for Oracle Cloud Infrastructure database software images.\n :param bool is_upgrade_supported: If provided, filters the results to the set of database versions which are supported for Upgrade.\n :param str storage_management: The DB system storage management option. Used to list database versions available for that storage manager. Valid values are:\n * ASM - Automatic storage management\n * LVM - Logical volume management\n '
__args__ = dict()
__args__['compartmentId'] = compartment_id
__args__['dbSystemId'] = db_system_id
__args__['dbSystemShape'] = db_system_shape
__args__['filters'] = filters
__args__['isDatabaseSoftwareImageSupported'] = is_database_software_image_supported
__args__['isUpgradeSupported'] = is_upgrade_supported
__args__['storageManagement'] = storage_management
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:database/getDbVersions:getDbVersions', __args__, opts=opts, typ=GetDbVersionsResult).value
return AwaitableGetDbVersionsResult(compartment_id=__ret__.compartment_id, db_system_id=__ret__.db_system_id, db_system_shape=__ret__.db_system_shape, db_versions=__ret__.db_versions, filters=__ret__.filters, id=__ret__.id, is_database_software_image_supported=__ret__.is_database_software_image_supported, is_upgrade_supported=__ret__.is_upgrade_supported, storage_management=__ret__.storage_management)
|
def get_db_versions(compartment_id: Optional[str]=None, db_system_id: Optional[str]=None, db_system_shape: Optional[str]=None, filters: Optional[Sequence[pulumi.InputType['GetDbVersionsFilterArgs']]]=None, is_database_software_image_supported: Optional[bool]=None, is_upgrade_supported: Optional[bool]=None, storage_management: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetDbVersionsResult:
'\n This data source provides the list of Db Versions in Oracle Cloud Infrastructure Database service.\n\n Gets a list of supported Oracle Database versions.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_oci as oci\n\n test_db_versions = oci.database.get_db_versions(compartment_id=var["compartment_id"],\n db_system_id=oci_database_db_system["test_db_system"]["id"],\n db_system_shape=var["db_version_db_system_shape"],\n is_database_software_image_supported=var["db_version_is_database_software_image_supported"],\n is_upgrade_supported=var["db_version_is_upgrade_supported"],\n storage_management=var["db_version_storage_management"])\n ```\n\n\n :param str compartment_id: The compartment [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).\n :param str db_system_id: The DB system [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm). If provided, filters the results to the set of database versions which are supported for the DB system.\n :param str db_system_shape: If provided, filters the results to the set of database versions which are supported for the given shape.\n :param bool is_database_software_image_supported: If true, filters the results to the set of Oracle Database versions that are supported for Oracle Cloud Infrastructure database software images.\n :param bool is_upgrade_supported: If provided, filters the results to the set of database versions which are supported for Upgrade.\n :param str storage_management: The DB system storage management option. Used to list database versions available for that storage manager. Valid values are:\n * ASM - Automatic storage management\n * LVM - Logical volume management\n '
__args__ = dict()
__args__['compartmentId'] = compartment_id
__args__['dbSystemId'] = db_system_id
__args__['dbSystemShape'] = db_system_shape
__args__['filters'] = filters
__args__['isDatabaseSoftwareImageSupported'] = is_database_software_image_supported
__args__['isUpgradeSupported'] = is_upgrade_supported
__args__['storageManagement'] = storage_management
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:database/getDbVersions:getDbVersions', __args__, opts=opts, typ=GetDbVersionsResult).value
return AwaitableGetDbVersionsResult(compartment_id=__ret__.compartment_id, db_system_id=__ret__.db_system_id, db_system_shape=__ret__.db_system_shape, db_versions=__ret__.db_versions, filters=__ret__.filters, id=__ret__.id, is_database_software_image_supported=__ret__.is_database_software_image_supported, is_upgrade_supported=__ret__.is_upgrade_supported, storage_management=__ret__.storage_management)<|docstring|>This data source provides the list of Db Versions in Oracle Cloud Infrastructure Database service.
Gets a list of supported Oracle Database versions.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_db_versions = oci.database.get_db_versions(compartment_id=var["compartment_id"],
db_system_id=oci_database_db_system["test_db_system"]["id"],
db_system_shape=var["db_version_db_system_shape"],
is_database_software_image_supported=var["db_version_is_database_software_image_supported"],
is_upgrade_supported=var["db_version_is_upgrade_supported"],
storage_management=var["db_version_storage_management"])
```
:param str compartment_id: The compartment [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
:param str db_system_id: The DB system [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm). If provided, filters the results to the set of database versions which are supported for the DB system.
:param str db_system_shape: If provided, filters the results to the set of database versions which are supported for the given shape.
:param bool is_database_software_image_supported: If true, filters the results to the set of Oracle Database versions that are supported for Oracle Cloud Infrastructure database software images.
:param bool is_upgrade_supported: If provided, filters the results to the set of database versions which are supported for Upgrade.
:param str storage_management: The DB system storage management option. Used to list database versions available for that storage manager. Valid values are:
* ASM - Automatic storage management
* LVM - Logical volume management<|endoftext|>
|
2a373f253e5685fbe0634864b1a5b1c27390955fc927d280a6ee94460eefe0e3
|
@property
@pulumi.getter(name='dbVersions')
def db_versions(self) -> Sequence['outputs.GetDbVersionsDbVersionResult']:
'\n The list of db_versions.\n '
return pulumi.get(self, 'db_versions')
|
The list of db_versions.
|
sdk/python/pulumi_oci/database/get_db_versions.py
|
db_versions
|
pellizzetti/pulumi-oci-dev
| 5 |
python
|
@property
@pulumi.getter(name='dbVersions')
def db_versions(self) -> Sequence['outputs.GetDbVersionsDbVersionResult']:
'\n \n '
return pulumi.get(self, 'db_versions')
|
@property
@pulumi.getter(name='dbVersions')
def db_versions(self) -> Sequence['outputs.GetDbVersionsDbVersionResult']:
'\n \n '
return pulumi.get(self, 'db_versions')<|docstring|>The list of db_versions.<|endoftext|>
|
bcf5b51a327014088b63f706e1dc3987198031e1f0241bd10b06cf4dd5bcb53c
|
@property
@pulumi.getter
def id(self) -> str:
'\n The provider-assigned unique ID for this managed resource.\n '
return pulumi.get(self, 'id')
|
The provider-assigned unique ID for this managed resource.
|
sdk/python/pulumi_oci/database/get_db_versions.py
|
id
|
pellizzetti/pulumi-oci-dev
| 5 |
python
|
@property
@pulumi.getter
def id(self) -> str:
'\n \n '
return pulumi.get(self, 'id')
|
@property
@pulumi.getter
def id(self) -> str:
'\n \n '
return pulumi.get(self, 'id')<|docstring|>The provider-assigned unique ID for this managed resource.<|endoftext|>
|
c00480ff2ef2e7a9e53600991794d5f514a0779124ca109362cc2713ea3ce4a7
|
@property
@pulumi.getter(name='isUpgradeSupported')
def is_upgrade_supported(self) -> Optional[bool]:
'\n True if this version of the Oracle Database software is supported for Upgrade.\n '
return pulumi.get(self, 'is_upgrade_supported')
|
True if this version of the Oracle Database software is supported for Upgrade.
|
sdk/python/pulumi_oci/database/get_db_versions.py
|
is_upgrade_supported
|
pellizzetti/pulumi-oci-dev
| 5 |
python
|
@property
@pulumi.getter(name='isUpgradeSupported')
def is_upgrade_supported(self) -> Optional[bool]:
'\n \n '
return pulumi.get(self, 'is_upgrade_supported')
|
@property
@pulumi.getter(name='isUpgradeSupported')
def is_upgrade_supported(self) -> Optional[bool]:
'\n \n '
return pulumi.get(self, 'is_upgrade_supported')<|docstring|>True if this version of the Oracle Database software is supported for Upgrade.<|endoftext|>
|
701e6e9a76e5150a384c49215ea9432916b0949a0d78858c068c2dba0c315973
|
@staticmethod
def duration(*args):
' Return the duration of the performance test. '
return datetime.timedelta(minutes=90, seconds=4)
|
Return the duration of the performance test.
|
backend/tests/unittests/metric/product/performance/performance_test_duration_tests.py
|
duration
|
ICTU/quality-report
| 25 |
python
|
@staticmethod
def duration(*args):
' '
return datetime.timedelta(minutes=90, seconds=4)
|
@staticmethod
def duration(*args):
' '
return datetime.timedelta(minutes=90, seconds=4)<|docstring|>Return the duration of the performance test.<|endoftext|>
|
edb82f2872bbebdbfd899d8de7209a0733df60cce2f23fbfd14605766c08694b
|
@staticmethod
def name():
' Return the name of the subject. '
return 'FakeSubject'
|
Return the name of the subject.
|
backend/tests/unittests/metric/product/performance/performance_test_duration_tests.py
|
name
|
ICTU/quality-report
| 25 |
python
|
@staticmethod
def name():
' '
return 'FakeSubject'
|
@staticmethod
def name():
' '
return 'FakeSubject'<|docstring|>Return the name of the subject.<|endoftext|>
|
d18905b1ed1a35a0d9d25b4fbe60ea3c1230bd23ebc6e9cb1a8bdd3cb5341eee
|
def metric_source_id(self, performance_report):
' Return the performance report id of the subject. '
return self.__performance_report_id
|
Return the performance report id of the subject.
|
backend/tests/unittests/metric/product/performance/performance_test_duration_tests.py
|
metric_source_id
|
ICTU/quality-report
| 25 |
python
|
def metric_source_id(self, performance_report):
' '
return self.__performance_report_id
|
def metric_source_id(self, performance_report):
' '
return self.__performance_report_id<|docstring|>Return the performance report id of the subject.<|endoftext|>
|
1cc5a2c867149e443360a85f737858c8735df3530158d350b346f4e367649c35
|
def test_value(self):
' Test that value of the metric equals the value reported by the performance report. '
self.assertEqual(90, self.__metric.value())
|
Test that value of the metric equals the value reported by the performance report.
|
backend/tests/unittests/metric/product/performance/performance_test_duration_tests.py
|
test_value
|
ICTU/quality-report
| 25 |
python
|
def test_value(self):
' '
self.assertEqual(90, self.__metric.value())
|
def test_value(self):
' '
self.assertEqual(90, self.__metric.value())<|docstring|>Test that value of the metric equals the value reported by the performance report.<|endoftext|>
|
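A minimal sketch of how the fake report's `timedelta` above could map to the asserted value of 90: total seconds floored to whole minutes. The rounding rule is an assumption inferred from the 90-minute-4-second fixture and the asserted value, not something this file confirms.

```python
import datetime

# Hypothetical rounding: floor total seconds to whole minutes.
duration = datetime.timedelta(minutes=90, seconds=4)
print(int(duration.total_seconds() // 60))  # 90
```
|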
ad17c098a8f9ec4029e1b003356ade19c73156a973a8a5fec99f62be03875350
|
def test_value_when_not_configured(self):
" Test that the value of the metric is -1 when the report hasn't been configured. "
performance_metric = self.metric_class(subject=FakeSubject(), project=domain.Project())
self.assertEqual((- 1), performance_metric.value())
|
Test that the value of the metric is -1 when the report hasn't been configured.
|
backend/tests/unittests/metric/product/performance/performance_test_duration_tests.py
|
test_value_when_not_configured
|
ICTU/quality-report
| 25 |
python
|
def test_value_when_not_configured(self):
" "
performance_metric = self.metric_class(subject=FakeSubject(), project=domain.Project())
self.assertEqual((- 1), performance_metric.value())
|
def test_value_when_not_configured(self):
" "
performance_metric = self.metric_class(subject=FakeSubject(), project=domain.Project())
self.assertEqual((- 1), performance_metric.value())<|docstring|>Test that the value of the metric is -1 when the report hasn't been configured.<|endoftext|>
|
edc8720f1d2fe031b7cf6ddc9bf5c806842b43955d779ed26d7d0948c8e61d39
|
def test_value_when_missing(self):
' Test that the value is negative when the test report is missing. '
class MissingPerformanceReport():
' Fake a missing performance report. '
@staticmethod
def duration(*args):
' Return a default value. '
return datetime.timedelta.max
project = domain.Project(metric_sources={self.metric_class.metric_source_class: MissingPerformanceReport()})
performance_metric = self.metric_class(subject=FakeSubject(), project=project)
self.assertEqual((- 1), performance_metric.value())
|
Test that the value is negative when the test report is missing.
|
backend/tests/unittests/metric/product/performance/performance_test_duration_tests.py
|
test_value_when_missing
|
ICTU/quality-report
| 25 |
python
|
def test_value_when_missing(self):
' '
class MissingPerformanceReport():
' Fake a missing performance report. '
@staticmethod
def duration(*args):
' Return a default value. '
return datetime.timedelta.max
project = domain.Project(metric_sources={self.metric_class.metric_source_class: MissingPerformanceReport()})
performance_metric = self.metric_class(subject=FakeSubject(), project=project)
self.assertEqual((- 1), performance_metric.value())
|
def test_value_when_missing(self):
' '
class MissingPerformanceReport():
' Fake a missing performance report. '
@staticmethod
def duration(*args):
' Return a default value. '
return datetime.timedelta.max
project = domain.Project(metric_sources={self.metric_class.metric_source_class: MissingPerformanceReport()})
performance_metric = self.metric_class(subject=FakeSubject(), project=project)
self.assertEqual((- 1), performance_metric.value())<|docstring|>Test that the value is negative when the test report is missing.<|endoftext|>
|
03c4d11a8b85fe76e227cec9f5b8b70b6aa5d5fe3c4209603b99defedd8937e4
|
def test_report(self):
' Test that the report for the metric is correct. '
self.assertEqual('De uitvoeringstijd van de {type} van FakeSubject is 90 minuten.'.format(type=self.test_type), self.__metric.report())
|
Test that the report for the metric is correct.
|
backend/tests/unittests/metric/product/performance/performance_test_duration_tests.py
|
test_report
|
ICTU/quality-report
| 25 |
python
|
def test_report(self):
' '
self.assertEqual('De uitvoeringstijd van de {type} van FakeSubject is 90 minuten.'.format(type=self.test_type), self.__metric.report())
|
def test_report(self):
' '
self.assertEqual('De uitvoeringstijd van de {type} van FakeSubject is 90 minuten.'.format(type=self.test_type), self.__metric.report())<|docstring|>Test that the report for the metric is correct.<|endoftext|>
|
10ce47536ab4b6aac715d1add56f982a9da77123105b47bf94f23424307384bf
|
def test_is_applicable(self):
' Test that the metric is applicable if the metric source can deliver the required information. '
self.assertFalse(self.__metric.is_applicable())
report = self.metric_source_class(report_url='http://report')
project = domain.Project(metric_sources={self.metric_class.metric_source_class: report})
performance_metric = self.metric_class(self.__subject, project)
self.assertTrue(performance_metric.is_applicable())
|
Test that the metric is applicable if the metric source can deliver the required information.
|
backend/tests/unittests/metric/product/performance/performance_test_duration_tests.py
|
test_is_applicable
|
ICTU/quality-report
| 25 |
python
|
def test_is_applicable(self):
' '
self.assertFalse(self.__metric.is_applicable())
report = self.metric_source_class(report_url='http://report')
project = domain.Project(metric_sources={self.metric_class.metric_source_class: report})
performance_metric = self.metric_class(self.__subject, project)
self.assertTrue(performance_metric.is_applicable())
|
def test_is_applicable(self):
' '
self.assertFalse(self.__metric.is_applicable())
report = self.metric_source_class(report_url='http://report')
project = domain.Project(metric_sources={self.metric_class.metric_source_class: report})
performance_metric = self.metric_class(self.__subject, project)
self.assertTrue(performance_metric.is_applicable())<|docstring|>Test that the metric is applicable if the metric source can deliver the required information.<|endoftext|>
|
f586d34d846ad777075b2977361d1ee9480ced6d88cf78831a8f0c054e6acf53
|
@staticmethod
def duration(*args):
' Return a default value. '
return datetime.timedelta.max
|
Return a default value.
|
backend/tests/unittests/metric/product/performance/performance_test_duration_tests.py
|
duration
|
ICTU/quality-report
| 25 |
python
|
@staticmethod
def duration(*args):
' '
return datetime.timedelta.max
|
@staticmethod
def duration(*args):
' '
return datetime.timedelta.max<|docstring|>Return a default value.<|endoftext|>
|
7d0c0720279a9d1b8287cfb75727dda3fec23b31745bdf08bdb5cf031e9319e0
|
def generate_sas_token(uri, policy, key, expiry=None):
'Create a shared access signature token as a string literal.\n :returns: SAS token as string literal.\n :rtype: str\n '
if (not expiry):
expiry = (time.time() + 3600)
encoded_uri = quote_plus(uri)
ttl = int(expiry)
sign_key = ('%s\n%d' % (encoded_uri, ttl))
signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest())
result = {'sr': uri, 'sig': signature, 'se': str(ttl)}
if policy:
result['skn'] = policy
return ('SharedAccessSignature ' + urlencode(result))
|
Create a shared access signature token as a string literal.
:returns: SAS token as string literal.
:rtype: str
|
sdk/eventhub/azure-eventhub/samples/async_samples/iot_hub_connection_string_receive_async.py
|
generate_sas_token
|
cochi2/azure-sdk-for-python
| 2,728 |
python
|
def generate_sas_token(uri, policy, key, expiry=None):
'Create a shared access signature token as a string literal.\n :returns: SAS token as string literal.\n :rtype: str\n '
if (not expiry):
expiry = (time.time() + 3600)
encoded_uri = quote_plus(uri)
ttl = int(expiry)
sign_key = ('%s\n%d' % (encoded_uri, ttl))
signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest())
result = {'sr': uri, 'sig': signature, 'se': str(ttl)}
if policy:
result['skn'] = policy
return ('SharedAccessSignature ' + urlencode(result))
|
def generate_sas_token(uri, policy, key, expiry=None):
'Create a shared access signature token as a string literal.\n :returns: SAS token as string literal.\n :rtype: str\n '
if (not expiry):
expiry = (time.time() + 3600)
encoded_uri = quote_plus(uri)
ttl = int(expiry)
sign_key = ('%s\n%d' % (encoded_uri, ttl))
signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest())
result = {'sr': uri, 'sig': signature, 'se': str(ttl)}
if policy:
result['skn'] = policy
return ('SharedAccessSignature ' + urlencode(result))<|docstring|>Create a shared access signature token as a string literal.
:returns: SAS token as string literal.
:rtype: str<|endoftext|>
|
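A minimal usage sketch for `generate_sas_token` as defined above, assuming the function and the imports it relies on (`time`, `quote_plus`, `b64encode`/`b64decode`, `HMAC`, `sha256`, `urlencode`) are already in scope; the hub URI, policy name, and key are hypothetical placeholders, not real credentials.

```python
import time
from base64 import b64encode

uri = "myhub.azure-devices.net"              # hypothetical IoT hub host
policy = "iothubowner"                       # hypothetical access policy
key = b64encode(b"not-a-real-key").decode()  # hypothetical base64 key

token = generate_sas_token(uri, policy, key, expiry=time.time() + 3600)
print(token)  # SharedAccessSignature sr=...&sig=...&se=...&skn=iothubowner
```
|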
ff1ec5be35ddb0902a92c8b746682f2c317a5c3a830b1a4168b6491f06f6d9da
|
async def receive_events_from_iothub(iothub_conn_str):
'Convert the iot hub connection string to the built-in eventhub connection string\n and receive events from the eventhub\n '
eventhub_conn_str = convert_iothub_to_eventhub_conn_str(iothub_conn_str)
consumer_client = EventHubConsumerClient.from_connection_string(eventhub_conn_str, consumer_group='$Default')
async def on_event_batch(partition_context, events):
print('received {} events from partition {}'.format(len(events), partition_context.partition_id))
async with consumer_client:
(await consumer_client.receive_batch(on_event_batch, starting_position=(- 1)))
|
Convert the iot hub connection string to the built-in eventhub connection string
and receive events from the eventhub
|
sdk/eventhub/azure-eventhub/samples/async_samples/iot_hub_connection_string_receive_async.py
|
receive_events_from_iothub
|
cochi2/azure-sdk-for-python
| 2,728 |
python
|
async def receive_events_from_iothub(iothub_conn_str):
'Convert the iot hub connection string to the built-in eventhub connection string\n and receive events from the eventhub\n '
eventhub_conn_str = convert_iothub_to_eventhub_conn_str(iothub_conn_str)
consumer_client = EventHubConsumerClient.from_connection_string(eventhub_conn_str, consumer_group='$Default')
async def on_event_batch(partition_context, events):
print('received {} events from partition {}'.format(len(events), partition_context.partition_id))
async with consumer_client:
(await consumer_client.receive_batch(on_event_batch, starting_position=(- 1)))
|
async def receive_events_from_iothub(iothub_conn_str):
'Convert the iot hub connection string to the built-in eventhub connection string\n and receive events from the eventhub\n '
eventhub_conn_str = convert_iothub_to_eventhub_conn_str(iothub_conn_str)
consumer_client = EventHubConsumerClient.from_connection_string(eventhub_conn_str, consumer_group='$Default')
async def on_event_batch(partition_context, events):
print('received {} events from partition {}'.format(len(events), partition_context.partition_id))
async with consumer_client:
(await consumer_client.receive_batch(on_event_batch, starting_position=(- 1)))<|docstring|>Convert the iot hub connection string to the built-in eventhub connection string
and receive events from the eventhub<|endoftext|>
|
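A hedged driver sketch for the coroutine above; the connection string is a placeholder, and `convert_iothub_to_eventhub_conn_str` is assumed to be defined elsewhere in the same sample file.

```python
import asyncio

# Hypothetical IoT Hub connection string; substitute a real one.
IOTHUB_CONN_STR = (
    "HostName=myhub.azure-devices.net;"
    "SharedAccessKeyName=iothubowner;"
    "SharedAccessKey=<key>"
)

asyncio.run(receive_events_from_iothub(IOTHUB_CONN_STR))
```
|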
f94caa89dd7fb56659ec20ef26ba2f0b4d692d4589f6f4861d047defa62c866a
|
def __init__(self, weight: int):
' 3 compartment dexmedetomidine Pk model\n\n Units:\n weight(kg)\n\n Reference:\n Hannivoort, L, et al \n Development of an Optimized Pharmacokinetic Model of Dexmedetomidine Using Target-controlled Infusion in Healthy Volunteers\n Anesthesiology 8 2015, Vol.123, 357-367. \n doi:10.1097/ALN.0000000000000740 \n '
self.v1 = (1.78 * (weight / 70))
self.v2 = (30.3 * (weight / 70))
self.v3 = (52.0 * (weight / 70))
self.Q1 = (0.686 * ((weight / 70) ** 0.75))
self.Q2 = (2.98 * ((self.v2 / 30.3) ** 0.75))
self.Q3 = (0.602 * ((self.v3 / 52.0) ** 0.75))
self.from_clearances()
self.keo = 0
self.setup()
|
3 compartment dexmedetomidine Pk model
Units:
weight(kg)
Reference:
Hannivoort, L, et al
Development of an Optimized Pharmacokinetic Model of Dexmedetomidine Using Target-controlled Infusion in Healthy Volunteers
Anesthesiology 8 2015, Vol.123, 357-367.
doi:10.1097/ALN.0000000000000740
|
PyTCI/models/dexmedetomidine.py
|
__init__
|
jcia2192/PyTCI
| 6 |
python
|
def __init__(self, weight: int):
' 3 compartment dexmedetomidine Pk model\n\n Units:\n weight(kg)\n\n Reference:\n Hannivoort, L, et al \n Development of an Optimized Pharmacokinetic Model of Dexmedetomidine Using Target-controlled Infusion in Healthy Volunteers\n Anesthesiology 8 2015, Vol.123, 357-367. \n doi:10.1097/ALN.0000000000000740 \n '
self.v1 = (1.78 * (weight / 70))
self.v2 = (30.3 * (weight / 70))
self.v3 = (52.0 * (weight / 70))
self.Q1 = (0.686 * ((weight / 70) ** 0.75))
self.Q2 = (2.98 * ((self.v2 / 30.3) ** 0.75))
self.Q3 = (0.602 * ((self.v3 / 52.0) ** 0.75))
self.from_clearances()
self.keo = 0
self.setup()
|
def __init__(self, weight: int):
' 3 compartment dexmedetomidine Pk model\n\n Units:\n weight(kg)\n\n Reference:\n Hannivoort, L, et al \n Development of an Optimized Pharmacokinetic Model of Dexmedetomidine Using Target-controlled Infusion in Healthy Volunteers\n Anesthesiology 8 2015, Vol.123, 357-367. \n doi:10.1097/ALN.0000000000000740 \n '
self.v1 = (1.78 * (weight / 70))
self.v2 = (30.3 * (weight / 70))
self.v3 = (52.0 * (weight / 70))
self.Q1 = (0.686 * ((weight / 70) ** 0.75))
self.Q2 = (2.98 * ((self.v2 / 30.3) ** 0.75))
self.Q3 = (0.602 * ((self.v3 / 52.0) ** 0.75))
self.from_clearances()
self.keo = 0
self.setup()<|docstring|>3 compartment dexmedetomidine Pk model
Units:
weight(kg)
Reference:
Hannivoort, L, et al
Development of an Optimized Pharmacokinetic Model of Dexmedetomidine Using Target-controlled Infusion in Healthy Volunteers
Anesthesiology 8 2015, Vol.123, 357-367.
doi:10.1097/ALN.0000000000000740<|endoftext|>
|
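As a quick sanity check of the allometric scaling in the constructor above, a standalone sketch recomputes the same volumes and clearances for a hypothetical 100 kg patient: volumes scale linearly with weight/70, clearances with the 0.75 power.

```python
weight = 100.0
v1 = 1.78 * (weight / 70)             # litres, linear in weight
v2 = 30.3 * (weight / 70)
v3 = 52.0 * (weight / 70)
Q1 = 0.686 * (weight / 70) ** 0.75    # allometric 3/4-power scaling
Q2 = 2.98 * (v2 / 30.3) ** 0.75
Q3 = 0.602 * (v3 / 52.0) ** 0.75
print(f"v1={v1:.3f} L, Q1={Q1:.3f}")  # v1=2.543 L, Q1=0.896
```
|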
10f94d50b0fb80625ca5b5fa7d99ecdd5d741a4113516049f1a9253968f078c4
|
def __init__(self, height: int):
'\n\n Units:\n Height(cm)\n\n Reference:\n Dyck, JB, et al\n Computer-controlled infusion of intravenous dexmedetomidine hydrochloride in adult human volunteers\n Anesthesiology. 1993 May;78(5):821-8.\n PMID:8098191 DOI:10.1097/00000542-199305000-00003 \n '
self.v1 = 7.99
self.v2 = 13.8
self.v3 = 187
self.Q1 = round(((0.00791 * height) - 0.928), 4)
self.Q2 = 2.26
self.Q3 = 1.99
self.from_clearances()
self.keo = 0
self.setup()
|
Units:
Height(cm)
Reference:
Dyck, JB, et al
Computer-controlled infusion of intravenous dexmedetomidine hydrochloride in adult human volunteers
Anesthesiology. 1993 May;78(5):821-8.
PMID:8098191 DOI:10.1097/00000542-199305000-00003
|
PyTCI/models/dexmedetomidine.py
|
__init__
|
jcia2192/PyTCI
| 6 |
python
|
def __init__(self, height: int):
'\n\n Units:\n Height(cm)\n\n Reference:\n Dyck, JB, et al\n Computer-controlled infusion of intravenous dexmedetomidine hydrochloride in adult human volunteers\n Anesthesiology. 1993 May;78(5):821-8.\n PMID:8098191 DOI:10.1097/00000542-199305000-00003 \n '
self.v1 = 7.99
self.v2 = 13.8
self.v3 = 187
self.Q1 = round(((0.00791 * height) - 0.928), 4)
self.Q2 = 2.26
self.Q3 = 1.99
self.from_clearances()
self.keo = 0
self.setup()
|
def __init__(self, height: int):
'\n\n Units:\n Height(cm)\n\n Reference:\n Dyck, JB, et al\n Computer-controlled infusion of intravenous dexmedetomidine hydrochloride in adult human volunteers\n Anesthesiology. 1993 May;78(5):821-8.\n PMID:8098191 DOI:10.1097/00000542-199305000-00003 \n '
self.v1 = 7.99
self.v2 = 13.8
self.v3 = 187
self.Q1 = round(((0.00791 * height) - 0.928), 4)
self.Q2 = 2.26
self.Q3 = 1.99
self.from_clearances()
self.keo = 0
self.setup()<|docstring|>Units:
Height(cm)
Reference:
Dyck, JB, et al
Computer-controlled infusion of intravenous dexmedetomidine hydrochloride in adult human volunteers
Anesthesiology. 1993 May;78(5):821-8.
PMID:8098191 DOI:10.1097/00000542-199305000-00003<|endoftext|>
|
f15a0ec3e923a484fa1d3eb887128cb055beb5cf04a58c76d76d2bc87d819611
|
@property
def computed_token(self):
'\n | The computed token\n\n Type: str\n '
return self.__computed_token
|
| The computed token
Type: str
|
ingenico/direct/sdk/domain/external_token_linked.py
|
computed_token
|
Ingenico/direct-sdk-python2
| 0 |
python
|
@property
def computed_token(self):
'\n | The computed token\n\n Type: str\n '
return self.__computed_token
|
@property
def computed_token(self):
'\n | The computed token\n\n Type: str\n '
return self.__computed_token<|docstring|>| The computed token
Type: str<|endoftext|>
|
a7bf714a8324c4363574101c825dc7739825cbb42c4f3aa897d8d0b42cfae1d8
|
@property
def gts_computed_token(self):
'\n | Deprecated: Use the field ComputedToken instead.\n\n Type: str\n '
return self.__gts_computed_token
|
| Deprecated: Use the field ComputedToken instead.
Type: str
|
ingenico/direct/sdk/domain/external_token_linked.py
|
gts_computed_token
|
Ingenico/direct-sdk-python2
| 0 |
python
|
@property
def gts_computed_token(self):
'\n | Deprecated: Use the field ComputedToken instead.\n\n Type: str\n '
return self.__gts_computed_token
|
@property
def gts_computed_token(self):
'\n | Deprecated: Use the field ComputedToken instead.\n\n Type: str\n '
return self.__gts_computed_token<|docstring|>| Deprecated: Use the field ComputedToken instead.
Type: str<|endoftext|>
|
6529ff16876446095fbb04e51cdcd3d069604e8e45e5a6557a0a0146d1fd62ab
|
@property
def generated_token(self):
'\n | The generated token\n\n Type: str\n '
return self.__generated_token
|
| The generated token
Type: str
|
ingenico/direct/sdk/domain/external_token_linked.py
|
generated_token
|
Ingenico/direct-sdk-python2
| 0 |
python
|
@property
def generated_token(self):
'\n | The generated token\n\n Type: str\n '
return self.__generated_token
|
@property
def generated_token(self):
'\n | The generated token\n\n Type: str\n '
return self.__generated_token<|docstring|>| The generated token
Type: str<|endoftext|>
|
59203a65549ec4b8a39f63975bfc7f3d2c1f40d72366ab2973e3cfb50380981e
|
def setup_class(self):
'\n Prepare a few simulated datasets for a P212121 crystal.\n '
args = {'pdb_path': '/sdf/home/a/apeck/tomoxtal/examples/input/3j7b.pdb', 'resolution': 9.0, 'size': 500}
sf = cctbx_tools.reference_sf(args['pdb_path'], args['resolution'], expand_to_p1=True)
data = cctbx_tools.reformat_sf(sf)
keep_idx = np.unique(np.random.randint(0, high=data.shape[0], size=args['size']))
self.data = data[keep_idx]
(self.sg_symbol, sg_no, self.cell, cs) = cctbx_tools.unit_cell_info(args['pdb_path'])
|
Prepare a few simulated datasets for a P212121 crystal.
|
tests/test_locate_origin.py
|
setup_class
|
apeck12/tomoxtal
| 0 |
python
|
def setup_class(self):
'\n \n '
args = {'pdb_path': '/sdf/home/a/apeck/tomoxtal/examples/input/3j7b.pdb', 'resolution': 9.0, 'size': 500}
sf = cctbx_tools.reference_sf(args['pdb_path'], args['resolution'], expand_to_p1=True)
data = cctbx_tools.reformat_sf(sf)
keep_idx = np.unique(np.random.randint(0, high=data.shape[0], size=args['size']))
self.data = data[keep_idx]
(self.sg_symbol, sg_no, self.cell, cs) = cctbx_tools.unit_cell_info(args['pdb_path'])
|
def setup_class(self):
'\n \n '
args = {'pdb_path': '/sdf/home/a/apeck/tomoxtal/examples/input/3j7b.pdb', 'resolution': 9.0, 'size': 500}
sf = cctbx_tools.reference_sf(args['pdb_path'], args['resolution'], expand_to_p1=True)
data = cctbx_tools.reformat_sf(sf)
keep_idx = np.unique(np.random.randint(0, high=data.shape[0], size=args['size']))
self.data = data[keep_idx]
(self.sg_symbol, sg_no, self.cell, cs) = cctbx_tools.unit_cell_info(args['pdb_path'])<|docstring|>Prepare a few simulated datasets for a P212121 crystal.<|endoftext|>
|
aa8d9bf7dcee125e9886595d783a2bf4325a5337d423f73866110a5ce3caa32e
|
def test_residuals_unshifted(self):
'\n Check that known origins for space group 19 yield a phase residual of zero.\n '
eq_pos = np.array([0, 0.5, 1.0])
eq_pos = np.array(np.meshgrid(eq_pos, eq_pos, eq_pos)).T.reshape((- 1), 3)
fo = LocateXtalOrigin(self.data, self.sg_symbol, self.cell, weighted=False)
for fs in eq_pos:
assert np.isclose(fo.eval_origin(fs), 0, atol=1e-06)
|
Check that known origins for space group 19 yield a phase residual of zero.
|
tests/test_locate_origin.py
|
test_residuals_unshifted
|
apeck12/tomoxtal
| 0 |
python
|
def test_residuals_unshifted(self):
'\n \n '
eq_pos = np.array([0, 0.5, 1.0])
eq_pos = np.array(np.meshgrid(eq_pos, eq_pos, eq_pos)).T.reshape((- 1), 3)
fo = LocateXtalOrigin(self.data, self.sg_symbol, self.cell, weighted=False)
for fs in eq_pos:
assert np.isclose(fo.eval_origin(fs), 0, atol=1e-06)
|
def test_residuals_unshifted(self):
'\n \n '
eq_pos = np.array([0, 0.5, 1.0])
eq_pos = np.array(np.meshgrid(eq_pos, eq_pos, eq_pos)).T.reshape((- 1), 3)
fo = LocateXtalOrigin(self.data, self.sg_symbol, self.cell, weighted=False)
for fs in eq_pos:
assert np.isclose(fo.eval_origin(fs), 0, atol=1e-06)<|docstring|>Check that known origins for space group 19 yield a phase residual of zero.<|endoftext|>
|
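The candidate-origin grid built in the test above is just a Cartesian product of the equivalent fractional positions; recomputed standalone:

```python
import numpy as np

# All combinations of {0, 0.5, 1.0} along three fractional axes:
# 3**3 = 27 candidate origins, matching the meshgrid/reshape in the test.
eq_pos = np.array([0, 0.5, 1.0])
grid = np.array(np.meshgrid(eq_pos, eq_pos, eq_pos)).T.reshape(-1, 3)
print(grid.shape)  # (27, 3)
```
|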
cafb2116d14427dceba3f888a55fa3be09f073c0a8c98733f2e1325578dfa947
|
def test_scan_shifted(self):
'\n Check that correct origin is identified when a random phase shift is applied \n to data. Here use the scan_candidate_origins function with intensity-weighting.\n '
(self.data[:, -1], shifts) = phases_utils.add_random_phase_shift(self.data[:, :3], self.data[:, -1])
fshifts_list = np.random.uniform(size=(4, 3))
fshifts_list = np.vstack((fshifts_list, (1 - shifts)))
fo = LocateXtalOrigin(self.data, self.sg_symbol, self.cell, weighted=True)
(pred_shifts, scores) = fo.scan_candidate_origins(fshifts_list=fshifts_list, n_processes=1)
assert np.allclose((pred_shifts[0] + shifts), 1, atol=1e-06)
assert np.isclose(scores[0], 0, atol=1e-06)
assert (not any([np.isclose(s, 0, atol=1e-06) for s in scores[1:]]))
|
Check that correct origin is identified when a random phase shift is applied
to data. Here use the scan_candidate_origins function with intensity-weighting.
|
tests/test_locate_origin.py
|
test_scan_shifted
|
apeck12/tomoxtal
| 0 |
python
|
def test_scan_shifted(self):
'\n Check that correct origin is identified when a random phase shift is applied \n to data. Here use the scan_candidate_origins function with intensity-weighting.\n '
(self.data[:, -1], shifts) = phases_utils.add_random_phase_shift(self.data[:, :3], self.data[:, -1])
fshifts_list = np.random.uniform(size=(4, 3))
fshifts_list = np.vstack((fshifts_list, (1 - shifts)))
fo = LocateXtalOrigin(self.data, self.sg_symbol, self.cell, weighted=True)
(pred_shifts, scores) = fo.scan_candidate_origins(fshifts_list=fshifts_list, n_processes=1)
assert np.allclose((pred_shifts[0] + shifts), 1, atol=1e-06)
assert np.isclose(scores[0], 0, atol=1e-06)
assert (not any([np.isclose(s, 0, atol=1e-06) for s in scores[1:]]))
|
def test_scan_shifted(self):
'\n Check that correct origin is identified when a random phase shift is applied \n to data. Here use the scan_candidate_origins function with intensity-weighting.\n '
(self.data[:, -1], shifts) = phases_utils.add_random_phase_shift(self.data[:, :3], self.data[:, -1])
fshifts_list = np.random.uniform(size=(4, 3))
fshifts_list = np.vstack((fshifts_list, (1 - shifts)))
fo = LocateXtalOrigin(self.data, self.sg_symbol, self.cell, weighted=True)
(pred_shifts, scores) = fo.scan_candidate_origins(fshifts_list=fshifts_list, n_processes=1)
assert np.allclose((pred_shifts[0] + shifts), 1, atol=1e-06)
assert np.isclose(scores[0], 0, atol=1e-06)
assert (not any([np.isclose(s, 0, atol=1e-06) for s in scores[1:]]))<|docstring|>Check that correct origin is identified when a random phase shift is applied
to data. Here use the scan_candidate_origins function with intensity-weighting.<|endoftext|>
|
3ed0617cba3751b2ca1bb4108a5c12824b6ffa04da407fb4a1b7cbdbd734997a
|
def __init__(self, *args, **kwargs):
'\n Creates your memory instance\n Give it your caches configuration as a dictionary\n '
self.adapters = dict()
self.caches = dict()
self._cache_instances = dict()
self.config = dict(adapters=dict(), caches=dict())
if (args or kwargs):
self.init(*args, **kwargs)
|
Creates your memory instance
Give it your caches configuration as a dictionary
|
shiftmemory/memory.py
|
__init__
|
projectshift/shift-memory
| 0 |
python
|
def __init__(self, *args, **kwargs):
'\n Creates your memory instance\n Give it your caches configuration as a dictionary\n '
self.adapters = dict()
self.caches = dict()
self._cache_instances = dict()
self.config = dict(adapters=dict(), caches=dict())
if (args or kwargs):
self.init(*args, **kwargs)
|
def __init__(self, *args, **kwargs):
'\n Creates your memory instance\n Give it your caches configuration as a dictionary\n '
self.adapters = dict()
self.caches = dict()
self._cache_instances = dict()
self.config = dict(adapters=dict(), caches=dict())
if (args or kwargs):
self.init(*args, **kwargs)<|docstring|>Creates your memory instance
Give it your caches configuration as a dictionary<|endoftext|>
|
f64721441404072cb1d175a49b28c89f19cbf578a3f44fbf8ecdefe6fe70ab3c
|
def init(self, adapters=None, caches=None):
'\n Delayed initializer\n This can be called by __init__ or later.\n\n :param adapters: dict, adapters configuration\n :param caches: dict, caches configuration\n :return:\n '
if adapters:
self.adapters = adapters
if caches:
self.caches = caches
|
Delayed initializer
This can be called by __init__ or later.
:param adapters: dict, adapters configuration
:param caches: dict, caches configuration
:return:
|
shiftmemory/memory.py
|
init
|
projectshift/shift-memory
| 0 |
python
|
def init(self, adapters=None, caches=None):
'\n Delayed initializer\n This can be called by __init__ or later.\n\n :param adapters: dict, adapters configuration\n :param caches: dict, caches configuration\n :return:\n '
if adapters:
self.adapters = adapters
if caches:
self.caches = caches
|
def init(self, adapters=None, caches=None):
'\n Delayed initializer\n This can be called by __init__ or later.\n\n :param adapters: dict, adapters configuration\n :param caches: dict, caches configuration\n :return:\n '
if adapters:
self.adapters = adapters
if caches:
self.caches = caches<|docstring|>Delayed initializer
This can be called by __init__ or later.
:param adapters: dict, adapters configuration
:param caches: dict, caches configuration
:return:<|endoftext|>
|
04c03e7f6b710cd955db3f1bcf4a080848dfa434c09b1c7f48037e0a98b91fa7
|
def get_cache(self, cache_name):
'\n Get cache\n Checks if a cache was already created and returns that. Otherwise\n attempts to create a cache from configuration and preserve\n for future use\n '
if (cache_name in self._cache_instances):
return self._cache_instances[cache_name]
if (cache_name not in self.caches):
error = 'Cache [{}] is not configured'.format(cache_name)
raise exceptions.ConfigurationException(error)
cache_config = self.caches[cache_name]
adapter_name = cache_config['adapter']
if (adapter_name not in self.adapters):
error = 'Adapter [{}] is not configured'.format(adapter_name)
raise exceptions.ConfigurationException(error)
adapter_config = self.adapters[adapter_name]
adapter_type = adapter_config['type']
adapter_class = (adapter_type[0].upper() + adapter_type[1:])
if (not hasattr(adapter, adapter_class)):
error = 'Adapter class [{}] is missing'.format(adapter_class)
raise exceptions.AdapterMissingException(error)
cls = getattr(adapter, adapter_class)
adapter_params = dict(namespace=cache_name, ttl=cache_config['ttl'])
if ('config' in adapter_config):
adapter_params['config'] = adapter_config['config']
cache = cls(**adapter_params)
self._cache_instances[cache_name] = cache
return self._cache_instances[cache_name]
|
Get cache
Checks if a cache was already created and returns that. Otherwise
attempts to create a cache from configuration and preserve
for future use
|
shiftmemory/memory.py
|
get_cache
|
projectshift/shift-memory
| 0 |
python
|
def get_cache(self, cache_name):
'\n Get cache\n Checks if a cache was already created and returns that. Otherwise\n attempts to create a cache from configuration and preserve\n for future use\n '
if (cache_name in self._cache_instances):
return self._cache_instances[cache_name]
if (cache_name not in self.caches):
error = 'Cache [{}] is not configured'.format(cache_name)
raise exceptions.ConfigurationException(error)
cache_config = self.caches[cache_name]
adapter_name = cache_config['adapter']
if (adapter_name not in self.adapters):
error = 'Adapter [{}] is not configured'.format(adapter_name)
raise exceptions.ConfigurationException(error)
adapter_config = self.adapters[adapter_name]
adapter_type = adapter_config['type']
adapter_class = (adapter_type[0].upper() + adapter_type[1:])
if (not hasattr(adapter, adapter_class)):
error = 'Adapter class [{}] is missing'.format(adapter_class)
raise exceptions.AdapterMissingException(error)
cls = getattr(adapter, adapter_class)
adapter_params = dict(namespace=cache_name, ttl=cache_config['ttl'])
if ('config' in adapter_config):
adapter_params['config'] = adapter_config['config']
cache = cls(**adapter_params)
self._cache_instances[cache_name] = cache
return self._cache_instances[cache_name]
|
def get_cache(self, cache_name):
'\n Get cache\n Checks if a cache was already created and returns that. Otherwise\n attempts to create a cache from configuration and preserve\n for future use\n '
if (cache_name in self._cache_instances):
return self._cache_instances[cache_name]
if (cache_name not in self.caches):
error = 'Cache [{}] is not configured'.format(cache_name)
raise exceptions.ConfigurationException(error)
cache_config = self.caches[cache_name]
adapter_name = cache_config['adapter']
if (adapter_name not in self.adapters):
error = 'Adapter [{}] is not configured'.format(adapter_name)
raise exceptions.ConfigurationException(error)
adapter_config = self.adapters[adapter_name]
adapter_type = adapter_config['type']
adapter_class = (adapter_type[0].upper() + adapter_type[1:])
if (not hasattr(adapter, adapter_class)):
error = 'Adapter class [{}] is missing'.format(adapter_class)
raise exceptions.AdapterMissingException(error)
cls = getattr(adapter, adapter_class)
adapter_params = dict(namespace=cache_name, ttl=cache_config['ttl'])
if ('config' in adapter_config):
adapter_params['config'] = adapter_config['config']
cache = cls(**adapter_params)
self._cache_instances[cache_name] = cache
return self._cache_instances[cache_name]<|docstring|>Get cache
Checks if a cache was already created and returns that. Otherwise
attempts to create a cache from configuration and preserve
for future use<|endoftext|>
|
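A hedged configuration sketch for the class above (assumed here to be named `Memory`); the `redis` adapter type, host, and port are hypothetical examples, and `get_cache` requires a matching class (here `Redis`) to exist on the `adapter` module.

```python
# Hypothetical configuration: one adapter, one cache bound to it.
config = dict(
    adapters=dict(
        default=dict(type='redis', config=dict(host='localhost', port=6379)),
    ),
    caches=dict(
        sessions=dict(adapter='default', ttl=3600),  # one-hour TTL
    ),
)

memory = Memory(**config)             # __init__ forwards to init()
cache = memory.get_cache('sessions')  # created once, then reused
```
|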
39dc8215da51cae420a62a047ed98e1c0d30364cf7cdaf3be2c5038bac20aa87
|
def drop_cache(self, name):
'\n Drop cache\n Deletes all items in cache by name\n '
cache = self.get_cache(name)
if (not hasattr(cache, 'delete_all')):
cls = type(cache)
error = 'Adapter [{}] can not drop cache by namespace'.format(cls)
raise exceptions.AdapterFeatureMissingException(error)
return cache.delete_all()
|
Drop cache
Deletes all items in cache by name
|
shiftmemory/memory.py
|
drop_cache
|
projectshift/shift-memory
| 0 |
python
|
def drop_cache(self, name):
'\n Drop cache\n Deletes all items in cache by name\n '
cache = self.get_cache(name)
if (not hasattr(cache, 'delete_all')):
cls = type(cache)
error = 'Adapter [{}] can not drop cache by namespace'.format(cls)
raise exceptions.AdapterFeatureMissingException(error)
return cache.delete_all()
|
def drop_cache(self, name):
'\n Drop cache\n Deletes all items in cache by name\n '
cache = self.get_cache(name)
if (not hasattr(cache, 'delete_all')):
cls = type(cache)
error = 'Adapter [{}] can not drop cache by namespace'.format(cls)
raise exceptions.AdapterFeatureMissingException(error)
return cache.delete_all()<|docstring|>Drop cache
Deletes all items in cache by name<|endoftext|>
|
0223192a771b09d8faefe1db0ef92fe317a8c961c17a0730d893951f4ac7df97
|
def drop_all_caches(self):
'\n Drop all caches\n Goes through every configured cache and drops all items. Will\n skip certain caches if they do not support drop all feature\n '
for name in self.caches.keys():
cache = self.get_cache(name)
if hasattr(cache, 'delete_all'):
cache.delete_all(name)
return True
|
Drop all caches
Goes through every configured cache and drops all items. Will
skip certain caches if they do not support drop all feature
|
shiftmemory/memory.py
|
drop_all_caches
|
projectshift/shift-memory
| 0 |
python
|
def drop_all_caches(self):
'\n Drop all caches\n Goes through every configured cache and drops all items. Will\n skip certain caches if they do not support drop all feature\n '
for name in self.caches.keys():
cache = self.get_cache(name)
if hasattr(cache, 'delete_all'):
cache.delete_all(name)
return True
|
def drop_all_caches(self):
'\n Drop all caches\n Goes through every configured cache and drops all items. Will\n skip certain caches if they do not support drop all feature\n '
for name in self.caches.keys():
cache = self.get_cache(name)
if hasattr(cache, 'delete_all'):
cache.delete_all(name)
return True<|docstring|>Drop all caches
Goes through every configured cache and drops all items. Will
skip certain caches if they do not support drop all feature<|endoftext|>
|
cb654066fd288749e53aa78bd7cdf370aa7e1fb1b419c9842775f6029286338e
|
def optimize_cache(self, name):
'\n Optimize cache\n Gets cache by name and performs optimization if supported\n '
cache = self.get_cache(name)
if (not hasattr(cache, 'optimize')):
cls = type(cache)
error = 'Adapter [{}] can not optimize itself'.format(cls)
raise exceptions.AdapterFeatureMissingException(error)
return cache.optimize()
|
Optimize cache
Gets cache by name and performs optimization if supported
|
shiftmemory/memory.py
|
optimize_cache
|
projectshift/shift-memory
| 0 |
python
|
def optimize_cache(self, name):
'\n Optimize cache\n Gets cache by name and performs optimization if supported\n '
cache = self.get_cache(name)
if (not hasattr(cache, 'optimize')):
cls = type(cache)
error = 'Adapter [{}] can not optimize itself'.format(cls)
raise exceptions.AdapterFeatureMissingException(error)
return cache.optimize()
|
def optimize_cache(self, name):
'\n Optimize cache\n Gets cache by name and performs optimization if supported\n '
cache = self.get_cache(name)
if (not hasattr(cache, 'optimize')):
cls = type(cache)
error = 'Adapter [{}] can not optimize itself'.format(cls)
raise exceptions.AdapterFeatureMissingException(error)
return cache.optimize()<|docstring|>Optimize cache
Gets cache by name and performs optimization if supported<|endoftext|>
|
08fc786a06ac2438ca0f9c9407a5af9af82df7247a05756be9b5893d550b8078
|
def optimize_all_caches(self):
'\n Optimize all caches\n Goes through every configured cache and optimizes. Will\n skip certain caches if they do not support optimization feature\n '
for name in self.caches.keys():
cache = self.get_cache(name)
if hasattr(cache, 'optimize'):
cache.optimize(name)
return True
|
Optimize all caches
Goes through every configured cache and optimizes. Will
skip certain caches if they do not support optimization feature
|
shiftmemory/memory.py
|
optimize_all_caches
|
projectshift/shift-memory
| 0 |
python
|
def optimize_all_caches(self):
'\n Optimize all caches\n Goes through every configured cache and optimizes. Will\n skip certain caches if they do not support optimization feature\n '
for name in self.caches.keys():
cache = self.get_cache(name)
if hasattr(cache, 'optimize'):
cache.optimize(name)
return True
|
def optimize_all_caches(self):
'\n Optimize all caches\n Goes through every configured cache and optimizes. Will\n skip certain caches if they do not support optimization feature\n '
for name in self.caches.keys():
cache = self.get_cache(name)
if hasattr(cache, 'optimize'):
cache.optimize(name)
return True<|docstring|>Optimize all caches
Goes through every configured cache and optimizes. Will
skip certain caches if they do not support optimization feature<|endoftext|>
|
57b4bd2795167fdc2109826c0e52ec0f254ab3e60d4526f46439aa944492341f
|
@staticmethod
def warp_boxes(boxes, mat):
' Warp point by proj matrix.\n Args:\n boxes (np.ndarray): in shape of [N, 4]\n mat (np.ndarray): in shape of [2, 3], projection matrix\n '
pts = boxes.reshape((- 1), 2)
pts_ext = np.concatenate((pts, np.ones((pts.shape[0], 1))), axis=1)
pts_ext = mat.dot(pts_ext.T).T
pts_ext = pts_ext.reshape((- 1), 4)
boxes = np.concatenate([np.min(pts_ext[:, 0::2], axis=1, keepdims=True), np.min(pts_ext[:, 1::2], axis=1, keepdims=True), np.max(pts_ext[:, 0::2], axis=1, keepdims=True), np.max(pts_ext[:, 1::2], axis=1, keepdims=True)], axis=1)
return boxes
|
Warp point by proj matrix.
Args:
boxes (np.ndarray): in shape of [N, 4]
mat (np.ndarray): in shape of [2, 3], projection matrix
|
pyvrl/models/pretraining/rot3d/rot3d_transforms.py
|
warp_boxes
|
LiZhenLiangLee/CtP-repro
| 37 |
python
|
@staticmethod
def warp_boxes(boxes, mat):
' Warp point by proj matrix.\n Args:\n boxes (np.ndarray): in shape of [N, 4]\n mat (np.ndarray): in shape of [2, 3], projection matrix\n '
pts = boxes.reshape((- 1), 2)
pts_ext = np.concatenate((pts, np.ones((pts.shape[0], 1))), axis=1)
pts_ext = mat.dot(pts_ext.T).T
pts_ext = pts_ext.reshape((- 1), 4)
boxes = np.concatenate([np.min(pts_ext[:, 0::2], axis=1, keepdims=True), np.min(pts_ext[:, 1::2], axis=1, keepdims=True), np.max(pts_ext[:, 0::2], axis=1, keepdims=True), np.max(pts_ext[:, 1::2], axis=1, keepdims=True)], axis=1)
return boxes
|
@staticmethod
def warp_boxes(boxes, mat):
' Warp point by proj matrix.\n Args:\n boxes (np.ndarray): in shape of [N, 4]\n mat (np.ndarray): in shape of [2, 3], projection matrix\n '
pts = boxes.reshape((- 1), 2)
pts_ext = np.concatenate((pts, np.ones((pts.shape[0], 1))), axis=1)
pts_ext = mat.dot(pts_ext.T).T
pts_ext = pts_ext.reshape((- 1), 4)
boxes = np.concatenate([np.min(pts_ext[:, 0::2], axis=1, keepdims=True), np.min(pts_ext[:, 1::2], axis=1, keepdims=True), np.max(pts_ext[:, 0::2], axis=1, keepdims=True), np.max(pts_ext[:, 1::2], axis=1, keepdims=True)], axis=1)
return boxes<|docstring|>Warp point by proj matrix.
Args:
boxes (np.ndarray): in shape of [N, 4]
mat (np.ndarray): in shape of [2, 3], projection matrix<|endoftext|>
|
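A usage sketch for `warp_boxes` above; the owning class name `Transforms` is hypothetical, and the 2x3 matrix comes from OpenCV's `getRotationMatrix2D`. Note the method warps only the two stored corners of each box and then takes their axis-aligned envelope.

```python
import cv2
import numpy as np

boxes = np.array([[10.0, 20.0, 50.0, 80.0]])      # one box: [x1, y1, x2, y2]
mat = cv2.getRotationMatrix2D((64, 64), 90, 1.0)  # 2x3 affine about (64, 64)
warped = Transforms.warp_boxes(boxes, mat)        # hypothetical class name
print(warped)  # axis-aligned box enclosing the two warped corners
```
|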
1517865131b7a22b9416f7dbaa3ae5a29a906913be3fcd1eb6d840ab4eaabfb4
|
@staticmethod
def rotate(image, rot_flag):
"\n Rotates an OpenCV 2 / NumPy image about it's centre by the given angle\n "
if (rot_flag == 0):
return image
if isinstance(image, np.ndarray):
(h, w) = image.shape[0:2]
elif isinstance(image, list):
(h, w) = image[0].shape[0:2]
else:
raise NotImplementedError
center = ((w / 2), (h / 2))
if (rot_flag == 1):
M = cv2.getRotationMatrix2D(center, 90, 1.0)
elif (rot_flag == 2):
M = cv2.getRotationMatrix2D(center, 180, 1.0)
elif (rot_flag == 3):
M = cv2.getRotationMatrix2D(center, 270, 1.0)
else:
raise NotImplementedError
if isinstance(image, list):
image = [cv2.warpAffine(i, M, (h, w), borderMode=cv2.BORDER_REPLICATE) for i in image]
else:
image = cv2.warpAffine(image, M, (h, w), borderMode=cv2.BORDER_REPLICATE)
return image
|
Rotates an OpenCV 2 / NumPy image about its centre by the given angle
|
pyvrl/models/pretraining/rot3d/rot3d_transforms.py
|
rotate
|
LiZhenLiangLee/CtP-repro
| 37 |
python
|
@staticmethod
def rotate(image, rot_flag):
"\n \n "
if (rot_flag == 0):
return image
if isinstance(image, np.ndarray):
(h, w) = image.shape[0:2]
elif isinstance(image, list):
(h, w) = image[0].shape[0:2]
else:
raise NotImplementedError
center = ((w / 2), (h / 2))
if (rot_flag == 1):
M = cv2.getRotationMatrix2D(center, 90, 1.0)
elif (rot_flag == 2):
M = cv2.getRotationMatrix2D(center, 180, 1.0)
elif (rot_flag == 3):
M = cv2.getRotationMatrix2D(center, 270, 1.0)
else:
raise NotImplementedError
if isinstance(image, list):
image = [cv2.warpAffine(i, M, (h, w), borderMode=cv2.BORDER_REPLICATE) for i in image]
else:
image = cv2.warpAffine(image, M, (h, w), borderMode=cv2.BORDER_REPLICATE)
return image
|
@staticmethod
def rotate(image, rot_flag):
"\n \n "
if (rot_flag == 0):
return image
if isinstance(image, np.ndarray):
(h, w) = image.shape[0:2]
elif isinstance(image, list):
(h, w) = image[0].shape[0:2]
else:
raise NotImplementedError
center = ((w / 2), (h / 2))
if (rot_flag == 1):
M = cv2.getRotationMatrix2D(center, 90, 1.0)
elif (rot_flag == 2):
M = cv2.getRotationMatrix2D(center, 180, 1.0)
elif (rot_flag == 3):
M = cv2.getRotationMatrix2D(center, 270, 1.0)
else:
raise NotImplementedError
if isinstance(image, list):
image = [cv2.warpAffine(i, M, (h, w), borderMode=cv2.BORDER_REPLICATE) for i in image]
else:
image = cv2.warpAffine(image, M, (h, w), borderMode=cv2.BORDER_REPLICATE)
    return image<|docstring|>Rotates an OpenCV 2 / NumPy image about its centre by the given angle<|endoftext|>
|
34ca3b520fa4c509ea5c0869425fe32ee1cf500bab874b721cb223c9ba170b01
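A hedged usage sketch of rotate: rot_flag selects a multiple of 90 degrees via cv2.getRotationMatrix2D about the frame centre. The input frame is hypothetical; note that OpenCV's dsize argument is (width, height), so the (h, w) passed in the source only matches exactly for square frames.

import numpy as np
import cv2

img = np.zeros((32, 32, 3), dtype=np.uint8)            # hypothetical square frame
h, w = img.shape[0:2]
M = cv2.getRotationMatrix2D((w / 2, h / 2), 90, 1.0)   # rot_flag == 1
out = cv2.warpAffine(img, M, (h, w), borderMode=cv2.BORDER_REPLICATE)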
|
def get_input(self, idx):
'\n Returns x for a given idx.\n '
img_filename = os.path.join(self.data_dir, self._input_array[idx])
x = Image.open(img_filename).convert('RGB')
return x
|
Returns x for a given idx.
|
wilds/datasets/waterbirds_dataset.py
|
get_input
|
sequoia-n9/wilds
| 355 |
python
|
def get_input(self, idx):
'\n \n '
img_filename = os.path.join(self.data_dir, self._input_array[idx])
x = Image.open(img_filename).convert('RGB')
return x
|
def get_input(self, idx):
'\n \n '
img_filename = os.path.join(self.data_dir, self._input_array[idx])
x = Image.open(img_filename).convert('RGB')
return x<|docstring|>Returns x for a given idx.<|endoftext|>
|
c054ebf531e1f34466435df355829bf6d85506642778e3daf5fa24f75a656bf1
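A minimal call sketch for get_input; the directory and filename are hypothetical stand-ins for self.data_dir and self._input_array[idx].

import os
from PIL import Image

img_filename = os.path.join('data/waterbirds', 'img_0001.jpg')  # hypothetical path
x = Image.open(img_filename).convert('RGB')   # force 3-channel RGB regardless of source mode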
|
def eval(self, y_pred, y_true, metadata, prediction_fn=None):
'\n Computes all evaluation metrics.\n Args:\n - y_pred (Tensor): Predictions from a model. By default, they are predicted labels (LongTensor).\n But they can also be other model outputs such that prediction_fn(y_pred)\n are predicted labels.\n - y_true (LongTensor): Ground-truth labels\n - metadata (Tensor): Metadata\n - prediction_fn (function): A function that turns y_pred into predicted labels\n Output:\n - results (dictionary): Dictionary of evaluation metrics\n - results_str (str): String summarizing the evaluation metrics\n '
metric = Accuracy(prediction_fn=prediction_fn)
(results, results_str) = self.standard_group_eval(metric, self._eval_grouper, y_pred, y_true, metadata)
results['adj_acc_avg'] = (((((results['acc_y:landbird_background:land'] * 3498) + (results['acc_y:landbird_background:water'] * 184)) + (results['acc_y:waterbird_background:land'] * 56)) + (results['acc_y:waterbird_background:water'] * 1057)) / (((3498 + 184) + 56) + 1057))
del results['acc_avg']
results_str = (f'''Adjusted average acc: {results['adj_acc_avg']:.3f}
''' + '\n'.join(results_str.split('\n')[1:]))
return (results, results_str)
|
Computes all evaluation metrics.
Args:
- y_pred (Tensor): Predictions from a model. By default, they are predicted labels (LongTensor).
But they can also be other model outputs such that prediction_fn(y_pred)
are predicted labels.
- y_true (LongTensor): Ground-truth labels
- metadata (Tensor): Metadata
- prediction_fn (function): A function that turns y_pred into predicted labels
Output:
- results (dictionary): Dictionary of evaluation metrics
- results_str (str): String summarizing the evaluation metrics
|
wilds/datasets/waterbirds_dataset.py
|
eval
|
sequoia-n9/wilds
| 355 |
python
|
def eval(self, y_pred, y_true, metadata, prediction_fn=None):
'\n Computes all evaluation metrics.\n Args:\n - y_pred (Tensor): Predictions from a model. By default, they are predicted labels (LongTensor).\n But they can also be other model outputs such that prediction_fn(y_pred)\n are predicted labels.\n - y_true (LongTensor): Ground-truth labels\n - metadata (Tensor): Metadata\n - prediction_fn (function): A function that turns y_pred into predicted labels\n Output:\n - results (dictionary): Dictionary of evaluation metrics\n - results_str (str): String summarizing the evaluation metrics\n '
metric = Accuracy(prediction_fn=prediction_fn)
(results, results_str) = self.standard_group_eval(metric, self._eval_grouper, y_pred, y_true, metadata)
results['adj_acc_avg'] = (((((results['acc_y:landbird_background:land'] * 3498) + (results['acc_y:landbird_background:water'] * 184)) + (results['acc_y:waterbird_background:land'] * 56)) + (results['acc_y:waterbird_background:water'] * 1057)) / (((3498 + 184) + 56) + 1057))
del results['acc_avg']
    results_str = (f'''Adjusted average acc: {results['adj_acc_avg']:.3f}
''' + '\n'.join(results_str.split('\n')[1:]))
return (results, results_str)
|
def eval(self, y_pred, y_true, metadata, prediction_fn=None):
'\n Computes all evaluation metrics.\n Args:\n - y_pred (Tensor): Predictions from a model. By default, they are predicted labels (LongTensor).\n But they can also be other model outputs such that prediction_fn(y_pred)\n are predicted labels.\n - y_true (LongTensor): Ground-truth labels\n - metadata (Tensor): Metadata\n - prediction_fn (function): A function that turns y_pred into predicted labels\n Output:\n - results (dictionary): Dictionary of evaluation metrics\n - results_str (str): String summarizing the evaluation metrics\n '
metric = Accuracy(prediction_fn=prediction_fn)
(results, results_str) = self.standard_group_eval(metric, self._eval_grouper, y_pred, y_true, metadata)
results['adj_acc_avg'] = (((((results['acc_y:landbird_background:land'] * 3498) + (results['acc_y:landbird_background:water'] * 184)) + (results['acc_y:waterbird_background:land'] * 56)) + (results['acc_y:waterbird_background:water'] * 1057)) / (((3498 + 184) + 56) + 1057))
del results['acc_avg']
    results_str = (f'''Adjusted average acc: {results['adj_acc_avg']:.3f}
''' + '\n'.join(results_str.split('\n')[1:]))
return (results, results_str)<|docstring|>Computes all evaluation metrics.
Args:
- y_pred (Tensor): Predictions from a model. By default, they are predicted labels (LongTensor).
But they can also be other model outputs such that prediction_fn(y_pred)
are predicted labels.
- y_true (LongTensor): Ground-truth labels
- metadata (Tensor): Metadata
- prediction_fn (function): A function that turns y_pred into predicted labels
Output:
- results (dictionary): Dictionary of evaluation metrics
- results_str (str): String summarizing the evaluation metrics<|endoftext|>
|
e5b4bf4d64449c9acf097b8223ce7a618520cc6be2e3e0e86df026e4e6fbdd73
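The adjusted average accuracy in eval is a weighted mean of the four (label, background) group accuracies, weighted by the hard-coded Waterbirds test-set group sizes (3498, 184, 56, 1057). A sketch of the same arithmetic with made-up group accuracies:

counts = {'landbird_land': 3498, 'landbird_water': 184,
          'waterbird_land': 56, 'waterbird_water': 1057}
accs = {'landbird_land': 0.99, 'landbird_water': 0.90,
        'waterbird_land': 0.75, 'waterbird_water': 0.95}   # hypothetical accuracies
adj_acc_avg = sum(accs[g] * counts[g] for g in counts) / sum(counts.values())
print(f'Adjusted average acc: {adj_acc_avg:.3f}')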
|
def __init__(self, config_path, host, port):
'Creates a CommandRunner that connects to the specified |host| and |port|\n using the ssh config at the specified |config_path|.\n\n Args:\n config_path: Full path to SSH configuration.\n host: The hostname or IP address of the remote host.\n port: The port to connect to.'
self._config_path = config_path
self._host = host
self._port = port
|
Creates a CommandRunner that connects to the specified |host| and |port|
using the ssh config at the specified |config_path|.
Args:
config_path: Full path to SSH configuration.
host: The hostname or IP address of the remote host.
port: The port to connect to.
|
telemetry/telemetry/core/fuchsia_interface.py
|
__init__
|
alekzonder/catapult
| 0 |
python
|
def __init__(self, config_path, host, port):
'Creates a CommandRunner that connects to the specified |host| and |port|\n using the ssh config at the specified |config_path|.\n\n Args:\n config_path: Full path to SSH configuration.\n host: The hostname or IP address of the remote host.\n port: The port to connect to.'
self._config_path = config_path
self._host = host
self._port = port
|
def __init__(self, config_path, host, port):
'Creates a CommandRunner that connects to the specified |host| and |port|\n using the ssh config at the specified |config_path|.\n\n Args:\n config_path: Full path to SSH configuration.\n host: The hostname or IP address of the remote host.\n port: The port to connect to.'
self._config_path = config_path
self._host = host
self._port = port<|docstring|>Creates a CommandRunner that connects to the specified |host| and |port|
using the ssh config at the specified |config_path|.
Args:
config_path: Full path to SSH configuration.
host: The hostname or IP address of the remote host.
port: The port to connect to.<|endoftext|>
|
abc90f5beed49feb477475a29701e0c1c4994bb0da664a39923e9fed8a71652b
|
def RunCommandPiped(self, command=None, ssh_args=None, **kwargs):
"Executes an SSH command on the remote host and returns a process object\n with access to the command's stdio streams. Does not block.\n\n Args:\n command: A list of strings containing the command and its arguments.\n ssh_args: Arguments that will be passed to SSH.\n kwargs: A dictionary of parameters to be passed to subprocess.Popen().\n The parameters can be used to override stdin and stdout, for\n example.\n\n Returns:\n A Popen object for the command."
if (not command):
command = []
if (not ssh_args):
ssh_args = []
ssh_command = (((self._GetSshCommandLinePrefix() + ssh_args) + ['--']) + command)
logging.debug(' '.join(ssh_command))
return subprocess.Popen(ssh_command, **kwargs)
|
Executes an SSH command on the remote host and returns a process object
with access to the command's stdio streams. Does not block.
Args:
command: A list of strings containing the command and its arguments.
ssh_args: Arguments that will be passed to SSH.
kwargs: A dictionary of parameters to be passed to subprocess.Popen().
The parameters can be used to override stdin and stdout, for
example.
Returns:
A Popen object for the command.
|
telemetry/telemetry/core/fuchsia_interface.py
|
RunCommandPiped
|
alekzonder/catapult
| 0 |
python
|
def RunCommandPiped(self, command=None, ssh_args=None, **kwargs):
"Executes an SSH command on the remote host and returns a process object\n with access to the command's stdio streams. Does not block.\n\n Args:\n command: A list of strings containing the command and its arguments.\n ssh_args: Arguments that will be passed to SSH.\n kwargs: A dictionary of parameters to be passed to subprocess.Popen().\n The parameters can be used to override stdin and stdout, for\n example.\n\n Returns:\n A Popen object for the command."
if (not command):
command = []
if (not ssh_args):
ssh_args = []
ssh_command = (((self._GetSshCommandLinePrefix() + ssh_args) + ['--']) + command)
logging.debug(' '.join(ssh_command))
return subprocess.Popen(ssh_command, **kwargs)
|
def RunCommandPiped(self, command=None, ssh_args=None, **kwargs):
"Executes an SSH command on the remote host and returns a process object\n with access to the command's stdio streams. Does not block.\n\n Args:\n command: A list of strings containing the command and its arguments.\n ssh_args: Arguments that will be passed to SSH.\n kwargs: A dictionary of parameters to be passed to subprocess.Popen().\n The parameters can be used to override stdin and stdout, for\n example.\n\n Returns:\n A Popen object for the command."
if (not command):
command = []
if (not ssh_args):
ssh_args = []
ssh_command = (((self._GetSshCommandLinePrefix() + ssh_args) + ['--']) + command)
logging.debug(' '.join(ssh_command))
return subprocess.Popen(ssh_command, **kwargs)<|docstring|>Executes an SSH command on the remote host and returns a process object
with access to the command's stdio streams. Does not block.
Args:
command: A list of strings containing the command and its arguments.
ssh_args: Arguments that will be passed to SSH.
kwargs: A dictionary of parameters to be passed to subprocess.Popen().
The parameters can be used to override stdin and stdout, for
example.
Returns:
A Popen object for the command.<|endoftext|>
|
0c4223a57cb1e544cb72e6846a4876da59b0500733b7b5426dc4961a17f5955c
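A hedged end-to-end sketch combining the two CommandRunner methods above; the config path, host, and command are hypothetical. RunCommandPiped returns immediately, so communicate() is what actually blocks for output.

import subprocess

runner = CommandRunner('/path/to/ssh_config', '192.168.42.17', 22)   # hypothetical target
proc = runner.RunCommandPiped(['ls', '/tmp'], stdout=subprocess.PIPE)
out, _ = proc.communicate()   # blocks here, not in RunCommandPiped
print(out.decode())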
|
@classmethod
def _instantiate(cls, request, *args, **kwargs):
' Instantiates the form. Allows customization in subclasses. '
return cls(*args, **kwargs)
|
Instantiates the form. Allows customization in subclasses.
|
horizon/horizon/forms.py
|
_instantiate
|
usc-isi/horizon-old
| 3 |
python
|
@classmethod
def _instantiate(cls, request, *args, **kwargs):
' '
return cls(*args, **kwargs)
|
@classmethod
def _instantiate(cls, request, *args, **kwargs):
' '
return cls(*args, **kwargs)<|docstring|>Instantiates the form. Allows customization in subclasses.<|endoftext|>
|
dcae572e414028e5d05daf67ef71e8c3f9163584ae5ccc95d589bc19ed996e08
|
@classmethod
def maybe_handle(cls, request, *args, **kwargs):
'\n If the form is valid, :meth:`.maybe_handle` calls a\n ``handle(request, data)`` method on its subclass to\n determine what action to take.\n\n Any exceptions raised during processing are captured and\n converted to messages.\n '
if (cls.__name__ != request.POST.get('method')):
return (cls._instantiate(request, *args, **kwargs), None)
if request.FILES:
form = cls._instantiate(request, request.POST, request.FILES, *args, **kwargs)
else:
form = cls._instantiate(request, request.POST, *args, **kwargs)
if (not form.is_valid()):
return (form, None)
data = form.clean()
try:
return (form, form.handle(request, data))
except Exception as e:
LOG.exception(('Error while handling form "%s".' % cls.__name__))
if issubclass(e.__class__, exceptions.NotAuthorized):
raise
messages.error(request, (_('%s') % e.message))
return (form, None)
|
If the form is valid, :meth:`.maybe_handle` calls a
``handle(request, data)`` method on its subclass to
determine what action to take.
Any exceptions raised during processing are captured and
converted to messages.
|
horizon/horizon/forms.py
|
maybe_handle
|
usc-isi/horizon-old
| 3 |
python
|
@classmethod
def maybe_handle(cls, request, *args, **kwargs):
'\n If the form is valid, :meth:`.maybe_handle` calls a\n ``handle(request, data)`` method on its subclass to\n determine what action to take.\n\n Any exceptions raised during processing are captured and\n converted to messages.\n '
if (cls.__name__ != request.POST.get('method')):
return (cls._instantiate(request, *args, **kwargs), None)
if request.FILES:
form = cls._instantiate(request, request.POST, request.FILES, *args, **kwargs)
else:
form = cls._instantiate(request, request.POST, *args, **kwargs)
if (not form.is_valid()):
return (form, None)
data = form.clean()
try:
return (form, form.handle(request, data))
except Exception as e:
LOG.exception(('Error while handling form "%s".' % cls.__name__))
if issubclass(e.__class__, exceptions.NotAuthorized):
raise
messages.error(request, (_('%s') % e.message))
return (form, None)
|
@classmethod
def maybe_handle(cls, request, *args, **kwargs):
'\n If the form is valid, :meth:`.maybe_handle` calls a\n ``handle(request, data)`` method on its subclass to\n determine what action to take.\n\n Any exceptions raised during processing are captured and\n converted to messages.\n '
if (cls.__name__ != request.POST.get('method')):
return (cls._instantiate(request, *args, **kwargs), None)
if request.FILES:
form = cls._instantiate(request, request.POST, request.FILES, *args, **kwargs)
else:
form = cls._instantiate(request, request.POST, *args, **kwargs)
if (not form.is_valid()):
return (form, None)
data = form.clean()
try:
return (form, form.handle(request, data))
except Exception as e:
LOG.exception(('Error while handling form "%s".' % cls.__name__))
if issubclass(e.__class__, exceptions.NotAuthorized):
raise
messages.error(request, (_('%s') % e.message))
return (form, None)<|docstring|>If the form is valid, :meth:`.maybe_handle` calls a
``handle(request, data)`` method on its subclass to
determine what action to take.
Any exceptions raised during processing are captured and
converted to messages.<|endoftext|>
|
f4bd7f9beaaf69f2c3675993d19137aa64a45ec880f6bf0b48d635b8d33d0a0a
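maybe_handle implements a dispatch protocol: it only processes the POST when request.POST['method'] equals the form's class name, validates, then calls the subclass's handle(request, data). A hedged sketch of a conforming subclass (BaseForm is a hypothetical stand-in for whatever horizon base class defines maybe_handle; the field and action are also hypothetical):

from django import forms

class RenameInstance(BaseForm):                  # hypothetical base exposing maybe_handle
    name = forms.CharField(max_length=64)

    def handle(self, request, data):
        # Reached only when the POSTed 'method' is 'RenameInstance' and the form validates;
        # any exception raised here is caught by maybe_handle and surfaced as a message.
        rename_instance(request, data['name'])   # hypothetical action
        return True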
|
@beartype
def __init__(self, pdf_target: Union[(Callable, list[Callable])]=None, log_pdf_target: Union[(Callable, list[Callable])]=None, args_target: tuple=None, burn_length: Annotated[(int, Is[(lambda x: (x >= 0))])]=0, jump: int=1, dimension: int=None, seed: list=None, save_log_pdf: bool=False, concatenate_chains: bool=True, initial_covariance: float=None, covariance_update_rate: float=100, scale_parameter: float=None, delayed_rejection_scale: float=(1 / 5), save_covariance: bool=False, random_state: RandomStateType=None, n_chains: int=None, nsamples: int=None, nsamples_per_chain: int=None):
    '\n Delayed Rejection Adaptive Metropolis algorithm :cite:`Dram1` :cite:`MCMC2`\n\n In this algorithm, the proposal density is Gaussian and its covariance C is being updated from samples as\n :code:`C = scale_parameter * C_sample` where :code:`C_sample` is the sample covariance. Also, the delayed\n rejection scheme is applied, i.e., if a candidate is not accepted, another one is generated from the proposal with\n covariance :code:`delayed_rejection_scale ** 2 * C`.\n\n :param pdf_target: Target density function from which to draw random samples. Either `pdf_target` or\n `log_pdf_target` must be provided (the latter should be preferred for better numerical stability).\n\n If `pdf_target` is a callable, it refers to the joint pdf to sample from, it must take at least one input\n **x**, which are the point(s) at which to evaluate the pdf. Within :class:`.MCMC` the `pdf_target` is evaluated\n as:\n :code:`p(x) = pdf_target(x, \\*args_target)`\n\n where **x** is a :class:`numpy.ndarray` of shape :code:`(nsamples, dimension)` and `args_target` are additional\n positional arguments that are provided to :class:`.MCMC` via its `args_target` input.\n\n If `pdf_target` is a list of callables, it refers to independent marginals to sample from. The marginal in\n dimension :code:`j` is evaluated as: :code:`p_j(xj) = pdf_target[j](xj, \\*args_target[j])` where **x** is a\n :class:`numpy.ndarray` of shape :code:`(nsamples, dimension)`\n :param log_pdf_target: Logarithm of the target density function from which to draw random samples.\n Either pdf_target or log_pdf_target must be provided (the latter should be preferred for better numerical\n stability).\n\n Same comments as for input `pdf_target`.\n :param args_target: Positional arguments of the pdf / log-pdf target function. See `pdf_target`\n :param burn_length: Length of burn-in - i.e., number of samples at the beginning of the chain to discard (note:\n no thinning during burn-in). Default is :math:`0`, no burn-in.\n :param jump: Thinning parameter, used to reduce correlation between samples. Setting :code:`jump=n` corresponds\n to skipping n-1 states between accepted states of the chain. Default is :math:`1` (no thinning).\n :param dimension: A scalar value defining the dimension of target density function. Either `dimension` and\n `n_chains` or `seed` must be provided.\n :param seed: Seed of the Markov chain(s), shape :code:`(n_chains, dimension)`.\n Default: :code:`zeros(n_chains x dimension)`.\n\n If `seed` is not provided, both `n_chains` and `dimension` must be provided.\n :param save_log_pdf: Boolean that indicates whether to save log-pdf values along with the samples.\n Default: :any:`False`\n :param concatenate_chains: Boolean that indicates whether to concatenate the chains after a run, i.e., samples\n are stored as a :class:`numpy.ndarray` of shape :code:`(nsamples * n_chains, dimension)` if :any:`True`,\n :code:`(nsamples, n_chains, dimension)` if :any:`False`.\n Default: :any:`True`\n :param n_chains: The number of Markov chains to generate. Either `dimension` and `n_chains` or `seed` must be\n provided.\n :param initial_covariance: Initial covariance for the gaussian proposal distribution. Default: I(dim)\n :param covariance_update_rate: Rate at which covariance is being updated, i.e., every k0 iterations.\n Default: :math:`100`\n :param scale_parameter: Scale parameter for covariance updating. Default: :math:`2.38^2/dim`\n :param delayed_rejection_scale: Scale parameter for delayed rejection. Default: :math:`1/5`\n :param save_covariance: If :any:`True`, updated covariance is saved in attribute :py:attr:`adaptive_covariance`.\n Default: :any:`False`\n :param random_state: Random seed used to initialize the pseudo-random number generator. Default is\n :any:`None`.\n :param nsamples: Number of samples to generate.\n :param nsamples_per_chain: Number of samples to generate per chain.\n '
self.nsamples = nsamples
self.nsamples_per_chain = nsamples_per_chain
super().__init__(pdf_target=pdf_target, log_pdf_target=log_pdf_target, args_target=args_target, dimension=dimension, seed=seed, burn_length=burn_length, jump=jump, save_log_pdf=save_log_pdf, concatenate_chains=concatenate_chains, random_state=random_state, n_chains=n_chains)
self.logger = logging.getLogger(__name__)
self.initial_covariance = initial_covariance
if (self.initial_covariance is None):
self.initial_covariance = np.eye(self.dimension)
    elif (not (isinstance(self.initial_covariance, np.ndarray) and (self.initial_covariance.shape == (self.dimension, self.dimension)))):
raise TypeError('UQpy: Input initial_covariance should be a 2D ndarray of shape (dimension, dimension)')
self.covariance_update_rate = covariance_update_rate
self.scale_parameter = scale_parameter
if (self.scale_parameter is None):
self.scale_parameter = ((2.38 ** 2) / self.dimension)
self.delayed_rejection_scale = delayed_rejection_scale
self.save_covariance = save_covariance
for (key, typ) in zip(['covariance_update_rate', 'scale_parameter', 'delayed_rejection_scale', 'save_covariance'], [int, float, float, bool]):
if (not isinstance(getattr(self, key), typ)):
raise TypeError(((('Input ' + key) + ' must be of type ') + typ.__name__))
self.current_covariance = np.tile(self.initial_covariance[(np.newaxis, ...)], (self.n_chains, 1, 1))
self.sample_mean = np.zeros((self.n_chains, self.dimension))
self.sample_covariance = np.zeros((self.n_chains, self.dimension, self.dimension))
if self.save_covariance:
self.adaptive_covariance = [self.current_covariance.copy()]
self.logger.info((('\nUQpy: Initialization of ' + self.__class__.__name__) + ' algorithm complete.'))
if ((nsamples is not None) or (nsamples_per_chain is not None)):
self.run(nsamples=nsamples, nsamples_per_chain=nsamples_per_chain)
|
Delayed Rejection Adaptive Metropolis algorithm :cite:`Dram1` :cite:`MCMC2`
In this algorithm, the proposal density is Gaussian and its covariance C is being updated from samples as
:code:`C = scale_parameter * C_sample` where :code:`C_sample` is the sample covariance. Also, the delayed
rejection scheme is applied, i.e., if a candidate is not accepted, another one is generated from the proposal with
covariance :code:`delayed_rejection_scale ** 2 * C`.
:param pdf_target: Target density function from which to draw random samples. Either `pdf_target` or
`log_pdf_target` must be provided (the latter should be preferred for better numerical stability).
If `pdf_target` is a callable, it refers to the joint pdf to sample from, it must take at least one input
**x**, which are the point(s) at which to evaluate the pdf. Within :class:`.MCMC` the `pdf_target` is evaluated
as:
:code:`p(x) = pdf_target(x, \*args_target)`
where **x** is a :class:`numpy.ndarray` of shape :code:`(nsamples, dimension)` and `args_target` are additional
positional arguments that are provided to :class:`.MCMC` via its `args_target` input.
If `pdf_target` is a list of callables, it refers to independent marginals to sample from. The marginal in
dimension :code:`j` is evaluated as: :code:`p_j(xj) = pdf_target[j](xj, \*args_target[j])` where **x** is a
:class:`numpy.ndarray` of shape :code:`(nsamples, dimension)`
:param log_pdf_target: Logarithm of the target density function from which to draw random samples.
Either pdf_target or log_pdf_target must be provided (the latter should be preferred for better numerical
stability).
Same comments as for input `pdf_target`.
:param args_target: Positional arguments of the pdf / log-pdf target function. See `pdf_target`
:param burn_length: Length of burn-in - i.e., number of samples at the beginning of the chain to discard (note:
no thinning during burn-in). Default is :math:`0`, no burn-in.
:param jump: Thinning parameter, used to reduce correlation between samples. Setting :code:`jump=n` corresponds
to skipping n-1 states between accepted states of the chain. Default is :math:`1` (no thinning).
:param dimension: A scalar value defining the dimension of target density function. Either `dimension` and
`n_chains` or `seed` must be provided.
:param seed: Seed of the Markov chain(s), shape :code:`(n_chains, dimension)`.
Default: :code:`zeros(n_chains x dimension)`.
If `seed` is not provided, both `n_chains` and `dimension` must be provided.
:param save_log_pdf: Boolean that indicates whether to save log-pdf values along with the samples.
Default: :any:`False`
:param concatenate_chains: Boolean that indicates whether to concatenate the chains after a run, i.e., samples
are stored as a :class:`numpy.ndarray` of shape :code:`(nsamples * n_chains, dimension)` if :any:`True`,
:code:`(nsamples, n_chains, dimension)` if :any:`False`.
Default: :any:`True`
:param n_chains: The number of Markov chains to generate. Either `dimension` and `n_chains` or `seed` must be
provided.
:param initial_covariance: Initial covariance for the gaussian proposal distribution. Default: I(dim)
:param covariance_update_rate: Rate at which covariance is being updated, i.e., every k0 iterations.
Default: :math:`100`
:param scale_parameter: Scale parameter for covariance updating. Default: :math:`2.38^2/dim`
:param delayed_rejection_scale: Scale parameter for delayed rejection. Default: :math:`1/5`
:param save_covariance: If :any:`True`, updated covariance is saved in attribute :py:attr:`adaptive_covariance`.
Default: :any:`False`
:param random_state: Random seed used to initialize the pseudo-random number generator. Default is
:any:`None`.
:param nsamples: Number of samples to generate.
:param nsamples_per_chain: Number of samples to generate per chain.
|
src/UQpy/sampling/mcmc/DRAM.py
|
__init__
|
SURGroup/UncertaintyQuantification
| 0 |
python
|
@beartype
def __init__(self, pdf_target: Union[(Callable, list[Callable])]=None, log_pdf_target: Union[(Callable, list[Callable])]=None, args_target: tuple=None, burn_length: Annotated[(int, Is[(lambda x: (x >= 0))])]=0, jump: int=1, dimension: int=None, seed: list=None, save_log_pdf: bool=False, concatenate_chains: bool=True, initial_covariance: float=None, covariance_update_rate: float=100, scale_parameter: float=None, delayed_rejection_scale: float=(1 / 5), save_covariance: bool=False, random_state: RandomStateType=None, n_chains: int=None, nsamples: int=None, nsamples_per_chain: int=None):
    '\n Delayed Rejection Adaptive Metropolis algorithm :cite:`Dram1` :cite:`MCMC2`\n\n In this algorithm, the proposal density is Gaussian and its covariance C is being updated from samples as\n :code:`C = scale_parameter * C_sample` where :code:`C_sample` is the sample covariance. Also, the delayed\n rejection scheme is applied, i.e., if a candidate is not accepted, another one is generated from the proposal with\n covariance :code:`delayed_rejection_scale ** 2 * C`.\n\n :param pdf_target: Target density function from which to draw random samples. Either `pdf_target` or\n `log_pdf_target` must be provided (the latter should be preferred for better numerical stability).\n\n If `pdf_target` is a callable, it refers to the joint pdf to sample from, it must take at least one input\n **x**, which are the point(s) at which to evaluate the pdf. Within :class:`.MCMC` the `pdf_target` is evaluated\n as:\n :code:`p(x) = pdf_target(x, \\*args_target)`\n\n where **x** is a :class:`numpy.ndarray` of shape :code:`(nsamples, dimension)` and `args_target` are additional\n positional arguments that are provided to :class:`.MCMC` via its `args_target` input.\n\n If `pdf_target` is a list of callables, it refers to independent marginals to sample from. The marginal in\n dimension :code:`j` is evaluated as: :code:`p_j(xj) = pdf_target[j](xj, \\*args_target[j])` where **x** is a\n :class:`numpy.ndarray` of shape :code:`(nsamples, dimension)`\n :param log_pdf_target: Logarithm of the target density function from which to draw random samples.\n Either pdf_target or log_pdf_target must be provided (the latter should be preferred for better numerical\n stability).\n\n Same comments as for input `pdf_target`.\n :param args_target: Positional arguments of the pdf / log-pdf target function. See `pdf_target`\n :param burn_length: Length of burn-in - i.e., number of samples at the beginning of the chain to discard (note:\n no thinning during burn-in). Default is :math:`0`, no burn-in.\n :param jump: Thinning parameter, used to reduce correlation between samples. Setting :code:`jump=n` corresponds\n to skipping n-1 states between accepted states of the chain. Default is :math:`1` (no thinning).\n :param dimension: A scalar value defining the dimension of target density function. Either `dimension` and\n `n_chains` or `seed` must be provided.\n :param seed: Seed of the Markov chain(s), shape :code:`(n_chains, dimension)`.\n Default: :code:`zeros(n_chains x dimension)`.\n\n If `seed` is not provided, both `n_chains` and `dimension` must be provided.\n :param save_log_pdf: Boolean that indicates whether to save log-pdf values along with the samples.\n Default: :any:`False`\n :param concatenate_chains: Boolean that indicates whether to concatenate the chains after a run, i.e., samples\n are stored as a :class:`numpy.ndarray` of shape :code:`(nsamples * n_chains, dimension)` if :any:`True`,\n :code:`(nsamples, n_chains, dimension)` if :any:`False`.\n Default: :any:`True`\n :param n_chains: The number of Markov chains to generate. Either `dimension` and `n_chains` or `seed` must be\n provided.\n :param initial_covariance: Initial covariance for the gaussian proposal distribution. Default: I(dim)\n :param covariance_update_rate: Rate at which covariance is being updated, i.e., every k0 iterations.\n Default: :math:`100`\n :param scale_parameter: Scale parameter for covariance updating. Default: :math:`2.38^2/dim`\n :param delayed_rejection_scale: Scale parameter for delayed rejection. Default: :math:`1/5`\n :param save_covariance: If :any:`True`, updated covariance is saved in attribute :py:attr:`adaptive_covariance`.\n Default: :any:`False`\n :param random_state: Random seed used to initialize the pseudo-random number generator. Default is\n :any:`None`.\n :param nsamples: Number of samples to generate.\n :param nsamples_per_chain: Number of samples to generate per chain.\n '
self.nsamples = nsamples
self.nsamples_per_chain = nsamples_per_chain
super().__init__(pdf_target=pdf_target, log_pdf_target=log_pdf_target, args_target=args_target, dimension=dimension, seed=seed, burn_length=burn_length, jump=jump, save_log_pdf=save_log_pdf, concatenate_chains=concatenate_chains, random_state=random_state, n_chains=n_chains)
self.logger = logging.getLogger(__name__)
self.initial_covariance = initial_covariance
if (self.initial_covariance is None):
self.initial_covariance = np.eye(self.dimension)
    elif (not (isinstance(self.initial_covariance, np.ndarray) and (self.initial_covariance.shape == (self.dimension, self.dimension)))):
raise TypeError('UQpy: Input initial_covariance should be a 2D ndarray of shape (dimension, dimension)')
self.covariance_update_rate = covariance_update_rate
self.scale_parameter = scale_parameter
if (self.scale_parameter is None):
self.scale_parameter = ((2.38 ** 2) / self.dimension)
self.delayed_rejection_scale = delayed_rejection_scale
self.save_covariance = save_covariance
for (key, typ) in zip(['covariance_update_rate', 'scale_parameter', 'delayed_rejection_scale', 'save_covariance'], [int, float, float, bool]):
if (not isinstance(getattr(self, key), typ)):
raise TypeError(((('Input ' + key) + ' must be of type ') + typ.__name__))
self.current_covariance = np.tile(self.initial_covariance[(np.newaxis, ...)], (self.n_chains, 1, 1))
self.sample_mean = np.zeros((self.n_chains, self.dimension))
self.sample_covariance = np.zeros((self.n_chains, self.dimension, self.dimension))
if self.save_covariance:
self.adaptive_covariance = [self.current_covariance.copy()]
self.logger.info((('\nUQpy: Initialization of ' + self.__class__.__name__) + ' algorithm complete.'))
if ((nsamples is not None) or (nsamples_per_chain is not None)):
self.run(nsamples=nsamples, nsamples_per_chain=nsamples_per_chain)
|
@beartype
def __init__(self, pdf_target: Union[(Callable, list[Callable])]=None, log_pdf_target: Union[(Callable, list[Callable])]=None, args_target: tuple=None, burn_length: Annotated[(int, Is[(lambda x: (x >= 0))])]=0, jump: int=1, dimension: int=None, seed: list=None, save_log_pdf: bool=False, concatenate_chains: bool=True, initial_covariance: float=None, covariance_update_rate: float=100, scale_parameter: float=None, delayed_rejection_scale: float=(1 / 5), save_covariance: bool=False, random_state: RandomStateType=None, n_chains: int=None, nsamples: int=None, nsamples_per_chain: int=None):
    '\n Delayed Rejection Adaptive Metropolis algorithm :cite:`Dram1` :cite:`MCMC2`\n\n In this algorithm, the proposal density is Gaussian and its covariance C is being updated from samples as\n :code:`C = scale_parameter * C_sample` where :code:`C_sample` is the sample covariance. Also, the delayed\n rejection scheme is applied, i.e., if a candidate is not accepted, another one is generated from the proposal with\n covariance :code:`delayed_rejection_scale ** 2 * C`.\n\n :param pdf_target: Target density function from which to draw random samples. Either `pdf_target` or\n `log_pdf_target` must be provided (the latter should be preferred for better numerical stability).\n\n If `pdf_target` is a callable, it refers to the joint pdf to sample from, it must take at least one input\n **x**, which are the point(s) at which to evaluate the pdf. Within :class:`.MCMC` the `pdf_target` is evaluated\n as:\n :code:`p(x) = pdf_target(x, \\*args_target)`\n\n where **x** is a :class:`numpy.ndarray` of shape :code:`(nsamples, dimension)` and `args_target` are additional\n positional arguments that are provided to :class:`.MCMC` via its `args_target` input.\n\n If `pdf_target` is a list of callables, it refers to independent marginals to sample from. The marginal in\n dimension :code:`j` is evaluated as: :code:`p_j(xj) = pdf_target[j](xj, \\*args_target[j])` where **x** is a\n :class:`numpy.ndarray` of shape :code:`(nsamples, dimension)`\n :param log_pdf_target: Logarithm of the target density function from which to draw random samples.\n Either pdf_target or log_pdf_target must be provided (the latter should be preferred for better numerical\n stability).\n\n Same comments as for input `pdf_target`.\n :param args_target: Positional arguments of the pdf / log-pdf target function. See `pdf_target`\n :param burn_length: Length of burn-in - i.e., number of samples at the beginning of the chain to discard (note:\n no thinning during burn-in). Default is :math:`0`, no burn-in.\n :param jump: Thinning parameter, used to reduce correlation between samples. Setting :code:`jump=n` corresponds\n to skipping n-1 states between accepted states of the chain. Default is :math:`1` (no thinning).\n :param dimension: A scalar value defining the dimension of target density function. Either `dimension` and\n `n_chains` or `seed` must be provided.\n :param seed: Seed of the Markov chain(s), shape :code:`(n_chains, dimension)`.\n Default: :code:`zeros(n_chains x dimension)`.\n\n If `seed` is not provided, both `n_chains` and `dimension` must be provided.\n :param save_log_pdf: Boolean that indicates whether to save log-pdf values along with the samples.\n Default: :any:`False`\n :param concatenate_chains: Boolean that indicates whether to concatenate the chains after a run, i.e., samples\n are stored as a :class:`numpy.ndarray` of shape :code:`(nsamples * n_chains, dimension)` if :any:`True`,\n :code:`(nsamples, n_chains, dimension)` if :any:`False`.\n Default: :any:`True`\n :param n_chains: The number of Markov chains to generate. Either `dimension` and `n_chains` or `seed` must be\n provided.\n :param initial_covariance: Initial covariance for the gaussian proposal distribution. Default: I(dim)\n :param covariance_update_rate: Rate at which covariance is being updated, i.e., every k0 iterations.\n Default: :math:`100`\n :param scale_parameter: Scale parameter for covariance updating. Default: :math:`2.38^2/dim`\n :param delayed_rejection_scale: Scale parameter for delayed rejection. Default: :math:`1/5`\n :param save_covariance: If :any:`True`, updated covariance is saved in attribute :py:attr:`adaptive_covariance`.\n Default: :any:`False`\n :param random_state: Random seed used to initialize the pseudo-random number generator. Default is\n :any:`None`.\n :param nsamples: Number of samples to generate.\n :param nsamples_per_chain: Number of samples to generate per chain.\n '
self.nsamples = nsamples
self.nsamples_per_chain = nsamples_per_chain
super().__init__(pdf_target=pdf_target, log_pdf_target=log_pdf_target, args_target=args_target, dimension=dimension, seed=seed, burn_length=burn_length, jump=jump, save_log_pdf=save_log_pdf, concatenate_chains=concatenate_chains, random_state=random_state, n_chains=n_chains)
self.logger = logging.getLogger(__name__)
self.initial_covariance = initial_covariance
if (self.initial_covariance is None):
self.initial_covariance = np.eye(self.dimension)
    elif (not (isinstance(self.initial_covariance, np.ndarray) and (self.initial_covariance.shape == (self.dimension, self.dimension)))):
raise TypeError('UQpy: Input initial_covariance should be a 2D ndarray of shape (dimension, dimension)')
self.covariance_update_rate = covariance_update_rate
self.scale_parameter = scale_parameter
if (self.scale_parameter is None):
self.scale_parameter = ((2.38 ** 2) / self.dimension)
self.delayed_rejection_scale = delayed_rejection_scale
self.save_covariance = save_covariance
for (key, typ) in zip(['covariance_update_rate', 'scale_parameter', 'delayed_rejection_scale', 'save_covariance'], [int, float, float, bool]):
if (not isinstance(getattr(self, key), typ)):
raise TypeError(((('Input ' + key) + ' must be of type ') + typ.__name__))
self.current_covariance = np.tile(self.initial_covariance[(np.newaxis, ...)], (self.n_chains, 1, 1))
self.sample_mean = np.zeros((self.n_chains, self.dimension))
self.sample_covariance = np.zeros((self.n_chains, self.dimension, self.dimension))
if self.save_covariance:
self.adaptive_covariance = [self.current_covariance.copy()]
self.logger.info((('\nUQpy: Initialization of ' + self.__class__.__name__) + ' algorithm complete.'))
if ((nsamples is not None) or (nsamples_per_chain is not None)):
self.run(nsamples=nsamples, nsamples_per_chain=nsamples_per_chain)<|docstring|>Delayed Rejection Adaptive Metropolis algorithm :cite:`Dram1` :cite:`MCMC2`
In this algorithm, the proposal density is Gaussian and its covariance C is being updated from samples as
:code:`C = scale_parameter * C_sample` where :code:`C_sample` is the sample covariance. Also, the delayed
rejection scheme is applied, i.e., if a candidate is not accepted, another one is generated from the proposal with
covariance :code:`delayed_rejection_scale ** 2 * C`.
:param pdf_target: Target density function from which to draw random samples. Either `pdf_target` or
`log_pdf_target` must be provided (the latter should be preferred for better numerical stability).
If `pdf_target` is a callable, it refers to the joint pdf to sample from, it must take at least one input
**x**, which are the point(s) at which to evaluate the pdf. Within :class:`.MCMC` the `pdf_target` is evaluated
as:
:code:`p(x) = pdf_target(x, \*args_target)`
where **x** is a :class:`numpy.ndarray` of shape :code:`(nsamples, dimension)` and `args_target` are additional
positional arguments that are provided to :class:`.MCMC` via its `args_target` input.
If `pdf_target` is a list of callables, it refers to independent marginals to sample from. The marginal in
dimension :code:`j` is evaluated as: :code:`p_j(xj) = pdf_target[j](xj, \*args_target[j])` where **x** is a
:class:`numpy.ndarray` of shape :code:`(nsamples, dimension)`
:param log_pdf_target: Logarithm of the target density function from which to draw random samples.
Either pdf_target or log_pdf_target must be provided (the latter should be preferred for better numerical
stability).
Same comments as for input `pdf_target`.
:param args_target: Positional arguments of the pdf / log-pdf target function. See `pdf_target`
:param burn_length: Length of burn-in - i.e., number of samples at the beginning of the chain to discard (note:
no thinning during burn-in). Default is :math:`0`, no burn-in.
:param jump: Thinning parameter, used to reduce correlation between samples. Setting :code:`jump=n` corresponds
to skipping n-1 states between accepted states of the chain. Default is :math:`1` (no thinning).
:param dimension: A scalar value defining the dimension of target density function. Either `dimension` and
`n_chains` or `seed` must be provided.
:param seed: Seed of the Markov chain(s), shape :code:`(n_chains, dimension)`.
Default: :code:`zeros(n_chains x dimension)`.
If `seed` is not provided, both `n_chains` and `dimension` must be provided.
:param save_log_pdf: Boolean that indicates whether to save log-pdf values along with the samples.
Default: :any:`False`
:param concatenate_chains: Boolean that indicates whether to concatenate the chains after a run, i.e., samples
are stored as a :class:`numpy.ndarray` of shape :code:`(nsamples * n_chains, dimension)` if :any:`True`,
:code:`(nsamples, n_chains, dimension)` if :any:`False`.
Default: :any:`True`
:param n_chains: The number of Markov chains to generate. Either `dimension` and `n_chains` or `seed` must be
provided.
:param initial_covariance: Initial covariance for the gaussian proposal distribution. Default: I(dim)
:param covariance_update_rate: Rate at which covariance is being updated, i.e., every k0 iterations.
Default: :math:`100`
:param scale_parameter: Scale parameter for covariance updating. Default: :math:`2.38^2/dim`
:param delayed_rejection_scale: Scale parameter for delayed rejection. Default: :math:`1/5`
:param save_covariance: If :any:`True`, updated covariance is saved in attribute :py:attr:`adaptive_covariance`.
Default: :any:`False`
:param random_state: Random seed used to initialize the pseudo-random number generator. Default is
:any:`None`.
:param nsamples: Number of samples to generate.
:param nsamples_per_chain: Number of samples to generate per chain.<|endoftext|>
|
6e9a89bd820d7f733394b5e4fa8e71f8e15beb9007ca9371cd249b3fc5cb8256
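A minimal hedged sketch instantiating DRAM per the signature above, targeting a 2-d standard Gaussian; the import path and the .samples attribute (inherited from the MCMC base class) are assumptions.

import numpy as np
from UQpy.sampling.mcmc import DRAM   # assumed import path

def log_pdf(x):
    # log-density of a standard 2-d Gaussian, up to an additive constant;
    # x has shape (nsamples, dimension) per the docstring above
    return -0.5 * np.sum(x ** 2, axis=1)

sampler = DRAM(log_pdf_target=log_pdf, dimension=2, n_chains=4,
               burn_length=500, jump=2, nsamples=1000)
print(sampler.samples.shape)   # (1000, 2) with concatenate_chains=True (assumed attribute)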
|
def run_one_iteration(self, current_state: np.ndarray, current_log_pdf: np.ndarray):
'\n Run one iteration of the mcmc chain for DRAM algorithm, starting at current state -\n see :class:`MCMC` class.\n '
from UQpy.distributions import MultivariateNormal
multivariate_normal = MultivariateNormal(mean=np.zeros(self.dimension), cov=1.0)
candidate = np.zeros_like(current_state)
for (nc, current_cov) in enumerate(self.current_covariance):
multivariate_normal.update_parameters(cov=current_cov)
        candidate[nc, :] = (current_state[nc, :] + multivariate_normal.rvs(nsamples=1, random_state=self.random_state).reshape((self.dimension,)))
log_p_candidate = self.evaluate_log_target(candidate)
accept_vec = np.zeros((self.n_chains,))
delayed_chains_indices = []
unif_rvs = Uniform().rvs(nsamples=self.n_chains, random_state=self.random_state).reshape(((- 1),))
for (nc, (cand, log_p_cand, log_p_curr)) in enumerate(zip(candidate, log_p_candidate, current_log_pdf)):
accept = (np.log(unif_rvs[nc]) < (log_p_cand - log_p_curr))
if accept:
            current_state[nc, :] = cand
current_log_pdf[nc] = log_p_cand
accept_vec[nc] += 1.0
else:
delayed_chains_indices.append(nc)
if delayed_chains_indices:
current_states_delayed = np.zeros((len(delayed_chains_indices), self.dimension))
candidates_delayed = np.zeros((len(delayed_chains_indices), self.dimension))
candidate2 = np.zeros((len(delayed_chains_indices), self.dimension))
for (i, nc) in enumerate(delayed_chains_indices):
            current_states_delayed[i, :] = current_state[nc, :]
            candidates_delayed[i, :] = candidate[nc, :]
multivariate_normal.update_parameters(cov=((self.delayed_rejection_scale ** 2) * self.current_covariance[nc]))
            candidate2[i, :] = (current_states_delayed[i, :] + multivariate_normal.rvs(nsamples=1, random_state=self.random_state).reshape((self.dimension,)))
log_p_candidate2 = self.evaluate_log_target(candidate2)
log_prop_cand_cand2 = multivariate_normal.log_pdf((candidates_delayed - candidate2))
log_prop_cand_curr = multivariate_normal.log_pdf((candidates_delayed - current_states_delayed))
unif_rvs = Uniform().rvs(nsamples=len(delayed_chains_indices), random_state=self.random_state).reshape(((- 1),))
for (nc, cand2, log_p_cand2, j1, j2, u_rv) in zip(delayed_chains_indices, candidate2, log_p_candidate2, log_prop_cand_cand2, log_prop_cand_curr, unif_rvs):
alpha_cand_cand2 = min(1.0, np.exp((log_p_candidate[nc] - log_p_cand2)))
alpha_cand_curr = min(1.0, np.exp((log_p_candidate[nc] - current_log_pdf[nc])))
log_alpha2 = (((((log_p_cand2 - current_log_pdf[nc]) + j1) - j2) + np.log(max((1.0 - alpha_cand_cand2), (10 ** (- 320))))) - np.log(max((1.0 - alpha_cand_curr), (10 ** (- 320)))))
accept = (np.log(u_rv) < min(0.0, log_alpha2))
if accept:
                current_state[nc, :] = cand2
current_log_pdf[nc] = log_p_cand2
accept_vec[nc] += 1.0
for nc in range(self.n_chains):
        (self.sample_mean[nc], self.sample_covariance[nc]) = self._recursive_update_mean_covariance(nsamples=self.iterations_number, new_sample=current_state[nc, :], previous_mean=self.sample_mean[nc], previous_covariance=self.sample_covariance[nc])
if ((self.iterations_number > 1) and ((self.iterations_number % self.covariance_update_rate) == 0)):
self.current_covariance[nc] = ((self.scale_parameter * self.sample_covariance[nc]) + (1e-06 * np.eye(self.dimension)))
if (self.save_covariance and ((self.iterations_number > 1) and ((self.iterations_number % self.covariance_update_rate) == 0))):
self.adaptive_covariance.append(self.current_covariance.copy())
self._update_acceptance_rate(accept_vec)
return (current_state, current_log_pdf)
|
Run one iteration of the mcmc chain for DRAM algorithm, starting at current state -
see :class:`MCMC` class.
|
src/UQpy/sampling/mcmc/DRAM.py
|
run_one_iteration
|
SURGroup/UncertaintyQuantification
| 0 |
python
|
def run_one_iteration(self, current_state: np.ndarray, current_log_pdf: np.ndarray):
'\n Run one iteration of the mcmc chain for DRAM algorithm, starting at current state -\n see :class:`MCMC` class.\n '
from UQpy.distributions import MultivariateNormal
multivariate_normal = MultivariateNormal(mean=np.zeros(self.dimension), cov=1.0)
candidate = np.zeros_like(current_state)
for (nc, current_cov) in enumerate(self.current_covariance):
multivariate_normal.update_parameters(cov=current_cov)
        candidate[nc, :] = (current_state[nc, :] + multivariate_normal.rvs(nsamples=1, random_state=self.random_state).reshape((self.dimension,)))
log_p_candidate = self.evaluate_log_target(candidate)
accept_vec = np.zeros((self.n_chains,))
delayed_chains_indices = []
unif_rvs = Uniform().rvs(nsamples=self.n_chains, random_state=self.random_state).reshape(((- 1),))
for (nc, (cand, log_p_cand, log_p_curr)) in enumerate(zip(candidate, log_p_candidate, current_log_pdf)):
accept = (np.log(unif_rvs[nc]) < (log_p_cand - log_p_curr))
if accept:
            current_state[nc, :] = cand
current_log_pdf[nc] = log_p_cand
accept_vec[nc] += 1.0
else:
delayed_chains_indices.append(nc)
if delayed_chains_indices:
current_states_delayed = np.zeros((len(delayed_chains_indices), self.dimension))
candidates_delayed = np.zeros((len(delayed_chains_indices), self.dimension))
candidate2 = np.zeros((len(delayed_chains_indices), self.dimension))
for (i, nc) in enumerate(delayed_chains_indices):
            current_states_delayed[i, :] = current_state[nc, :]
            candidates_delayed[i, :] = candidate[nc, :]
multivariate_normal.update_parameters(cov=((self.delayed_rejection_scale ** 2) * self.current_covariance[nc]))
            candidate2[i, :] = (current_states_delayed[i, :] + multivariate_normal.rvs(nsamples=1, random_state=self.random_state).reshape((self.dimension,)))
log_p_candidate2 = self.evaluate_log_target(candidate2)
log_prop_cand_cand2 = multivariate_normal.log_pdf((candidates_delayed - candidate2))
log_prop_cand_curr = multivariate_normal.log_pdf((candidates_delayed - current_states_delayed))
unif_rvs = Uniform().rvs(nsamples=len(delayed_chains_indices), random_state=self.random_state).reshape(((- 1),))
for (nc, cand2, log_p_cand2, j1, j2, u_rv) in zip(delayed_chains_indices, candidate2, log_p_candidate2, log_prop_cand_cand2, log_prop_cand_curr, unif_rvs):
alpha_cand_cand2 = min(1.0, np.exp((log_p_candidate[nc] - log_p_cand2)))
alpha_cand_curr = min(1.0, np.exp((log_p_candidate[nc] - current_log_pdf[nc])))
log_alpha2 = (((((log_p_cand2 - current_log_pdf[nc]) + j1) - j2) + np.log(max((1.0 - alpha_cand_cand2), (10 ** (- 320))))) - np.log(max((1.0 - alpha_cand_curr), (10 ** (- 320)))))
accept = (np.log(u_rv) < min(0.0, log_alpha2))
if accept:
                current_state[nc, :] = cand2
current_log_pdf[nc] = log_p_cand2
accept_vec[nc] += 1.0
for nc in range(self.n_chains):
        (self.sample_mean[nc], self.sample_covariance[nc]) = self._recursive_update_mean_covariance(nsamples=self.iterations_number, new_sample=current_state[nc, :], previous_mean=self.sample_mean[nc], previous_covariance=self.sample_covariance[nc])
if ((self.iterations_number > 1) and ((self.iterations_number % self.covariance_update_rate) == 0)):
self.current_covariance[nc] = ((self.scale_parameter * self.sample_covariance[nc]) + (1e-06 * np.eye(self.dimension)))
if (self.save_covariance and ((self.iterations_number > 1) and ((self.iterations_number % self.covariance_update_rate) == 0))):
self.adaptive_covariance.append(self.current_covariance.copy())
self._update_acceptance_rate(accept_vec)
return (current_state, current_log_pdf)
|
def run_one_iteration(self, current_state: np.ndarray, current_log_pdf: np.ndarray):
'\n Run one iteration of the mcmc chain for DRAM algorithm, starting at current state -\n see :class:`MCMC` class.\n '
from UQpy.distributions import MultivariateNormal
multivariate_normal = MultivariateNormal(mean=np.zeros(self.dimension), cov=1.0)
candidate = np.zeros_like(current_state)
for (nc, current_cov) in enumerate(self.current_covariance):
multivariate_normal.update_parameters(cov=current_cov)
        candidate[nc, :] = (current_state[nc, :] + multivariate_normal.rvs(nsamples=1, random_state=self.random_state).reshape((self.dimension,)))
log_p_candidate = self.evaluate_log_target(candidate)
accept_vec = np.zeros((self.n_chains,))
delayed_chains_indices = []
unif_rvs = Uniform().rvs(nsamples=self.n_chains, random_state=self.random_state).reshape(((- 1),))
for (nc, (cand, log_p_cand, log_p_curr)) in enumerate(zip(candidate, log_p_candidate, current_log_pdf)):
accept = (np.log(unif_rvs[nc]) < (log_p_cand - log_p_curr))
if accept:
            current_state[nc, :] = cand
current_log_pdf[nc] = log_p_cand
accept_vec[nc] += 1.0
else:
delayed_chains_indices.append(nc)
if delayed_chains_indices:
current_states_delayed = np.zeros((len(delayed_chains_indices), self.dimension))
candidates_delayed = np.zeros((len(delayed_chains_indices), self.dimension))
candidate2 = np.zeros((len(delayed_chains_indices), self.dimension))
for (i, nc) in enumerate(delayed_chains_indices):
            current_states_delayed[i, :] = current_state[nc, :]
            candidates_delayed[i, :] = candidate[nc, :]
multivariate_normal.update_parameters(cov=((self.delayed_rejection_scale ** 2) * self.current_covariance[nc]))
            candidate2[i, :] = (current_states_delayed[i, :] + multivariate_normal.rvs(nsamples=1, random_state=self.random_state).reshape((self.dimension,)))
log_p_candidate2 = self.evaluate_log_target(candidate2)
log_prop_cand_cand2 = multivariate_normal.log_pdf((candidates_delayed - candidate2))
log_prop_cand_curr = multivariate_normal.log_pdf((candidates_delayed - current_states_delayed))
unif_rvs = Uniform().rvs(nsamples=len(delayed_chains_indices), random_state=self.random_state).reshape(((- 1),))
for (nc, cand2, log_p_cand2, j1, j2, u_rv) in zip(delayed_chains_indices, candidate2, log_p_candidate2, log_prop_cand_cand2, log_prop_cand_curr, unif_rvs):
alpha_cand_cand2 = min(1.0, np.exp((log_p_candidate[nc] - log_p_cand2)))
alpha_cand_curr = min(1.0, np.exp((log_p_candidate[nc] - current_log_pdf[nc])))
log_alpha2 = (((((log_p_cand2 - current_log_pdf[nc]) + j1) - j2) + np.log(max((1.0 - alpha_cand_cand2), (10 ** (- 320))))) - np.log(max((1.0 - alpha_cand_curr), (10 ** (- 320)))))
accept = (np.log(u_rv) < min(0.0, log_alpha2))
if accept:
                current_state[nc, :] = cand2
current_log_pdf[nc] = log_p_cand2
accept_vec[nc] += 1.0
for nc in range(self.n_chains):
        (self.sample_mean[nc], self.sample_covariance[nc]) = self._recursive_update_mean_covariance(nsamples=self.iterations_number, new_sample=current_state[nc, :], previous_mean=self.sample_mean[nc], previous_covariance=self.sample_covariance[nc])
if ((self.iterations_number > 1) and ((self.iterations_number % self.covariance_update_rate) == 0)):
self.current_covariance[nc] = ((self.scale_parameter * self.sample_covariance[nc]) + (1e-06 * np.eye(self.dimension)))
if (self.save_covariance and ((self.iterations_number > 1) and ((self.iterations_number % self.covariance_update_rate) == 0))):
self.adaptive_covariance.append(self.current_covariance.copy())
self._update_acceptance_rate(accept_vec)
return (current_state, current_log_pdf)<|docstring|>Run one iteration of the mcmc chain for DRAM algorithm, starting at current state -
see :class:`MCMC` class.<|endoftext|>
|
eac4d7b3c8044a8904d896fe96239edef2f53e2f7bca9532e92eff1a78ce29af
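In log form, the second-stage (delayed rejection) acceptance computed in run_one_iteration is the standard DR ratio for a symmetric within-stage proposal, with x the current state, y1 the rejected first candidate, y2 the second candidate, J the stage-2 proposal log-density, and alpha the stage-1 acceptance probability:

\log \alpha_2(x, y_1, y_2) = \log \pi(y_2) - \log \pi(x) + J(y_1 \mid y_2) - J(y_1 \mid x) + \log\bigl(1 - \alpha(y_2, y_1)\bigr) - \log\bigl(1 - \alpha(x, y_1)\bigr)

This is exactly log_alpha2 above, with j1 = J(y1 | y2), j2 = J(y1 | x), alpha_cand_cand2 = alpha(y2, y1), alpha_cand_curr = alpha(x, y1), and the max(., 1e-320) guard keeping the logs finite when an alpha equals one.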
|
@staticmethod
def _recursive_update_mean_covariance(nsamples, new_sample, previous_mean, previous_covariance=None):
        '\n Iterative formula to compute a new sample mean and covariance based on previous ones and new sample.\n\n New covariance is computed only if previous_covariance is provided.\n\n **Inputs:**\n\n * n (int): Number of samples used to compute the new mean\n * new_sample (ndarray (dim, )): new sample\n * previous_mean (ndarray (dim, )): Previous sample mean, to be updated with new sample value\n * previous_covariance (ndarray (dim, dim)): Previous sample covariance, to be updated with new sample value\n\n **Output/Returns:**\n\n * new_mean (ndarray (dim, )): Updated sample mean\n * new_covariance (ndarray (dim, dim)): Updated sample covariance\n\n '
new_mean = ((((nsamples - 1) / nsamples) * previous_mean) + ((1 / nsamples) * new_sample))
if (previous_covariance is None):
return new_mean
dimensions = new_sample.size
if (nsamples == 1):
new_covariance = np.zeros((dimensions, dimensions))
else:
delta_n = (new_sample - previous_mean).reshape((dimensions, 1))
new_covariance = ((((nsamples - 2) / (nsamples - 1)) * previous_covariance) + ((1 / nsamples) * np.matmul(delta_n, delta_n.T)))
return (new_mean, new_covariance)
|
Iterative formula to compute a new sample mean and covariance based on previous ones and new sample.
New covariance is computed only if previous_covariance is provided.
**Inputs:**
* n (int): Number of samples used to compute the new mean
* new_sample (ndarray (dim, )): new sample
* previous_mean (ndarray (dim, )): Previous sample mean, to be updated with new sample value
* previous_covariance (ndarray (dim, dim)): Previous sample covariance, to be updated with new sample value
**Output/Returns:**
* new_mean (ndarray (dim, )): Updated sample mean
* new_covariance (ndarray (dim, dim)): Updated sample covariance
|
src/UQpy/sampling/mcmc/DRAM.py
|
_recursive_update_mean_covariance
|
SURGroup/UncertaintyQuantification
| 0 |
python
|
@staticmethod
def _recursive_update_mean_covariance(nsamples, new_sample, previous_mean, previous_covariance=None):
        '\n Iterative formula to compute a new sample mean and covariance based on previous ones and new sample.\n\n New covariance is computed only if previous_covariance is provided.\n\n **Inputs:**\n\n * n (int): Number of samples used to compute the new mean\n * new_sample (ndarray (dim, )): new sample\n * previous_mean (ndarray (dim, )): Previous sample mean, to be updated with new sample value\n * previous_covariance (ndarray (dim, dim)): Previous sample covariance, to be updated with new sample value\n\n **Output/Returns:**\n\n * new_mean (ndarray (dim, )): Updated sample mean\n * new_covariance (ndarray (dim, dim)): Updated sample covariance\n\n '
new_mean = ((((nsamples - 1) / nsamples) * previous_mean) + ((1 / nsamples) * new_sample))
if (previous_covariance is None):
return new_mean
dimensions = new_sample.size
if (nsamples == 1):
new_covariance = np.zeros((dimensions, dimensions))
else:
delta_n = (new_sample - previous_mean).reshape((dimensions, 1))
new_covariance = ((((nsamples - 2) / (nsamples - 1)) * previous_covariance) + ((1 / nsamples) * np.matmul(delta_n, delta_n.T)))
return (new_mean, new_covariance)
|
@staticmethod
def _recursive_update_mean_covariance(nsamples, new_sample, previous_mean, previous_covariance=None):
    '\n Iterative formula to compute a new sample mean and covariance based on previous ones and new sample.\n\n New covariance is computed only if previous_covariance is provided.\n\n **Inputs:**\n\n * nsamples (int): Number of samples used to compute the new mean\n * new_sample (ndarray (dim, )): new sample\n * previous_mean (ndarray (dim, )): Previous sample mean, to be updated with new sample value\n * previous_covariance (ndarray (dim, dim)): Previous sample covariance, to be updated with new sample value\n\n **Output/Returns:**\n\n * new_mean (ndarray (dim, )): Updated sample mean\n * new_covariance (ndarray (dim, dim)): Updated sample covariance\n\n '
new_mean = ((((nsamples - 1) / nsamples) * previous_mean) + ((1 / nsamples) * new_sample))
if (previous_covariance is None):
return new_mean
dimensions = new_sample.size
if (nsamples == 1):
new_covariance = np.zeros((dimensions, dimensions))
else:
delta_n = (new_sample - previous_mean).reshape((dimensions, 1))
new_covariance = ((((nsamples - 2) / (nsamples - 1)) * previous_covariance) + ((1 / nsamples) * np.matmul(delta_n, delta_n.T)))
return (new_mean, new_covariance)<|docstring|>Iterative formula to compute a new sample mean and covariance based on previous ones and new sample.
New covariance is computed only if previous_covariance is provided.
**Inputs:**
* nsamples (int): Number of samples used to compute the new mean
* new_sample (ndarray (dim, )): new sample
* previous_mean (ndarray (dim, )): Previous sample mean, to be updated with new sample value
* previous_covariance (ndarray (dim, dim)): Previous sample covariance, to be updated with new sample value
**Output/Returns:**
* new_mean (ndarray (dim, )): Updated sample mean
* new_covariance (ndarray (dim, dim)): Updated sample covariance<|endoftext|>
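
For reference, the recursion the code above implements, restated in LaTeX (a plain transcription of the code, with $x_n$ the new sample and $\Sigma_1 = 0$ covering the nsamples == 1 branch):

\mu_n = \frac{n-1}{n}\,\mu_{n-1} + \frac{1}{n}\,x_n
\Sigma_n = \frac{n-2}{n-1}\,\Sigma_{n-1} + \frac{1}{n}\,\delta_n\delta_n^{\top}, \qquad \delta_n = x_n - \mu_{n-1}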
|
493af6c9b42bfc0a3fe37d83042b44fe7553dcaf0380da7e0c0a161a266f9bc6
|
def test_random_arithmetic(self):
    'Compose some random formulas and evaluate them as\n Python float and vec1, then compare'
for i in range(10000):
startval = self._randint(100)
term = self._build_random_term('_start', self.rnd.uniform(0.1, 0.99))
py_str = term.replace('_start', ('float(%i)' % startval))
vec_str = ('(%s)[0]' % term.replace('_start', ('vec(%i)' % startval)))
self._compare_eval(py_str, vec_str)
|
Compose some random formulas and evaluate them as
Python float and vec1, then compare
|
vector/tests.py
|
test_random_arithmetic
|
defgsus/lolpig
| 0 |
python
|
def test_random_arithmetic(self):
    'Compose some random formulas and evaluate them as\n Python float and vec1, then compare'
for i in range(10000):
startval = self._randint(100)
term = self._build_random_term('_start', self.rnd.uniform(0.1, 0.99))
py_str = term.replace('_start', ('float(%i)' % startval))
vec_str = ('(%s)[0]' % term.replace('_start', ('vec(%i)' % startval)))
self._compare_eval(py_str, vec_str)
|
def test_random_arithmetic(self):
    'Compose some random formulas and evaluate them as\n Python float and vec1, then compare'
for i in range(10000):
startval = self._randint(100)
term = self._build_random_term('_start', self.rnd.uniform(0.1, 0.99))
py_str = term.replace('_start', ('float(%i)' % startval))
vec_str = ('(%s)[0]' % term.replace('_start', ('vec(%i)' % startval)))
        self._compare_eval(py_str, vec_str)<|docstring|>Compose some random formulas and evaluate them as
Python float and vec1, then compare<|endoftext|>
|
77c1438d276d70cc4f9d2ec4a3e6a87b950cecfdb0d204cf8ddac87a4e871e50
|
def __gather_minions(self):
'\n Return a list of minions to use for the batch run\n '
args = [self.opts['tgt'], 'test.ping', [], self.opts['timeout']]
selected_target_option = self.opts.get('selected_target_option', None)
if (selected_target_option is not None):
args.append(selected_target_option)
else:
args.append(self.opts.get('expr_form', 'glob'))
ping_gen = self.local.cmd_iter(*args, **self.eauth)
fret = set()
try:
for ret in ping_gen:
m = next(six.iterkeys(ret))
if (m is not None):
fret.add(m)
return (list(fret), ping_gen)
except StopIteration:
raise salt.exceptions.SaltClientError('No minions matched the target.')
|
Return a list of minions to use for the batch run
|
salt/cli/batch.py
|
__gather_minions
|
belvedere-trading/salt
| 3 |
python
|
def __gather_minions(self):
'\n \n '
args = [self.opts['tgt'], 'test.ping', [], self.opts['timeout']]
selected_target_option = self.opts.get('selected_target_option', None)
if (selected_target_option is not None):
args.append(selected_target_option)
else:
args.append(self.opts.get('expr_form', 'glob'))
ping_gen = self.local.cmd_iter(*args, **self.eauth)
fret = set()
try:
for ret in ping_gen:
m = next(six.iterkeys(ret))
if (m is not None):
fret.add(m)
return (list(fret), ping_gen)
except StopIteration:
raise salt.exceptions.SaltClientError('No minions matched the target.')
|
def __gather_minions(self):
'\n \n '
args = [self.opts['tgt'], 'test.ping', [], self.opts['timeout']]
selected_target_option = self.opts.get('selected_target_option', None)
if (selected_target_option is not None):
args.append(selected_target_option)
else:
args.append(self.opts.get('expr_form', 'glob'))
ping_gen = self.local.cmd_iter(*args, **self.eauth)
fret = set()
try:
for ret in ping_gen:
m = next(six.iterkeys(ret))
if (m is not None):
fret.add(m)
return (list(fret), ping_gen)
except StopIteration:
raise salt.exceptions.SaltClientError('No minions matched the target.')<|docstring|>Return a list of minions to use for the batch run<|endoftext|>
|
049a4987de83ef49ddab95434042d7ac10978431a71d968bbd754c2465810643
|
def get_bnum(self):
'\n Return the active number of minions to maintain\n '
partition = (lambda x: ((float(x) / 100.0) * len(self.minions)))
try:
if ('%' in self.opts['batch']):
res = partition(float(self.opts['batch'].strip('%')))
if (res < 1):
return int(math.ceil(res))
else:
return int(res)
else:
return int(self.opts['batch'])
except ValueError:
if (not self.quiet):
print_cli('Invalid batch data sent: {0}\nData must be in the form of %10, 10% or 3'.format(self.opts['batch']))
|
Return the active number of minions to maintain
|
salt/cli/batch.py
|
get_bnum
|
belvedere-trading/salt
| 3 |
python
|
def get_bnum(self):
'\n \n '
partition = (lambda x: ((float(x) / 100.0) * len(self.minions)))
try:
if ('%' in self.opts['batch']):
res = partition(float(self.opts['batch'].strip('%')))
if (res < 1):
return int(math.ceil(res))
else:
return int(res)
else:
return int(self.opts['batch'])
except ValueError:
if (not self.quiet):
print_cli('Invalid batch data sent: {0}\nData must be in the form of %10, 10% or 3'.format(self.opts['batch']))
|
def get_bnum(self):
'\n \n '
partition = (lambda x: ((float(x) / 100.0) * len(self.minions)))
try:
if ('%' in self.opts['batch']):
res = partition(float(self.opts['batch'].strip('%')))
if (res < 1):
return int(math.ceil(res))
else:
return int(res)
else:
return int(self.opts['batch'])
except ValueError:
if (not self.quiet):
print_cli('Invalid batch data sent: {0}\nData must be in the form of %10, 10% or 3'.format(self.opts['batch']))<|docstring|>Return the active number of minions to maintain<|endoftext|>
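
A minimal standalone sketch of the batch-size parsing accepted above ('%10', '10%', or a plain count like '3'); total_minions is a hypothetical stand-in for len(self.minions):

import math

def parse_batch_size(batch, total_minions):
    # Percentage forms: strip the '%' from either end, then scale by the pool size.
    if '%' in batch:
        res = float(batch.strip('%')) / 100.0 * total_minions
        # A percentage that works out below one minion still yields one.
        return int(math.ceil(res)) if res < 1 else int(res)
    # Plain integer form.
    return int(batch)

print(parse_batch_size('10%', 25))  # 2
print(parse_batch_size('%10', 5))   # 1
print(parse_batch_size('3', 25))    # 3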
|
7262e1a9cea311df32c3762898dd76d4cedf1b5d20c5571036ea64e175adcc25
|
def run(self):
'\n Execute the batch run\n '
args = [[], self.opts['fun'], self.opts['arg'], self.opts['timeout'], 'list']
bnum = self.get_bnum()
to_run = copy.deepcopy(self.minions)
active = []
ret = {}
iters = []
minion_tracker = {}
while (len(ret) < len(self.minions)):
next_ = []
if ((len(to_run) <= bnum) and (not active)):
while to_run:
next_.append(to_run.pop())
else:
for i in range((bnum - len(active))):
if to_run:
minion_id = to_run.pop()
if isinstance(minion_id, dict):
next_.append(minion_id.keys()[0])
else:
next_.append(minion_id)
active += next_
args[0] = next_
if next_:
if (not self.quiet):
print_cli('\nExecuting run on {0}\n'.format(next_))
new_iter = self.local.cmd_iter_no_block(*args, raw=self.opts.get('raw', False), ret=self.opts.get('return', ''), **self.eauth)
iters.append(new_iter)
minion_tracker[new_iter] = {}
minion_tracker[new_iter]['minions'] = next_
minion_tracker[new_iter]['active'] = True
else:
time.sleep(0.02)
parts = {}
for ping_ret in self.ping_gen:
if (ping_ret is None):
break
m = next(ping_ret.iterkeys())
if (m not in self.minions):
self.minions.append(m)
to_run.append(m)
for queue in iters:
try:
ncnt = 0
while True:
part = next(queue)
if (part is None):
time.sleep(0.01)
ncnt += 1
if (ncnt > 5):
break
continue
if self.opts.get('raw'):
parts.update({part['id']: part})
minion_tracker[queue]['minions'].remove(part['id'])
else:
parts.update(part)
for id in part.keys():
minion_tracker[queue]['minions'].remove(id)
except StopIteration:
if (queue in minion_tracker):
minion_tracker[queue]['active'] = False
for minion in minion_tracker[queue]['minions']:
if (minion not in parts):
parts[minion] = {}
parts[minion]['ret'] = {}
for (minion, data) in six.iteritems(parts):
if (minion in active):
active.remove(minion)
if self.opts.get('raw'):
(yield data)
else:
ret[minion] = data['ret']
(yield {minion: data['ret']})
if (not self.quiet):
ret[minion] = data['ret']
data[minion] = data.pop('ret')
if ('out' in data):
out = data.pop('out')
else:
out = None
salt.output.display_output(data, out, self.opts)
for queue in minion_tracker:
if ((not minion_tracker[queue]['active']) and (queue in iters)):
iters.remove(queue)
for minion in minion_tracker[queue]['minions']:
if (minion in active):
active.remove(minion)
|
Execute the batch run
|
salt/cli/batch.py
|
run
|
belvedere-trading/salt
| 3 |
python
|
def run(self):
'\n \n '
args = [[], self.opts['fun'], self.opts['arg'], self.opts['timeout'], 'list']
bnum = self.get_bnum()
to_run = copy.deepcopy(self.minions)
active = []
ret = {}
iters = []
minion_tracker = {}
while (len(ret) < len(self.minions)):
next_ = []
if ((len(to_run) <= bnum) and (not active)):
while to_run:
next_.append(to_run.pop())
else:
for i in range((bnum - len(active))):
if to_run:
minion_id = to_run.pop()
if isinstance(minion_id, dict):
next_.append(minion_id.keys()[0])
else:
next_.append(minion_id)
active += next_
args[0] = next_
if next_:
if (not self.quiet):
print_cli('\nExecuting run on {0}\n'.format(next_))
            new_iter = self.local.cmd_iter_no_block(*args, raw=self.opts.get('raw', False), ret=self.opts.get('return', ''), **self.eauth)
iters.append(new_iter)
minion_tracker[new_iter] = {}
minion_tracker[new_iter]['minions'] = next_
minion_tracker[new_iter]['active'] = True
else:
time.sleep(0.02)
parts = {}
for ping_ret in self.ping_gen:
if (ping_ret is None):
break
m = next(ping_ret.iterkeys())
if (m not in self.minions):
self.minions.append(m)
to_run.append(m)
for queue in iters:
try:
ncnt = 0
while True:
part = next(queue)
if (part is None):
time.sleep(0.01)
ncnt += 1
if (ncnt > 5):
break
continue
if self.opts.get('raw'):
parts.update({part['id']: part})
minion_tracker[queue]['minions'].remove(part['id'])
else:
parts.update(part)
for id in part.keys():
minion_tracker[queue]['minions'].remove(id)
except StopIteration:
if (queue in minion_tracker):
minion_tracker[queue]['active'] = False
for minion in minion_tracker[queue]['minions']:
if (minion not in parts):
parts[minion] = {}
parts[minion]['ret'] = {}
for (minion, data) in six.iteritems(parts):
if (minion in active):
active.remove(minion)
if self.opts.get('raw'):
(yield data)
else:
ret[minion] = data['ret']
(yield {minion: data['ret']})
if (not self.quiet):
ret[minion] = data['ret']
data[minion] = data.pop('ret')
if ('out' in data):
out = data.pop('out')
else:
out = None
salt.output.display_output(data, out, self.opts)
for queue in minion_tracker:
if ((not minion_tracker[queue]['active']) and (queue in iters)):
iters.remove(queue)
for minion in minion_tracker[queue]['minions']:
if (minion in active):
active.remove(minion)
|
def run(self):
'\n \n '
args = [[], self.opts['fun'], self.opts['arg'], self.opts['timeout'], 'list']
bnum = self.get_bnum()
to_run = copy.deepcopy(self.minions)
active = []
ret = {}
iters = []
minion_tracker = {}
while (len(ret) < len(self.minions)):
next_ = []
if ((len(to_run) <= bnum) and (not active)):
while to_run:
next_.append(to_run.pop())
else:
for i in range((bnum - len(active))):
if to_run:
minion_id = to_run.pop()
if isinstance(minion_id, dict):
next_.append(minion_id.keys()[0])
else:
next_.append(minion_id)
active += next_
args[0] = next_
if next_:
if (not self.quiet):
print_cli('\nExecuting run on {0}\n'.format(next_))
            new_iter = self.local.cmd_iter_no_block(*args, raw=self.opts.get('raw', False), ret=self.opts.get('return', ''), **self.eauth)
iters.append(new_iter)
minion_tracker[new_iter] = {}
minion_tracker[new_iter]['minions'] = next_
minion_tracker[new_iter]['active'] = True
else:
time.sleep(0.02)
parts = {}
for ping_ret in self.ping_gen:
if (ping_ret is None):
break
m = next(ping_ret.iterkeys())
if (m not in self.minions):
self.minions.append(m)
to_run.append(m)
for queue in iters:
try:
ncnt = 0
while True:
part = next(queue)
if (part is None):
time.sleep(0.01)
ncnt += 1
if (ncnt > 5):
break
continue
if self.opts.get('raw'):
parts.update({part['id']: part})
minion_tracker[queue]['minions'].remove(part['id'])
else:
parts.update(part)
for id in part.keys():
minion_tracker[queue]['minions'].remove(id)
except StopIteration:
if (queue in minion_tracker):
minion_tracker[queue]['active'] = False
for minion in minion_tracker[queue]['minions']:
if (minion not in parts):
parts[minion] = {}
parts[minion]['ret'] = {}
for (minion, data) in six.iteritems(parts):
if (minion in active):
active.remove(minion)
if self.opts.get('raw'):
(yield data)
else:
ret[minion] = data['ret']
(yield {minion: data['ret']})
if (not self.quiet):
ret[minion] = data['ret']
data[minion] = data.pop('ret')
if ('out' in data):
out = data.pop('out')
else:
out = None
salt.output.display_output(data, out, self.opts)
for queue in minion_tracker:
if ((not minion_tracker[queue]['active']) and (queue in iters)):
iters.remove(queue)
for minion in minion_tracker[queue]['minions']:
if (minion in active):
active.remove(minion)<|docstring|>Execute the batch run<|endoftext|>
|
d2c78c21c894414edd8e076adb3cce9d238aff3c2c18a59aee831ce2d9e671a1
|
@property
def version(self):
    '\n Gets the attribute value of version\n '
return self.__version
|
Gets the attribute value of version
|
mercadopago/config/config.py
|
version
|
mercadopago/sdk-python
| 100 |
python
|
@property
def version(self):
'\n \n '
return self.__version
|
@property
def version(self):
'\n \n '
    return self.__version<|docstring|>Gets the attribute value of version<|endoftext|>
|
7abe64a71daec36ab6c327a40221267d0615d4b94f6d002c264716817f0e6082
|
@property
def user_agent(self):
    '\n Gets the attribute value of user agent\n '
return self.__user_agent
|
Gets the attribute value of user agent
|
mercadopago/config/config.py
|
user_agent
|
mercadopago/sdk-python
| 100 |
python
|
@property
def user_agent(self):
'\n \n '
return self.__user_agent
|
@property
def user_agent(self):
'\n \n '
    return self.__user_agent<|docstring|>Gets the attribute value of user agent<|endoftext|>
|
63c56e02203d01bc1e80da7275cbb527e74c4db60b75a649a41e798cd2a4465f
|
@property
def product_id(self):
    '\n Gets the attribute value of product id\n '
return self.__product_id
|
Gets the attribute value of product id
|
mercadopago/config/config.py
|
product_id
|
mercadopago/sdk-python
| 100 |
python
|
@property
def product_id(self):
'\n \n '
return self.__product_id
|
@property
def product_id(self):
'\n \n '
    return self.__product_id<|docstring|>Gets the attribute value of product id<|endoftext|>
|
b93ab0cd6d8ad7e902548823f9fcf18c22a104487a846871bdbda6870eb6a087
|
@property
def tracking_id(self):
    '\n Gets the attribute value of tracking id\n '
return self.__tracking_id
|
Gets the attribute value of tracking id
|
mercadopago/config/config.py
|
tracking_id
|
mercadopago/sdk-python
| 100 |
python
|
@property
def tracking_id(self):
'\n \n '
return self.__tracking_id
|
@property
def tracking_id(self):
'\n \n '
    return self.__tracking_id<|docstring|>Gets the attribute value of tracking id<|endoftext|>
|
83f5b7675a10fd4046a39f9edbf2f37a56d5649dda2b956fd4e8eb6ea1960b84
|
@property
def api_base_url(self):
    '\n Gets the attribute value of api base url\n '
return self.__api_base_url
|
Gets the attribute value of api base url
|
mercadopago/config/config.py
|
api_base_url
|
mercadopago/sdk-python
| 100 |
python
|
@property
def api_base_url(self):
'\n \n '
return self.__api_base_url
|
@property
def api_base_url(self):
'\n \n '
    return self.__api_base_url<|docstring|>Gets the attribute value of api base url<|endoftext|>
|
23965edca5b4f73d5b8faed8f8116ce21904cfa6fa53a4843e52abc3d9cb93a4
|
@property
def mime_json(self):
    '\n Gets the attribute value of mime json\n '
return self.__mime_json
|
Gets the attribute value of mime json
|
mercadopago/config/config.py
|
mime_json
|
mercadopago/sdk-python
| 100 |
python
|
@property
def mime_json(self):
'\n \n '
return self.__mime_json
|
@property
def mime_json(self):
'\n \n '
    return self.__mime_json<|docstring|>Gets the attribute value of mime json<|endoftext|>
|
17a65bb566939797083b8c979ec9782cbc8c59dbec9dcf06b391d38045856188
|
@property
def mime_form(self):
    '\n Gets the attribute value of mime form\n '
return self.__mime_form
|
Gets the attribute value of mime form
|
mercadopago/config/config.py
|
mime_form
|
mercadopago/sdk-python
| 100 |
python
|
@property
def mime_form(self):
'\n \n '
return self.__mime_form
|
@property
def mime_form(self):
'\n \n '
    return self.__mime_form<|docstring|>Gets the attribute value of mime form<|endoftext|>
|
01331f06f1e6efa5f547a5c81bb47baf46ca587bbd9fb41fe40f8d02fd1100a0
|
@classmethod
def filter_create_attributes(cls, network, context):
'Filter out network attributes not required for a create.'
odl_utils.try_del(network, ['status', 'subnets'])
cls._filter_unmapped_null(network, cls._UNMAPPED_KEYS)
|
Filter out network attributes not required for a create.
|
networking_odl/ml2/mech_driver.py
|
filter_create_attributes
|
gokarslan/networking-odl2
| 0 |
python
|
@classmethod
def filter_create_attributes(cls, network, context):
odl_utils.try_del(network, ['status', 'subnets'])
cls._filter_unmapped_null(network, cls._UNMAPPED_KEYS)
|
@classmethod
def filter_create_attributes(cls, network, context):
odl_utils.try_del(network, ['status', 'subnets'])
cls._filter_unmapped_null(network, cls._UNMAPPED_KEYS)<|docstring|>Filter out network attributes not required for a create.<|endoftext|>
|
4eb081855307b3d2c65078a4b13f96af7b4954ca02e2cd71d5cadabfe3ed2d01
|
@classmethod
def filter_update_attributes(cls, network, context):
'Filter out network attributes for an update operation.'
odl_utils.try_del(network, ['id', 'status', 'subnets', 'tenant_id'])
cls._filter_unmapped_null(network, cls._UNMAPPED_KEYS)
|
Filter out network attributes for an update operation.
|
networking_odl/ml2/mech_driver.py
|
filter_update_attributes
|
gokarslan/networking-odl2
| 0 |
python
|
@classmethod
def filter_update_attributes(cls, network, context):
odl_utils.try_del(network, ['id', 'status', 'subnets', 'tenant_id'])
cls._filter_unmapped_null(network, cls._UNMAPPED_KEYS)
|
@classmethod
def filter_update_attributes(cls, network, context):
odl_utils.try_del(network, ['id', 'status', 'subnets', 'tenant_id'])
cls._filter_unmapped_null(network, cls._UNMAPPED_KEYS)<|docstring|>Filter out network attributes for an update operation.<|endoftext|>
|
4e68229fea21f769a64c3d869ddb45855b5412085c80ebb25200c69e2cc466ef
|
@classmethod
def filter_create_attributes(cls, subnet, context):
'Filter out subnet attributes not required for a create.'
cls._filter_unmapped_null(subnet, cls._UNMAPPED_KEYS)
|
Filter out subnet attributes not required for a create.
|
networking_odl/ml2/mech_driver.py
|
filter_create_attributes
|
gokarslan/networking-odl2
| 0 |
python
|
@classmethod
def filter_create_attributes(cls, subnet, context):
cls._filter_unmapped_null(subnet, cls._UNMAPPED_KEYS)
|
@classmethod
def filter_create_attributes(cls, subnet, context):
cls._filter_unmapped_null(subnet, cls._UNMAPPED_KEYS)<|docstring|>Filter out subnet attributes not required for a create.<|endoftext|>
|
9a15849db7079e3b4796f9fe35629b3f0f306a0faa5baf73aa531f6d34f4811a
|
@classmethod
def filter_update_attributes(cls, subnet, context):
'Filter out subnet attributes for an update operation.'
odl_utils.try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr', 'allocation_pools', 'tenant_id'])
cls._filter_unmapped_null(subnet, cls._UNMAPPED_KEYS)
|
Filter out subnet attributes for an update operation.
|
networking_odl/ml2/mech_driver.py
|
filter_update_attributes
|
gokarslan/networking-odl2
| 0 |
python
|
@classmethod
def filter_update_attributes(cls, subnet, context):
odl_utils.try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr', 'allocation_pools', 'tenant_id'])
cls._filter_unmapped_null(subnet, cls._UNMAPPED_KEYS)
|
@classmethod
def filter_update_attributes(cls, subnet, context):
odl_utils.try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr', 'allocation_pools', 'tenant_id'])
cls._filter_unmapped_null(subnet, cls._UNMAPPED_KEYS)<|docstring|>Filter out subnet attributes for an update operation.<|endoftext|>
|
5b8cb39b3112d4c31580820fc8a891a9381447b51fb15d6d9c99999c1d3e1c04
|
@staticmethod
def _add_security_groups(port, context):
"Populate the 'security_groups' field with entire records."
dbcontext = context._plugin_context
groups = [context._plugin.get_security_group(dbcontext, sg) for sg in port['security_groups']]
port['security_groups'] = groups
|
Populate the 'security_groups' field with entire records.
|
networking_odl/ml2/mech_driver.py
|
_add_security_groups
|
gokarslan/networking-odl2
| 0 |
python
|
@staticmethod
def _add_security_groups(port, context):
dbcontext = context._plugin_context
groups = [context._plugin.get_security_group(dbcontext, sg) for sg in port['security_groups']]
port['security_groups'] = groups
|
@staticmethod
def _add_security_groups(port, context):
dbcontext = context._plugin_context
groups = [context._plugin.get_security_group(dbcontext, sg) for sg in port['security_groups']]
port['security_groups'] = groups<|docstring|>Populate the 'security_groups' field with entire records.<|endoftext|>
|
5034ae4f389fe38f63ebaa758db027a6a629354655568cb08c5303bbbb0b61cd
|
@classmethod
def _fixup_allowed_ipaddress_pairs(cls, allowed_address_pairs):
    'Unify (IP address or network address) into network address'
for address_pair in allowed_address_pairs:
ip_address = address_pair['ip_address']
network_address = str(netaddr.IPNetwork(ip_address))
address_pair['ip_address'] = network_address
|
Unify (IP address or network address) into network address
|
networking_odl/ml2/mech_driver.py
|
_fixup_allowed_ipaddress_pairs
|
gokarslan/networking-odl2
| 0 |
python
|
@classmethod
def _fixup_allowed_ipaddress_pairs(cls, allowed_address_pairs):
for address_pair in allowed_address_pairs:
ip_address = address_pair['ip_address']
network_address = str(netaddr.IPNetwork(ip_address))
address_pair['ip_address'] = network_address
|
@classmethod
def _fixup_allowed_ipaddress_pairs(cls, allowed_address_pairs):
for address_pair in allowed_address_pairs:
ip_address = address_pair['ip_address']
network_address = str(netaddr.IPNetwork(ip_address))
        address_pair['ip_address'] = network_address<|docstring|>Unify (IP address or network address) into network address<|endoftext|>
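
A small illustration of the normalization performed above (assuming the netaddr package is installed): a bare IP address becomes a single-host network (/32 for IPv4), while an address already in network form passes through unchanged.

import netaddr

for addr in ['10.0.0.5', '10.0.0.0/24']:
    print(str(netaddr.IPNetwork(addr)))
# 10.0.0.5/32
# 10.0.0.0/24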
|
a74295e843628c8d17d1a123a1a92dd1736864485236e6479fb444394c67b7df
|
@classmethod
def filter_create_attributes(cls, port, context):
'Filter out port attributes not required for a create.'
cls._add_security_groups(port, context)
cls._fixup_allowed_ipaddress_pairs(port[addr_apidef.ADDRESS_PAIRS])
cls._filter_unmapped_null(port, cls._UNMAPPED_KEYS)
odl_utils.try_del(port, ['status'])
if (port['tenant_id'] == ''):
LOG.debug('empty string was passed for tenant_id: %s(port)', port)
port['tenant_id'] = context._network_context._network['tenant_id']
|
Filter out port attributes not required for a create.
|
networking_odl/ml2/mech_driver.py
|
filter_create_attributes
|
gokarslan/networking-odl2
| 0 |
python
|
@classmethod
def filter_create_attributes(cls, port, context):
cls._add_security_groups(port, context)
cls._fixup_allowed_ipaddress_pairs(port[addr_apidef.ADDRESS_PAIRS])
cls._filter_unmapped_null(port, cls._UNMAPPED_KEYS)
odl_utils.try_del(port, ['status'])
    if (port['tenant_id'] == ''):
LOG.debug('empty string was passed for tenant_id: %s(port)', port)
port['tenant_id'] = context._network_context._network['tenant_id']
|
@classmethod
def filter_create_attributes(cls, port, context):
cls._add_security_groups(port, context)
cls._fixup_allowed_ipaddress_pairs(port[addr_apidef.ADDRESS_PAIRS])
cls._filter_unmapped_null(port, cls._UNMAPPED_KEYS)
odl_utils.try_del(port, ['status'])
    if (port['tenant_id'] == ''):
LOG.debug('empty string was passed for tenant_id: %s(port)', port)
port['tenant_id'] = context._network_context._network['tenant_id']<|docstring|>Filter out port attributes not required for a create.<|endoftext|>
|
05007d1cda65b1f294e31bb12d72f281ffd77c1577a9fd2017660bbd45c745fc
|
@classmethod
def filter_update_attributes(cls, port, context):
'Filter out port attributes for an update operation.'
cls._add_security_groups(port, context)
cls._fixup_allowed_ipaddress_pairs(port[addr_apidef.ADDRESS_PAIRS])
cls._filter_unmapped_null(port, cls._UNMAPPED_KEYS)
odl_utils.try_del(port, ['network_id', 'id', 'status', 'tenant_id'])
|
Filter out port attributes for an update operation.
|
networking_odl/ml2/mech_driver.py
|
filter_update_attributes
|
gokarslan/networking-odl2
| 0 |
python
|
@classmethod
def filter_update_attributes(cls, port, context):
cls._add_security_groups(port, context)
cls._fixup_allowed_ipaddress_pairs(port[addr_apidef.ADDRESS_PAIRS])
cls._filter_unmapped_null(port, cls._UNMAPPED_KEYS)
odl_utils.try_del(port, ['network_id', 'id', 'status', 'tenant_id'])
|
@classmethod
def filter_update_attributes(cls, port, context):
cls._add_security_groups(port, context)
cls._fixup_allowed_ipaddress_pairs(port[addr_apidef.ADDRESS_PAIRS])
cls._filter_unmapped_null(port, cls._UNMAPPED_KEYS)
odl_utils.try_del(port, ['network_id', 'id', 'status', 'tenant_id'])<|docstring|>Filter out port attributes for an update operation.<|endoftext|>
|
a3b185267153c5290e4f676dd8d4897967568404c09998b776a9c3fab811560d
|
@staticmethod
def filter_create_attributes(sg, context):
'Filter out security-group attributes not required for a create.'
pass
|
Filter out security-group attributes not required for a create.
|
networking_odl/ml2/mech_driver.py
|
filter_create_attributes
|
gokarslan/networking-odl2
| 0 |
python
|
@staticmethod
def filter_create_attributes(sg, context):
pass
|
@staticmethod
def filter_create_attributes(sg, context):
pass<|docstring|>Filter out security-group attributes not required for a create.<|endoftext|>
|
73ea8a71c5c2d1fb6d4bdcb8bade549a322f11fc7679e8e4b6b6406b718dc60d
|
@staticmethod
def filter_update_attributes(sg, context):
'Filter out security-group attributes for an update operation.'
pass
|
Filter out security-group attributes for an update operation.
|
networking_odl/ml2/mech_driver.py
|
filter_update_attributes
|
gokarslan/networking-odl2
| 0 |
python
|
@staticmethod
def filter_update_attributes(sg, context):
pass
|
@staticmethod
def filter_update_attributes(sg, context):
pass<|docstring|>Filter out security-group attributes for an update operation.<|endoftext|>
|
bcf764df41173aa519a25744a2d824bf9f03f27a1a42eb981a4e7df8daac5339
|
@staticmethod
def filter_create_attributes(sg_rule, context):
'Filter out sg-rule attributes not required for a create.'
filters.filter_security_group_rule(sg_rule)
|
Filter out sg-rule attributes not required for a create.
|
networking_odl/ml2/mech_driver.py
|
filter_create_attributes
|
gokarslan/networking-odl2
| 0 |
python
|
@staticmethod
def filter_create_attributes(sg_rule, context):
filters.filter_security_group_rule(sg_rule)
|
@staticmethod
def filter_create_attributes(sg_rule, context):
filters.filter_security_group_rule(sg_rule)<|docstring|>Filter out sg-rule attributes not required for a create.<|endoftext|>
|
3716cec40d67121ff9d4d5b85af82d6df81d1a362aee8f9ab3e0cb22eca745a0
|
@staticmethod
def filter_update_attributes(sg_rule, context):
'Filter out sg-rule attributes for an update operation.'
filters.filter_security_group_rule(sg_rule)
|
Filter out sg-rule attributes for an update operation.
|
networking_odl/ml2/mech_driver.py
|
filter_update_attributes
|
gokarslan/networking-odl2
| 0 |
python
|
@staticmethod
def filter_update_attributes(sg_rule, context):
filters.filter_security_group_rule(sg_rule)
|
@staticmethod
def filter_update_attributes(sg_rule, context):
filters.filter_security_group_rule(sg_rule)<|docstring|>Filter out sg-rule attributes for an update operation.<|endoftext|>
|
9eda935d210b71f4219f192fee9d137a62b4f7590039b7646a37b8c09d0b2476
|
def synchronize(self, operation, object_type, context):
'Synchronize ODL with Neutron following a configuration change.'
if self.out_of_sync:
self.sync_full(context._plugin)
if (operation in [odl_const.ODL_UPDATE, odl_const.ODL_DELETE]):
self.sync_single_resource(operation, object_type, context)
else:
self.sync_single_resource(operation, object_type, context)
|
Synchronize ODL with Neutron following a configuration change.
|
networking_odl/ml2/mech_driver.py
|
synchronize
|
gokarslan/networking-odl2
| 0 |
python
|
def synchronize(self, operation, object_type, context):
if self.out_of_sync:
self.sync_full(context._plugin)
if (operation in [odl_const.ODL_UPDATE, odl_const.ODL_DELETE]):
self.sync_single_resource(operation, object_type, context)
else:
self.sync_single_resource(operation, object_type, context)
|
def synchronize(self, operation, object_type, context):
if self.out_of_sync:
self.sync_full(context._plugin)
if (operation in [odl_const.ODL_UPDATE, odl_const.ODL_DELETE]):
self.sync_single_resource(operation, object_type, context)
else:
self.sync_single_resource(operation, object_type, context)<|docstring|>Synchronize ODL with Neutron following a configuration change.<|endoftext|>
|
cf2826bb0a80aba57cbc643dc6c7eb64f88480874a2c6883acd6af42cdf14277
|
def sync_resources(self, plugin, dbcontext, collection_name):
'Sync objects from Neutron over to OpenDaylight.\n\n This will handle syncing networks, subnets, and ports from Neutron to\n OpenDaylight. It also filters out the requisite items which are not\n valid for create API operations.\n '
filter_cls = self.FILTER_MAP[collection_name]
to_be_synced = []
obj_getter = getattr(plugin, ('get_%s' % collection_name))
if (collection_name == odl_const.ODL_SGS):
resources = obj_getter(dbcontext, default_sg=True)
else:
resources = obj_getter(dbcontext)
for resource in resources:
try:
collection_name_url = odl_utils.neutronify(collection_name)
urlpath = ((collection_name_url + '/') + resource['id'])
self.client.sendjson('get', urlpath, None)
except requests.exceptions.HTTPError as e:
with excutils.save_and_reraise_exception() as ctx:
if (e.response.status_code == requests.codes.not_found):
filter_cls.filter_create_attributes_with_plugin(resource, plugin, dbcontext)
to_be_synced.append(resource)
ctx.reraise = False
else:
pass
if to_be_synced:
key = (collection_name[:(- 1)] if (len(to_be_synced) == 1) else collection_name)
collection_name_url = odl_utils.neutronify(collection_name)
self.client.sendjson('post', collection_name_url, {key: to_be_synced})
|
Sync objects from Neutron over to OpenDaylight.
This will handle syncing networks, subnets, and ports from Neutron to
OpenDaylight. It also filters out the requisite items which are not
valid for create API operations.
|
networking_odl/ml2/mech_driver.py
|
sync_resources
|
gokarslan/networking-odl2
| 0 |
python
|
def sync_resources(self, plugin, dbcontext, collection_name):
'Sync objects from Neutron over to OpenDaylight.\n\n This will handle syncing networks, subnets, and ports from Neutron to\n OpenDaylight. It also filters out the requisite items which are not\n valid for create API operations.\n '
filter_cls = self.FILTER_MAP[collection_name]
to_be_synced = []
obj_getter = getattr(plugin, ('get_%s' % collection_name))
if (collection_name == odl_const.ODL_SGS):
resources = obj_getter(dbcontext, default_sg=True)
else:
resources = obj_getter(dbcontext)
for resource in resources:
try:
collection_name_url = odl_utils.neutronify(collection_name)
urlpath = ((collection_name_url + '/') + resource['id'])
self.client.sendjson('get', urlpath, None)
except requests.exceptions.HTTPError as e:
with excutils.save_and_reraise_exception() as ctx:
if (e.response.status_code == requests.codes.not_found):
filter_cls.filter_create_attributes_with_plugin(resource, plugin, dbcontext)
to_be_synced.append(resource)
ctx.reraise = False
else:
pass
if to_be_synced:
key = (collection_name[:(- 1)] if (len(to_be_synced) == 1) else collection_name)
collection_name_url = odl_utils.neutronify(collection_name)
self.client.sendjson('post', collection_name_url, {key: to_be_synced})
|
def sync_resources(self, plugin, dbcontext, collection_name):
'Sync objects from Neutron over to OpenDaylight.\n\n This will handle syncing networks, subnets, and ports from Neutron to\n OpenDaylight. It also filters out the requisite items which are not\n valid for create API operations.\n '
filter_cls = self.FILTER_MAP[collection_name]
to_be_synced = []
obj_getter = getattr(plugin, ('get_%s' % collection_name))
if (collection_name == odl_const.ODL_SGS):
resources = obj_getter(dbcontext, default_sg=True)
else:
resources = obj_getter(dbcontext)
for resource in resources:
try:
collection_name_url = odl_utils.neutronify(collection_name)
urlpath = ((collection_name_url + '/') + resource['id'])
self.client.sendjson('get', urlpath, None)
except requests.exceptions.HTTPError as e:
with excutils.save_and_reraise_exception() as ctx:
if (e.response.status_code == requests.codes.not_found):
filter_cls.filter_create_attributes_with_plugin(resource, plugin, dbcontext)
to_be_synced.append(resource)
ctx.reraise = False
else:
pass
if to_be_synced:
key = (collection_name[:(- 1)] if (len(to_be_synced) == 1) else collection_name)
collection_name_url = odl_utils.neutronify(collection_name)
self.client.sendjson('post', collection_name_url, {key: to_be_synced})<|docstring|>Sync objects from Neutron over to OpenDaylight.
This will handle syncing networks, subnets, and ports from Neutron to
OpenDaylight. It also filters out the requisite items which are not
valid for create API operations.<|endoftext|>
|
92757af3305f47b06004e24db8f4e9f50dd9a56c9a1d837878bd5a9caeddde53
|
@runtime.synchronized('odl-sync-full')
def sync_full(self, plugin):
'Resync the entire database to ODL.\n\n Transition to the in-sync state on success.\n Note: we only allow a single thread in here at a time.\n '
if (not self.out_of_sync):
return
dbcontext = neutron_context.get_admin_context()
for collection_name in [odl_const.ODL_SGS, odl_const.ODL_SG_RULES, odl_const.ODL_NETWORKS, odl_const.ODL_SUBNETS, odl_const.ODL_PORTS]:
self.sync_resources(plugin, dbcontext, collection_name)
self.out_of_sync = False
|
Resync the entire database to ODL.
Transition to the in-sync state on success.
Note: we only allow a single thread in here at a time.
|
networking_odl/ml2/mech_driver.py
|
sync_full
|
gokarslan/networking-odl2
| 0 |
python
|
@runtime.synchronized('odl-sync-full')
def sync_full(self, plugin):
'Resync the entire database to ODL.\n\n Transition to the in-sync state on success.\n Note: we only allow a single thread in here at a time.\n '
if (not self.out_of_sync):
return
dbcontext = neutron_context.get_admin_context()
for collection_name in [odl_const.ODL_SGS, odl_const.ODL_SG_RULES, odl_const.ODL_NETWORKS, odl_const.ODL_SUBNETS, odl_const.ODL_PORTS]:
self.sync_resources(plugin, dbcontext, collection_name)
self.out_of_sync = False
|
@runtime.synchronized('odl-sync-full')
def sync_full(self, plugin):
'Resync the entire database to ODL.\n\n Transition to the in-sync state on success.\n Note: we only allow a single thread in here at a time.\n '
if (not self.out_of_sync):
return
dbcontext = neutron_context.get_admin_context()
for collection_name in [odl_const.ODL_SGS, odl_const.ODL_SG_RULES, odl_const.ODL_NETWORKS, odl_const.ODL_SUBNETS, odl_const.ODL_PORTS]:
self.sync_resources(plugin, dbcontext, collection_name)
self.out_of_sync = False<|docstring|>Resync the entire database to ODL.
Transition to the in-sync state on success.
Note: we only allow a single thread in here at a time.<|endoftext|>
|
742397cb8b8f7979a6eb3c5995543a9b56fef446db6f697e274555205a8730d2
|
def sync_single_resource(self, operation, object_type, context):
'Sync over a single resource from Neutron to OpenDaylight.\n\n Handle syncing a single operation over to OpenDaylight, and correctly\n filter attributes out which are not required for the requisite\n operation (create or update) being handled.\n '
object_type_url = odl_utils.neutronify(object_type)
try:
obj_id = context.current['id']
if (operation == odl_const.ODL_DELETE):
self.out_of_sync |= (not self.client.try_delete(((object_type_url + '/') + obj_id)))
else:
filter_cls = self.FILTER_MAP[object_type]
if (operation == odl_const.ODL_CREATE):
urlpath = object_type_url
method = 'post'
attr_filter = filter_cls.filter_create_attributes
elif (operation == odl_const.ODL_UPDATE):
urlpath = ((object_type_url + '/') + obj_id)
method = 'put'
attr_filter = filter_cls.filter_update_attributes
resource = copy.deepcopy(context.current)
attr_filter(resource, context)
self.client.sendjson(method, urlpath, {object_type_url[:(- 1)]: resource})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Unable to perform %(operation)s on %(object_type)s %(object_id)s', {'operation': operation, 'object_type': object_type, 'object_id': obj_id})
self.out_of_sync = True
|
Sync over a single resource from Neutron to OpenDaylight.
Handle syncing a single operation over to OpenDaylight, and correctly
filter attributes out which are not required for the requisite
operation (create or update) being handled.
|
networking_odl/ml2/mech_driver.py
|
sync_single_resource
|
gokarslan/networking-odl2
| 0 |
python
|
def sync_single_resource(self, operation, object_type, context):
'Sync over a single resource from Neutron to OpenDaylight.\n\n Handle syncing a single operation over to OpenDaylight, and correctly\n filter attributes out which are not required for the requisite\n operation (create or update) being handled.\n '
object_type_url = odl_utils.neutronify(object_type)
try:
obj_id = context.current['id']
if (operation == odl_const.ODL_DELETE):
self.out_of_sync |= (not self.client.try_delete(((object_type_url + '/') + obj_id)))
else:
filter_cls = self.FILTER_MAP[object_type]
if (operation == odl_const.ODL_CREATE):
urlpath = object_type_url
method = 'post'
attr_filter = filter_cls.filter_create_attributes
elif (operation == odl_const.ODL_UPDATE):
urlpath = ((object_type_url + '/') + obj_id)
method = 'put'
attr_filter = filter_cls.filter_update_attributes
resource = copy.deepcopy(context.current)
attr_filter(resource, context)
self.client.sendjson(method, urlpath, {object_type_url[:(- 1)]: resource})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Unable to perform %(operation)s on %(object_type)s %(object_id)s', {'operation': operation, 'object_type': object_type, 'object_id': obj_id})
self.out_of_sync = True
|
def sync_single_resource(self, operation, object_type, context):
'Sync over a single resource from Neutron to OpenDaylight.\n\n Handle syncing a single operation over to OpenDaylight, and correctly\n filter attributes out which are not required for the requisite\n operation (create or update) being handled.\n '
object_type_url = odl_utils.neutronify(object_type)
try:
obj_id = context.current['id']
if (operation == odl_const.ODL_DELETE):
self.out_of_sync |= (not self.client.try_delete(((object_type_url + '/') + obj_id)))
else:
filter_cls = self.FILTER_MAP[object_type]
if (operation == odl_const.ODL_CREATE):
urlpath = object_type_url
method = 'post'
attr_filter = filter_cls.filter_create_attributes
elif (operation == odl_const.ODL_UPDATE):
urlpath = ((object_type_url + '/') + obj_id)
method = 'put'
attr_filter = filter_cls.filter_update_attributes
resource = copy.deepcopy(context.current)
attr_filter(resource, context)
self.client.sendjson(method, urlpath, {object_type_url[:(- 1)]: resource})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Unable to perform %(operation)s on %(object_type)s %(object_id)s', {'operation': operation, 'object_type': object_type, 'object_id': obj_id})
self.out_of_sync = True<|docstring|>Sync over a single resource from Neutron to OpenDaylight.
Handle syncing a single operation over to OpenDaylight, and correctly
filter attributes out which are not required for the requisite
operation (create or update) being handled.<|endoftext|>
|
6f58516908296ab0629c5d22aff05c7df5f3f01ae220d269a009c955d60b1785
|
def bind_port(self, port_context):
    'Set binding for valid segments\n\n '
self.port_binding_controller.bind_port(port_context)
|
Set binding for valid segments
|
networking_odl/ml2/mech_driver.py
|
bind_port
|
gokarslan/networking-odl2
| 0 |
python
|
def bind_port(self, port_context):
'\n\n '
self.port_binding_controller.bind_port(port_context)
|
def bind_port(self, port_context):
'\n\n '
    self.port_binding_controller.bind_port(port_context)<|docstring|>Set binding for valid segments<|endoftext|>
|
c0b862eb53523ac7f0d3286cb85636e240f785df02d079098a779489138ddbf3
|
def check_vlan_transparency(self, context):
'Check VLAN transparency\n\n '
VLAN_TRANSPARENT_NETWORK_TYPES = [p_const.TYPE_VXLAN]
network = context.current
if (providernet.NETWORK_TYPE in network):
return (network[providernet.NETWORK_TYPE] in VLAN_TRANSPARENT_NETWORK_TYPES)
segments = network.get(mpnet.SEGMENTS)
if (segments is None):
return True
return all(((segment[providernet.NETWORK_TYPE] in VLAN_TRANSPARENT_NETWORK_TYPES) for segment in segments))
|
Check VLAN transparency
|
networking_odl/ml2/mech_driver.py
|
check_vlan_transparency
|
gokarslan/networking-odl2
| 0 |
python
|
def check_vlan_transparency(self, context):
'\n\n '
VLAN_TRANSPARENT_NETWORK_TYPES = [p_const.TYPE_VXLAN]
network = context.current
if (providernet.NETWORK_TYPE in network):
return (network[providernet.NETWORK_TYPE] in VLAN_TRANSPARENT_NETWORK_TYPES)
segments = network.get(mpnet.SEGMENTS)
if (segments is None):
return True
return all(((segment[providernet.NETWORK_TYPE] in VLAN_TRANSPARENT_NETWORK_TYPES) for segment in segments))
|
def check_vlan_transparency(self, context):
'\n\n '
VLAN_TRANSPARENT_NETWORK_TYPES = [p_const.TYPE_VXLAN]
network = context.current
if (providernet.NETWORK_TYPE in network):
return (network[providernet.NETWORK_TYPE] in VLAN_TRANSPARENT_NETWORK_TYPES)
segments = network.get(mpnet.SEGMENTS)
if (segments is None):
return True
return all(((segment[providernet.NETWORK_TYPE] in VLAN_TRANSPARENT_NETWORK_TYPES) for segment in segments))<|docstring|>Check VLAN transparency<|endoftext|>
|
f7344443ffd67771fcb651f6c0bc8dd0a619e6bffa634a8fed2838c7c61b8f4c
|
def setUp(self):
'Set up test fixtures'
self.options = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 2}
self.safe_bounds = (np.array([(- 5), (- 5)]), np.array([5, 5]))
self.optimizer = LocalBestPSO(10, 2, options=self.options)
|
Set up test fixtures
|
tests/optimizers/test_local_best.py
|
setUp
|
ahcantao/pyswarms
| 0 |
python
|
def setUp(self):
self.options = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 2}
self.safe_bounds = (np.array([(- 5), (- 5)]), np.array([5, 5]))
self.optimizer = LocalBestPSO(10, 2, options=self.options)
|
def setUp(self):
self.options = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 2}
self.safe_bounds = (np.array([(- 5), (- 5)]), np.array([5, 5]))
self.optimizer = LocalBestPSO(10, 2, options=self.options)<|docstring|>Set up test fixtures<|endoftext|>
|
96c70d2963c45981f275d89ebf0c775614a66c08859820fbdd41c2445043cb24
|
def test_keyword_check_fail(self):
'Tests if exceptions are thrown when keywords are missing'
check_c1 = {'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 2}
check_c2 = {'c1': 0.5, 'w': 0.5, 'k': 2, 'p': 2}
check_m = {'c1': 0.5, 'c2': 0.7, 'k': 2, 'p': 2}
check_k = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'p': 2}
check_p = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2}
with self.assertRaises(KeyError):
optimizer = LocalBestPSO(5, 2, options=check_c1)
with self.assertRaises(KeyError):
optimizer = LocalBestPSO(5, 2, options=check_c2)
with self.assertRaises(KeyError):
optimizer = LocalBestPSO(5, 2, options=check_m)
with self.assertRaises(KeyError):
optimizer = LocalBestPSO(5, 2, options=check_k)
with self.assertRaises(KeyError):
optimizer = LocalBestPSO(5, 2, options=check_p)
|
Tests if exceptions are thrown when keywords are missing
|
tests/optimizers/test_local_best.py
|
test_keyword_check_fail
|
ahcantao/pyswarms
| 0 |
python
|
def test_keyword_check_fail(self):
check_c1 = {'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 2}
check_c2 = {'c1': 0.5, 'w': 0.5, 'k': 2, 'p': 2}
check_m = {'c1': 0.5, 'c2': 0.7, 'k': 2, 'p': 2}
check_k = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'p': 2}
check_p = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2}
with self.assertRaises(KeyError):
optimizer = LocalBestPSO(5, 2, options=check_c1)
with self.assertRaises(KeyError):
optimizer = LocalBestPSO(5, 2, options=check_c2)
with self.assertRaises(KeyError):
optimizer = LocalBestPSO(5, 2, options=check_m)
with self.assertRaises(KeyError):
optimizer = LocalBestPSO(5, 2, options=check_k)
with self.assertRaises(KeyError):
optimizer = LocalBestPSO(5, 2, options=check_p)
|
def test_keyword_check_fail(self):
check_c1 = {'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 2}
check_c2 = {'c1': 0.5, 'w': 0.5, 'k': 2, 'p': 2}
check_m = {'c1': 0.5, 'c2': 0.7, 'k': 2, 'p': 2}
check_k = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'p': 2}
check_p = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2}
with self.assertRaises(KeyError):
optimizer = LocalBestPSO(5, 2, options=check_c1)
with self.assertRaises(KeyError):
optimizer = LocalBestPSO(5, 2, options=check_c2)
with self.assertRaises(KeyError):
optimizer = LocalBestPSO(5, 2, options=check_m)
with self.assertRaises(KeyError):
optimizer = LocalBestPSO(5, 2, options=check_k)
with self.assertRaises(KeyError):
optimizer = LocalBestPSO(5, 2, options=check_p)<|docstring|>Tests if exceptions are thrown when keywords are missing<|endoftext|>
|
ce500ec09d57e02b2bc8fbb1cac8c604c4df52747ceeca2468e9983263a13003
|
def test_bound_size_fail(self):
'Tests if exception is thrown when bound length is not 2'
bounds = tuple(np.array([(- 5), (- 5)]))
with self.assertRaises(IndexError):
optimizer = LocalBestPSO(5, 2, bounds=bounds, options=self.options)
|
Tests if exception is thrown when bound length is not 2
|
tests/optimizers/test_local_best.py
|
test_bound_size_fail
|
ahcantao/pyswarms
| 0 |
python
|
def test_bound_size_fail(self):
bounds = tuple(np.array([(- 5), (- 5)]))
with self.assertRaises(IndexError):
optimizer = LocalBestPSO(5, 2, bounds=bounds, options=self.options)
|
def test_bound_size_fail(self):
bounds = tuple(np.array([(- 5), (- 5)]))
with self.assertRaises(IndexError):
optimizer = LocalBestPSO(5, 2, bounds=bounds, options=self.options)<|docstring|>Tests if exception is thrown when bound length is not 2<|endoftext|>
|
aefadaa1a03004543334cc320acdce92272e25ab61a888940d4072cde6670cbc
|
def test_bound_type_fail(self):
'Tests if exception is thrown when bound type is not tuple'
bounds = [np.array([(- 5), (- 5)]), np.array([5, 5])]
with self.assertRaises(TypeError):
optimizer = LocalBestPSO(5, 2, bounds=bounds, options=self.options)
|
Tests if exception is thrown when bound type is not tuple
|
tests/optimizers/test_local_best.py
|
test_bound_type_fail
|
ahcantao/pyswarms
| 0 |
python
|
def test_bound_type_fail(self):
bounds = [np.array([(- 5), (- 5)]), np.array([5, 5])]
with self.assertRaises(TypeError):
optimizer = LocalBestPSO(5, 2, bounds=bounds, options=self.options)
|
def test_bound_type_fail(self):
bounds = [np.array([(- 5), (- 5)]), np.array([5, 5])]
with self.assertRaises(TypeError):
optimizer = LocalBestPSO(5, 2, bounds=bounds, options=self.options)<|docstring|>Tests if exception is thrown when bound type is not tuple<|endoftext|>
|
211dcc619425fe6b77b56002f0c6955a6855158c4170455fdb5064c3b011a351
|
def test_bound_maxmin_fail(self):
    'Tests if exception is thrown when the min/max of the bound is\n wrong.'
bounds_1 = (np.array([5, 5]), np.array([(- 5), (- 5)]))
bounds_2 = (np.array([5, (- 5)]), np.array([(- 5), 5]))
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, bounds=bounds_1, options=self.options)
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, bounds=bounds_2, options=self.options)
|
Tests if exception is thrown when the min/max of the bound is
wrong.
|
tests/optimizers/test_local_best.py
|
test_bound_maxmin_fail
|
ahcantao/pyswarms
| 0 |
python
|
def test_bound_maxmin_fail(self):
    'Tests if exception is thrown when the min/max of the bound is\n wrong.'
bounds_1 = (np.array([5, 5]), np.array([(- 5), (- 5)]))
bounds_2 = (np.array([5, (- 5)]), np.array([(- 5), 5]))
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, bounds=bounds_1, options=self.options)
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, bounds=bounds_2, options=self.options)
|
def test_bound_maxmin_fail(self):
    'Tests if exception is thrown when the min/max of the bound is\n wrong.'
bounds_1 = (np.array([5, 5]), np.array([(- 5), (- 5)]))
bounds_2 = (np.array([5, (- 5)]), np.array([(- 5), 5]))
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, bounds=bounds_1, options=self.options)
with self.assertRaises(ValueError):
        optimizer = LocalBestPSO(5, 2, bounds=bounds_2, options=self.options)<|docstring|>Tests if exception is thrown when the min/max of the bound is
wrong.<|endoftext|>
|
f8ed45ae329f8d1e3bb705ce0d23a4807a7d963dc1c6e2936532f814328be5b7
|
def test_bound_shapes_fail(self):
'Tests if exception is thrown when bounds are of unequal\n shapes.'
bounds = (np.array([(- 5), (- 5), (- 5)]), np.array([5, 5]))
with self.assertRaises(IndexError):
optimizer = LocalBestPSO(5, 2, bounds=bounds, options=self.options)
|
Tests if exception is thrown when bounds are of unequal
shapes.
|
tests/optimizers/test_local_best.py
|
test_bound_shapes_fail
|
ahcantao/pyswarms
| 0 |
python
|
def test_bound_shapes_fail(self):
'Tests if exception is thrown when bounds are of unequal\n shapes.'
bounds = (np.array([(- 5), (- 5), (- 5)]), np.array([5, 5]))
with self.assertRaises(IndexError):
optimizer = LocalBestPSO(5, 2, bounds=bounds, options=self.options)
|
def test_bound_shapes_fail(self):
'Tests if exception is thrown when bounds are of unequal\n shapes.'
bounds = (np.array([(- 5), (- 5), (- 5)]), np.array([5, 5]))
with self.assertRaises(IndexError):
optimizer = LocalBestPSO(5, 2, bounds=bounds, options=self.options)<|docstring|>Tests if exception is thrown when bounds are of unequal
shapes.<|endoftext|>
|
315188ad65f16eb34289b8384e9e5d2b2aef2f709c3ebed39ad566702c6b96c3
|
def test_bound_shape_dims_fail(self):
'Tests if exception is thrown when bound shape is not equal\n to dimensions.'
bounds = (np.array([(- 5), (- 5), (- 5)]), np.array([5, 5, 5]))
with self.assertRaises(IndexError):
optimizer = LocalBestPSO(5, 2, bounds=bounds, options=self.options)
|
Tests if exception is thrown when bound shape is not equal
to dimensions.
|
tests/optimizers/test_local_best.py
|
test_bound_shape_dims_fail
|
ahcantao/pyswarms
| 0 |
python
|
def test_bound_shape_dims_fail(self):
'Tests if exception is thrown when bound shape is not equal\n to dimensions.'
bounds = (np.array([(- 5), (- 5), (- 5)]), np.array([5, 5, 5]))
with self.assertRaises(IndexError):
optimizer = LocalBestPSO(5, 2, bounds=bounds, options=self.options)
|
def test_bound_shape_dims_fail(self):
'Tests if exception is thrown when bound shape is not equal\n to dimensions.'
bounds = (np.array([(- 5), (- 5), (- 5)]), np.array([5, 5, 5]))
with self.assertRaises(IndexError):
optimizer = LocalBestPSO(5, 2, bounds=bounds, options=self.options)<|docstring|>Tests if exception is thrown when bound shape is not equal
to dimensions.<|endoftext|>
|
539b26db3cb9ed67529aef0deb1e3b9788c6c832e62744568bb8303d03b949b5
|
def test_k_fail(self):
'Tests if exception is thrown when feeding an invalid k.'
k_less_than_min = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': (- 1), 'p': 2}
k_more_than_max = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 6, 'p': 2}
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, bounds=self.safe_bounds, options=k_less_than_min)
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, bounds=self.safe_bounds, options=k_more_than_max)
|
Tests if exception is thrown when feeding an invalid k.
|
tests/optimizers/test_local_best.py
|
test_k_fail
|
ahcantao/pyswarms
| 0 |
python
|
def test_k_fail(self):
k_less_than_min = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': (- 1), 'p': 2}
k_more_than_max = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 6, 'p': 2}
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, bounds=self.safe_bounds, options=k_less_than_min)
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, bounds=self.safe_bounds, options=k_more_than_max)
|
def test_k_fail(self):
k_less_than_min = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': (- 1), 'p': 2}
k_more_than_max = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 6, 'p': 2}
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, bounds=self.safe_bounds, options=k_less_than_min)
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, bounds=self.safe_bounds, options=k_more_than_max)<|docstring|>Tests if exception is thrown when feeding an invalid k.<|endoftext|>
|
f882520055fc01730a5b63f1c3d6c9b59f17264ef0af9075d4c864f661162cdb
|
def test_p_fail(self):
'Tests if exception is thrown when feeding an invalid p.'
p_fail = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 5}
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, options=p_fail)
|
Tests if exception is thrown when feeding an invalid p.
|
tests/optimizers/test_local_best.py
|
test_p_fail
|
ahcantao/pyswarms
| 0 |
python
|
def test_p_fail(self):
p_fail = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 5}
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, options=p_fail)
|
def test_p_fail(self):
p_fail = {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 5}
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, options=p_fail)<|docstring|>Tests if exception is thrown when feeding an invalid p.<|endoftext|>
|
30b7437327ebad3e46c1cc93e228fd90f36b5ce5c976472980b4cf291e8fee4f
|
def test_vclamp_type_fail(self):
'Tests if exception is thrown when velocity_clamp is not a tuple.'
velocity_clamp = [1, 3]
with self.assertRaises(TypeError):
optimizer = LocalBestPSO(5, 2, velocity_clamp=velocity_clamp, options=self.options)
|
Tests if exception is thrown when velocity_clamp is not a tuple.
|
tests/optimizers/test_local_best.py
|
test_vclamp_type_fail
|
ahcantao/pyswarms
| 0 |
python
|
def test_vclamp_type_fail(self):
velocity_clamp = [1, 3]
with self.assertRaises(TypeError):
optimizer = LocalBestPSO(5, 2, velocity_clamp=velocity_clamp, options=self.options)
|
def test_vclamp_type_fail(self):
velocity_clamp = [1, 3]
with self.assertRaises(TypeError):
optimizer = LocalBestPSO(5, 2, velocity_clamp=velocity_clamp, options=self.options)<|docstring|>Tests if exception is thrown when velocity_clamp is not a tuple.<|endoftext|>
|
16add44400dbcbefe0c5c6d8cd9498130d193f6d4a378b89bcf423711024bd78
|
def test_vclamp_shape_fail(self):
    'Tests if exception is thrown when the length of velocity_clamp is not equal to 2'
velocity_clamp = (1, 1, 1)
with self.assertRaises(IndexError):
optimizer = LocalBestPSO(5, 2, velocity_clamp=velocity_clamp, options=self.options)
|
Tests if exception is thrown when the length of velocity_clamp is not equal to 2
|
tests/optimizers/test_local_best.py
|
test_vclamp_shape_fail
|
ahcantao/pyswarms
| 0 |
python
|
def test_vclamp_shape_fail(self):
velocity_clamp = (1, 1, 1)
with self.assertRaises(IndexError):
optimizer = LocalBestPSO(5, 2, velocity_clamp=velocity_clamp, options=self.options)
|
def test_vclamp_shape_fail(self):
velocity_clamp = (1, 1, 1)
with self.assertRaises(IndexError):
        optimizer = LocalBestPSO(5, 2, velocity_clamp=velocity_clamp, options=self.options)<|docstring|>Tests if exception is thrown when the length of velocity_clamp is not equal to 2<|endoftext|>
|
826f942a8e848bf4a3f0771e09c49ce4769cb1b572ac79bab1e92d4972971222
|
def test_vclamp_minmax_fail(self):
"Tests if exception is thrown when velocity_clamp's minmax is wrong"
velocity_clamp = (3, 2)
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, velocity_clamp=velocity_clamp, options=self.options)
|
Tests if exception is thrown when velocity_clamp's min/max is wrong
|
tests/optimizers/test_local_best.py
|
test_vclamp_minmax_fail
|
ahcantao/pyswarms
| 0 |
python
|
def test_vclamp_minmax_fail(self):
velocity_clamp = (3, 2)
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, velocity_clamp=velocity_clamp, options=self.options)
|
def test_vclamp_minmax_fail(self):
velocity_clamp = (3, 2)
with self.assertRaises(ValueError):
optimizer = LocalBestPSO(5, 2, velocity_clamp=velocity_clamp, options=self.options)<|docstring|>Tests if exception is thrown when velocity_clamp's minmax is wrong<|endoftext|>
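The three velocity_clamp records encode a full validation chain: a non-tuple raises TypeError, a tuple whose length is not 2 raises IndexError, and an inverted (min, max) pair raises ValueError. A clamp satisfying all three checks, under the same assumed API as the sketch above:

```python
# Sketch only: velocity_clamp must be a 2-tuple (v_min, v_max) with v_min < v_max.
velocity_clamp = (-0.5, 0.5)
optimizer = LocalBestPSO(5, 2, velocity_clamp=velocity_clamp,
                         options={'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 2})
```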
|
a9deb83dc04f6c553fc871dbd29dd97ff2716f36156e45550bd6da3244771454
|
def test_init_pos_type_fail(self):
'Tests if exception is thrown when init_pos is not a list.'
init_pos = (0.1, 1.5)
with self.assertRaises(TypeError):
optimizer = LocalBestPSO(5, 2, init_pos=init_pos, options=self.options)
|
Tests if exception is thrown when init_pos is not a list.
|
tests/optimizers/test_local_best.py
|
test_init_pos_type_fail
|
ahcantao/pyswarms
| 0 |
python
|
def test_init_pos_type_fail(self):
init_pos = (0.1, 1.5)
with self.assertRaises(TypeError):
optimizer = LocalBestPSO(5, 2, init_pos=init_pos, options=self.options)
|
def test_init_pos_type_fail(self):
init_pos = (0.1, 1.5)
with self.assertRaises(TypeError):
optimizer = LocalBestPSO(5, 2, init_pos=init_pos, options=self.options)<|docstring|>Tests if exception is thrown when init_pos is not a list.<|endoftext|>
|
5ab632cdb081c54981c54869c8092db70dc0c524a2eb23a443ba4b436bea5b97
|
def test_init_pos_shape_fail(self):
'Tests if exception is thrown when init_pos dimension is not equal\n to dimensions'
init_pos = [1.5, 3.2, 2.5]
with self.assertRaises(IndexError):
optimizer = LocalBestPSO(5, 2, init_pos=init_pos, options=self.options)
|
Tests if exception is thrown when init_pos dimension is not equal
to dimensions
|
tests/optimizers/test_local_best.py
|
test_init_pos_shape_fail
|
ahcantao/pyswarms
| 0 |
python
|
def test_init_pos_shape_fail(self):
'Tests if exception is thrown when init_pos dimension is not equal\n to dimensions'
init_pos = [1.5, 3.2, 2.5]
with self.assertRaises(IndexError):
optimizer = LocalBestPSO(5, 2, init_pos=init_pos, options=self.options)
|
def test_init_pos_shape_fail(self):
'Tests if exception is thrown when init_pos dimension is not equal\n to dimensions'
init_pos = [1.5, 3.2, 2.5]
with self.assertRaises(IndexError):
optimizer = LocalBestPSO(5, 2, init_pos=init_pos, options=self.options)<|docstring|>Tests if exception is thrown when init_pos dimension is not equal
to dimensions<|endoftext|>
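The two init_pos records mirror that pattern: a non-list raises TypeError and a list whose length differs from the dimensions argument raises IndexError. A passing call, under the same assumptions:

```python
# Sketch only: init_pos must be a list with one entry per dimension (here 2).
init_pos = [0.1, 1.5]
optimizer = LocalBestPSO(5, 2, init_pos=init_pos,
                         options={'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 2})
```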
|
688acb81bc4b102500c18f7e43e3813f09b4c400b664a63cc014a3c7ff807506
|
def test_reset_best_cost_inf(self):
'Tests if best cost is set to infinity when reset() is called'
optimizer = LocalBestPSO(5, 2, options=self.options)
optimizer.optimize(sphere_func, 100, verbose=0)
optimizer.reset()
self.assertEqual(optimizer.best_cost, np.inf)
|
Tests if best cost is set to infinity when reset() is called
|
tests/optimizers/test_local_best.py
|
test_reset_best_cost_inf
|
ahcantao/pyswarms
| 0 |
python
|
def test_reset_best_cost_inf(self):
optimizer = LocalBestPSO(5, 2, options=self.options)
optimizer.optimize(sphere_func, 100, verbose=0)
optimizer.reset()
self.assertEqual(optimizer.best_cost, np.inf)
|
def test_reset_best_cost_inf(self):
optimizer = LocalBestPSO(5, 2, options=self.options)
optimizer.optimize(sphere_func, 100, verbose=0)
optimizer.reset()
self.assertEqual(optimizer.best_cost, np.inf)<|docstring|>Tests if best cost is set to infinity when reset() is called<|endoftext|>
|
17618043161448b71ddd652b832d2f006f72a11e1116849fe7ef1fc27e45f0cb
|
def test_reset_best_pos_none(self):
'Tests if best pos is set to NoneType when reset() is called'
optimizer = LocalBestPSO(5, 2, options=self.options)
optimizer.optimize(sphere_func, 100, verbose=0)
optimizer.reset()
self.assertIsNone(optimizer.best_pos)
|
Tests if best pos is set to NoneType when reset() is called
|
tests/optimizers/test_local_best.py
|
test_reset_best_pos_none
|
ahcantao/pyswarms
| 0 |
python
|
def test_reset_best_pos_none(self):
optimizer = LocalBestPSO(5, 2, options=self.options)
optimizer.optimize(sphere_func, 100, verbose=0)
optimizer.reset()
self.assertIsNone(optimizer.best_pos)
|
def test_reset_best_pos_none(self):
optimizer = LocalBestPSO(5, 2, options=self.options)
optimizer.optimize(sphere_func, 100, verbose=0)
optimizer.reset()
self.assertIsNone(optimizer.best_pos)<|docstring|>Tests if best pos is set to NoneType when reset() is called<|endoftext|>
|
4a4e308dc51d299f98eeed524c50e7397956e07cb59d2b6e710d8ac7acf94cb3
|
def test_run_optimize(self):
'Perform a single run.'
try:
self.optimizer.optimize(sphere_func, 1000, verbose=0)
trigger = True
except:
print('Execution failed.')
trigger = False
self.assertTrue(trigger)
|
Perform a single run.
|
tests/optimizers/test_local_best.py
|
test_run_optimize
|
ahcantao/pyswarms
| 0 |
python
|
def test_run_optimize(self):
try:
self.optimizer.optimize(sphere_func, 1000, verbose=0)
trigger = True
except:
print('Execution failed.')
trigger = False
self.assertTrue(trigger)
|
def test_run_optimize(self):
try:
self.optimizer.optimize(sphere_func, 1000, verbose=0)
trigger = True
except:
print('Execution failed.')
trigger = False
self.assertTrue(trigger)<|docstring|>Perform a single run.<|endoftext|>
|
1490fdfc6bccb3d2b0a09580bc57685caf1239e88c4a263a0e78b7a8054600ec
|
def test_cost_history_size(self):
'Check the size of the cost_history.'
self.optimizer.optimize(sphere_func, 1000, verbose=0)
cost_hist = self.optimizer.get_cost_history
self.assertEqual(cost_hist.shape, (1000,))
|
Check the size of the cost_history.
|
tests/optimizers/test_local_best.py
|
test_cost_history_size
|
ahcantao/pyswarms
| 0 |
python
|
def test_cost_history_size(self):
self.optimizer.optimize(sphere_func, 1000, verbose=0)
cost_hist = self.optimizer.get_cost_history
self.assertEqual(cost_hist.shape, (1000,))
|
def test_cost_history_size(self):
self.optimizer.optimize(sphere_func, 1000, verbose=0)
cost_hist = self.optimizer.get_cost_history
self.assertEqual(cost_hist.shape, (1000,))<|docstring|>Check the size of the cost_history.<|endoftext|>
|
e75b428abbbba5c24d150945efc852aea48d5e53d617fdd91e176261863f9d61
|
def test_mean_pbest_history_size(self):
'Check the size of the mean_pbest_history.'
self.optimizer.optimize(sphere_func, 1000, verbose=0)
mean_pbest_hist = self.optimizer.get_mean_pbest_history
self.assertEqual(mean_pbest_hist.shape, (1000,))
|
Check the size of the mean_pbest_history.
|
tests/optimizers/test_local_best.py
|
test_mean_pbest_history_size
|
ahcantao/pyswarms
| 0 |
python
|
def test_mean_pbest_history_size(self):
self.optimizer.optimize(sphere_func, 1000, verbose=0)
mean_pbest_hist = self.optimizer.get_mean_pbest_history
self.assertEqual(mean_pbest_hist.shape, (1000,))
|
def test_mean_pbest_history_size(self):
self.optimizer.optimize(sphere_func, 1000, verbose=0)
mean_pbest_hist = self.optimizer.get_mean_pbest_history
self.assertEqual(mean_pbest_hist.shape, (1000,))<|docstring|>Check the size of the mean_pbest_history.<|endoftext|>
|
ee8d3a78d7e73aefa04d7e3a13146fa48e0af81c2b722816270792c1c6dc73a0
|
def test_mean_neighbor_history_size(self):
'Check the size of the mean neighborhood history.'
self.optimizer.optimize(sphere_func, 1000, verbose=0)
mean_neighbor_hist = self.optimizer.get_mean_neighbor_history
self.assertEqual(mean_neighbor_hist.shape, (1000,))
|
Check the size of the mean neighborhood history.
|
tests/optimizers/test_local_best.py
|
test_mean_neighbor_history_size
|
ahcantao/pyswarms
| 0 |
python
|
def test_mean_neighbor_history_size(self):
self.optimizer.optimize(sphere_func, 1000, verbose=0)
mean_neighbor_hist = self.optimizer.get_mean_neighbor_history
self.assertEqual(mean_neighbor_hist.shape, (1000,))
|
def test_mean_neighbor_history_size(self):
self.optimizer.optimize(sphere_func, 1000, verbose=0)
mean_neighbor_hist = self.optimizer.get_mean_neighbor_history
self.assertEqual(mean_neighbor_hist.shape, (1000,))<|docstring|>Check the size of the mean neighborhood history.<|endoftext|>
|
bb920536ca4d591c99bcc16208f36f1d2ce4eae3d81e6c4adfeb83561cdeade4
|
def test_pos_history_size(self):
'Check the size of the pos_history.'
self.optimizer.optimize(sphere_func, 1000, verbose=0)
pos_hist = self.optimizer.get_pos_history
self.assertEqual(pos_hist.shape, (1000, 10, 2))
|
Check the size of the pos_history.
|
tests/optimizers/test_local_best.py
|
test_pos_history_size
|
ahcantao/pyswarms
| 0 |
python
|
def test_pos_history_size(self):
self.optimizer.optimize(sphere_func, 1000, verbose=0)
pos_hist = self.optimizer.get_pos_history
self.assertEqual(pos_hist.shape, (1000, 10, 2))
|
def test_pos_history_size(self):
self.optimizer.optimize(sphere_func, 1000, verbose=0)
pos_hist = self.optimizer.get_pos_history
self.assertEqual(pos_hist.shape, (1000, 10, 2))<|docstring|>Check the size of the pos_history.<|endoftext|>
|
b99ad300385132ccf81cf9b1a1711b366d6fa3a814ef225dd52065a668c839b9
|
def test_velocity_history_size(self):
'Check the size of the velocity_history.'
self.optimizer.optimize(sphere_func, 1000, verbose=0)
velocity_hist = self.optimizer.get_velocity_history
self.assertEqual(velocity_hist.shape, (1000, 10, 2))
|
Check the size of the velocity_history.
|
tests/optimizers/test_local_best.py
|
test_velocity_history_size
|
ahcantao/pyswarms
| 0 |
python
|
def test_velocity_history_size(self):
self.optimizer.optimize(sphere_func, 1000, verbose=0)
velocity_hist = self.optimizer.get_velocity_history
self.assertEqual(velocity_hist.shape, (1000, 10, 2))
|
def test_velocity_history_size(self):
self.optimizer.optimize(sphere_func, 1000, verbose=0)
velocity_hist = self.optimizer.get_velocity_history
self.assertEqual(velocity_hist.shape, (1000, 10, 2))<|docstring|>Check the size of the velocity_history.<|endoftext|>
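The five history records above share one shape convention: per-iteration scalars (cost, mean personal best, mean neighborhood cost) come back as (iters,) arrays, while per-particle state (position, velocity) comes back as (iters, n_particles, dimensions). A sketch exercising both, assuming the same 10-particle, 2-dimensional fixture and the sphere_func these tests import:

```python
# Sketch only: mirrors the fixtures above (10 particles, 2 dims, 1000 iters).
optimizer = LocalBestPSO(10, 2, options={'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 2})
optimizer.optimize(sphere_func, 1000, verbose=0)

assert optimizer.get_cost_history.shape == (1000,)            # scalar per iteration
assert optimizer.get_pos_history.shape == (1000, 10, 2)       # per-particle positions
assert optimizer.get_velocity_history.shape == (1000, 10, 2)  # per-particle velocities
```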
|
d57ec1cdcc7a50f33fc24573b0bb58b08fb8835616a7ab915dc47a7773c0315c
|
def test_ftol_effect(self):
'Check if setting ftol breaks the optimization process\n accordingly.'
optimizer = LocalBestPSO(10, 2, options=self.options, ftol=0.1)
optimizer.optimize(sphere_func, 5000, verbose=0)
cost_hist = optimizer.get_cost_history
self.assertNotEqual(cost_hist.shape, (5000,))
|
Check if setting ftol breaks the optimization process
accordingly.
|
tests/optimizers/test_local_best.py
|
test_ftol_effect
|
ahcantao/pyswarms
| 0 |
python
|
def test_ftol_effect(self):
'Check if setting ftol breaks the optimization process\n accordingly.'
optimizer = LocalBestPSO(10, 2, options=self.options, ftol=0.1)
optimizer.optimize(sphere_func, 5000, verbose=0)
cost_hist = optimizer.get_cost_history
self.assertNotEqual(cost_hist.shape, (5000,))
|
def test_ftol_effect(self):
'Check if setting ftol breaks the optimization process\n accordingly.'
optimizer = LocalBestPSO(10, 2, options=self.options, ftol=0.1)
optimizer.optimize(sphere_func, 5000, verbose=0)
cost_hist = optimizer.get_cost_history
self.assertNotEqual(cost_hist.shape, (5000,))<|docstring|>Check if setting ftol breaks the optimization process
accordingly.<|endoftext|>
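Note why the assertion is assertNotEqual rather than a fixed length: ftol ends the run as soon as the improvement in best cost drops below the tolerance, and the exact stopping iteration varies from run to run. The only stable property is that the recorded history is shorter than the 5000 requested iterations.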
|
de4de603feba75af7059b03e3af37fd4240b0bcd3c54d3fff22d46a4730e604a
|
def with_traced_module(func):
'Helper for providing tracing essentials (module and pin) for tracing\n wrappers.\n\n This helper enables tracing wrappers to dynamically be disabled when the\n corresponding pin is disabled.\n\n Usage::\n\n @with_traced_module\n def my_traced_wrapper(django, pin, func, instance, args, kwargs):\n # Do tracing stuff\n pass\n\n def patch():\n import django\n wrap(django.somefunc, my_traced_wrapper(django))\n '
def with_mod(mod):
def wrapper(wrapped, instance, args, kwargs):
pin = Pin._find(instance, mod)
if (pin and (not pin.enabled())):
return wrapped(*args, **kwargs)
elif (not pin):
log.debug('Pin not found for traced method %r', wrapped)
return wrapped(*args, **kwargs)
return func(mod, pin, wrapped, instance, args, kwargs)
return wrapper
return with_mod
|
Helper for providing tracing essentials (module and pin) for tracing
wrappers.
This helper enables tracing wrappers to dynamically be disabled when the
corresponding pin is disabled.
Usage::
@with_traced_module
def my_traced_wrapper(django, pin, func, instance, args, kwargs):
# Do tracing stuff
pass
def patch():
import django
wrap(django.somefunc, my_traced_wrapper(django))
|
ddtrace/contrib/trace_utils.py
|
with_traced_module
|
sjhewitt/dd-trace-py
| 0 |
python
|
def with_traced_module(func):
'Helper for providing tracing essentials (module and pin) for tracing\n wrappers.\n\n This helper enables tracing wrappers to dynamically be disabled when the\n corresponding pin is disabled.\n\n Usage::\n\n @with_traced_module\n def my_traced_wrapper(django, pin, func, instance, args, kwargs):\n # Do tracing stuff\n pass\n\n def patch():\n import django\n wrap(django.somefunc, my_traced_wrapper(django))\n '
def with_mod(mod):
def wrapper(wrapped, instance, args, kwargs):
pin = Pin._find(instance, mod)
if (pin and (not pin.enabled())):
return wrapped(*args, **kwargs)
elif (not pin):
log.debug('Pin not found for traced method %r', wrapped)
return wrapped(*args, **kwargs)
return func(mod, pin, wrapped, instance, args, kwargs)
return wrapper
return with_mod
|
def with_traced_module(func):
'Helper for providing tracing essentials (module and pin) for tracing\n wrappers.\n\n This helper enables tracing wrappers to dynamically be disabled when the\n corresponding pin is disabled.\n\n Usage::\n\n @with_traced_module\n def my_traced_wrapper(django, pin, func, instance, args, kwargs):\n # Do tracing stuff\n pass\n\n def patch():\n import django\n wrap(django.somefunc, my_traced_wrapper(django))\n '
def with_mod(mod):
def wrapper(wrapped, instance, args, kwargs):
pin = Pin._find(instance, mod)
if (pin and (not pin.enabled())):
return wrapped(*args, **kwargs)
elif (not pin):
log.debug('Pin not found for traced method %r', wrapped)
return wrapped(*args, **kwargs)
return func(mod, pin, wrapped, instance, args, kwargs)
return wrapper
return with_mod<|docstring|>Helper for providing tracing essentials (module and pin) for tracing
wrappers.
This helper enables tracing wrappers to dynamically be disabled when the
corresponding pin is disabled.
Usage::
@with_traced_module
def my_traced_wrapper(django, pin, func, instance, args, kwargs):
# Do tracing stuff
pass
def patch():
import django
wrap(django.somefunc, my_traced_wrapper(django))<|endoftext|>
|
28926a369ff0e10b71f4162a84e9ce375fec5ddba43bf479362a2cade3c129a4
|
def int_service(pin, int_config, default=None):
"Returns the service name for an integration which is internal\n to the application. Internal meaning that the work belongs to the\n user's application. Eg. Web framework, sqlalchemy, web servers.\n\n For internal integrations we prioritize overrides, then global defaults and\n lastly the default provided by the integration.\n "
int_config = (int_config or {})
if (pin and pin.service):
return pin.service
if (('service' in int_config) and (int_config.service is not None)):
return int_config.service
if (('service_name' in int_config) and (int_config.service_name is not None)):
return int_config.service_name
global_service = int_config.global_config._get_service()
if global_service:
return global_service
if (('_default_service' in int_config) and (int_config._default_service is not None)):
return int_config._default_service
return default
|
Returns the service name for an integration which is internal
to the application. Internal meaning that the work belongs to the
user's application. Eg. Web framework, sqlalchemy, web servers.
For internal integrations we prioritize overrides, then global defaults and
lastly the default provided by the integration.
|
ddtrace/contrib/trace_utils.py
|
int_service
|
sjhewitt/dd-trace-py
| 0 |
python
|
def int_service(pin, int_config, default=None):
"Returns the service name for an integration which is internal\n to the application. Internal meaning that the work belongs to the\n user's application. Eg. Web framework, sqlalchemy, web servers.\n\n For internal integrations we prioritize overrides, then global defaults and\n lastly the default provided by the integration.\n "
int_config = (int_config or {})
if (pin and pin.service):
return pin.service
if (('service' in int_config) and (int_config.service is not None)):
return int_config.service
if (('service_name' in int_config) and (int_config.service_name is not None)):
return int_config.service_name
global_service = int_config.global_config._get_service()
if global_service:
return global_service
if (('_default_service' in int_config) and (int_config._default_service is not None)):
return int_config._default_service
return default
|
def int_service(pin, int_config, default=None):
"Returns the service name for an integration which is internal\n to the application. Internal meaning that the work belongs to the\n user's application. Eg. Web framework, sqlalchemy, web servers.\n\n For internal integrations we prioritize overrides, then global defaults and\n lastly the default provided by the integration.\n "
int_config = (int_config or {})
if (pin and pin.service):
return pin.service
if (('service' in int_config) and (int_config.service is not None)):
return int_config.service
if (('service_name' in int_config) and (int_config.service_name is not None)):
return int_config.service_name
global_service = int_config.global_config._get_service()
if global_service:
return global_service
if (('_default_service' in int_config) and (int_config._default_service is not None)):
return int_config._default_service
return default<|docstring|>Returns the service name for an integration which is internal
to the application. Internal meaning that the work belongs to the
user's application. Eg. Web framework, sqlalchemy, web servers.
For internal integrations we prioritize overrides, then global defaults and
lastly the default provided by the integration.<|endoftext|>
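The lookup order in int_service is easiest to see with a stand-in. The sketch below is illustrative only: FakeConfig and FakeGlobalConfig are hypothetical stand-ins for ddtrace's integration config object (which answers both `'key' in cfg` membership tests and `cfg.key` attribute access), not the real API.

```python
# Hypothetical stand-ins, mimicking only the two behaviors int_service relies on.
class FakeConfig(dict):
    def __getattr__(self, name):
        return self[name]  # attribute access backed by dict storage

class FakeGlobalConfig:
    def _get_service(self):
        return None  # pretend no global service name is configured

cfg = FakeConfig(service=None, service_name='my-flask-app',
                 _default_service='flask', global_config=FakeGlobalConfig())

# Precedence: pin.service > cfg.service > cfg.service_name
# > global service > cfg._default_service > the `default` argument.
assert int_service(None, cfg) == 'my-flask-app'
```

ext_service, in the next record, walks the same chain but deliberately skips the global service name: a span naming an external system (database, cache, RPC peer) should not inherit the application's own service name.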
|
b042972c967865d8c16b27eef7b6457ba16cf840ebf4820b11b48f67ee62d7fd
|
def ext_service(pin, int_config, default=None):
"Returns the service name for an integration which is external\n to the application. External meaning that the integration generates\n spans wrapping code that is outside the scope of the user's application. Eg. A database, RPC, cache, etc.\n "
int_config = (int_config or {})
if (pin and pin.service):
return pin.service
if (('service' in int_config) and (int_config.service is not None)):
return int_config.service
if (('service_name' in int_config) and (int_config.service_name is not None)):
return int_config.service_name
if (('_default_service' in int_config) and (int_config._default_service is not None)):
return int_config._default_service
return default
|
Returns the service name for an integration which is external
to the application. External meaning that the integration generates
spans wrapping code that is outside the scope of the user's application. Eg. A database, RPC, cache, etc.
|
ddtrace/contrib/trace_utils.py
|
ext_service
|
sjhewitt/dd-trace-py
| 0 |
python
|
def ext_service(pin, int_config, default=None):
"Returns the service name for an integration which is external\n to the application. External meaning that the integration generates\n spans wrapping code that is outside the scope of the user's application. Eg. A database, RPC, cache, etc.\n "
int_config = (int_config or {})
if (pin and pin.service):
return pin.service
if (('service' in int_config) and (int_config.service is not None)):
return int_config.service
if (('service_name' in int_config) and (int_config.service_name is not None)):
return int_config.service_name
if (('_default_service' in int_config) and (int_config._default_service is not None)):
return int_config._default_service
return default
|
def ext_service(pin, int_config, default=None):
"Returns the service name for an integration which is external\n to the application. External meaning that the integration generates\n spans wrapping code that is outside the scope of the user's application. Eg. A database, RPC, cache, etc.\n "
int_config = (int_config or {})
if (pin and pin.service):
return pin.service
if (('service' in int_config) and (int_config.service is not None)):
return int_config.service
if (('service_name' in int_config) and (int_config.service_name is not None)):
return int_config.service_name
if (('_default_service' in int_config) and (int_config._default_service is not None)):
return int_config._default_service
return default<|docstring|>Returns the service name for an integration which is external
to the application. External meaning that the integration generates
spans wrapping code that is outside the scope of the user's application. Eg. A database, RPC, cache, etc.<|endoftext|>
|
19fa5d4563472ec9035dc0eee9a8d0e008e109f83149604daf94b7e99a504ee7
|
def activate_distributed_headers(tracer, int_config=None, request_headers=None, override=None):
"\n Helper for activating a distributed trace headers' context if enabled in integration config.\n int_config will be used to check if distributed trace headers context will be activated, but\n override will override whatever value is set in int_config if passed any value other than None.\n "
int_config = (int_config or {})
if (override is False):
return None
if (override or int_config.get('distributed_tracing_enabled', int_config.get('distributed_tracing', False))):
context = HTTPPropagator.extract(request_headers)
if context.trace_id:
tracer.context_provider.activate(context)
|
Helper for activating a distributed trace headers' context if enabled in integration config.
int_config will be used to check if distributed trace headers context will be activated, but
override will override whatever value is set in int_config if passed any value other than None.
|
ddtrace/contrib/trace_utils.py
|
activate_distributed_headers
|
sjhewitt/dd-trace-py
| 0 |
python
|
def activate_distributed_headers(tracer, int_config=None, request_headers=None, override=None):
"\n Helper for activating a distributed trace headers' context if enabled in integration config.\n int_config will be used to check if distributed trace headers context will be activated, but\n override will override whatever value is set in int_config if passed any value other than None.\n "
int_config = (int_config or {})
if (override is False):
return None
if (override or int_config.get('distributed_tracing_enabled', int_config.get('distributed_tracing', False))):
context = HTTPPropagator.extract(request_headers)
if context.trace_id:
tracer.context_provider.activate(context)
|
def activate_distributed_headers(tracer, int_config=None, request_headers=None, override=None):
"\n Helper for activating a distributed trace headers' context if enabled in integration config.\n int_config will be used to check if distributed trace headers context will be activated, but\n override will override whatever value is set in int_config if passed any value other than None.\n "
int_config = (int_config or {})
if (override is False):
return None
if (override or int_config.get('distributed_tracing_enabled', int_config.get('distributed_tracing', False))):
context = HTTPPropagator.extract(request_headers)
if context.trace_id:
tracer.context_provider.activate(context)<|docstring|>Helper for activating a distributed trace headers' context if enabled in integration config.
int_config will be used to check if distributed trace headers context will be activated, but
override will override whatever value is set in int_config if passed any value other than None.<|endoftext|>
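A sketch of the intended call site, early in a web framework's request hook. The names here (config.flask, on_request_start) are illustrative choices, not prescribed by the library; the guard logic lives in the function above, which quietly does nothing when the integration's distributed-tracing flag is off or the headers carry no trace id.

```python
# Illustrative only: config.flask stands in for whichever integration
# config applies; any dict-like header mapping works for request_headers.
from ddtrace import config, tracer

def on_request_start(request_headers):
    activate_distributed_headers(
        tracer,
        int_config=config.flask,
        request_headers=request_headers,
    )
```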
|
95be34c5604f03fa89ebaff43ed293dcce70b572a8f77a042962d70a0bdd3815
|
def flatten_dict(d, sep='.', prefix='', exclude=None):
'\n Returns a normalized dict of depth 1\n '
flat = {}
s = deque()
s.append((prefix, d))
exclude = (exclude or set())
while s:
(p, v) = s.pop()
if (p in exclude):
continue
if isinstance(v, dict):
s.extend((((((p + sep) + k) if p else k), v) for (k, v) in v.items()))
else:
flat[p] = v
return flat
|
Returns a normalized dict of depth 1
|
ddtrace/contrib/trace_utils.py
|
flatten_dict
|
sjhewitt/dd-trace-py
| 0 |
python
|
def flatten_dict(d, sep='.', prefix='', exclude=None):
'\n \n '
flat = {}
s = deque()
s.append((prefix, d))
exclude = (exclude or set())
while s:
(p, v) = s.pop()
if (p in exclude):
continue
if isinstance(v, dict):
s.extend((((((p + sep) + k) if p else k), v) for (k, v) in v.items()))
else:
flat[p] = v
return flat
|
def flatten_dict(d, sep='.', prefix='', exclude=None):
'\n \n '
flat = {}
s = deque()
s.append((prefix, d))
exclude = (exclude or set())
while s:
(p, v) = s.pop()
if (p in exclude):
continue
if isinstance(v, dict):
s.extend((((((p + sep) + k) if p else k), v) for (k, v) in v.items()))
else:
flat[p] = v
return flat<|docstring|>Returns a normalized dict of depth 1<|endoftext|>
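Concretely, "a normalized dict of depth 1" means nested keys are joined with sep into flat paths, and exclude prunes whole subtrees by their flattened path. A quick illustration:

```python
d = {'a': {'b': 1, 'c': {'d': 2}}, 'e': 3}
flatten_dict(d)                   # {'a.b': 1, 'a.c.d': 2, 'e': 3}
flatten_dict(d, exclude={'a.c'})  # {'a.b': 1, 'e': 3} -- 'a.c' subtree pruned
```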
|
8f8033fd2507f46a22acd30b07541513f1e4aa425ecf038002321bfa39912c8a
|
def manage_updaters(request):
    'View: manage updaters.'
from catalog.models import Updater
if (request.user.has_perm('catalog.add_updater') or request.user.has_perm('catalog.change_updater') or request.user.has_perm('catalog.delete_updater')):
updaters = Updater.objects.all().order_by('name')
return render(request, 'catalog/manage_updaters.html', locals())
|
View: manage updaters.
|
views.py
|
manage_updaters
|
anodos-ru/catalog
| 2 |
python
|
def manage_updaters(request):
from catalog.models import Updater
if (request.user.has_perm('catalog.add_updater') or request.user.has_perm('catalog.change_updater') or request.user.has_perm('catalog.delete_updater')):
updaters = Updater.objects.all().order_by('name')
return render(request, 'catalog/manage_updaters.html', locals())
|
def manage_updaters(request):
from catalog.models import Updater
if (request.user.has_perm('catalog.add_updater') or request.user.has_perm('catalog.change_updater') or request.user.has_perm('catalog.delete_updater')):
updaters = Updater.objects.all().order_by('name')
        return render(request, 'catalog/manage_updaters.html', locals())<|docstring|>View: manage updaters.<|endoftext|>
|
fda9ecb2872673f54245e90fb9994dc63164c1a3cf2b91436206ce6681843386
|
def manage_distributors(request):
    'View: manage distributors.'
from catalog.models import Distributor
if (request.user.has_perm('catalog.add_distributor') or request.user.has_perm('catalog.change_distributor') or request.user.has_perm('catalog.delete_distributor')):
distributors = Distributor.objects.all().order_by('name')
return render(request, 'catalog/manage_distributors.html', locals())
|
View: manage distributors.
|
views.py
|
manage_distributors
|
anodos-ru/catalog
| 2 |
python
|
def manage_distributors(request):
from catalog.models import Distributor
if (request.user.has_perm('catalog.add_distributor') or request.user.has_perm('catalog.change_distributor') or request.user.has_perm('catalog.delete_distributor')):
distributors = Distributor.objects.all().order_by('name')
return render(request, 'catalog/manage_distributors.html', locals())
|
def manage_distributors(request):
from catalog.models import Distributor
if (request.user.has_perm('catalog.add_distributor') or request.user.has_perm('catalog.change_distributor') or request.user.has_perm('catalog.delete_distributor')):
distributors = Distributor.objects.all().order_by('name')
        return render(request, 'catalog/manage_distributors.html', locals())<|docstring|>View: manage distributors.<|endoftext|>
|