Columns (name, dtype, min .. max):

  body_hash                stringlengths    64 .. 64
  body                     stringlengths    23 .. 109k
  docstring                stringlengths    1 .. 57k
  path                     stringlengths    4 .. 198
  name                     stringlengths    1 .. 115
  repository_name          stringlengths    7 .. 111
  repository_stars         float64          0 .. 191k
  lang                     stringclasses    1 value
  body_without_docstring   stringlengths    14 .. 108k
  unified                  stringlengths    45 .. 133k
6bf8a4ca977d2680170ad1d459b657d5413641165bf4f674a487a8dcea1b0a9a
def _parent(self, i: int) -> int | None:
    """Returns parent index of given index if exists, else None."""
    return (i - 1) // 2 if 0 < i < self.size else None
Returns parent index of given index if exists, else None.
cs/structures/heap/binary_heap.py
_parent
TylerYep/workshop
1
python
def _parent(self, i: int) -> (int | None): return (((i - 1) // 2) if (0 < i < self.size) else None)
def _parent(self, i: int) -> (int | None): return (((i - 1) // 2) if (0 < i < self.size) else None)<|docstring|>Returns parent index of given index if exists, else None.<|endoftext|>
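A quick arithmetic check on this row's parent formula against the child formulas used in the next two rows (a standalone snippet, assuming the same 0-based array layout):

# Check that (child - 1) // 2 inverts the left/right child-index formulas.
for i in range(100):
    left, right = 2 * i + 1, 2 * i + 2
    assert (left - 1) // 2 == i
    assert (right - 1) // 2 == i
print("parent/child index formulas are consistent")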
0f4fce31d8c22419bdfef0d2485e95b5f86003e10b1de8d93a13906d0c043818
def _left(self, i: int) -> int | None:
    """Returns left-child-index of given index if exists, else None."""
    left = int(2 * i + 1)
    return left if 0 < left < self.size else None
Returns left-child-index of given index if exists, else None.
cs/structures/heap/binary_heap.py
_left
TylerYep/workshop
1
python
def _left(self, i: int) -> (int | None): left = int(((2 * i) + 1)) return (left if (0 < left < self.size) else None)
def _left(self, i: int) -> (int | None): left = int(((2 * i) + 1)) return (left if (0 < left < self.size) else None)<|docstring|>Returns left-child-index of given index if exists, else None.<|endoftext|>
8195fcfe927aac4cb45d0f10c872b752c8343b77cfb507b8e3a53a2130282712
def _right(self, i: int) -> int | None:
    """Returns right-child-index of given index if exists, else None."""
    right = int(2 * i + 2)
    return right if 0 < right < self.size else None
Returns right-child-index of given index if exists, else None.
cs/structures/heap/binary_heap.py
_right
TylerYep/workshop
1
python
def _right(self, i: int) -> (int | None): right = int(((2 * i) + 2)) return (right if (0 < right < self.size) else None)
def _right(self, i: int) -> (int | None): right = int(((2 * i) + 2)) return (right if (0 < right < self.size) else None)<|docstring|>Returns right-child-index of given index if exists, else None.<|endoftext|>
558cbefa86f5c45a8e60f4accd6fbc9a666e94b9ff2534b0873fd0f619e80f42
def _swap(self, i: int, j: int) -> None:
    """Performs changes required for swapping two elements in the heap."""
    self.elem_to_index[self._heap[i]], self.elem_to_index[self._heap[j]] = (
        self.elem_to_index[self._heap[j]],
        self.elem_to_index[self._heap[i]],
    )
    self._heap[i], self._heap[j] = self._heap[j], self._heap[i]
Performs changes required for swapping two elements in the heap.
cs/structures/heap/binary_heap.py
_swap
TylerYep/workshop
1
python
def _swap(self, i: int, j: int) -> None: (self.elem_to_index[self._heap[i]], self.elem_to_index[self._heap[j]]) = (self.elem_to_index[self._heap[j]], self.elem_to_index[self._heap[i]]) (self._heap[i], self._heap[j]) = (self._heap[j], self._heap[i])
def _swap(self, i: int, j: int) -> None: (self.elem_to_index[self._heap[i]], self.elem_to_index[self._heap[j]]) = (self.elem_to_index[self._heap[j]], self.elem_to_index[self._heap[i]]) (self._heap[i], self._heap[j]) = (self._heap[j], self._heap[i])<|docstring|>Performs changes required for swapping two elements in the heap.<|endoftext|>
ff29bda101c4a7a8cf9fccd050f14d6c5098c7ef682ce5877c7701e60cde5663
def _cmp(self, i: int, j: int) -> bool:
    """Compares the two items using default comparison."""
    return self.key(self._heap[i]) < self.key(self._heap[j])
Compares the two items using default comparison.
cs/structures/heap/binary_heap.py
_cmp
TylerYep/workshop
1
python
def _cmp(self, i: int, j: int) -> bool: return (self.key(self._heap[i]) < self.key(self._heap[j]))
def _cmp(self, i: int, j: int) -> bool: return (self.key(self._heap[i]) < self.key(self._heap[j]))<|docstring|>Compares the two items using default comparison.<|endoftext|>
2e04cdb3f74664a00988f3ce105ea6d8387a1459e0ad6b16318e82049db3e491
def _get_valid_parent(self, i: int) -> int:
    """
    Returns index of valid parent as per desired ordering among given index and
    both its children.
    """
    left, right = self._left(i), self._right(i)
    valid_parent = i
    if left is not None and not self._cmp(left, valid_parent):
        valid_parent = left
    if right is not None and not self._cmp(right, valid_parent):
        valid_parent = right
    return valid_parent
Returns index of valid parent as per desired ordering among given index and both its children.
cs/structures/heap/binary_heap.py
_get_valid_parent
TylerYep/workshop
1
python
def _get_valid_parent(self, i: int) -> int: '\n Returns index of valid parent as per desired ordering among given index and\n both its children.\n ' (left, right) = (self._left(i), self._right(i)) valid_parent = i if ((left is not None) and (not self._cmp(left, valid_parent))): valid_parent = left if ((right is not None) and (not self._cmp(right, valid_parent))): valid_parent = right return valid_parent
def _get_valid_parent(self, i: int) -> int: '\n Returns index of valid parent as per desired ordering among given index and\n both its children.\n ' (left, right) = (self._left(i), self._right(i)) valid_parent = i if ((left is not None) and (not self._cmp(left, valid_parent))): valid_parent = left if ((right is not None) and (not self._cmp(right, valid_parent))): valid_parent = right return valid_parent<|docstring|>Returns index of valid parent as per desired ordering among given index and both its children.<|endoftext|>
7c253e58e6acf7cc57dd0a1c89cf263ead3d60f514c0cc266334db77bd2f5b29
def _heapify_up(self, index: int) -> None:
    """Fixes the heap in upward direction of given index."""
    parent = self._parent(index)
    while parent is not None and not self._cmp(index, parent):
        self._swap(index, parent)
        index, parent = parent, self._parent(parent)
Fixes the heap in upward direction of given index.
cs/structures/heap/binary_heap.py
_heapify_up
TylerYep/workshop
1
python
def _heapify_up(self, index: int) -> None: parent = self._parent(index) while ((parent is not None) and (not self._cmp(index, parent))): self._swap(index, parent) (index, parent) = (parent, self._parent(parent))
def _heapify_up(self, index: int) -> None: parent = self._parent(index) while ((parent is not None) and (not self._cmp(index, parent))): self._swap(index, parent) (index, parent) = (parent, self._parent(parent))<|docstring|>Fixes the heap in upward direction of given index.<|endoftext|>
150ba0adaa201a72e422beda02a0e10b2df43c3e75c672cbde2119f3c23b72e3
def _heapify_down(self, index: int) -> None:
    """Fixes the heap in downward direction of given index."""
    valid_parent = self._get_valid_parent(index)
    while valid_parent != index:
        self._swap(index, valid_parent)
        index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)
Fixes the heap in downward direction of given index.
cs/structures/heap/binary_heap.py
_heapify_down
TylerYep/workshop
1
python
def _heapify_down(self, index: int) -> None: valid_parent = self._get_valid_parent(index) while (valid_parent != index): self._swap(index, valid_parent) (index, valid_parent) = (valid_parent, self._get_valid_parent(valid_parent))
def _heapify_down(self, index: int) -> None: valid_parent = self._get_valid_parent(index) while (valid_parent != index): self._swap(index, valid_parent) (index, valid_parent) = (valid_parent, self._get_valid_parent(valid_parent))<|docstring|>Fixes the heap in downward direction of given index.<|endoftext|>
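The eight rows above are index helpers and sift routines from the same array-backed binary heap: a node at index i has its parent at (i - 1) // 2 and its children at 2 * i + 1 and 2 * i + 2, _heapify_up bubbles an element toward the root, and _heapify_down sinks it toward the leaves. The following is a condensed, self-contained min-heap sketch of how those pieces compose; it is an illustration, not the repository's BinaryHeap class (which also maintains an elem_to_index map and a key function):

# Condensed illustration of the index formulas and both heapify directions.
class MinHeap:
    def __init__(self) -> None:
        self._heap: list[int] = []

    def push(self, item: int) -> None:
        self._heap.append(item)
        # Restore the heap property upward from the new leaf (cf. _heapify_up).
        i = len(self._heap) - 1
        while i > 0:
            parent = (i - 1) // 2  # same formula as _parent above
            if self._heap[i] < self._heap[parent]:
                self._heap[i], self._heap[parent] = self._heap[parent], self._heap[i]
                i = parent
            else:
                break

    def pop(self) -> int:
        top = self._heap[0]
        last = self._heap.pop()
        if self._heap:
            self._heap[0] = last
            # Restore the heap property downward from the root (cf. _heapify_down).
            i, n = 0, len(self._heap)
            while True:
                left, right = 2 * i + 1, 2 * i + 2  # same formulas as _left/_right
                smallest = i
                if left < n and self._heap[left] < self._heap[smallest]:
                    smallest = left
                if right < n and self._heap[right] < self._heap[smallest]:
                    smallest = right
                if smallest == i:
                    break
                self._heap[i], self._heap[smallest] = self._heap[smallest], self._heap[i]
                i = smallest
        return top


h = MinHeap()
for x in (5, 1, 4, 2):
    h.push(x)
assert [h.pop() for _ in range(4)] == [1, 2, 4, 5]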
3674b30dbe0415224720ac2c7186b761e7d0a629e374fcff0774703a52a8d978
def __init__(self, **kwargs):
    """
    Initializes a new UserDefinedFunctionValidationSummary object with values from keyword arguments.
    The following keyword arguments are supported (corresponding to the getters/setters of this class):

    :param total_message_count:
        The value to assign to the total_message_count property of this UserDefinedFunctionValidationSummary.
    :type total_message_count: int

    :param error_message_count:
        The value to assign to the error_message_count property of this UserDefinedFunctionValidationSummary.
    :type error_message_count: int

    :param warn_message_count:
        The value to assign to the warn_message_count property of this UserDefinedFunctionValidationSummary.
    :type warn_message_count: int

    :param info_message_count:
        The value to assign to the info_message_count property of this UserDefinedFunctionValidationSummary.
    :type info_message_count: int

    :param validation_messages:
        The value to assign to the validation_messages property of this UserDefinedFunctionValidationSummary.
    :type validation_messages: dict(str, list[ValidationMessage])

    :param key:
        The value to assign to the key property of this UserDefinedFunctionValidationSummary.
    :type key: str

    :param model_type:
        The value to assign to the model_type property of this UserDefinedFunctionValidationSummary.
    :type model_type: str

    :param model_version:
        The value to assign to the model_version property of this UserDefinedFunctionValidationSummary.
    :type model_version: str

    :param parent_ref:
        The value to assign to the parent_ref property of this UserDefinedFunctionValidationSummary.
    :type parent_ref: oci.data_integration.models.ParentReference

    :param name:
        The value to assign to the name property of this UserDefinedFunctionValidationSummary.
    :type name: str

    :param description:
        The value to assign to the description property of this UserDefinedFunctionValidationSummary.
    :type description: str

    :param object_version:
        The value to assign to the object_version property of this UserDefinedFunctionValidationSummary.
    :type object_version: int

    :param object_status:
        The value to assign to the object_status property of this UserDefinedFunctionValidationSummary.
    :type object_status: int

    :param identifier:
        The value to assign to the identifier property of this UserDefinedFunctionValidationSummary.
    :type identifier: str

    :param metadata:
        The value to assign to the metadata property of this UserDefinedFunctionValidationSummary.
    :type metadata: oci.data_integration.models.ObjectMetadata
    """
    self.swagger_types = {
        'total_message_count': 'int',
        'error_message_count': 'int',
        'warn_message_count': 'int',
        'info_message_count': 'int',
        'validation_messages': 'dict(str, list[ValidationMessage])',
        'key': 'str',
        'model_type': 'str',
        'model_version': 'str',
        'parent_ref': 'ParentReference',
        'name': 'str',
        'description': 'str',
        'object_version': 'int',
        'object_status': 'int',
        'identifier': 'str',
        'metadata': 'ObjectMetadata'
    }
    self.attribute_map = {
        'total_message_count': 'totalMessageCount',
        'error_message_count': 'errorMessageCount',
        'warn_message_count': 'warnMessageCount',
        'info_message_count': 'infoMessageCount',
        'validation_messages': 'validationMessages',
        'key': 'key',
        'model_type': 'modelType',
        'model_version': 'modelVersion',
        'parent_ref': 'parentRef',
        'name': 'name',
        'description': 'description',
        'object_version': 'objectVersion',
        'object_status': 'objectStatus',
        'identifier': 'identifier',
        'metadata': 'metadata'
    }
    self._total_message_count = None
    self._error_message_count = None
    self._warn_message_count = None
    self._info_message_count = None
    self._validation_messages = None
    self._key = None
    self._model_type = None
    self._model_version = None
    self._parent_ref = None
    self._name = None
    self._description = None
    self._object_version = None
    self._object_status = None
    self._identifier = None
    self._metadata = None
Initializes a new UserDefinedFunctionValidationSummary object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param total_message_count: The value to assign to the total_message_count property of this UserDefinedFunctionValidationSummary. :type total_message_count: int :param error_message_count: The value to assign to the error_message_count property of this UserDefinedFunctionValidationSummary. :type error_message_count: int :param warn_message_count: The value to assign to the warn_message_count property of this UserDefinedFunctionValidationSummary. :type warn_message_count: int :param info_message_count: The value to assign to the info_message_count property of this UserDefinedFunctionValidationSummary. :type info_message_count: int :param validation_messages: The value to assign to the validation_messages property of this UserDefinedFunctionValidationSummary. :type validation_messages: dict(str, list[ValidationMessage]) :param key: The value to assign to the key property of this UserDefinedFunctionValidationSummary. :type key: str :param model_type: The value to assign to the model_type property of this UserDefinedFunctionValidationSummary. :type model_type: str :param model_version: The value to assign to the model_version property of this UserDefinedFunctionValidationSummary. :type model_version: str :param parent_ref: The value to assign to the parent_ref property of this UserDefinedFunctionValidationSummary. :type parent_ref: oci.data_integration.models.ParentReference :param name: The value to assign to the name property of this UserDefinedFunctionValidationSummary. :type name: str :param description: The value to assign to the description property of this UserDefinedFunctionValidationSummary. :type description: str :param object_version: The value to assign to the object_version property of this UserDefinedFunctionValidationSummary. :type object_version: int :param object_status: The value to assign to the object_status property of this UserDefinedFunctionValidationSummary. :type object_status: int :param identifier: The value to assign to the identifier property of this UserDefinedFunctionValidationSummary. :type identifier: str :param metadata: The value to assign to the metadata property of this UserDefinedFunctionValidationSummary. :type metadata: oci.data_integration.models.ObjectMetadata
src/oci/data_integration/models/user_defined_function_validation_summary.py
__init__
ezequielramos/oci-python-sdk
249
python
def __init__(self, **kwargs): '\n Initializes a new UserDefinedFunctionValidationSummary object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param total_message_count:\n The value to assign to the total_message_count property of this UserDefinedFunctionValidationSummary.\n :type total_message_count: int\n\n :param error_message_count:\n The value to assign to the error_message_count property of this UserDefinedFunctionValidationSummary.\n :type error_message_count: int\n\n :param warn_message_count:\n The value to assign to the warn_message_count property of this UserDefinedFunctionValidationSummary.\n :type warn_message_count: int\n\n :param info_message_count:\n The value to assign to the info_message_count property of this UserDefinedFunctionValidationSummary.\n :type info_message_count: int\n\n :param validation_messages:\n The value to assign to the validation_messages property of this UserDefinedFunctionValidationSummary.\n :type validation_messages: dict(str, list[ValidationMessage])\n\n :param key:\n The value to assign to the key property of this UserDefinedFunctionValidationSummary.\n :type key: str\n\n :param model_type:\n The value to assign to the model_type property of this UserDefinedFunctionValidationSummary.\n :type model_type: str\n\n :param model_version:\n The value to assign to the model_version property of this UserDefinedFunctionValidationSummary.\n :type model_version: str\n\n :param parent_ref:\n The value to assign to the parent_ref property of this UserDefinedFunctionValidationSummary.\n :type parent_ref: oci.data_integration.models.ParentReference\n\n :param name:\n The value to assign to the name property of this UserDefinedFunctionValidationSummary.\n :type name: str\n\n :param description:\n The value to assign to the description property of this UserDefinedFunctionValidationSummary.\n :type description: str\n\n :param object_version:\n The value to assign to the object_version property of this UserDefinedFunctionValidationSummary.\n :type object_version: int\n\n :param object_status:\n The value to assign to the object_status property of this UserDefinedFunctionValidationSummary.\n :type object_status: int\n\n :param identifier:\n The value to assign to the identifier property of this UserDefinedFunctionValidationSummary.\n :type identifier: str\n\n :param metadata:\n The value to assign to the metadata property of this UserDefinedFunctionValidationSummary.\n :type metadata: oci.data_integration.models.ObjectMetadata\n\n ' self.swagger_types = {'total_message_count': 'int', 'error_message_count': 'int', 'warn_message_count': 'int', 'info_message_count': 'int', 'validation_messages': 'dict(str, list[ValidationMessage])', 'key': 'str', 'model_type': 'str', 'model_version': 'str', 'parent_ref': 'ParentReference', 'name': 'str', 'description': 'str', 'object_version': 'int', 'object_status': 'int', 'identifier': 'str', 'metadata': 'ObjectMetadata'} self.attribute_map = {'total_message_count': 'totalMessageCount', 'error_message_count': 'errorMessageCount', 'warn_message_count': 'warnMessageCount', 'info_message_count': 'infoMessageCount', 'validation_messages': 'validationMessages', 'key': 'key', 'model_type': 'modelType', 'model_version': 'modelVersion', 'parent_ref': 'parentRef', 'name': 'name', 'description': 'description', 'object_version': 'objectVersion', 'object_status': 'objectStatus', 'identifier': 'identifier', 'metadata': 'metadata'} 
self._total_message_count = None self._error_message_count = None self._warn_message_count = None self._info_message_count = None self._validation_messages = None self._key = None self._model_type = None self._model_version = None self._parent_ref = None self._name = None self._description = None self._object_version = None self._object_status = None self._identifier = None self._metadata = None
def __init__(self, **kwargs): '\n Initializes a new UserDefinedFunctionValidationSummary object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param total_message_count:\n The value to assign to the total_message_count property of this UserDefinedFunctionValidationSummary.\n :type total_message_count: int\n\n :param error_message_count:\n The value to assign to the error_message_count property of this UserDefinedFunctionValidationSummary.\n :type error_message_count: int\n\n :param warn_message_count:\n The value to assign to the warn_message_count property of this UserDefinedFunctionValidationSummary.\n :type warn_message_count: int\n\n :param info_message_count:\n The value to assign to the info_message_count property of this UserDefinedFunctionValidationSummary.\n :type info_message_count: int\n\n :param validation_messages:\n The value to assign to the validation_messages property of this UserDefinedFunctionValidationSummary.\n :type validation_messages: dict(str, list[ValidationMessage])\n\n :param key:\n The value to assign to the key property of this UserDefinedFunctionValidationSummary.\n :type key: str\n\n :param model_type:\n The value to assign to the model_type property of this UserDefinedFunctionValidationSummary.\n :type model_type: str\n\n :param model_version:\n The value to assign to the model_version property of this UserDefinedFunctionValidationSummary.\n :type model_version: str\n\n :param parent_ref:\n The value to assign to the parent_ref property of this UserDefinedFunctionValidationSummary.\n :type parent_ref: oci.data_integration.models.ParentReference\n\n :param name:\n The value to assign to the name property of this UserDefinedFunctionValidationSummary.\n :type name: str\n\n :param description:\n The value to assign to the description property of this UserDefinedFunctionValidationSummary.\n :type description: str\n\n :param object_version:\n The value to assign to the object_version property of this UserDefinedFunctionValidationSummary.\n :type object_version: int\n\n :param object_status:\n The value to assign to the object_status property of this UserDefinedFunctionValidationSummary.\n :type object_status: int\n\n :param identifier:\n The value to assign to the identifier property of this UserDefinedFunctionValidationSummary.\n :type identifier: str\n\n :param metadata:\n The value to assign to the metadata property of this UserDefinedFunctionValidationSummary.\n :type metadata: oci.data_integration.models.ObjectMetadata\n\n ' self.swagger_types = {'total_message_count': 'int', 'error_message_count': 'int', 'warn_message_count': 'int', 'info_message_count': 'int', 'validation_messages': 'dict(str, list[ValidationMessage])', 'key': 'str', 'model_type': 'str', 'model_version': 'str', 'parent_ref': 'ParentReference', 'name': 'str', 'description': 'str', 'object_version': 'int', 'object_status': 'int', 'identifier': 'str', 'metadata': 'ObjectMetadata'} self.attribute_map = {'total_message_count': 'totalMessageCount', 'error_message_count': 'errorMessageCount', 'warn_message_count': 'warnMessageCount', 'info_message_count': 'infoMessageCount', 'validation_messages': 'validationMessages', 'key': 'key', 'model_type': 'modelType', 'model_version': 'modelVersion', 'parent_ref': 'parentRef', 'name': 'name', 'description': 'description', 'object_version': 'objectVersion', 'object_status': 'objectStatus', 'identifier': 'identifier', 'metadata': 'metadata'} 
self._total_message_count = None self._error_message_count = None self._warn_message_count = None self._info_message_count = None self._validation_messages = None self._key = None self._model_type = None self._model_version = None self._parent_ref = None self._name = None self._description = None self._object_version = None self._object_status = None self._identifier = None self._metadata = None<|docstring|>Initializes a new UserDefinedFunctionValidationSummary object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param total_message_count: The value to assign to the total_message_count property of this UserDefinedFunctionValidationSummary. :type total_message_count: int :param error_message_count: The value to assign to the error_message_count property of this UserDefinedFunctionValidationSummary. :type error_message_count: int :param warn_message_count: The value to assign to the warn_message_count property of this UserDefinedFunctionValidationSummary. :type warn_message_count: int :param info_message_count: The value to assign to the info_message_count property of this UserDefinedFunctionValidationSummary. :type info_message_count: int :param validation_messages: The value to assign to the validation_messages property of this UserDefinedFunctionValidationSummary. :type validation_messages: dict(str, list[ValidationMessage]) :param key: The value to assign to the key property of this UserDefinedFunctionValidationSummary. :type key: str :param model_type: The value to assign to the model_type property of this UserDefinedFunctionValidationSummary. :type model_type: str :param model_version: The value to assign to the model_version property of this UserDefinedFunctionValidationSummary. :type model_version: str :param parent_ref: The value to assign to the parent_ref property of this UserDefinedFunctionValidationSummary. :type parent_ref: oci.data_integration.models.ParentReference :param name: The value to assign to the name property of this UserDefinedFunctionValidationSummary. :type name: str :param description: The value to assign to the description property of this UserDefinedFunctionValidationSummary. :type description: str :param object_version: The value to assign to the object_version property of this UserDefinedFunctionValidationSummary. :type object_version: int :param object_status: The value to assign to the object_status property of this UserDefinedFunctionValidationSummary. :type object_status: int :param identifier: The value to assign to the identifier property of this UserDefinedFunctionValidationSummary. :type identifier: str :param metadata: The value to assign to the metadata property of this UserDefinedFunctionValidationSummary. :type metadata: oci.data_integration.models.ObjectMetadata<|endoftext|>
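The constructor above wires up two maps that the rest of this model relies on: swagger_types records each field's declared type, and attribute_map translates snake_case attribute names into the camelCase keys used in the service's JSON. A hypothetical helper (not part of the OCI SDK) showing how such a map is typically consumed:

# Hypothetical helper: serialize a model's set attributes under their wire keys.
def model_to_wire_dict(model):
    wire = {}
    for attr, json_key in model.attribute_map.items():
        value = getattr(model, attr)
        if value is not None:
            wire[json_key] = value
    return wire


# Stand-in object shaped like the model above, for demonstration only.
class _Stub:
    attribute_map = {"total_message_count": "totalMessageCount", "key": "key"}
    swagger_types = {"total_message_count": "int", "key": "str"}
    total_message_count = 3
    key = None  # unset fields are skipped


print(model_to_wire_dict(_Stub()))  # {'totalMessageCount': 3}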
fdec9f2cd6f6fb0c68f33da772cf38e4e5bb5350f24e69a2c4413e31eb890638
@property
def total_message_count(self):
    """
    Gets the total_message_count of this UserDefinedFunctionValidationSummary.
    The total number of validation messages.

    :return: The total_message_count of this UserDefinedFunctionValidationSummary.
    :rtype: int
    """
    return self._total_message_count
Gets the total_message_count of this UserDefinedFunctionValidationSummary. The total number of validation messages. :return: The total_message_count of this UserDefinedFunctionValidationSummary. :rtype: int
src/oci/data_integration/models/user_defined_function_validation_summary.py
total_message_count
ezequielramos/oci-python-sdk
249
python
@property def total_message_count(self): '\n Gets the total_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation messages.\n\n\n :return: The total_message_count of this UserDefinedFunctionValidationSummary.\n :rtype: int\n ' return self._total_message_count
@property def total_message_count(self): '\n Gets the total_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation messages.\n\n\n :return: The total_message_count of this UserDefinedFunctionValidationSummary.\n :rtype: int\n ' return self._total_message_count<|docstring|>Gets the total_message_count of this UserDefinedFunctionValidationSummary. The total number of validation messages. :return: The total_message_count of this UserDefinedFunctionValidationSummary. :rtype: int<|endoftext|>
576a98f785c79a06d52072872f82d7a85f3958f5287b3c3b1979c9bdeea545ea
@total_message_count.setter
def total_message_count(self, total_message_count):
    """
    Sets the total_message_count of this UserDefinedFunctionValidationSummary.
    The total number of validation messages.

    :param total_message_count: The total_message_count of this UserDefinedFunctionValidationSummary.
    :type: int
    """
    self._total_message_count = total_message_count
Sets the total_message_count of this UserDefinedFunctionValidationSummary. The total number of validation messages. :param total_message_count: The total_message_count of this UserDefinedFunctionValidationSummary. :type: int
src/oci/data_integration/models/user_defined_function_validation_summary.py
total_message_count
ezequielramos/oci-python-sdk
249
python
@total_message_count.setter def total_message_count(self, total_message_count): '\n Sets the total_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation messages.\n\n\n :param total_message_count: The total_message_count of this UserDefinedFunctionValidationSummary.\n :type: int\n ' self._total_message_count = total_message_count
@total_message_count.setter def total_message_count(self, total_message_count): '\n Sets the total_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation messages.\n\n\n :param total_message_count: The total_message_count of this UserDefinedFunctionValidationSummary.\n :type: int\n ' self._total_message_count = total_message_count<|docstring|>Sets the total_message_count of this UserDefinedFunctionValidationSummary. The total number of validation messages. :param total_message_count: The total_message_count of this UserDefinedFunctionValidationSummary. :type: int<|endoftext|>
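Every remaining row in this file follows the pattern shown by this getter/setter pair: a private backing attribute (initialized to None in __init__) exposed through a @property getter and a matching setter of the same name. A minimal standalone sketch of that pattern:

# Illustrative only; not the SDK class.
class Counts:
    def __init__(self):
        self._total_message_count = None

    @property
    def total_message_count(self):
        return self._total_message_count

    @total_message_count.setter
    def total_message_count(self, total_message_count):
        self._total_message_count = total_message_count


c = Counts()
c.total_message_count = 7     # goes through the setter
print(c.total_message_count)  # 7, via the getter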
0e06c555cbd23e16049c59f84430c2998b8729bd936dccae1333ddc583eed20d
@property
def error_message_count(self):
    """
    Gets the error_message_count of this UserDefinedFunctionValidationSummary.
    The total number of validation error messages.

    :return: The error_message_count of this UserDefinedFunctionValidationSummary.
    :rtype: int
    """
    return self._error_message_count
Gets the error_message_count of this UserDefinedFunctionValidationSummary. The total number of validation error messages. :return: The error_message_count of this UserDefinedFunctionValidationSummary. :rtype: int
src/oci/data_integration/models/user_defined_function_validation_summary.py
error_message_count
ezequielramos/oci-python-sdk
249
python
@property def error_message_count(self): '\n Gets the error_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation error messages.\n\n\n :return: The error_message_count of this UserDefinedFunctionValidationSummary.\n :rtype: int\n ' return self._error_message_count
@property def error_message_count(self): '\n Gets the error_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation error messages.\n\n\n :return: The error_message_count of this UserDefinedFunctionValidationSummary.\n :rtype: int\n ' return self._error_message_count<|docstring|>Gets the error_message_count of this UserDefinedFunctionValidationSummary. The total number of validation error messages. :return: The error_message_count of this UserDefinedFunctionValidationSummary. :rtype: int<|endoftext|>
ca8e46f99589df1f8eb3287eb94af8c5cb1ef1a30bf607978223186fa5ff539f
@error_message_count.setter
def error_message_count(self, error_message_count):
    """
    Sets the error_message_count of this UserDefinedFunctionValidationSummary.
    The total number of validation error messages.

    :param error_message_count: The error_message_count of this UserDefinedFunctionValidationSummary.
    :type: int
    """
    self._error_message_count = error_message_count
Sets the error_message_count of this UserDefinedFunctionValidationSummary. The total number of validation error messages. :param error_message_count: The error_message_count of this UserDefinedFunctionValidationSummary. :type: int
src/oci/data_integration/models/user_defined_function_validation_summary.py
error_message_count
ezequielramos/oci-python-sdk
249
python
@error_message_count.setter def error_message_count(self, error_message_count): '\n Sets the error_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation error messages.\n\n\n :param error_message_count: The error_message_count of this UserDefinedFunctionValidationSummary.\n :type: int\n ' self._error_message_count = error_message_count
@error_message_count.setter def error_message_count(self, error_message_count): '\n Sets the error_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation error messages.\n\n\n :param error_message_count: The error_message_count of this UserDefinedFunctionValidationSummary.\n :type: int\n ' self._error_message_count = error_message_count<|docstring|>Sets the error_message_count of this UserDefinedFunctionValidationSummary. The total number of validation error messages. :param error_message_count: The error_message_count of this UserDefinedFunctionValidationSummary. :type: int<|endoftext|>
2660fa89febc120e47e41e23a3337a597909293935b82805e6759412cfe0157c
@property
def warn_message_count(self):
    """
    Gets the warn_message_count of this UserDefinedFunctionValidationSummary.
    The total number of validation warning messages.

    :return: The warn_message_count of this UserDefinedFunctionValidationSummary.
    :rtype: int
    """
    return self._warn_message_count
Gets the warn_message_count of this UserDefinedFunctionValidationSummary. The total number of validation warning messages. :return: The warn_message_count of this UserDefinedFunctionValidationSummary. :rtype: int
src/oci/data_integration/models/user_defined_function_validation_summary.py
warn_message_count
ezequielramos/oci-python-sdk
249
python
@property def warn_message_count(self): '\n Gets the warn_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation warning messages.\n\n\n :return: The warn_message_count of this UserDefinedFunctionValidationSummary.\n :rtype: int\n ' return self._warn_message_count
@property def warn_message_count(self): '\n Gets the warn_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation warning messages.\n\n\n :return: The warn_message_count of this UserDefinedFunctionValidationSummary.\n :rtype: int\n ' return self._warn_message_count<|docstring|>Gets the warn_message_count of this UserDefinedFunctionValidationSummary. The total number of validation warning messages. :return: The warn_message_count of this UserDefinedFunctionValidationSummary. :rtype: int<|endoftext|>
4f334857f3422ff79db537fe2224f758872d89a9f3a04174ab2c68289f1d45dd
@warn_message_count.setter
def warn_message_count(self, warn_message_count):
    """
    Sets the warn_message_count of this UserDefinedFunctionValidationSummary.
    The total number of validation warning messages.

    :param warn_message_count: The warn_message_count of this UserDefinedFunctionValidationSummary.
    :type: int
    """
    self._warn_message_count = warn_message_count
Sets the warn_message_count of this UserDefinedFunctionValidationSummary. The total number of validation warning messages. :param warn_message_count: The warn_message_count of this UserDefinedFunctionValidationSummary. :type: int
src/oci/data_integration/models/user_defined_function_validation_summary.py
warn_message_count
ezequielramos/oci-python-sdk
249
python
@warn_message_count.setter def warn_message_count(self, warn_message_count): '\n Sets the warn_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation warning messages.\n\n\n :param warn_message_count: The warn_message_count of this UserDefinedFunctionValidationSummary.\n :type: int\n ' self._warn_message_count = warn_message_count
@warn_message_count.setter def warn_message_count(self, warn_message_count): '\n Sets the warn_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation warning messages.\n\n\n :param warn_message_count: The warn_message_count of this UserDefinedFunctionValidationSummary.\n :type: int\n ' self._warn_message_count = warn_message_count<|docstring|>Sets the warn_message_count of this UserDefinedFunctionValidationSummary. The total number of validation warning messages. :param warn_message_count: The warn_message_count of this UserDefinedFunctionValidationSummary. :type: int<|endoftext|>
fa47ce80751324a19fe14ec4986f61824b634b3fbb062d607fe94a90ed7cbc50
@property
def info_message_count(self):
    """
    Gets the info_message_count of this UserDefinedFunctionValidationSummary.
    The total number of validation information messages.

    :return: The info_message_count of this UserDefinedFunctionValidationSummary.
    :rtype: int
    """
    return self._info_message_count
Gets the info_message_count of this UserDefinedFunctionValidationSummary. The total number of validation information messages. :return: The info_message_count of this UserDefinedFunctionValidationSummary. :rtype: int
src/oci/data_integration/models/user_defined_function_validation_summary.py
info_message_count
ezequielramos/oci-python-sdk
249
python
@property def info_message_count(self): '\n Gets the info_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation information messages.\n\n\n :return: The info_message_count of this UserDefinedFunctionValidationSummary.\n :rtype: int\n ' return self._info_message_count
@property def info_message_count(self): '\n Gets the info_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation information messages.\n\n\n :return: The info_message_count of this UserDefinedFunctionValidationSummary.\n :rtype: int\n ' return self._info_message_count<|docstring|>Gets the info_message_count of this UserDefinedFunctionValidationSummary. The total number of validation information messages. :return: The info_message_count of this UserDefinedFunctionValidationSummary. :rtype: int<|endoftext|>
b2d45e5839c2a9b3ca8988cc1f042578b2abdaf2d39916f7e3757ef824271463
@info_message_count.setter
def info_message_count(self, info_message_count):
    """
    Sets the info_message_count of this UserDefinedFunctionValidationSummary.
    The total number of validation information messages.

    :param info_message_count: The info_message_count of this UserDefinedFunctionValidationSummary.
    :type: int
    """
    self._info_message_count = info_message_count
Sets the info_message_count of this UserDefinedFunctionValidationSummary. The total number of validation information messages. :param info_message_count: The info_message_count of this UserDefinedFunctionValidationSummary. :type: int
src/oci/data_integration/models/user_defined_function_validation_summary.py
info_message_count
ezequielramos/oci-python-sdk
249
python
@info_message_count.setter def info_message_count(self, info_message_count): '\n Sets the info_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation information messages.\n\n\n :param info_message_count: The info_message_count of this UserDefinedFunctionValidationSummary.\n :type: int\n ' self._info_message_count = info_message_count
@info_message_count.setter def info_message_count(self, info_message_count): '\n Sets the info_message_count of this UserDefinedFunctionValidationSummary.\n The total number of validation information messages.\n\n\n :param info_message_count: The info_message_count of this UserDefinedFunctionValidationSummary.\n :type: int\n ' self._info_message_count = info_message_count<|docstring|>Sets the info_message_count of this UserDefinedFunctionValidationSummary. The total number of validation information messages. :param info_message_count: The info_message_count of this UserDefinedFunctionValidationSummary. :type: int<|endoftext|>
0d8c15dfb86c08513bb6e3975aa3dfa95747080f4f0e187a398392556571506b
@property
def validation_messages(self):
    """
    Gets the validation_messages of this UserDefinedFunctionValidationSummary.
    The detailed information of the UserDefinedFunction object validation.

    :return: The validation_messages of this UserDefinedFunctionValidationSummary.
    :rtype: dict(str, list[ValidationMessage])
    """
    return self._validation_messages
Gets the validation_messages of this UserDefinedFunctionValidationSummary. The detailed information of the UserDefinedFunction object validation. :return: The validation_messages of this UserDefinedFunctionValidationSummary. :rtype: dict(str, list[ValidationMessage])
src/oci/data_integration/models/user_defined_function_validation_summary.py
validation_messages
ezequielramos/oci-python-sdk
249
python
@property def validation_messages(self): '\n Gets the validation_messages of this UserDefinedFunctionValidationSummary.\n The detailed information of the UserDefinedFunction object validation.\n\n\n :return: The validation_messages of this UserDefinedFunctionValidationSummary.\n :rtype: dict(str, list[ValidationMessage])\n ' return self._validation_messages
@property def validation_messages(self): '\n Gets the validation_messages of this UserDefinedFunctionValidationSummary.\n The detailed information of the UserDefinedFunction object validation.\n\n\n :return: The validation_messages of this UserDefinedFunctionValidationSummary.\n :rtype: dict(str, list[ValidationMessage])\n ' return self._validation_messages<|docstring|>Gets the validation_messages of this UserDefinedFunctionValidationSummary. The detailed information of the UserDefinedFunction object validation. :return: The validation_messages of this UserDefinedFunctionValidationSummary. :rtype: dict(str, list[ValidationMessage])<|endoftext|>
3542307f2e71c5c7837b25464e12715897ac91aaf4207b2043f5f5f55c5a6c63
@validation_messages.setter
def validation_messages(self, validation_messages):
    """
    Sets the validation_messages of this UserDefinedFunctionValidationSummary.
    The detailed information of the UserDefinedFunction object validation.

    :param validation_messages: The validation_messages of this UserDefinedFunctionValidationSummary.
    :type: dict(str, list[ValidationMessage])
    """
    self._validation_messages = validation_messages
Sets the validation_messages of this UserDefinedFunctionValidationSummary. The detailed information of the UserDefinedFunction object validation. :param validation_messages: The validation_messages of this UserDefinedFunctionValidationSummary. :type: dict(str, list[ValidationMessage])
src/oci/data_integration/models/user_defined_function_validation_summary.py
validation_messages
ezequielramos/oci-python-sdk
249
python
@validation_messages.setter def validation_messages(self, validation_messages): '\n Sets the validation_messages of this UserDefinedFunctionValidationSummary.\n The detailed information of the UserDefinedFunction object validation.\n\n\n :param validation_messages: The validation_messages of this UserDefinedFunctionValidationSummary.\n :type: dict(str, list[ValidationMessage])\n ' self._validation_messages = validation_messages
@validation_messages.setter def validation_messages(self, validation_messages): '\n Sets the validation_messages of this UserDefinedFunctionValidationSummary.\n The detailed information of the UserDefinedFunction object validation.\n\n\n :param validation_messages: The validation_messages of this UserDefinedFunctionValidationSummary.\n :type: dict(str, list[ValidationMessage])\n ' self._validation_messages = validation_messages<|docstring|>Sets the validation_messages of this UserDefinedFunctionValidationSummary. The detailed information of the UserDefinedFunction object validation. :param validation_messages: The validation_messages of this UserDefinedFunctionValidationSummary. :type: dict(str, list[ValidationMessage])<|endoftext|>
171aba8baf9e311c6d3a4e54ab9f678bfd9950969f5a0bd8899e5f43cb002c22
@property
def key(self):
    """
    Gets the key of this UserDefinedFunctionValidationSummary.
    Objects will use a 36 character key as unique ID. It is system generated and cannot be modified.

    :return: The key of this UserDefinedFunctionValidationSummary.
    :rtype: str
    """
    return self._key
Gets the key of this UserDefinedFunctionValidationSummary. Objects will use a 36 character key as unique ID. It is system generated and cannot be modified. :return: The key of this UserDefinedFunctionValidationSummary. :rtype: str
src/oci/data_integration/models/user_defined_function_validation_summary.py
key
ezequielramos/oci-python-sdk
249
python
@property def key(self): '\n Gets the key of this UserDefinedFunctionValidationSummary.\n Objects will use a 36 character key as unique ID. It is system generated and cannot be modified.\n\n\n :return: The key of this UserDefinedFunctionValidationSummary.\n :rtype: str\n ' return self._key
@property def key(self): '\n Gets the key of this UserDefinedFunctionValidationSummary.\n Objects will use a 36 character key as unique ID. It is system generated and cannot be modified.\n\n\n :return: The key of this UserDefinedFunctionValidationSummary.\n :rtype: str\n ' return self._key<|docstring|>Gets the key of this UserDefinedFunctionValidationSummary. Objects will use a 36 character key as unique ID. It is system generated and cannot be modified. :return: The key of this UserDefinedFunctionValidationSummary. :rtype: str<|endoftext|>
02d7c69d5960d0ed8d2ec54c2f05fca5b8304be9bc3ddde34dfb7a83ce49ced9
@key.setter
def key(self, key):
    """
    Sets the key of this UserDefinedFunctionValidationSummary.
    Objects will use a 36 character key as unique ID. It is system generated and cannot be modified.

    :param key: The key of this UserDefinedFunctionValidationSummary.
    :type: str
    """
    self._key = key
Sets the key of this UserDefinedFunctionValidationSummary. Objects will use a 36 character key as unique ID. It is system generated and cannot be modified. :param key: The key of this UserDefinedFunctionValidationSummary. :type: str
src/oci/data_integration/models/user_defined_function_validation_summary.py
key
ezequielramos/oci-python-sdk
249
python
@key.setter def key(self, key): '\n Sets the key of this UserDefinedFunctionValidationSummary.\n Objects will use a 36 character key as unique ID. It is system generated and cannot be modified.\n\n\n :param key: The key of this UserDefinedFunctionValidationSummary.\n :type: str\n ' self._key = key
@key.setter def key(self, key): '\n Sets the key of this UserDefinedFunctionValidationSummary.\n Objects will use a 36 character key as unique ID. It is system generated and cannot be modified.\n\n\n :param key: The key of this UserDefinedFunctionValidationSummary.\n :type: str\n ' self._key = key<|docstring|>Sets the key of this UserDefinedFunctionValidationSummary. Objects will use a 36 character key as unique ID. It is system generated and cannot be modified. :param key: The key of this UserDefinedFunctionValidationSummary. :type: str<|endoftext|>
2d8d706e3788b5e611d892e4a1e17ec20e0ba618cd4a10a6fac27835dce53430
@property
def model_type(self):
    """
    Gets the model_type of this UserDefinedFunctionValidationSummary.
    The type of the object.

    :return: The model_type of this UserDefinedFunctionValidationSummary.
    :rtype: str
    """
    return self._model_type
Gets the model_type of this UserDefinedFunctionValidationSummary. The type of the object. :return: The model_type of this UserDefinedFunctionValidationSummary. :rtype: str
src/oci/data_integration/models/user_defined_function_validation_summary.py
model_type
ezequielramos/oci-python-sdk
249
python
@property def model_type(self): '\n Gets the model_type of this UserDefinedFunctionValidationSummary.\n The type of the object.\n\n\n :return: The model_type of this UserDefinedFunctionValidationSummary.\n :rtype: str\n ' return self._model_type
@property def model_type(self): '\n Gets the model_type of this UserDefinedFunctionValidationSummary.\n The type of the object.\n\n\n :return: The model_type of this UserDefinedFunctionValidationSummary.\n :rtype: str\n ' return self._model_type<|docstring|>Gets the model_type of this UserDefinedFunctionValidationSummary. The type of the object. :return: The model_type of this UserDefinedFunctionValidationSummary. :rtype: str<|endoftext|>
713c1828bf2c8ac1e845e3a112645df707612ad3fd1f24b6910bf8f671c116b7
@model_type.setter
def model_type(self, model_type):
    """
    Sets the model_type of this UserDefinedFunctionValidationSummary.
    The type of the object.

    :param model_type: The model_type of this UserDefinedFunctionValidationSummary.
    :type: str
    """
    self._model_type = model_type
Sets the model_type of this UserDefinedFunctionValidationSummary. The type of the object. :param model_type: The model_type of this UserDefinedFunctionValidationSummary. :type: str
src/oci/data_integration/models/user_defined_function_validation_summary.py
model_type
ezequielramos/oci-python-sdk
249
python
@model_type.setter def model_type(self, model_type): '\n Sets the model_type of this UserDefinedFunctionValidationSummary.\n The type of the object.\n\n\n :param model_type: The model_type of this UserDefinedFunctionValidationSummary.\n :type: str\n ' self._model_type = model_type
@model_type.setter def model_type(self, model_type): '\n Sets the model_type of this UserDefinedFunctionValidationSummary.\n The type of the object.\n\n\n :param model_type: The model_type of this UserDefinedFunctionValidationSummary.\n :type: str\n ' self._model_type = model_type<|docstring|>Sets the model_type of this UserDefinedFunctionValidationSummary. The type of the object. :param model_type: The model_type of this UserDefinedFunctionValidationSummary. :type: str<|endoftext|>
3ed7b985d71d4c92135de689d42b38b7cf17f3b92bab23896bff947a4169fe75
@property
def model_version(self):
    """
    Gets the model_version of this UserDefinedFunctionValidationSummary.
    The model version of the object.

    :return: The model_version of this UserDefinedFunctionValidationSummary.
    :rtype: str
    """
    return self._model_version
Gets the model_version of this UserDefinedFunctionValidationSummary. The model version of the object. :return: The model_version of this UserDefinedFunctionValidationSummary. :rtype: str
src/oci/data_integration/models/user_defined_function_validation_summary.py
model_version
ezequielramos/oci-python-sdk
249
python
@property def model_version(self): '\n Gets the model_version of this UserDefinedFunctionValidationSummary.\n The model version of the object.\n\n\n :return: The model_version of this UserDefinedFunctionValidationSummary.\n :rtype: str\n ' return self._model_version
@property def model_version(self): '\n Gets the model_version of this UserDefinedFunctionValidationSummary.\n The model version of the object.\n\n\n :return: The model_version of this UserDefinedFunctionValidationSummary.\n :rtype: str\n ' return self._model_version<|docstring|>Gets the model_version of this UserDefinedFunctionValidationSummary. The model version of the object. :return: The model_version of this UserDefinedFunctionValidationSummary. :rtype: str<|endoftext|>
02a873e338010e6870c953f357c6009f6275ff3df9b99cf87638a5fb25b59236
@model_version.setter
def model_version(self, model_version):
    """
    Sets the model_version of this UserDefinedFunctionValidationSummary.
    The model version of the object.

    :param model_version: The model_version of this UserDefinedFunctionValidationSummary.
    :type: str
    """
    self._model_version = model_version
Sets the model_version of this UserDefinedFunctionValidationSummary. The model version of the object. :param model_version: The model_version of this UserDefinedFunctionValidationSummary. :type: str
src/oci/data_integration/models/user_defined_function_validation_summary.py
model_version
ezequielramos/oci-python-sdk
249
python
@model_version.setter def model_version(self, model_version): '\n Sets the model_version of this UserDefinedFunctionValidationSummary.\n The model version of the object.\n\n\n :param model_version: The model_version of this UserDefinedFunctionValidationSummary.\n :type: str\n ' self._model_version = model_version
@model_version.setter def model_version(self, model_version): '\n Sets the model_version of this UserDefinedFunctionValidationSummary.\n The model version of the object.\n\n\n :param model_version: The model_version of this UserDefinedFunctionValidationSummary.\n :type: str\n ' self._model_version = model_version<|docstring|>Sets the model_version of this UserDefinedFunctionValidationSummary. The model version of the object. :param model_version: The model_version of this UserDefinedFunctionValidationSummary. :type: str<|endoftext|>
1cfaa8c7377918e144e8722e8e378a1bbc4ed482afb6ff893fb7a5e0a6ee0b45
@property
def parent_ref(self):
    """
    Gets the parent_ref of this UserDefinedFunctionValidationSummary.

    :return: The parent_ref of this UserDefinedFunctionValidationSummary.
    :rtype: oci.data_integration.models.ParentReference
    """
    return self._parent_ref
Gets the parent_ref of this UserDefinedFunctionValidationSummary. :return: The parent_ref of this UserDefinedFunctionValidationSummary. :rtype: oci.data_integration.models.ParentReference
src/oci/data_integration/models/user_defined_function_validation_summary.py
parent_ref
ezequielramos/oci-python-sdk
249
python
@property def parent_ref(self): '\n Gets the parent_ref of this UserDefinedFunctionValidationSummary.\n\n :return: The parent_ref of this UserDefinedFunctionValidationSummary.\n :rtype: oci.data_integration.models.ParentReference\n ' return self._parent_ref
@property def parent_ref(self): '\n Gets the parent_ref of this UserDefinedFunctionValidationSummary.\n\n :return: The parent_ref of this UserDefinedFunctionValidationSummary.\n :rtype: oci.data_integration.models.ParentReference\n ' return self._parent_ref<|docstring|>Gets the parent_ref of this UserDefinedFunctionValidationSummary. :return: The parent_ref of this UserDefinedFunctionValidationSummary. :rtype: oci.data_integration.models.ParentReference<|endoftext|>
e0d0d7c16795b0ba531297f6517f26028e823817c1fab8b9dc1b4bd9fa56117e
@parent_ref.setter
def parent_ref(self, parent_ref):
    """
    Sets the parent_ref of this UserDefinedFunctionValidationSummary.

    :param parent_ref: The parent_ref of this UserDefinedFunctionValidationSummary.
    :type: oci.data_integration.models.ParentReference
    """
    self._parent_ref = parent_ref
Sets the parent_ref of this UserDefinedFunctionValidationSummary. :param parent_ref: The parent_ref of this UserDefinedFunctionValidationSummary. :type: oci.data_integration.models.ParentReference
src/oci/data_integration/models/user_defined_function_validation_summary.py
parent_ref
ezequielramos/oci-python-sdk
249
python
@parent_ref.setter def parent_ref(self, parent_ref): '\n Sets the parent_ref of this UserDefinedFunctionValidationSummary.\n\n :param parent_ref: The parent_ref of this UserDefinedFunctionValidationSummary.\n :type: oci.data_integration.models.ParentReference\n ' self._parent_ref = parent_ref
@parent_ref.setter def parent_ref(self, parent_ref): '\n Sets the parent_ref of this UserDefinedFunctionValidationSummary.\n\n :param parent_ref: The parent_ref of this UserDefinedFunctionValidationSummary.\n :type: oci.data_integration.models.ParentReference\n ' self._parent_ref = parent_ref<|docstring|>Sets the parent_ref of this UserDefinedFunctionValidationSummary. :param parent_ref: The parent_ref of this UserDefinedFunctionValidationSummary. :type: oci.data_integration.models.ParentReference<|endoftext|>
8fcd625d2fb4a3e881e2a2b8010c27e432fb3c92302e354fdcdafe2810db68b3
@property
def name(self):
    """
    Gets the name of this UserDefinedFunctionValidationSummary.
    Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters.

    :return: The name of this UserDefinedFunctionValidationSummary.
    :rtype: str
    """
    return self._name
Gets the name of this UserDefinedFunctionValidationSummary. Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters. :return: The name of this UserDefinedFunctionValidationSummary. :rtype: str
src/oci/data_integration/models/user_defined_function_validation_summary.py
name
ezequielramos/oci-python-sdk
249
python
@property def name(self): '\n Gets the name of this UserDefinedFunctionValidationSummary.\n Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters.\n\n\n :return: The name of this UserDefinedFunctionValidationSummary.\n :rtype: str\n ' return self._name
@property def name(self): '\n Gets the name of this UserDefinedFunctionValidationSummary.\n Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters.\n\n\n :return: The name of this UserDefinedFunctionValidationSummary.\n :rtype: str\n ' return self._name<|docstring|>Gets the name of this UserDefinedFunctionValidationSummary. Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters. :return: The name of this UserDefinedFunctionValidationSummary. :rtype: str<|endoftext|>
dc968158c12c7135f8a545e525b200f0c60e7edff3e71c103cb3fe1e4a29f0d4
@name.setter
def name(self, name):
    """
    Sets the name of this UserDefinedFunctionValidationSummary.
    Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters.

    :param name: The name of this UserDefinedFunctionValidationSummary.
    :type: str
    """
    self._name = name
Sets the name of this UserDefinedFunctionValidationSummary. Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters. :param name: The name of this UserDefinedFunctionValidationSummary. :type: str
src/oci/data_integration/models/user_defined_function_validation_summary.py
name
ezequielramos/oci-python-sdk
249
python
@name.setter def name(self, name): '\n Sets the name of this UserDefinedFunctionValidationSummary.\n Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters.\n\n\n :param name: The name of this UserDefinedFunctionValidationSummary.\n :type: str\n ' self._name = name
@name.setter def name(self, name): '\n Sets the name of this UserDefinedFunctionValidationSummary.\n Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters.\n\n\n :param name: The name of this UserDefinedFunctionValidationSummary.\n :type: str\n ' self._name = name<|docstring|>Sets the name of this UserDefinedFunctionValidationSummary. Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters. :param name: The name of this UserDefinedFunctionValidationSummary. :type: str<|endoftext|>
c8dace48a770ef9775a5380e2aac6260b864851f656608987555f97ea6989424
@property def description(self): '\n Gets the description of this UserDefinedFunctionValidationSummary.\n Detailed description for the object.\n\n\n :return: The description of this UserDefinedFunctionValidationSummary.\n :rtype: str\n ' return self._description
Gets the description of this UserDefinedFunctionValidationSummary. Detailed description for the object. :return: The description of this UserDefinedFunctionValidationSummary. :rtype: str
src/oci/data_integration/models/user_defined_function_validation_summary.py
description
ezequielramos/oci-python-sdk
249
python
@property def description(self): '\n Gets the description of this UserDefinedFunctionValidationSummary.\n Detailed description for the object.\n\n\n :return: The description of this UserDefinedFunctionValidationSummary.\n :rtype: str\n ' return self._description
@property def description(self): '\n Gets the description of this UserDefinedFunctionValidationSummary.\n Detailed description for the object.\n\n\n :return: The description of this UserDefinedFunctionValidationSummary.\n :rtype: str\n ' return self._description<|docstring|>Gets the description of this UserDefinedFunctionValidationSummary. Detailed description for the object. :return: The description of this UserDefinedFunctionValidationSummary. :rtype: str<|endoftext|>
754518b6e135d27c563fd9b40ff86c2db13f4c16e0900173b781176607d6e581
@description.setter def description(self, description): '\n Sets the description of this UserDefinedFunctionValidationSummary.\n Detailed description for the object.\n\n\n :param description: The description of this UserDefinedFunctionValidationSummary.\n :type: str\n ' self._description = description
Sets the description of this UserDefinedFunctionValidationSummary. Detailed description for the object. :param description: The description of this UserDefinedFunctionValidationSummary. :type: str
src/oci/data_integration/models/user_defined_function_validation_summary.py
description
ezequielramos/oci-python-sdk
249
python
@description.setter def description(self, description): '\n Sets the description of this UserDefinedFunctionValidationSummary.\n Detailed description for the object.\n\n\n :param description: The description of this UserDefinedFunctionValidationSummary.\n :type: str\n ' self._description = description
@description.setter def description(self, description): '\n Sets the description of this UserDefinedFunctionValidationSummary.\n Detailed description for the object.\n\n\n :param description: The description of this UserDefinedFunctionValidationSummary.\n :type: str\n ' self._description = description<|docstring|>Sets the description of this UserDefinedFunctionValidationSummary. Detailed description for the object. :param description: The description of this UserDefinedFunctionValidationSummary. :type: str<|endoftext|>
25d23f88e54b1501e0e1b119b25fac39e4d75575927d5e231753e59b7e794a71
@property def object_version(self): '\n Gets the object_version of this UserDefinedFunctionValidationSummary.\n The version of the object that is used to track changes in the object instance.\n\n\n :return: The object_version of this UserDefinedFunctionValidationSummary.\n :rtype: int\n ' return self._object_version
Gets the object_version of this UserDefinedFunctionValidationSummary. The version of the object that is used to track changes in the object instance. :return: The object_version of this UserDefinedFunctionValidationSummary. :rtype: int
src/oci/data_integration/models/user_defined_function_validation_summary.py
object_version
ezequielramos/oci-python-sdk
249
python
@property def object_version(self): '\n Gets the object_version of this UserDefinedFunctionValidationSummary.\n The version of the object that is used to track changes in the object instance.\n\n\n :return: The object_version of this UserDefinedFunctionValidationSummary.\n :rtype: int\n ' return self._object_version
@property def object_version(self): '\n Gets the object_version of this UserDefinedFunctionValidationSummary.\n The version of the object that is used to track changes in the object instance.\n\n\n :return: The object_version of this UserDefinedFunctionValidationSummary.\n :rtype: int\n ' return self._object_version<|docstring|>Gets the object_version of this UserDefinedFunctionValidationSummary. The version of the object that is used to track changes in the object instance. :return: The object_version of this UserDefinedFunctionValidationSummary. :rtype: int<|endoftext|>
63321b5bb16ba9671649d9f8a33e5ce955490ed9f2c75ef1e976c629c18a1416
@object_version.setter def object_version(self, object_version): '\n Sets the object_version of this UserDefinedFunctionValidationSummary.\n The version of the object that is used to track changes in the object instance.\n\n\n :param object_version: The object_version of this UserDefinedFunctionValidationSummary.\n :type: int\n ' self._object_version = object_version
Sets the object_version of this UserDefinedFunctionValidationSummary. The version of the object that is used to track changes in the object instance. :param object_version: The object_version of this UserDefinedFunctionValidationSummary. :type: int
src/oci/data_integration/models/user_defined_function_validation_summary.py
object_version
ezequielramos/oci-python-sdk
249
python
@object_version.setter def object_version(self, object_version): '\n Sets the object_version of this UserDefinedFunctionValidationSummary.\n The version of the object that is used to track changes in the object instance.\n\n\n :param object_version: The object_version of this UserDefinedFunctionValidationSummary.\n :type: int\n ' self._object_version = object_version
@object_version.setter def object_version(self, object_version): '\n Sets the object_version of this UserDefinedFunctionValidationSummary.\n The version of the object that is used to track changes in the object instance.\n\n\n :param object_version: The object_version of this UserDefinedFunctionValidationSummary.\n :type: int\n ' self._object_version = object_version<|docstring|>Sets the object_version of this UserDefinedFunctionValidationSummary. The version of the object that is used to track changes in the object instance. :param object_version: The object_version of this UserDefinedFunctionValidationSummary. :type: int<|endoftext|>
fc4aa37536262bd41970976c53294c10fa19fa1aad8434664496c5b6e916f5b4
@property def object_status(self): '\n Gets the object_status of this UserDefinedFunctionValidationSummary.\n The status of an object that can be set to value 1 for shallow references across objects, other values reserved.\n\n\n :return: The object_status of this UserDefinedFunctionValidationSummary.\n :rtype: int\n ' return self._object_status
Gets the object_status of this UserDefinedFunctionValidationSummary. The status of an object that can be set to value 1 for shallow references across objects, other values reserved. :return: The object_status of this UserDefinedFunctionValidationSummary. :rtype: int
src/oci/data_integration/models/user_defined_function_validation_summary.py
object_status
ezequielramos/oci-python-sdk
249
python
@property def object_status(self): '\n Gets the object_status of this UserDefinedFunctionValidationSummary.\n The status of an object that can be set to value 1 for shallow references across objects, other values reserved.\n\n\n :return: The object_status of this UserDefinedFunctionValidationSummary.\n :rtype: int\n ' return self._object_status
@property def object_status(self): '\n Gets the object_status of this UserDefinedFunctionValidationSummary.\n The status of an object that can be set to value 1 for shallow references across objects, other values reserved.\n\n\n :return: The object_status of this UserDefinedFunctionValidationSummary.\n :rtype: int\n ' return self._object_status<|docstring|>Gets the object_status of this UserDefinedFunctionValidationSummary. The status of an object that can be set to value 1 for shallow references across objects, other values reserved. :return: The object_status of this UserDefinedFunctionValidationSummary. :rtype: int<|endoftext|>
4cd725345f2184c25fc4f762aef7c5efbaa85d30057158a8fc6bd39e5d34fd2b
@object_status.setter def object_status(self, object_status): '\n Sets the object_status of this UserDefinedFunctionValidationSummary.\n The status of an object that can be set to value 1 for shallow references across objects, other values reserved.\n\n\n :param object_status: The object_status of this UserDefinedFunctionValidationSummary.\n :type: int\n ' self._object_status = object_status
Sets the object_status of this UserDefinedFunctionValidationSummary. The status of an object that can be set to value 1 for shallow references across objects, other values reserved. :param object_status: The object_status of this UserDefinedFunctionValidationSummary. :type: int
src/oci/data_integration/models/user_defined_function_validation_summary.py
object_status
ezequielramos/oci-python-sdk
249
python
@object_status.setter def object_status(self, object_status): '\n Sets the object_status of this UserDefinedFunctionValidationSummary.\n The status of an object that can be set to value 1 for shallow references across objects, other values reserved.\n\n\n :param object_status: The object_status of this UserDefinedFunctionValidationSummary.\n :type: int\n ' self._object_status = object_status
@object_status.setter def object_status(self, object_status): '\n Sets the object_status of this UserDefinedFunctionValidationSummary.\n The status of an object that can be set to value 1 for shallow references across objects, other values reserved.\n\n\n :param object_status: The object_status of this UserDefinedFunctionValidationSummary.\n :type: int\n ' self._object_status = object_status<|docstring|>Sets the object_status of this UserDefinedFunctionValidationSummary. The status of an object that can be set to value 1 for shallow references across objects, other values reserved. :param object_status: The object_status of this UserDefinedFunctionValidationSummary. :type: int<|endoftext|>
be6b0e838b8e5b35eef63514901e2ee9d858ea0a93a5faa07d2fc074e0ad1dda
@property def identifier(self): '\n Gets the identifier of this UserDefinedFunctionValidationSummary.\n Value can only contain upper case letters, underscore, and numbers. It should begin with upper case letter or underscore. The value can be modified.\n\n\n :return: The identifier of this UserDefinedFunctionValidationSummary.\n :rtype: str\n ' return self._identifier
Gets the identifier of this UserDefinedFunctionValidationSummary. Value can only contain upper case letters, underscore, and numbers. It should begin with upper case letter or underscore. The value can be modified. :return: The identifier of this UserDefinedFunctionValidationSummary. :rtype: str
src/oci/data_integration/models/user_defined_function_validation_summary.py
identifier
ezequielramos/oci-python-sdk
249
python
@property def identifier(self): '\n Gets the identifier of this UserDefinedFunctionValidationSummary.\n Value can only contain upper case letters, underscore, and numbers. It should begin with upper case letter or underscore. The value can be modified.\n\n\n :return: The identifier of this UserDefinedFunctionValidationSummary.\n :rtype: str\n ' return self._identifier
@property def identifier(self): '\n Gets the identifier of this UserDefinedFunctionValidationSummary.\n Value can only contain upper case letters, underscore, and numbers. It should begin with upper case letter or underscore. The value can be modified.\n\n\n :return: The identifier of this UserDefinedFunctionValidationSummary.\n :rtype: str\n ' return self._identifier<|docstring|>Gets the identifier of this UserDefinedFunctionValidationSummary. Value can only contain upper case letters, underscore, and numbers. It should begin with upper case letter or underscore. The value can be modified. :return: The identifier of this UserDefinedFunctionValidationSummary. :rtype: str<|endoftext|>
3d5cfecd38ea9e47d9b13829f8519c65636fc540673f64d84594ada7264c7528
@identifier.setter def identifier(self, identifier): '\n Sets the identifier of this UserDefinedFunctionValidationSummary.\n Value can only contain upper case letters, underscore, and numbers. It should begin with upper case letter or underscore. The value can be modified.\n\n\n :param identifier: The identifier of this UserDefinedFunctionValidationSummary.\n :type: str\n ' self._identifier = identifier
Sets the identifier of this UserDefinedFunctionValidationSummary. Value can only contain upper case letters, underscore, and numbers. It should begin with upper case letter or underscore. The value can be modified. :param identifier: The identifier of this UserDefinedFunctionValidationSummary. :type: str
src/oci/data_integration/models/user_defined_function_validation_summary.py
identifier
ezequielramos/oci-python-sdk
249
python
@identifier.setter def identifier(self, identifier): '\n Sets the identifier of this UserDefinedFunctionValidationSummary.\n Value can only contain upper case letters, underscore, and numbers. It should begin with upper case letter or underscore. The value can be modified.\n\n\n :param identifier: The identifier of this UserDefinedFunctionValidationSummary.\n :type: str\n ' self._identifier = identifier
@identifier.setter def identifier(self, identifier): '\n Sets the identifier of this UserDefinedFunctionValidationSummary.\n Value can only contain upper case letters, underscore, and numbers. It should begin with upper case letter or underscore. The value can be modified.\n\n\n :param identifier: The identifier of this UserDefinedFunctionValidationSummary.\n :type: str\n ' self._identifier = identifier<|docstring|>Sets the identifier of this UserDefinedFunctionValidationSummary. Value can only contain upper case letters, underscore, and numbers. It should begin with upper case letter or underscore. The value can be modified. :param identifier: The identifier of this UserDefinedFunctionValidationSummary. :type: str<|endoftext|>
e8f65a3b81cb79a439adf53aab6f94b46a5e39d3eeb4361b1c80f99b73089c99
@property def metadata(self): '\n Gets the metadata of this UserDefinedFunctionValidationSummary.\n\n :return: The metadata of this UserDefinedFunctionValidationSummary.\n :rtype: oci.data_integration.models.ObjectMetadata\n ' return self._metadata
Gets the metadata of this UserDefinedFunctionValidationSummary. :return: The metadata of this UserDefinedFunctionValidationSummary. :rtype: oci.data_integration.models.ObjectMetadata
src/oci/data_integration/models/user_defined_function_validation_summary.py
metadata
ezequielramos/oci-python-sdk
249
python
@property def metadata(self): '\n Gets the metadata of this UserDefinedFunctionValidationSummary.\n\n :return: The metadata of this UserDefinedFunctionValidationSummary.\n :rtype: oci.data_integration.models.ObjectMetadata\n ' return self._metadata
@property def metadata(self): '\n Gets the metadata of this UserDefinedFunctionValidationSummary.\n\n :return: The metadata of this UserDefinedFunctionValidationSummary.\n :rtype: oci.data_integration.models.ObjectMetadata\n ' return self._metadata<|docstring|>Gets the metadata of this UserDefinedFunctionValidationSummary. :return: The metadata of this UserDefinedFunctionValidationSummary. :rtype: oci.data_integration.models.ObjectMetadata<|endoftext|>
11b5de2f6113c45f3686dfad93a712f6b26fe0dc7a41e211330190da9eb30012
@metadata.setter def metadata(self, metadata): '\n Sets the metadata of this UserDefinedFunctionValidationSummary.\n\n :param metadata: The metadata of this UserDefinedFunctionValidationSummary.\n :type: oci.data_integration.models.ObjectMetadata\n ' self._metadata = metadata
Sets the metadata of this UserDefinedFunctionValidationSummary. :param metadata: The metadata of this UserDefinedFunctionValidationSummary. :type: oci.data_integration.models.ObjectMetadata
src/oci/data_integration/models/user_defined_function_validation_summary.py
metadata
ezequielramos/oci-python-sdk
249
python
@metadata.setter def metadata(self, metadata): '\n Sets the metadata of this UserDefinedFunctionValidationSummary.\n\n :param metadata: The metadata of this UserDefinedFunctionValidationSummary.\n :type: oci.data_integration.models.ObjectMetadata\n ' self._metadata = metadata
@metadata.setter def metadata(self, metadata): '\n Sets the metadata of this UserDefinedFunctionValidationSummary.\n\n :param metadata: The metadata of this UserDefinedFunctionValidationSummary.\n :type: oci.data_integration.models.ObjectMetadata\n ' self._metadata = metadata<|docstring|>Sets the metadata of this UserDefinedFunctionValidationSummary. :param metadata: The metadata of this UserDefinedFunctionValidationSummary. :type: oci.data_integration.models.ObjectMetadata<|endoftext|>
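The twelve property records above together document the public surface of the UserDefinedFunctionValidationSummary model. As a brief illustration, here is a hedged Python sketch of how those getters and setters might be exercised; the import path is inferred from the recorded file path, and the no-argument constructor is an assumption rather than something confirmed by the records.
# Hypothetical usage of the documented properties; constructor and import path are assumed.
from oci.data_integration.models import UserDefinedFunctionValidationSummary

summary = UserDefinedFunctionValidationSummary()
summary.name = "VALIDATE_UDF"            # free-form text, editable, up to 1000 characters
summary.description = "Validation result for a user defined function."
summary.object_version = 1               # tracks changes in the object instance
summary.object_status = 1                # value 1 marks shallow references; other values reserved
summary.identifier = "VALIDATE_UDF_1"    # upper case letters, underscore, and numbers only
print(summary.name, summary.identifier)  # the getters simply return the stored values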
a11015e2c9ea8328fe67cfb6e2fa9b2c583e93a32ce66bb78d9b2e85d31ff620
@distributed_trace def list(self, **kwargs: Any) -> Iterable['_models.FrontDoorListResult']: 'Lists all of the Front Doors within an Azure subscription.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either FrontDoorListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.frontdoor.models.FrontDoorListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if (not next_link): request = build_list_request(subscription_id=self._config.subscription_id, template_url=self.list.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_request(subscription_id=self._config.subscription_id, template_url=next_link) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = 'GET' return request def extract_data(pipeline_response): deserialized = self._deserialize('FrontDoorListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), iter(list_of_elem)) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged(get_next, extract_data)
Lists all of the Front Doors within an Azure subscription. :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either FrontDoorListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.frontdoor.models.FrontDoorListResult] :raises: ~azure.core.exceptions.HttpResponseError
sdk/network/azure-mgmt-frontdoor/azure/mgmt/frontdoor/operations/_front_doors_operations.py
list
NateLehman/azure-sdk-for-python
1
python
@distributed_trace def list(self, **kwargs: Any) -> Iterable['_models.FrontDoorListResult']: 'Lists all of the Front Doors within an Azure subscription.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either FrontDoorListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.frontdoor.models.FrontDoorListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if (not next_link): request = build_list_request(subscription_id=self._config.subscription_id, template_url=self.list.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_request(subscription_id=self._config.subscription_id, template_url=next_link) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = 'GET' return request def extract_data(pipeline_response): deserialized = self._deserialize('FrontDoorListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), iter(list_of_elem)) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged(get_next, extract_data)
@distributed_trace def list(self, **kwargs: Any) -> Iterable['_models.FrontDoorListResult']: 'Lists all of the Front Doors within an Azure subscription.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either FrontDoorListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.frontdoor.models.FrontDoorListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if (not next_link): request = build_list_request(subscription_id=self._config.subscription_id, template_url=self.list.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_request(subscription_id=self._config.subscription_id, template_url=next_link) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = 'GET' return request def extract_data(pipeline_response): deserialized = self._deserialize('FrontDoorListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), iter(list_of_elem)) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged(get_next, extract_data)<|docstring|>Lists all of the Front Doors within an Azure subscription. :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either FrontDoorListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.frontdoor.models.FrontDoorListResult] :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
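The record above documents a paged list operation that returns an ItemPaged of FrontDoorListResult values. A minimal consumption sketch follows; the FrontDoorManagementClient class, its front_doors attribute, and the credential/subscription values are assumptions drawn from the azure-mgmt-frontdoor package layout, not from the record itself.
# Hypothetical pager consumption; client construction details are assumed.
from azure.identity import DefaultAzureCredential
from azure.mgmt.frontdoor import FrontDoorManagementClient

client = FrontDoorManagementClient(DefaultAzureCredential(), "<subscription-id>")

# ItemPaged follows next_link automatically, so this loop walks every page.
for front_door in client.front_doors.list():
    print(front_door.name)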
337e6fb39484a1e59a6e04bdab472f93669a3ad3ed8a903b7f0845bfcaedff70
@distributed_trace def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable['_models.FrontDoorListResult']: 'Lists all of the Front Doors within a resource group under a subscription.\n\n :param resource_group_name: Name of the Resource group within the Azure subscription.\n :type resource_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either FrontDoorListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.frontdoor.models.FrontDoorListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if (not next_link): request = build_list_by_resource_group_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, template_url=self.list_by_resource_group.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_by_resource_group_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, template_url=next_link) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = 'GET' return request def extract_data(pipeline_response): deserialized = self._deserialize('FrontDoorListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), iter(list_of_elem)) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged(get_next, extract_data)
Lists all of the Front Doors within a resource group under a subscription. :param resource_group_name: Name of the Resource group within the Azure subscription. :type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either FrontDoorListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.frontdoor.models.FrontDoorListResult] :raises: ~azure.core.exceptions.HttpResponseError
sdk/network/azure-mgmt-frontdoor/azure/mgmt/frontdoor/operations/_front_doors_operations.py
list_by_resource_group
NateLehman/azure-sdk-for-python
1
python
@distributed_trace def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable['_models.FrontDoorListResult']: 'Lists all of the Front Doors within a resource group under a subscription.\n\n :param resource_group_name: Name of the Resource group within the Azure subscription.\n :type resource_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either FrontDoorListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.frontdoor.models.FrontDoorListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if (not next_link): request = build_list_by_resource_group_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, template_url=self.list_by_resource_group.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_by_resource_group_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, template_url=next_link) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = 'GET' return request def extract_data(pipeline_response): deserialized = self._deserialize('FrontDoorListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), iter(list_of_elem)) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged(get_next, extract_data)
@distributed_trace def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable['_models.FrontDoorListResult']: 'Lists all of the Front Doors within a resource group under a subscription.\n\n :param resource_group_name: Name of the Resource group within the Azure subscription.\n :type resource_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either FrontDoorListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.frontdoor.models.FrontDoorListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if (not next_link): request = build_list_by_resource_group_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, template_url=self.list_by_resource_group.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_by_resource_group_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, template_url=next_link) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = 'GET' return request def extract_data(pipeline_response): deserialized = self._deserialize('FrontDoorListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), iter(list_of_elem)) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged(get_next, extract_data)<|docstring|>Lists all of the Front Doors within a resource group under a subscription. :param resource_group_name: Name of the Resource group within the Azure subscription. :type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either FrontDoorListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.frontdoor.models.FrontDoorListResult] :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
ea8b7b95ba5d233a0cc2330a00ee3ab1c1ab34857e08d4d11ac887c5edebdac4
@distributed_trace def get(self, resource_group_name: str, front_door_name: str, **kwargs: Any) -> '_models.FrontDoor': 'Gets a Front Door with the specified Front Door name under the specified subscription and\n resource group.\n\n :param resource_group_name: Name of the Resource group within the Azure subscription.\n :type resource_group_name: str\n :param front_door_name: Name of the Front Door which is globally unique.\n :type front_door_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: FrontDoor, or the result of cls(response)\n :rtype: ~azure.mgmt.frontdoor.models.FrontDoor\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_get_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, front_door_name=front_door_name, template_url=self.get.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('FrontDoor', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
Gets a Front Door with the specified Front Door name under the specified subscription and resource group. :param resource_group_name: Name of the Resource group within the Azure subscription. :type resource_group_name: str :param front_door_name: Name of the Front Door which is globally unique. :type front_door_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: FrontDoor, or the result of cls(response) :rtype: ~azure.mgmt.frontdoor.models.FrontDoor :raises: ~azure.core.exceptions.HttpResponseError
sdk/network/azure-mgmt-frontdoor/azure/mgmt/frontdoor/operations/_front_doors_operations.py
get
NateLehman/azure-sdk-for-python
1
python
@distributed_trace def get(self, resource_group_name: str, front_door_name: str, **kwargs: Any) -> '_models.FrontDoor': 'Gets a Front Door with the specified Front Door name under the specified subscription and\n resource group.\n\n :param resource_group_name: Name of the Resource group within the Azure subscription.\n :type resource_group_name: str\n :param front_door_name: Name of the Front Door which is globally unique.\n :type front_door_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: FrontDoor, or the result of cls(response)\n :rtype: ~azure.mgmt.frontdoor.models.FrontDoor\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_get_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, front_door_name=front_door_name, template_url=self.get.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('FrontDoor', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
@distributed_trace def get(self, resource_group_name: str, front_door_name: str, **kwargs: Any) -> '_models.FrontDoor': 'Gets a Front Door with the specified Front Door name under the specified subscription and\n resource group.\n\n :param resource_group_name: Name of the Resource group within the Azure subscription.\n :type resource_group_name: str\n :param front_door_name: Name of the Front Door which is globally unique.\n :type front_door_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: FrontDoor, or the result of cls(response)\n :rtype: ~azure.mgmt.frontdoor.models.FrontDoor\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_get_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, front_door_name=front_door_name, template_url=self.get.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('FrontDoor', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized<|docstring|>Gets a Front Door with the specified Front Door name under the specified subscription and resource group. :param resource_group_name: Name of the Resource group within the Azure subscription. :type resource_group_name: str :param front_door_name: Name of the Front Door which is globally unique. :type front_door_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: FrontDoor, or the result of cls(response) :rtype: ~azure.mgmt.frontdoor.models.FrontDoor :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
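For the point lookup documented in the get record above, a hedged sketch that reuses the assumed client object from the list example, with placeholder resource names:
# Assumes the client sketched earlier; resource names are placeholders.
front_door = client.front_doors.get(
    resource_group_name="my-resource-group",
    front_door_name="my-front-door",
)
print(front_door.name)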
e13e91e23ce8429029eedb6bac107a077b3faa28e1584ead91fc4395f22331ae
@distributed_trace def begin_create_or_update(self, resource_group_name: str, front_door_name: str, front_door_parameters: '_models.FrontDoor', **kwargs: Any) -> LROPoller['_models.FrontDoor']: 'Creates a new Front Door with a Front Door name under the specified subscription and resource\n group.\n\n :param resource_group_name: Name of the Resource group within the Azure subscription.\n :type resource_group_name: str\n :param front_door_name: Name of the Front Door which is globally unique.\n :type front_door_name: str\n :param front_door_parameters: Front Door properties needed to create a new Front Door.\n :type front_door_parameters: ~azure.mgmt.frontdoor.models.FrontDoor\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this\n operation to not poll, or pass in your own initialized polling object for a personal polling\n strategy.\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no\n Retry-After header is present.\n :return: An instance of LROPoller that returns either FrontDoor or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.frontdoor.models.FrontDoor]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' content_type = kwargs.pop('content_type', 'application/json') polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = self._create_or_update_initial(resource_group_name=resource_group_name, front_door_name=front_door_name, front_door_parameters=front_door_parameters, content_type=content_type, cls=(lambda x, y, z: x), **kwargs) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('FrontDoor', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if (polling is True): polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs) elif (polling is False): polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
Creates a new Front Door with a Front Door name under the specified subscription and resource group. :param resource_group_name: Name of the Resource group within the Azure subscription. :type resource_group_name: str :param front_door_name: Name of the Front Door which is globally unique. :type front_door_name: str :param front_door_parameters: Front Door properties needed to create a new Front Door. :type front_door_parameters: ~azure.mgmt.frontdoor.models.FrontDoor :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either FrontDoor or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.frontdoor.models.FrontDoor] :raises: ~azure.core.exceptions.HttpResponseError
sdk/network/azure-mgmt-frontdoor/azure/mgmt/frontdoor/operations/_front_doors_operations.py
begin_create_or_update
NateLehman/azure-sdk-for-python
1
python
@distributed_trace def begin_create_or_update(self, resource_group_name: str, front_door_name: str, front_door_parameters: '_models.FrontDoor', **kwargs: Any) -> LROPoller['_models.FrontDoor']: 'Creates a new Front Door with a Front Door name under the specified subscription and resource\n group.\n\n :param resource_group_name: Name of the Resource group within the Azure subscription.\n :type resource_group_name: str\n :param front_door_name: Name of the Front Door which is globally unique.\n :type front_door_name: str\n :param front_door_parameters: Front Door properties needed to create a new Front Door.\n :type front_door_parameters: ~azure.mgmt.frontdoor.models.FrontDoor\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this\n operation to not poll, or pass in your own initialized polling object for a personal polling\n strategy.\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no\n Retry-After header is present.\n :return: An instance of LROPoller that returns either FrontDoor or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.frontdoor.models.FrontDoor]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' content_type = kwargs.pop('content_type', 'application/json') polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = self._create_or_update_initial(resource_group_name=resource_group_name, front_door_name=front_door_name, front_door_parameters=front_door_parameters, content_type=content_type, cls=(lambda x, y, z: x), **kwargs) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('FrontDoor', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if (polling is True): polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs) elif (polling is False): polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
@distributed_trace def begin_create_or_update(self, resource_group_name: str, front_door_name: str, front_door_parameters: '_models.FrontDoor', **kwargs: Any) -> LROPoller['_models.FrontDoor']: 'Creates a new Front Door with a Front Door name under the specified subscription and resource\n group.\n\n :param resource_group_name: Name of the Resource group within the Azure subscription.\n :type resource_group_name: str\n :param front_door_name: Name of the Front Door which is globally unique.\n :type front_door_name: str\n :param front_door_parameters: Front Door properties needed to create a new Front Door.\n :type front_door_parameters: ~azure.mgmt.frontdoor.models.FrontDoor\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this\n operation to not poll, or pass in your own initialized polling object for a personal polling\n strategy.\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no\n Retry-After header is present.\n :return: An instance of LROPoller that returns either FrontDoor or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.frontdoor.models.FrontDoor]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' content_type = kwargs.pop('content_type', 'application/json') polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = self._create_or_update_initial(resource_group_name=resource_group_name, front_door_name=front_door_name, front_door_parameters=front_door_parameters, content_type=content_type, cls=(lambda x, y, z: x), **kwargs) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('FrontDoor', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if (polling is True): polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs) elif (polling is False): polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method)<|docstring|>Creates a new Front Door with a Front Door name under the specified subscription and resource group. :param resource_group_name: Name of the Resource group within the Azure subscription. :type resource_group_name: str :param front_door_name: Name of the Front Door which is globally unique. :type front_door_name: str :param front_door_parameters: Front Door properties needed to create a new Front Door. :type front_door_parameters: ~azure.mgmt.frontdoor.models.FrontDoor :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either FrontDoor or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.frontdoor.models.FrontDoor] :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
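The begin_create_or_update record above returns an LROPoller, so a caller normally blocks on result() or saves continuation_token() to resume later. A hedged sketch follows, reusing the assumed client object and an illustrative, almost certainly incomplete FrontDoor payload.
# Hypothetical long-running-operation usage; the payload below is illustrative only.
from azure.mgmt.frontdoor.models import FrontDoor

poller = client.front_doors.begin_create_or_update(
    resource_group_name="my-resource-group",
    front_door_name="my-front-door",
    front_door_parameters=FrontDoor(location="global"),
)
created = poller.result()  # blocks until the ARM operation reaches a terminal state
print(created.name)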
352ba858e143ca1a47cebaccca2851eb1d35727d4485006252c7bf84c8fcd327
@distributed_trace def begin_delete(self, resource_group_name: str, front_door_name: str, **kwargs: Any) -> LROPoller[None]: 'Deletes an existing Front Door with the specified parameters.\n\n :param resource_group_name: Name of the Resource group within the Azure subscription.\n :type resource_group_name: str\n :param front_door_name: Name of the Front Door which is globally unique.\n :type front_door_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this\n operation to not poll, or pass in your own initialized polling object for a personal polling\n strategy.\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no\n Retry-After header is present.\n :return: An instance of LROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[None]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = self._delete_initial(resource_group_name=resource_group_name, front_door_name=front_door_name, cls=(lambda x, y, z: x), **kwargs) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if (polling is True): polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs) elif (polling is False): polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
Deletes an existing Front Door with the specified parameters. :param resource_group_name: Name of the Resource group within the Azure subscription. :type resource_group_name: str :param front_door_name: Name of the Front Door which is globally unique. :type front_door_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError
sdk/network/azure-mgmt-frontdoor/azure/mgmt/frontdoor/operations/_front_doors_operations.py
begin_delete
NateLehman/azure-sdk-for-python
1
python
@distributed_trace def begin_delete(self, resource_group_name: str, front_door_name: str, **kwargs: Any) -> LROPoller[None]: 'Deletes an existing Front Door with the specified parameters.\n\n :param resource_group_name: Name of the Resource group within the Azure subscription.\n :type resource_group_name: str\n :param front_door_name: Name of the Front Door which is globally unique.\n :type front_door_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this\n operation to not poll, or pass in your own initialized polling object for a personal polling\n strategy.\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no\n Retry-After header is present.\n :return: An instance of LROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[None]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = self._delete_initial(resource_group_name=resource_group_name, front_door_name=front_door_name, cls=(lambda x, y, z: x), **kwargs) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if (polling is True): polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs) elif (polling is False): polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
@distributed_trace def begin_delete(self, resource_group_name: str, front_door_name: str, **kwargs: Any) -> LROPoller[None]: 'Deletes an existing Front Door with the specified parameters.\n\n :param resource_group_name: Name of the Resource group within the Azure subscription.\n :type resource_group_name: str\n :param front_door_name: Name of the Front Door which is globally unique.\n :type front_door_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this\n operation to not poll, or pass in your own initialized polling object for a personal polling\n strategy.\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no\n Retry-After header is present.\n :return: An instance of LROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[None]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = self._delete_initial(resource_group_name=resource_group_name, front_door_name=front_door_name, cls=(lambda x, y, z: x), **kwargs) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if (polling is True): polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs) elif (polling is False): polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method)<|docstring|>Deletes an existing Front Door with the specified parameters. :param resource_group_name: Name of the Resource group within the Azure subscription. :type resource_group_name: str :param front_door_name: Name of the Front Door which is globally unique. :type front_door_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
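The delete counterpart is driven the same way; per the record's docstring the poller resolves to None, so result() is called only to wait for completion. Again the client object is assumed from the earlier sketches.
# Hypothetical deletion; result() returns None once the operation completes.
delete_poller = client.front_doors.begin_delete(
    resource_group_name="my-resource-group",
    front_door_name="my-front-door",
)
delete_poller.result()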
ba06a7e9798a32824ce3b1af6d3a06e0824e01d3523ebcca9695faf64aa7ba1d
@distributed_trace def validate_custom_domain(self, resource_group_name: str, front_door_name: str, custom_domain_properties: '_models.ValidateCustomDomainInput', **kwargs: Any) -> '_models.ValidateCustomDomainOutput': 'Validates the custom domain mapping to ensure it maps to the correct Front Door endpoint in\n DNS.\n\n :param resource_group_name: Name of the Resource group within the Azure subscription.\n :type resource_group_name: str\n :param front_door_name: Name of the Front Door which is globally unique.\n :type front_door_name: str\n :param custom_domain_properties: Custom domain to be validated.\n :type custom_domain_properties: ~azure.mgmt.frontdoor.models.ValidateCustomDomainInput\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ValidateCustomDomainOutput, or the result of cls(response)\n :rtype: ~azure.mgmt.frontdoor.models.ValidateCustomDomainOutput\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', 'application/json') _json = self._serialize.body(custom_domain_properties, 'ValidateCustomDomainInput') request = build_validate_custom_domain_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, front_door_name=front_door_name, content_type=content_type, json=_json, template_url=self.validate_custom_domain.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('ValidateCustomDomainOutput', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
Validates the custom domain mapping to ensure it maps to the correct Front Door endpoint in DNS. :param resource_group_name: Name of the Resource group within the Azure subscription. :type resource_group_name: str :param front_door_name: Name of the Front Door which is globally unique. :type front_door_name: str :param custom_domain_properties: Custom domain to be validated. :type custom_domain_properties: ~azure.mgmt.frontdoor.models.ValidateCustomDomainInput :keyword callable cls: A custom type or function that will be passed the direct response :return: ValidateCustomDomainOutput, or the result of cls(response) :rtype: ~azure.mgmt.frontdoor.models.ValidateCustomDomainOutput :raises: ~azure.core.exceptions.HttpResponseError
sdk/network/azure-mgmt-frontdoor/azure/mgmt/frontdoor/operations/_front_doors_operations.py
validate_custom_domain
NateLehman/azure-sdk-for-python
1
python
@distributed_trace def validate_custom_domain(self, resource_group_name: str, front_door_name: str, custom_domain_properties: '_models.ValidateCustomDomainInput', **kwargs: Any) -> '_models.ValidateCustomDomainOutput': 'Validates the custom domain mapping to ensure it maps to the correct Front Door endpoint in\n DNS.\n\n :param resource_group_name: Name of the Resource group within the Azure subscription.\n :type resource_group_name: str\n :param front_door_name: Name of the Front Door which is globally unique.\n :type front_door_name: str\n :param custom_domain_properties: Custom domain to be validated.\n :type custom_domain_properties: ~azure.mgmt.frontdoor.models.ValidateCustomDomainInput\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ValidateCustomDomainOutput, or the result of cls(response)\n :rtype: ~azure.mgmt.frontdoor.models.ValidateCustomDomainOutput\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', 'application/json') _json = self._serialize.body(custom_domain_properties, 'ValidateCustomDomainInput') request = build_validate_custom_domain_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, front_door_name=front_door_name, content_type=content_type, json=_json, template_url=self.validate_custom_domain.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('ValidateCustomDomainOutput', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
@distributed_trace def validate_custom_domain(self, resource_group_name: str, front_door_name: str, custom_domain_properties: '_models.ValidateCustomDomainInput', **kwargs: Any) -> '_models.ValidateCustomDomainOutput': 'Validates the custom domain mapping to ensure it maps to the correct Front Door endpoint in\n DNS.\n\n :param resource_group_name: Name of the Resource group within the Azure subscription.\n :type resource_group_name: str\n :param front_door_name: Name of the Front Door which is globally unique.\n :type front_door_name: str\n :param custom_domain_properties: Custom domain to be validated.\n :type custom_domain_properties: ~azure.mgmt.frontdoor.models.ValidateCustomDomainInput\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ValidateCustomDomainOutput, or the result of cls(response)\n :rtype: ~azure.mgmt.frontdoor.models.ValidateCustomDomainOutput\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', 'application/json') _json = self._serialize.body(custom_domain_properties, 'ValidateCustomDomainInput') request = build_validate_custom_domain_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, front_door_name=front_door_name, content_type=content_type, json=_json, template_url=self.validate_custom_domain.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('ValidateCustomDomainOutput', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized<|docstring|>Validates the custom domain mapping to ensure it maps to the correct Front Door endpoint in DNS. :param resource_group_name: Name of the Resource group within the Azure subscription. :type resource_group_name: str :param front_door_name: Name of the Front Door which is globally unique. :type front_door_name: str :param custom_domain_properties: Custom domain to be validated. :type custom_domain_properties: ~azure.mgmt.frontdoor.models.ValidateCustomDomainInput :keyword callable cls: A custom type or function that will be passed the direct response :return: ValidateCustomDomainOutput, or the result of cls(response) :rtype: ~azure.mgmt.frontdoor.models.ValidateCustomDomainOutput :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
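validate_custom_domain, by contrast, is a plain synchronous call: it serializes a ValidateCustomDomainInput body, expects a 200 response, and deserializes a ValidateCustomDomainOutput, raising HttpResponseError otherwise. A hedged usage sketch follows; the client construction, the host_name field on the input model, and the custom_domain_validated/message attributes on the output are assumptions about the azure-mgmt-frontdoor models, not details stated in this record.

    # Hypothetical usage of validate_custom_domain; model and field names are assumed.
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.frontdoor import FrontDoorManagementClient
    from azure.mgmt.frontdoor import models as fd_models

    client = FrontDoorManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="<subscription-id>",
    )

    result = client.front_doors.validate_custom_domain(
        resource_group_name="my-resource-group",
        front_door_name="my-front-door",
        custom_domain_properties=fd_models.ValidateCustomDomainInput(
            host_name="www.contoso.com",
        ),
    )
    print(result.custom_domain_validated, result.message)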
dc92024fdb3ab3b39b49f23078948c4e1c598ca9b360a1de9380dd03013bcacd
def run_ridge(data, subjects_data, cv=10, alphas=None, train_sizes=None, n_jobs=None): 'Run ridge resgression.\n\n Parameters\n ----------\n data : pandas.DataFrame\n Features to be used for predictions.\n\n subjects_data : pandas.DataFrame\n Information about subjects from CamCAN dataset.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n alphas : numpy.ndarray\n Values for parameter alpha to be tested. Default is\n np.logspace(start=-3, stop=1, num=50, base=10.0).\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n ' if (alphas is None): alphas = np.logspace((- 3), 5, 100) if (train_sizes is None): train_sizes = np.linspace(0.1, 1.0, 5) subjects = data.index.values y = subjects_data.loc[subjects]['age'] reg = make_pipeline(StandardScaler(), RidgeCV(alphas)) cv = check_cv(cv) index = data.index mae = list() r2 = list() df_pred = pd.DataFrame(columns=['fold', 'y_pred'], index=subjects, dtype=float) for (ii, (train, test)) in enumerate(cv.split(data.values)): train_mask = (~ data.loc[index[train]].isna().values[(:, 0)]) test_mask = (~ data.loc[index[test]].isna().values[(:, 0)]) X_train = data.loc[index[train]].values[train_mask] X_test = data.loc[index[test]].values[test_mask] y_train = y.loc[index[train]].values[train_mask] y_test = y.loc[index[test]].values[test_mask] reg.fit(X_train, y_train) y_pred = reg.predict(X_test) mae.append((- mean_absolute_error(y_true=y_test, y_pred=y_pred))) r2.append(r2_score(y_true=y_test, y_pred=y_pred)) df_pred.loc[(index[test[test_mask]], 'y_pred')] = y_pred df_pred.loc[(index[test], 'fold')] = ii mask = (~ data.isna().values[(:, 0)]) (train_sizes, train_scores, test_scores) = learning_curve(reg, data[mask].values, y[mask].values, cv=cv, train_sizes=train_sizes, scoring='neg_mean_absolute_error', n_jobs=n_jobs) return (df_pred, np.array(mae), np.array(r2), train_sizes, train_scores, test_scores)
Run ridge regression. Parameters ---------- data : pandas.DataFrame Features to be used for predictions. subjects_data : pandas.DataFrame Information about subjects from CamCAN dataset. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. alphas : numpy.ndarray Values for parameter alpha to be tested. Default is np.logspace(start=-3, stop=1, num=50, base=10.0). train_sizes : array-like, shape (n_ticks,), dtype float or int Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. (default: np.linspace(0.1, 1.0, 5)) n_jobs : int or None, optional (default=None) The number of CPUs to use to do the computation. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details.
camcan/utils/evaluation.py
run_ridge
dengemann/engemann-2020-multimodal-brain-age
6
python
def run_ridge(data, subjects_data, cv=10, alphas=None, train_sizes=None, n_jobs=None): 'Run ridge resgression.\n\n Parameters\n ----------\n data : pandas.DataFrame\n Features to be used for predictions.\n\n subjects_data : pandas.DataFrame\n Information about subjects from CamCAN dataset.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n alphas : numpy.ndarray\n Values for parameter alpha to be tested. Default is\n np.logspace(start=-3, stop=1, num=50, base=10.0).\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n ' if (alphas is None): alphas = np.logspace((- 3), 5, 100) if (train_sizes is None): train_sizes = np.linspace(0.1, 1.0, 5) subjects = data.index.values y = subjects_data.loc[subjects]['age'] reg = make_pipeline(StandardScaler(), RidgeCV(alphas)) cv = check_cv(cv) index = data.index mae = list() r2 = list() df_pred = pd.DataFrame(columns=['fold', 'y_pred'], index=subjects, dtype=float) for (ii, (train, test)) in enumerate(cv.split(data.values)): train_mask = (~ data.loc[index[train]].isna().values[(:, 0)]) test_mask = (~ data.loc[index[test]].isna().values[(:, 0)]) X_train = data.loc[index[train]].values[train_mask] X_test = data.loc[index[test]].values[test_mask] y_train = y.loc[index[train]].values[train_mask] y_test = y.loc[index[test]].values[test_mask] reg.fit(X_train, y_train) y_pred = reg.predict(X_test) mae.append((- mean_absolute_error(y_true=y_test, y_pred=y_pred))) r2.append(r2_score(y_true=y_test, y_pred=y_pred)) df_pred.loc[(index[test[test_mask]], 'y_pred')] = y_pred df_pred.loc[(index[test], 'fold')] = ii mask = (~ data.isna().values[(:, 0)]) (train_sizes, train_scores, test_scores) = learning_curve(reg, data[mask].values, y[mask].values, cv=cv, train_sizes=train_sizes, scoring='neg_mean_absolute_error', n_jobs=n_jobs) return (df_pred, np.array(mae), np.array(r2), train_sizes, train_scores, test_scores)
def run_ridge(data, subjects_data, cv=10, alphas=None, train_sizes=None, n_jobs=None): 'Run ridge resgression.\n\n Parameters\n ----------\n data : pandas.DataFrame\n Features to be used for predictions.\n\n subjects_data : pandas.DataFrame\n Information about subjects from CamCAN dataset.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n alphas : numpy.ndarray\n Values for parameter alpha to be tested. Default is\n np.logspace(start=-3, stop=1, num=50, base=10.0).\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n ' if (alphas is None): alphas = np.logspace((- 3), 5, 100) if (train_sizes is None): train_sizes = np.linspace(0.1, 1.0, 5) subjects = data.index.values y = subjects_data.loc[subjects]['age'] reg = make_pipeline(StandardScaler(), RidgeCV(alphas)) cv = check_cv(cv) index = data.index mae = list() r2 = list() df_pred = pd.DataFrame(columns=['fold', 'y_pred'], index=subjects, dtype=float) for (ii, (train, test)) in enumerate(cv.split(data.values)): train_mask = (~ data.loc[index[train]].isna().values[(:, 0)]) test_mask = (~ data.loc[index[test]].isna().values[(:, 0)]) X_train = data.loc[index[train]].values[train_mask] X_test = data.loc[index[test]].values[test_mask] y_train = y.loc[index[train]].values[train_mask] y_test = y.loc[index[test]].values[test_mask] reg.fit(X_train, y_train) y_pred = reg.predict(X_test) mae.append((- mean_absolute_error(y_true=y_test, y_pred=y_pred))) r2.append(r2_score(y_true=y_test, y_pred=y_pred)) df_pred.loc[(index[test[test_mask]], 'y_pred')] = y_pred df_pred.loc[(index[test], 'fold')] = ii mask = (~ data.isna().values[(:, 0)]) (train_sizes, train_scores, test_scores) = learning_curve(reg, data[mask].values, y[mask].values, cv=cv, train_sizes=train_sizes, scoring='neg_mean_absolute_error', n_jobs=n_jobs) return (df_pred, np.array(mae), np.array(r2), train_sizes, train_scores, test_scores)<|docstring|>Run ridge resgression. Parameters ---------- data : pandas.DataFrame Features to be used for predictions. subjects_data : pandas.DataFrame Information about subjects from CamCAN dataset. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. 
Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. alphas : numpy.ndarray Values for parameter alpha to be tested. Default is np.logspace(start=-3, stop=1, num=50, base=10.0). train_sizes : array-like, shape (n_ticks,), dtype float or int Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. (default: np.linspace(0.1, 1.0, 5)) n_jobs : int or None, optional (default=None) The number of CPUs to use to do the computation. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details.<|endoftext|>
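Taken together, the run_ridge record describes a StandardScaler + RidgeCV pipeline evaluated fold by fold: it expects a feature DataFrame indexed by subject and a subjects_data frame carrying an 'age' column, and it returns per-subject predictions, fold scores (stored as negative MAE), R2 values, and learning-curve arrays. A minimal calling sketch under stated assumptions (the import path is taken from the record's camcan/utils/evaluation.py path and must be importable; the subject IDs and feature values are synthetic placeholders):

    # Hypothetical driver for run_ridge with toy data; import path assumed from the record.
    import numpy as np
    import pandas as pd
    from camcan.utils.evaluation import run_ridge

    rng = np.random.RandomState(0)
    subjects = [f"CC{i:03d}" for i in range(50)]
    features = pd.DataFrame(rng.randn(50, 10), index=subjects)
    subjects_data = pd.DataFrame({"age": rng.uniform(18, 88, size=50)}, index=subjects)

    df_pred, mae, r2, train_sizes, train_scores, test_scores = run_ridge(
        features, subjects_data, cv=5, n_jobs=1
    )
    # `mae` holds negative mean absolute errors, one per fold, so negate for reporting.
    print(f"CV MAE: {-mae.mean():.2f} years, R2: {r2.mean():.2f}")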
7d6081ffda9dd50acc82013cd574ee11885aa0356eb4b99b86edd3b01267b9c0
def run_meg_spoc(data, subjects_data, cv=10, alphas=None, train_sizes=None, fbands=None, n_jobs=None): 'Run ridge resgression on MEG data with SPoC.\n\n Parameters\n ----------\n data : pandas.DataFrame\n Features to be used for predictions.\n\n subjects_data : pandas.DataFrame\n Information about subjects from CamCAN dataset.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n alphas : numpy.ndarray\n Values for parameter alpha to be tested using RidgeCV. Default is\n np.logspace(start=-3, stop=1, num=50, base=10.0).\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n fbands : [(float, float)]\n List of frequency bands to be checked with SPoC.\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n ' if (alphas is None): alphas = np.logspace((- 3), 5, 100) if (train_sizes is None): train_sizes = np.linspace(0.1, 1.0, 5) data_path = sample.data_path() raw_fname = path.join(data_path, 'MEG/sample/sample_audvis_filt-0-40_raw.fif') raw = mne.io.read_raw_fif(raw_fname) info = raw.info picks = mne.pick_types(info, meg='mag') subjects = [d['subject'] for d in data if ('subject' in d)] covs = np.array(tuple((d['covs'][(:, picks)][(:, :, picks)] for d in data if ('subject' in d)))) y = subjects_data.loc[subjects].age.values X = np.arange(len(y)) spoc = SPoC(covs=covs, fbands=fbands, spoc=True, n_components=len(picks), alpha=0.01) reg = make_pipeline(spoc, StandardScaler(), RidgeCV(alphas)) cv_ss = ShuffleSplit(n_splits=cv, random_state=42) mae = cross_val_score(reg, X, y, scoring='neg_mean_absolute_error', cv=cv_ss, n_jobs=n_jobs) r2 = cross_val_score(reg, X, y, scoring='r2', cv=cv_ss, n_jobs=n_jobs) y_pred = cross_val_predict(reg, X, y, cv=cv, n_jobs=n_jobs) (train_sizes, train_scores, test_scores) = learning_curve(reg, X, y, cv=cv_ss, train_sizes=train_sizes, scoring='neg_mean_absolute_error', n_jobs=n_jobs) df_pred = pd.DataFrame(y_pred, index=subjects, dtype=float) return (df_pred, mae, r2, train_sizes, train_scores, test_scores)
Run ridge regression on MEG data with SPoC. Parameters ---------- data : pandas.DataFrame Features to be used for predictions. subjects_data : pandas.DataFrame Information about subjects from CamCAN dataset. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. alphas : numpy.ndarray Values for parameter alpha to be tested using RidgeCV. Default is np.logspace(start=-3, stop=1, num=50, base=10.0). train_sizes : array-like, shape (n_ticks,), dtype float or int Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. (default: np.linspace(0.1, 1.0, 5)) fbands : [(float, float)] List of frequency bands to be checked with SPoC. n_jobs : int or None, optional (default=None) The number of CPUs to use to do the computation. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details.
camcan/utils/evaluation.py
run_meg_spoc
dengemann/engemann-2020-multimodal-brain-age
6
python
def run_meg_spoc(data, subjects_data, cv=10, alphas=None, train_sizes=None, fbands=None, n_jobs=None): 'Run ridge resgression on MEG data with SPoC.\n\n Parameters\n ----------\n data : pandas.DataFrame\n Features to be used for predictions.\n\n subjects_data : pandas.DataFrame\n Information about subjects from CamCAN dataset.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n alphas : numpy.ndarray\n Values for parameter alpha to be tested using RidgeCV. Default is\n np.logspace(start=-3, stop=1, num=50, base=10.0).\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n fbands : [(float, float)]\n List of frequency bands to be checked with SPoC.\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n ' if (alphas is None): alphas = np.logspace((- 3), 5, 100) if (train_sizes is None): train_sizes = np.linspace(0.1, 1.0, 5) data_path = sample.data_path() raw_fname = path.join(data_path, 'MEG/sample/sample_audvis_filt-0-40_raw.fif') raw = mne.io.read_raw_fif(raw_fname) info = raw.info picks = mne.pick_types(info, meg='mag') subjects = [d['subject'] for d in data if ('subject' in d)] covs = np.array(tuple((d['covs'][(:, picks)][(:, :, picks)] for d in data if ('subject' in d)))) y = subjects_data.loc[subjects].age.values X = np.arange(len(y)) spoc = SPoC(covs=covs, fbands=fbands, spoc=True, n_components=len(picks), alpha=0.01) reg = make_pipeline(spoc, StandardScaler(), RidgeCV(alphas)) cv_ss = ShuffleSplit(n_splits=cv, random_state=42) mae = cross_val_score(reg, X, y, scoring='neg_mean_absolute_error', cv=cv_ss, n_jobs=n_jobs) r2 = cross_val_score(reg, X, y, scoring='r2', cv=cv_ss, n_jobs=n_jobs) y_pred = cross_val_predict(reg, X, y, cv=cv, n_jobs=n_jobs) (train_sizes, train_scores, test_scores) = learning_curve(reg, X, y, cv=cv_ss, train_sizes=train_sizes, scoring='neg_mean_absolute_error', n_jobs=n_jobs) df_pred = pd.DataFrame(y_pred, index=subjects, dtype=float) return (df_pred, mae, r2, train_sizes, train_scores, test_scores)
def run_meg_spoc(data, subjects_data, cv=10, alphas=None, train_sizes=None, fbands=None, n_jobs=None): 'Run ridge resgression on MEG data with SPoC.\n\n Parameters\n ----------\n data : pandas.DataFrame\n Features to be used for predictions.\n\n subjects_data : pandas.DataFrame\n Information about subjects from CamCAN dataset.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n alphas : numpy.ndarray\n Values for parameter alpha to be tested using RidgeCV. Default is\n np.logspace(start=-3, stop=1, num=50, base=10.0).\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n fbands : [(float, float)]\n List of frequency bands to be checked with SPoC.\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n ' if (alphas is None): alphas = np.logspace((- 3), 5, 100) if (train_sizes is None): train_sizes = np.linspace(0.1, 1.0, 5) data_path = sample.data_path() raw_fname = path.join(data_path, 'MEG/sample/sample_audvis_filt-0-40_raw.fif') raw = mne.io.read_raw_fif(raw_fname) info = raw.info picks = mne.pick_types(info, meg='mag') subjects = [d['subject'] for d in data if ('subject' in d)] covs = np.array(tuple((d['covs'][(:, picks)][(:, :, picks)] for d in data if ('subject' in d)))) y = subjects_data.loc[subjects].age.values X = np.arange(len(y)) spoc = SPoC(covs=covs, fbands=fbands, spoc=True, n_components=len(picks), alpha=0.01) reg = make_pipeline(spoc, StandardScaler(), RidgeCV(alphas)) cv_ss = ShuffleSplit(n_splits=cv, random_state=42) mae = cross_val_score(reg, X, y, scoring='neg_mean_absolute_error', cv=cv_ss, n_jobs=n_jobs) r2 = cross_val_score(reg, X, y, scoring='r2', cv=cv_ss, n_jobs=n_jobs) y_pred = cross_val_predict(reg, X, y, cv=cv, n_jobs=n_jobs) (train_sizes, train_scores, test_scores) = learning_curve(reg, X, y, cv=cv_ss, train_sizes=train_sizes, scoring='neg_mean_absolute_error', n_jobs=n_jobs) df_pred = pd.DataFrame(y_pred, index=subjects, dtype=float) return (df_pred, mae, r2, train_sizes, train_scores, test_scores)<|docstring|>Run ridge resgression on MEG data with SPoC. Parameters ---------- data : pandas.DataFrame Features to be used for predictions. subjects_data : pandas.DataFrame Information about subjects from CamCAN dataset. 
cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. alphas : numpy.ndarray Values for parameter alpha to be tested using RidgeCV. Default is np.logspace(start=-3, stop=1, num=50, base=10.0). train_sizes : array-like, shape (n_ticks,), dtype float or int Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. (default: np.linspace(0.1, 1.0, 5)) fbands : [(float, float)] List of frequency bands to be checked with SPoC. n_jobs : int or None, optional (default=None) The number of CPUs to use to do the computation. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details.<|endoftext|>
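One caveat worth flagging for run_meg_spoc: although the docstring above calls `data` a pandas.DataFrame, the body iterates over dicts, pulling a 'subject' key and a 'covs' array of per-frequency-band covariance matrices, then picks magnetometer channels from the MNE sample-dataset Info before fitting a SPoC + StandardScaler + RidgeCV pipeline evaluated with ShuffleSplit. The sketch below only illustrates the assumed input shape with placeholder values; it does not call the function, since doing so downloads the MNE sample data and depends on the project's SPoC class.

    # Illustrative-only shape of run_meg_spoc's `data` argument (a sequence of dicts,
    # not a DataFrame); channel count, band edges, and subject IDs are placeholders.
    import numpy as np

    n_channels = 306  # assumed full MEG channel count before magnetometer picking
    fbands = [(0.1, 1.5), (1.5, 4.0), (4.0, 8.0), (8.0, 15.0)]  # example bands only

    data = [
        {
            "subject": f"CC{i:03d}",
            # one covariance matrix per frequency band: shape (len(fbands), n_channels, n_channels)
            "covs": np.stack([np.eye(n_channels) for _ in fbands]),
        }
        for i in range(10)
    ]
    # run_meg_spoc(data, subjects_data, cv=10, fbands=fbands) would then subset the
    # magnetometer rows/columns of each covariance and cross-validate the pipeline.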
9c26592accc4ada56031d2e2ee96d969cf8b3b3afe53de07ec9879b69a69cc88
def run_stacking_spoc(named_data, subjects_data, cv=10, alphas=None, train_sizes=None, fbands=None, n_jobs=None): 'Run stacking.\n\n Parameters\n ----------\n named_data : list(tuple(str, pandas.DataFrame))\n List of tuples (name, data) with name and corresponding features\n to be used for predictions by linear models.\n\n subjects_data : pandas.DataFrame\n Information about subjects from CamCAN dataset.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n alphas : numpy.ndarray\n Values for parameter alpha to be tested. Default is\n np.logspace(start=-3, stop=1, num=50, base=10.0).\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n fbands : [(float, float)]\n List of frequency bands to be checked with SPoC.\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. 
See :term:`Glossary <n_jobs>`\n for more details.\n\n ' if (alphas is None): alphas = np.logspace((- 3), 5, 100) if (train_sizes is None): train_sizes = np.linspace(0.1, 1.0, 5) rnd_state = 42 names = [] combined_data = [] meg_data = None for (name, data) in named_data: names.append(name) if (name == 'meg'): meg_data = data meg_subjects = tuple((d['subject'] for d in data if ('subject' in d))) pseudo_data = np.arange(len(meg_subjects)) combined_data.append(pd.DataFrame(pseudo_data, index=meg_subjects)) else: combined_data.append(data) data = pd.concat(combined_data, axis=1, join='inner') feature_col_lens = tuple((d.shape[1] for d in combined_data)) estimators = [] subjects = data.index.values for (i_data, _) in enumerate(named_data): feature_transformers = [] ft_begin = 0 ft_end = 0 for (i_ct, (name, col_len)) in enumerate(zip(names, feature_col_lens)): trans_name = (('pass_' if (i_data == i_ct) else 'drop_') + name) transformer = ('passthrough' if (i_data == i_ct) else 'drop') ft_end = (ft_end + col_len) trans_slice = slice(ft_begin, ft_end) ft_begin = (ft_begin + col_len) feature_transformers.append((trans_name, transformer, trans_slice)) est_name = ('reg_' + named_data[i_data][0]) if (est_name == 'reg_meg'): if (fbands is None): raise ValueError('fbands should be given for MEG classifier.') data_path = sample.data_path() raw_fname = path.join(data_path, 'MEG/sample', 'sample_audvis_filt-0-40_raw.fif') raw = mne.io.read_raw_fif(raw_fname) info = raw.info picks = mne.pick_types(info, meg='mag') covs = np.array(tuple((d['covs'][(:, picks)][(:, :, picks)] for d in meg_data if ('subject' in d)))) spoc = SPoC(covs=covs, fbands=fbands, spoc=True, n_components=len(picks), alpha=0.01) est_pipeline = make_pipeline(ColumnTransformer(feature_transformers), spoc, StandardScaler(), RidgeCV(alphas)) else: est_pipeline = make_pipeline(ColumnTransformer(feature_transformers), StandardScaler(), RidgeCV(alphas)) estimators.append((est_name, est_pipeline)) final_estimator = RandomForestRegressor(n_estimators=100, random_state=rnd_state, oob_score=True) reg = StackingRegressor(estimators=estimators, final_estimator=final_estimator, cv=cv, random_state=rnd_state, n_jobs=n_jobs) y = subjects_data.loc[subjects].age.values X = data.values cv = check_cv(cv) mae = cross_val_score(reg, X, y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=n_jobs) r2 = cross_val_score(reg, X, y, scoring='r2', cv=cv, n_jobs=n_jobs) y_pred = cross_val_predict(reg, X, y, cv=cv, n_jobs=n_jobs) (train_sizes, train_scores, test_scores) = learning_curve(reg, X, y, cv=cv, train_sizes=train_sizes, scoring='neg_mean_absolute_error', n_jobs=n_jobs) fold = _get_fold_indices(cv, X, y) df_pred = pd.DataFrame(dict(y=y_pred, fold=fold), index=subjects, dtype=float) return (df_pred, mae, r2, train_sizes, train_scores, test_scores)
Run stacking. Parameters ---------- named_data : list(tuple(str, pandas.DataFrame)) List of tuples (name, data) with name and corresponding features to be used for predictions by linear models. subjects_data : pandas.DataFrame Information about subjects from CamCAN dataset. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. alphas : numpy.ndarray Values for parameter alpha to be tested. Default is np.logspace(start=-3, stop=1, num=50, base=10.0). train_sizes : array-like, shape (n_ticks,), dtype float or int Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. (default: np.linspace(0.1, 1.0, 5)) fbands : [(float, float)] List of frequency bands to be checked with SPoC. n_jobs : int or None, optional (default=None) The number of CPUs to use to do the computation. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details.
camcan/utils/evaluation.py
run_stacking_spoc
dengemann/engemann-2020-multimodal-brain-age
6
python
def run_stacking_spoc(named_data, subjects_data, cv=10, alphas=None, train_sizes=None, fbands=None, n_jobs=None): 'Run stacking.\n\n Parameters\n ----------\n named_data : list(tuple(str, pandas.DataFrame))\n List of tuples (name, data) with name and corresponding features\n to be used for predictions by linear models.\n\n subjects_data : pandas.DataFrame\n Information about subjects from CamCAN dataset.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n alphas : numpy.ndarray\n Values for parameter alpha to be tested. Default is\n np.logspace(start=-3, stop=1, num=50, base=10.0).\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n fbands : [(float, float)]\n List of frequency bands to be checked with SPoC.\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. 
See :term:`Glossary <n_jobs>`\n for more details.\n\n ' if (alphas is None): alphas = np.logspace((- 3), 5, 100) if (train_sizes is None): train_sizes = np.linspace(0.1, 1.0, 5) rnd_state = 42 names = [] combined_data = [] meg_data = None for (name, data) in named_data: names.append(name) if (name == 'meg'): meg_data = data meg_subjects = tuple((d['subject'] for d in data if ('subject' in d))) pseudo_data = np.arange(len(meg_subjects)) combined_data.append(pd.DataFrame(pseudo_data, index=meg_subjects)) else: combined_data.append(data) data = pd.concat(combined_data, axis=1, join='inner') feature_col_lens = tuple((d.shape[1] for d in combined_data)) estimators = [] subjects = data.index.values for (i_data, _) in enumerate(named_data): feature_transformers = [] ft_begin = 0 ft_end = 0 for (i_ct, (name, col_len)) in enumerate(zip(names, feature_col_lens)): trans_name = (('pass_' if (i_data == i_ct) else 'drop_') + name) transformer = ('passthrough' if (i_data == i_ct) else 'drop') ft_end = (ft_end + col_len) trans_slice = slice(ft_begin, ft_end) ft_begin = (ft_begin + col_len) feature_transformers.append((trans_name, transformer, trans_slice)) est_name = ('reg_' + named_data[i_data][0]) if (est_name == 'reg_meg'): if (fbands is None): raise ValueError('fbands should be given for MEG classifier.') data_path = sample.data_path() raw_fname = path.join(data_path, 'MEG/sample', 'sample_audvis_filt-0-40_raw.fif') raw = mne.io.read_raw_fif(raw_fname) info = raw.info picks = mne.pick_types(info, meg='mag') covs = np.array(tuple((d['covs'][(:, picks)][(:, :, picks)] for d in meg_data if ('subject' in d)))) spoc = SPoC(covs=covs, fbands=fbands, spoc=True, n_components=len(picks), alpha=0.01) est_pipeline = make_pipeline(ColumnTransformer(feature_transformers), spoc, StandardScaler(), RidgeCV(alphas)) else: est_pipeline = make_pipeline(ColumnTransformer(feature_transformers), StandardScaler(), RidgeCV(alphas)) estimators.append((est_name, est_pipeline)) final_estimator = RandomForestRegressor(n_estimators=100, random_state=rnd_state, oob_score=True) reg = StackingRegressor(estimators=estimators, final_estimator=final_estimator, cv=cv, random_state=rnd_state, n_jobs=n_jobs) y = subjects_data.loc[subjects].age.values X = data.values cv = check_cv(cv) mae = cross_val_score(reg, X, y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=n_jobs) r2 = cross_val_score(reg, X, y, scoring='r2', cv=cv, n_jobs=n_jobs) y_pred = cross_val_predict(reg, X, y, cv=cv, n_jobs=n_jobs) (train_sizes, train_scores, test_scores) = learning_curve(reg, X, y, cv=cv, train_sizes=train_sizes, scoring='neg_mean_absolute_error', n_jobs=n_jobs) fold = _get_fold_indices(cv, X, y) df_pred = pd.DataFrame(dict(y=y_pred, fold=fold), index=subjects, dtype=float) return (df_pred, mae, r2, train_sizes, train_scores, test_scores)
def run_stacking_spoc(named_data, subjects_data, cv=10, alphas=None, train_sizes=None, fbands=None, n_jobs=None): 'Run stacking.\n\n Parameters\n ----------\n named_data : list(tuple(str, pandas.DataFrame))\n List of tuples (name, data) with name and corresponding features\n to be used for predictions by linear models.\n\n subjects_data : pandas.DataFrame\n Information about subjects from CamCAN dataset.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n alphas : numpy.ndarray\n Values for parameter alpha to be tested. Default is\n np.logspace(start=-3, stop=1, num=50, base=10.0).\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n fbands : [(float, float)]\n List of frequency bands to be checked with SPoC.\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. 
See :term:`Glossary <n_jobs>`\n for more details.\n\n ' if (alphas is None): alphas = np.logspace((- 3), 5, 100) if (train_sizes is None): train_sizes = np.linspace(0.1, 1.0, 5) rnd_state = 42 names = [] combined_data = [] meg_data = None for (name, data) in named_data: names.append(name) if (name == 'meg'): meg_data = data meg_subjects = tuple((d['subject'] for d in data if ('subject' in d))) pseudo_data = np.arange(len(meg_subjects)) combined_data.append(pd.DataFrame(pseudo_data, index=meg_subjects)) else: combined_data.append(data) data = pd.concat(combined_data, axis=1, join='inner') feature_col_lens = tuple((d.shape[1] for d in combined_data)) estimators = [] subjects = data.index.values for (i_data, _) in enumerate(named_data): feature_transformers = [] ft_begin = 0 ft_end = 0 for (i_ct, (name, col_len)) in enumerate(zip(names, feature_col_lens)): trans_name = (('pass_' if (i_data == i_ct) else 'drop_') + name) transformer = ('passthrough' if (i_data == i_ct) else 'drop') ft_end = (ft_end + col_len) trans_slice = slice(ft_begin, ft_end) ft_begin = (ft_begin + col_len) feature_transformers.append((trans_name, transformer, trans_slice)) est_name = ('reg_' + named_data[i_data][0]) if (est_name == 'reg_meg'): if (fbands is None): raise ValueError('fbands should be given for MEG classifier.') data_path = sample.data_path() raw_fname = path.join(data_path, 'MEG/sample', 'sample_audvis_filt-0-40_raw.fif') raw = mne.io.read_raw_fif(raw_fname) info = raw.info picks = mne.pick_types(info, meg='mag') covs = np.array(tuple((d['covs'][(:, picks)][(:, :, picks)] for d in meg_data if ('subject' in d)))) spoc = SPoC(covs=covs, fbands=fbands, spoc=True, n_components=len(picks), alpha=0.01) est_pipeline = make_pipeline(ColumnTransformer(feature_transformers), spoc, StandardScaler(), RidgeCV(alphas)) else: est_pipeline = make_pipeline(ColumnTransformer(feature_transformers), StandardScaler(), RidgeCV(alphas)) estimators.append((est_name, est_pipeline)) final_estimator = RandomForestRegressor(n_estimators=100, random_state=rnd_state, oob_score=True) reg = StackingRegressor(estimators=estimators, final_estimator=final_estimator, cv=cv, random_state=rnd_state, n_jobs=n_jobs) y = subjects_data.loc[subjects].age.values X = data.values cv = check_cv(cv) mae = cross_val_score(reg, X, y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=n_jobs) r2 = cross_val_score(reg, X, y, scoring='r2', cv=cv, n_jobs=n_jobs) y_pred = cross_val_predict(reg, X, y, cv=cv, n_jobs=n_jobs) (train_sizes, train_scores, test_scores) = learning_curve(reg, X, y, cv=cv, train_sizes=train_sizes, scoring='neg_mean_absolute_error', n_jobs=n_jobs) fold = _get_fold_indices(cv, X, y) df_pred = pd.DataFrame(dict(y=y_pred, fold=fold), index=subjects, dtype=float) return (df_pred, mae, r2, train_sizes, train_scores, test_scores)<|docstring|>Run stacking. Parameters ---------- named_data : list(tuple(str, pandas.DataFrame)) List of tuples (name, data) with name and corresponding features to be used for predictions by linear models. subjects_data : pandas.DataFrame Information about subjects from CamCAN dataset. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. 
For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. alphas : numpy.ndarray Values for parameter alpha to be tested. Default is np.logspace(start=-3, stop=1, num=50, base=10.0). train_sizes : array-like, shape (n_ticks,), dtype float or int Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. (default: np.linspace(0.1, 1.0, 5)) fbands : [(float, float)] List of frequency bands to be checked with SPoC. n_jobs : int or None, optional (default=None) The number of CPUs to use to do the computation. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details.<|endoftext|>
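To summarize the run_stacking_spoc record: each named feature block gets its own first-level pipeline (a ColumnTransformer that passes through only that block's column slice, then StandardScaler and RidgeCV, with SPoC inserted when the block is named 'meg'), and a RandomForestRegressor stacks the first-level predictions; fbands is only required when a 'meg' block is present. The call-shape sketch below uses two non-MEG blocks so the SPoC branch is never exercised; subject IDs and feature values are synthetic, the import path is assumed from the record's path, and whether it runs as-is also depends on the project's own StackingRegressor import, which this record does not show.

    # Hypothetical call shape for run_stacking_spoc with two non-MEG feature blocks.
    import numpy as np
    import pandas as pd
    from camcan.utils.evaluation import run_stacking_spoc  # import path assumed

    rng = np.random.RandomState(0)
    subjects = [f"CC{i:03d}" for i in range(60)]
    named_data = [
        ("cortical_thickness", pd.DataFrame(rng.randn(60, 8), index=subjects)),
        ("connectivity", pd.DataFrame(rng.randn(60, 12), index=subjects)),
    ]
    subjects_data = pd.DataFrame({"age": rng.uniform(18, 88, size=60)}, index=subjects)

    # Returns predictions, fold scores (negative MAE), R2, and learning-curve arrays.
    df_pred, mae, r2, train_sizes, train_scores, test_scores = run_stacking_spoc(
        named_data, subjects_data, cv=5, n_jobs=1
    )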
583b8919539f3c315cd6a64036cffe6ab6eb57d7e5189a5e4afaf9d070a67dc3
def run_stacking(named_data, subjects_data, cv=10, alphas=None, train_sizes=None, n_jobs=None): 'Run stacking.\n\n Parameters\n ----------\n named_data : list(tuple(str, pandas.DataFrame))\n List of tuples (name, data) with name and corresponding features\n to be used for predictions by linear models.\n\n subjects_data : pandas.DataFrame\n Information about subjects from CamCAN dataset.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n alphas : numpy.ndarray\n Values for parameter alpha to be tested. Default is\n np.logspace(start=-3, stop=1, num=50, base=10.0).\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. 
See :term:`Glossary <n_jobs>`\n for more details.\n\n ' if (alphas is None): alphas = np.logspace((- 3), 5, 100) if (train_sizes is None): train_sizes = np.linspace(0.1, 1.0, 5) rnd_state = 42 names = [] combined_data = [] for (name, data) in named_data: names.append(name) combined_data.append(data) data = pd.concat(combined_data, axis=1, join='inner') feature_col_lens = tuple((d.shape[1] for d in combined_data)) estimators = [] subjects = data.index.values for (i_data, _) in enumerate(named_data): feature_transformers = [] ft_begin = 0 ft_end = 0 for (i_ct, (name, col_len)) in enumerate(zip(names, feature_col_lens)): trans_name = (('pass_' if (i_data == i_ct) else 'drop_') + name) transformer = ('passthrough' if (i_data == i_ct) else 'drop') ft_end = (ft_end + col_len) trans_slice = slice(ft_begin, ft_end) ft_begin = (ft_begin + col_len) feature_transformers.append((trans_name, transformer, trans_slice)) est_name = ('reg_' + named_data[i_data][0]) est_pipeline = make_pipeline(ColumnTransformer(feature_transformers), StandardScaler(), RidgeCV(alphas)) estimators.append((est_name, est_pipeline)) final_estimator = RandomForestRegressor(n_estimators=100, random_state=rnd_state, oob_score=True) reg = StackingRegressor(estimators=estimators, final_estimator=final_estimator, cv=cv, random_state=rnd_state, n_jobs=n_jobs) y = subjects_data.loc[subjects].age.values X = data.values cv = check_cv(cv) mae = cross_val_score(reg, X, y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=n_jobs) r2 = cross_val_score(reg, X, y, scoring='r2', cv=cv, n_jobs=n_jobs) y_pred = cross_val_predict(reg, X, y, cv=cv, n_jobs=n_jobs) (train_sizes, train_scores, test_scores) = learning_curve(reg, X, y, cv=cv, train_sizes=train_sizes, scoring='neg_mean_absolute_error', n_jobs=n_jobs) fold = _get_fold_indices(cv, X, y) df_pred = pd.DataFrame(dict(y=y_pred, fold=fold), index=subjects, dtype=float) return (df_pred, mae, r2, train_sizes, train_scores, test_scores)
Run stacking. Parameters ---------- named_data : list(tuple(str, pandas.DataFrame)) List of tuples (name, data) with name and corresponding features to be used for predictions by linear models. subjects_data : pandas.DataFrame Information about subjects from CamCAN dataset. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. alphas : numpy.ndarray Values for parameter alpha to be tested. Default is np.logspace(start=-3, stop=1, num=50, base=10.0). train_sizes : array-like, shape (n_ticks,), dtype float or int Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. (default: np.linspace(0.1, 1.0, 5)) n_jobs : int or None, optional (default=None) The number of CPUs to use to do the computation. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details.
camcan/utils/evaluation.py
run_stacking
dengemann/engemann-2020-multimodal-brain-age
6
python
def run_stacking(named_data, subjects_data, cv=10, alphas=None, train_sizes=None, n_jobs=None): 'Run stacking.\n\n Parameters\n ----------\n named_data : list(tuple(str, pandas.DataFrame))\n List of tuples (name, data) with name and corresponding features\n to be used for predictions by linear models.\n\n subjects_data : pandas.DataFrame\n Information about subjects from CamCAN dataset.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n alphas : numpy.ndarray\n Values for parameter alpha to be tested. Default is\n np.logspace(start=-3, stop=1, num=50, base=10.0).\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. 
See :term:`Glossary <n_jobs>`\n for more details.\n\n ' if (alphas is None): alphas = np.logspace((- 3), 5, 100) if (train_sizes is None): train_sizes = np.linspace(0.1, 1.0, 5) rnd_state = 42 names = [] combined_data = [] for (name, data) in named_data: names.append(name) combined_data.append(data) data = pd.concat(combined_data, axis=1, join='inner') feature_col_lens = tuple((d.shape[1] for d in combined_data)) estimators = [] subjects = data.index.values for (i_data, _) in enumerate(named_data): feature_transformers = [] ft_begin = 0 ft_end = 0 for (i_ct, (name, col_len)) in enumerate(zip(names, feature_col_lens)): trans_name = (('pass_' if (i_data == i_ct) else 'drop_') + name) transformer = ('passthrough' if (i_data == i_ct) else 'drop') ft_end = (ft_end + col_len) trans_slice = slice(ft_begin, ft_end) ft_begin = (ft_begin + col_len) feature_transformers.append((trans_name, transformer, trans_slice)) est_name = ('reg_' + named_data[i_data][0]) est_pipeline = make_pipeline(ColumnTransformer(feature_transformers), StandardScaler(), RidgeCV(alphas)) estimators.append((est_name, est_pipeline)) final_estimator = RandomForestRegressor(n_estimators=100, random_state=rnd_state, oob_score=True) reg = StackingRegressor(estimators=estimators, final_estimator=final_estimator, cv=cv, random_state=rnd_state, n_jobs=n_jobs) y = subjects_data.loc[subjects].age.values X = data.values cv = check_cv(cv) mae = cross_val_score(reg, X, y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=n_jobs) r2 = cross_val_score(reg, X, y, scoring='r2', cv=cv, n_jobs=n_jobs) y_pred = cross_val_predict(reg, X, y, cv=cv, n_jobs=n_jobs) (train_sizes, train_scores, test_scores) = learning_curve(reg, X, y, cv=cv, train_sizes=train_sizes, scoring='neg_mean_absolute_error', n_jobs=n_jobs) fold = _get_fold_indices(cv, X, y) df_pred = pd.DataFrame(dict(y=y_pred, fold=fold), index=subjects, dtype=float) return (df_pred, mae, r2, train_sizes, train_scores, test_scores)
def run_stacking(named_data, subjects_data, cv=10, alphas=None, train_sizes=None, n_jobs=None): 'Run stacking.\n\n Parameters\n ----------\n named_data : list(tuple(str, pandas.DataFrame))\n List of tuples (name, data) with name and corresponding features\n to be used for predictions by linear models.\n\n subjects_data : pandas.DataFrame\n Information about subjects from CamCAN dataset.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n alphas : numpy.ndarray\n Values for parameter alpha to be tested. Default is\n np.logspace(start=-3, stop=1, num=50, base=10.0).\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. 
See :term:`Glossary <n_jobs>`\n for more details.\n\n ' if (alphas is None): alphas = np.logspace((- 3), 5, 100) if (train_sizes is None): train_sizes = np.linspace(0.1, 1.0, 5) rnd_state = 42 names = [] combined_data = [] for (name, data) in named_data: names.append(name) combined_data.append(data) data = pd.concat(combined_data, axis=1, join='inner') feature_col_lens = tuple((d.shape[1] for d in combined_data)) estimators = [] subjects = data.index.values for (i_data, _) in enumerate(named_data): feature_transformers = [] ft_begin = 0 ft_end = 0 for (i_ct, (name, col_len)) in enumerate(zip(names, feature_col_lens)): trans_name = (('pass_' if (i_data == i_ct) else 'drop_') + name) transformer = ('passthrough' if (i_data == i_ct) else 'drop') ft_end = (ft_end + col_len) trans_slice = slice(ft_begin, ft_end) ft_begin = (ft_begin + col_len) feature_transformers.append((trans_name, transformer, trans_slice)) est_name = ('reg_' + named_data[i_data][0]) est_pipeline = make_pipeline(ColumnTransformer(feature_transformers), StandardScaler(), RidgeCV(alphas)) estimators.append((est_name, est_pipeline)) final_estimator = RandomForestRegressor(n_estimators=100, random_state=rnd_state, oob_score=True) reg = StackingRegressor(estimators=estimators, final_estimator=final_estimator, cv=cv, random_state=rnd_state, n_jobs=n_jobs) y = subjects_data.loc[subjects].age.values X = data.values cv = check_cv(cv) mae = cross_val_score(reg, X, y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=n_jobs) r2 = cross_val_score(reg, X, y, scoring='r2', cv=cv, n_jobs=n_jobs) y_pred = cross_val_predict(reg, X, y, cv=cv, n_jobs=n_jobs) (train_sizes, train_scores, test_scores) = learning_curve(reg, X, y, cv=cv, train_sizes=train_sizes, scoring='neg_mean_absolute_error', n_jobs=n_jobs) fold = _get_fold_indices(cv, X, y) df_pred = pd.DataFrame(dict(y=y_pred, fold=fold), index=subjects, dtype=float) return (df_pred, mae, r2, train_sizes, train_scores, test_scores)<|docstring|>Run stacking. Parameters ---------- named_data : list(tuple(str, pandas.DataFrame)) List of tuples (name, data) with name and corresponding features to be used for predictions by linear models. subjects_data : pandas.DataFrame Information about subjects from CamCAN dataset. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. alphas : numpy.ndarray Values for parameter alpha to be tested. Default is np.logspace(start=-3, stop=1, num=50, base=10.0). train_sizes : array-like, shape (n_ticks,), dtype float or int Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. 
(default: np.linspace(0.1, 1.0, 5)) n_jobs : int or None, optional (default=None) The number of CPUs to use to do the computation. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details.<|endoftext|>
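A minimal usage sketch for run_stacking, not taken from the source repository: it assumes the function is importable as camcan.utils.evaluation.run_stacking and feeds it small synthetic feature tables indexed by subject ID; meg_df, mri_df, subjects_df and the CC-style subject labels are hypothetical stand-ins for real CamCAN data.

import numpy as np
import pandas as pd
from camcan.utils.evaluation import run_stacking  # assumed import path

rng = np.random.RandomState(0)
subjects = ['CC%06d' % i for i in range(120)]               # hypothetical subject IDs
meg_df = pd.DataFrame(rng.randn(120, 20), index=subjects)   # synthetic MEG features
mri_df = pd.DataFrame(rng.randn(120, 30), index=subjects)   # synthetic MRI features
subjects_df = pd.DataFrame({'age': rng.uniform(18, 88, 120)}, index=subjects)

df_pred, mae, r2, train_sizes, train_scores, test_scores = run_stacking(
    [('meg', meg_df), ('mri', mri_df)], subjects_df, cv=5, n_jobs=1)
print('MAE per fold:', -mae)   # scores use neg_mean_absolute_error, so negate
print(df_pred.head())          # per-subject prediction and fold index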
af4f16d0d3d1e4fc2f3aa216def6841937b95be0510b78586ef8464636765e9d
def train_stacked_regressor(named_data, subjects_data, cv=10, alphas=None, train_sizes=None, n_jobs=None): 'Return stacked classifier trained on provided data.\n\n For MEG data features estimated in the source space should be provided.\n\n Parameters\n ----------\n named_data : list(tuple(str, pandas.DataFrame))\n List of tuples (name, data) with name and corresponding features\n to be used for predictions by linear models.\n\n subjects_data : pandas.DataFrame\n Information about subjects from CamCAN dataset.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n alphas : numpy.ndarray\n Values for parameter alpha to be tested. Default is\n np.logspace(start=-3, stop=1, num=50, base=10.0).\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n ' if (alphas is None): alphas = np.logspace((- 3), 5, 100) if (train_sizes is None): train_sizes = np.linspace(0.1, 1.0, 5) rnd_state = 42 names = [] combined_data = [] for (name, data) in named_data: names.append(name) combined_data.append(data) data = pd.concat(combined_data, axis=1, join='inner') feature_col_lens = tuple((d.shape[1] for d in combined_data)) estimators = [] subjects = data.index.values for (i_data, _) in enumerate(named_data): feature_transformers = [] ft_begin = 0 ft_end = 0 for (i_ct, (name, col_len)) in enumerate(zip(names, feature_col_lens)): trans_name = (('pass_' if (i_data == i_ct) else 'drop_') + name) transformer = ('passthrough' if (i_data == i_ct) else 'drop') ft_end = (ft_end + col_len) trans_slice = slice(ft_begin, ft_end) ft_begin = (ft_begin + col_len) feature_transformers.append((trans_name, transformer, trans_slice)) est_name = ('reg_' + named_data[i_data][0]) est_pipeline = make_pipeline(ColumnTransformer(feature_transformers), StandardScaler(), RidgeCV(alphas)) estimators.append((est_name, est_pipeline)) final_estimator = RandomForestRegressor(n_estimators=100, random_state=rnd_state, oob_score=True) reg = StackingRegressor(estimators=estimators, final_estimator=final_estimator, cv=cv, random_state=rnd_state, n_jobs=n_jobs) y = subjects_data.loc[subjects].age.values X = data.values reg.fit(X, y) return (reg, X, y)
Return stacked classifier trained on provided data. For MEG data features estimated in the source space should be provided. Parameters ---------- named_data : list(tuple(str, pandas.DataFrame)) List of tuples (name, data) with name and corresponding features to be used for predictions by linear models. subjects_data : pandas.DataFrame Information about subjects from CamCAN dataset. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. alphas : numpy.ndarray Values for parameter alpha to be tested. Default is np.logspace(start=-3, stop=1, num=50, base=10.0). train_sizes : array-like, shape (n_ticks,), dtype float or int Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. (default: np.linspace(0.1, 1.0, 5)) n_jobs : int or None, optional (default=None) The number of CPUs to use to do the computation. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details.
camcan/utils/evaluation.py
train_stacked_regressor
dengemann/engemann-2020-multimodal-brain-age
6
python
def train_stacked_regressor(named_data, subjects_data, cv=10, alphas=None, train_sizes=None, n_jobs=None): 'Return stacked classifier trained on provided data.\n\n For MEG data features estimated in the source space should be provided.\n\n Parameters\n ----------\n named_data : list(tuple(str, pandas.DataFrame))\n List of tuples (name, data) with name and corresponding features\n to be used for predictions by linear models.\n\n subjects_data : pandas.DataFrame\n Information about subjects from CamCAN dataset.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n alphas : numpy.ndarray\n Values for parameter alpha to be tested. Default is\n np.logspace(start=-3, stop=1, num=50, base=10.0).\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n ' if (alphas is None): alphas = np.logspace((- 3), 5, 100) if (train_sizes is None): train_sizes = np.linspace(0.1, 1.0, 5) rnd_state = 42 names = [] combined_data = [] for (name, data) in named_data: names.append(name) combined_data.append(data) data = pd.concat(combined_data, axis=1, join='inner') feature_col_lens = tuple((d.shape[1] for d in combined_data)) estimators = [] subjects = data.index.values for (i_data, _) in enumerate(named_data): feature_transformers = [] ft_begin = 0 ft_end = 0 for (i_ct, (name, col_len)) in enumerate(zip(names, feature_col_lens)): trans_name = (('pass_' if (i_data == i_ct) else 'drop_') + name) transformer = ('passthrough' if (i_data == i_ct) else 'drop') ft_end = (ft_end + col_len) trans_slice = slice(ft_begin, ft_end) ft_begin = (ft_begin + col_len) feature_transformers.append((trans_name, transformer, trans_slice)) est_name = ('reg_' + named_data[i_data][0]) est_pipeline = make_pipeline(ColumnTransformer(feature_transformers), StandardScaler(), RidgeCV(alphas)) estimators.append((est_name, est_pipeline)) final_estimator = RandomForestRegressor(n_estimators=100, random_state=rnd_state, oob_score=True) reg = StackingRegressor(estimators=estimators, final_estimator=final_estimator, cv=cv, random_state=rnd_state, n_jobs=n_jobs) y = subjects_data.loc[subjects].age.values X = data.values reg.fit(X, y) return (reg, X, y)
def train_stacked_regressor(named_data, subjects_data, cv=10, alphas=None, train_sizes=None, n_jobs=None): 'Return stacked classifier trained on provided data.\n\n For MEG data features estimated in the source space should be provided.\n\n Parameters\n ----------\n named_data : list(tuple(str, pandas.DataFrame))\n List of tuples (name, data) with name and corresponding features\n to be used for predictions by linear models.\n\n subjects_data : pandas.DataFrame\n Information about subjects from CamCAN dataset.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n alphas : numpy.ndarray\n Values for parameter alpha to be tested. Default is\n np.logspace(start=-3, stop=1, num=50, base=10.0).\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n n_jobs : int or None, optional (default=None)\n The number of CPUs to use to do the computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n ' if (alphas is None): alphas = np.logspace((- 3), 5, 100) if (train_sizes is None): train_sizes = np.linspace(0.1, 1.0, 5) rnd_state = 42 names = [] combined_data = [] for (name, data) in named_data: names.append(name) combined_data.append(data) data = pd.concat(combined_data, axis=1, join='inner') feature_col_lens = tuple((d.shape[1] for d in combined_data)) estimators = [] subjects = data.index.values for (i_data, _) in enumerate(named_data): feature_transformers = [] ft_begin = 0 ft_end = 0 for (i_ct, (name, col_len)) in enumerate(zip(names, feature_col_lens)): trans_name = (('pass_' if (i_data == i_ct) else 'drop_') + name) transformer = ('passthrough' if (i_data == i_ct) else 'drop') ft_end = (ft_end + col_len) trans_slice = slice(ft_begin, ft_end) ft_begin = (ft_begin + col_len) feature_transformers.append((trans_name, transformer, trans_slice)) est_name = ('reg_' + named_data[i_data][0]) est_pipeline = make_pipeline(ColumnTransformer(feature_transformers), StandardScaler(), RidgeCV(alphas)) estimators.append((est_name, est_pipeline)) final_estimator = RandomForestRegressor(n_estimators=100, random_state=rnd_state, oob_score=True) reg = StackingRegressor(estimators=estimators, final_estimator=final_estimator, cv=cv, random_state=rnd_state, n_jobs=n_jobs) y = subjects_data.loc[subjects].age.values X = data.values reg.fit(X, y) return (reg, X, y)<|docstring|>Return stacked classifier trained on provided data. 
For MEG data features estimated in the source space should be provided. Parameters ---------- named_data : list(tuple(str, pandas.DataFrame)) List of tuples (name, data) with name and corresponding features to be used for predictions by linear models. subjects_data : pandas.DataFrame Information about subjects from CamCAN dataset. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. alphas : numpy.ndarray Values for parameter alpha to be tested. Default is np.logspace(start=-3, stop=1, num=50, base=10.0). train_sizes : array-like, shape (n_ticks,), dtype float or int Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. (default: np.linspace(0.1, 1.0, 5)) n_jobs : int or None, optional (default=None) The number of CPUs to use to do the computation. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details.<|endoftext|>
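Continuing the same hypothetical setup as the run_stacking sketch above, train_stacked_regressor returns the fitted StackingRegressor together with the design matrix and age target, so predictions can be made directly; the import path is again assumed.

from camcan.utils.evaluation import train_stacked_regressor  # assumed import path

reg, X, y = train_stacked_regressor(
    [('meg', meg_df), ('mri', mri_df)], subjects_df, cv=5, n_jobs=1)
y_hat = reg.predict(X)                      # in-sample brain-age predictions
print('training MAE:', float(np.abs(y_hat - y).mean()))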
0a5ccce848965e5ed31e7009290441260c7398dc146bb250c81a740d49ea053a
def find_by_text(browser, tag, text): '\n Encontrar o elemeto com o texto\n Argumentos:\n - browser = Instancia do browser [firefox, chrome, ...]\n - text = conteúdo que deve estar na tag\n - tag = tag onde o texto será procurado\n\n ' elementos = browser.find_elements_by_tag_name(tag) for elemento in elementos: if (elemento.text == text): return elemento
Find the element containing the given text. Arguments: - browser = browser instance [firefox, chrome, ...] - text = content that must be inside the tag - tag = tag in which the text will be searched
Aula04/04_selenium.py
find_by_text
vfamim/Selenium_Curso
0
python
def find_by_text(browser, tag, text): '\n Encontrar o elemeto com o texto\n Argumentos:\n - browser = Instancia do browser [firefox, chrome, ...]\n - text = conteúdo que deve estar na tag\n - tag = tag onde o texto será procurado\n\n ' elementos = browser.find_elements_by_tag_name(tag) for elemento in elementos: if (elemento.text == text): return elemento
def find_by_text(browser, tag, text): '\n Encontrar o elemeto com o texto\n Argumentos:\n - browser = Instancia do browser [firefox, chrome, ...]\n - text = conteúdo que deve estar na tag\n - tag = tag onde o texto será procurado\n\n ' elementos = browser.find_elements_by_tag_name(tag) for elemento in elementos: if (elemento.text == text): return elemento<|docstring|>Encontrar o elemeto com o texto Argumentos: - browser = Instancia do browser [firefox, chrome, ...] - text = conteúdo que deve estar na tag - tag = tag onde o texto será procurado<|endoftext|>
6e59bd311e09658c539846f78e60332aa65308db0d135a7dffd99bdd04dd09f8
def find_by_href(browser, link): "Encontrar o elemento 'a' com o link 'link'\n Argumentos:\n - browser = Instância do browser [firefox, chrome, ...]\n - link = link que será procurado em todos as tags 'a'\n " elementos = browser.find_elements_by_tag_name('a') for elemento in elementos: if (link in elemento.get_attribute('href')): return elemento
Find the 'a' element whose href contains 'link'. Arguments: - browser = browser instance [firefox, chrome, ...] - link = link to be searched for in all 'a' tags
Aula04/04_selenium.py
find_by_href
vfamim/Selenium_Curso
0
python
def find_by_href(browser, link): "Encontrar o elemento 'a' com o link 'link'\n Argumentos:\n - browser = Instância do browser [firefox, chrome, ...]\n - link = link que será procurado em todos as tags 'a'\n " elementos = browser.find_elements_by_tag_name('a') for elemento in elementos: if (link in elemento.get_attribute('href')): return elemento
def find_by_href(browser, link): "Encontrar o elemento 'a' com o link 'link'\n Argumentos:\n - browser = Instância do browser [firefox, chrome, ...]\n - link = link que será procurado em todos as tags 'a'\n " elementos = browser.find_elements_by_tag_name('a') for elemento in elementos: if (link in elemento.get_attribute('href')): return elemento<|docstring|>Encontrar o elemento 'a' com o link 'link' Argumentos: - browser = Instância do browser [firefox, chrome, ...] - link = link que será procurado em todos as tags 'a'<|endoftext|>
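A hypothetical usage sketch for the two Selenium helpers above; it assumes they are defined in the current script, that a geckodriver is on PATH, and it targets the legacy find_elements_by_tag_name API (Selenium 3.x) that the helpers rely on. The URL, button label, and href fragment are placeholders.

from selenium import webdriver

browser = webdriver.Firefox()
browser.get('https://example.com')                       # placeholder URL

button = find_by_text(browser, 'button', 'Submit')       # first <button> with exact text
if button is not None:
    button.click()

link = find_by_href(browser, '/contact')                 # first <a> whose href contains '/contact'
if link is not None:
    link.click()

browser.quit()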
3371e934fb92a3619c0bffc231bffb76a642fda12dbcebf85ba5e38c1f44e6f4
def _comment(self, msg: str) -> str: ' Returns an XML comment (which contains <msg>) as a string.\n ' return '<!-- {} -->'.format(msg)
Returns an XML comment (which contains <msg>) as a string.
test/units/test_file_writers.py
_comment
andrewjunyoung/symboard
5
python
def _comment(self, msg: str) -> str: ' \n ' return '<!-- {} -->'.format(msg)
def _comment(self, msg: str) -> str: ' \n ' return '<!-- {} -->'.format(msg)<|docstring|>Returns an XML comment (which contains <msg>) as a string.<|endoftext|>
3b0a54a33595d398c97498034aa508aa0930532e105bcf64d83a42c09b601346
def _test_comment_with_strftime(self, function, comment): ' This class tests that <function> returns a comment with the string\n <comment>. It mocks out the time.strftime method, which allows it to\n test comments which include times in them.\n\n For symboard, it can be used to test both the «_created» and «_updated»\n functions.\n ' time = Mock() time.strftime = MagicMock(return_value=self.expected_time) self.assertEqual(self._comment('Created by Symboard version {} at {}'.format(VERSION, self.expected_time)), self.file_writer._created(time))
This helper tests that <function> returns a comment containing the string <comment>. It mocks out the time.strftime method, which allows it to test comments that include timestamps. For Symboard, it can be used to test both the «_created» and «_updated» functions.
test/units/test_file_writers.py
_test_comment_with_strftime
andrewjunyoung/symboard
5
python
def _test_comment_with_strftime(self, function, comment): ' This class tests that <function> returns a comment with the string\n <comment>. It mocks out the time.strftime method, which allows it to\n test comments which include times in them.\n\n For symboard, it can be used to test both the «_created» and «_updated»\n functions.\n ' time = Mock() time.strftime = MagicMock(return_value=self.expected_time) self.assertEqual(self._comment('Created by Symboard version {} at {}'.format(VERSION, self.expected_time)), self.file_writer._created(time))
def _test_comment_with_strftime(self, function, comment): ' This class tests that <function> returns a comment with the string\n <comment>. It mocks out the time.strftime method, which allows it to\n test comments which include times in them.\n\n For symboard, it can be used to test both the «_created» and «_updated»\n functions.\n ' time = Mock() time.strftime = MagicMock(return_value=self.expected_time) self.assertEqual(self._comment('Created by Symboard version {} at {}'.format(VERSION, self.expected_time)), self.file_writer._created(time))<|docstring|>This class tests that <function> returns a comment with the string <comment>. It mocks out the time.strftime method, which allows it to test comments which include times in them. For symboard, it can be used to test both the «_created» and «_updated» functions.<|endoftext|>
09b7a03c010d0a4e663ec1815daf31088cbaa33f235bfbe3c1451e118740f76a
def _assert_properties_of_XML_tag_returned_from(self, function, expected_tag, expected_attributes, keylayout=None, args=None): " Assert that a function, when called on <keylayout> with an Element\n node and other arguments (provided by <args>) creates a sub-elem with\n the expected tag and attributes.\n\n <function>, given an Element node, should add a sub-elem to that\n node, and return the sub-elem.\n\n child_node = function(keylayout, Element('root'), *args)\n\n Args:\n function (Callable): The function under test.\n keylayout (Keylayout): The Keylayout, or mock object, being used to\n test <function>.\n expected_tag (str): The name that the sub-elem created by\n <function> is expected to have.\n expected_attributes (Dict[str, str]): A map of the expected\n attributes which the sub-elem created by <function> is expected\n to have.\n\n Raises:\n AssertionError: If the child is incorrectly created, or if either\n the expected tag or attributes are different to their actual values\n created by <function>.\n " (root, child) = self._execute_function_on_elem(function, keylayout, args) self.assertEqual([child], list(root)) self.assertEqual(expected_tag, child.tag) self.assertEqual(expected_attributes, child.attrib)
Assert that a function, when called on <keylayout> with an Element node and other arguments (provided by <args>) creates a sub-elem with the expected tag and attributes. <function>, given an Element node, should add a sub-elem to that node, and return the sub-elem. child_node = function(keylayout, Element('root'), *args) Args: function (Callable): The function under test. keylayout (Keylayout): The Keylayout, or mock object, being used to test <function>. expected_tag (str): The name that the sub-elem created by <function> is expected to have. expected_attributes (Dict[str, str]): A map of the expected attributes which the sub-elem created by <function> is expected to have. Raises: AssertionError: If the child is incorrectly created, or if either the expected tag or attributes are different to their actual values created by <function>.
test/units/test_file_writers.py
_assert_properties_of_XML_tag_returned_from
andrewjunyoung/symboard
5
python
def _assert_properties_of_XML_tag_returned_from(self, function, expected_tag, expected_attributes, keylayout=None, args=None): " Assert that a function, when called on <keylayout> with an Element\n node and other arguments (provided by <args>) creates a sub-elem with\n the expected tag and attributes.\n\n <function>, given an Element node, should add a sub-elem to that\n node, and return the sub-elem.\n\n child_node = function(keylayout, Element('root'), *args)\n\n Args:\n function (Callable): The function under test.\n keylayout (Keylayout): The Keylayout, or mock object, being used to\n test <function>.\n expected_tag (str): The name that the sub-elem created by\n <function> is expected to have.\n expected_attributes (Dict[str, str]): A map of the expected\n attributes which the sub-elem created by <function> is expected\n to have.\n\n Raises:\n AssertionError: If the child is incorrectly created, or if either\n the expected tag or attributes are different to their actual values\n created by <function>.\n " (root, child) = self._execute_function_on_elem(function, keylayout, args) self.assertEqual([child], list(root)) self.assertEqual(expected_tag, child.tag) self.assertEqual(expected_attributes, child.attrib)
def _assert_properties_of_XML_tag_returned_from(self, function, expected_tag, expected_attributes, keylayout=None, args=None): " Assert that a function, when called on <keylayout> with an Element\n node and other arguments (provided by <args>) creates a sub-elem with\n the expected tag and attributes.\n\n <function>, given an Element node, should add a sub-elem to that\n node, and return the sub-elem.\n\n child_node = function(keylayout, Element('root'), *args)\n\n Args:\n function (Callable): The function under test.\n keylayout (Keylayout): The Keylayout, or mock object, being used to\n test <function>.\n expected_tag (str): The name that the sub-elem created by\n <function> is expected to have.\n expected_attributes (Dict[str, str]): A map of the expected\n attributes which the sub-elem created by <function> is expected\n to have.\n\n Raises:\n AssertionError: If the child is incorrectly created, or if either\n the expected tag or attributes are different to their actual values\n created by <function>.\n " (root, child) = self._execute_function_on_elem(function, keylayout, args) self.assertEqual([child], list(root)) self.assertEqual(expected_tag, child.tag) self.assertEqual(expected_attributes, child.attrib)<|docstring|>Assert that a function, when called on <keylayout> with an Element node and other arguments (provided by <args>) creates a sub-elem with the expected tag and attributes. <function>, given an Element node, should add a sub-elem to that node, and return the sub-elem. child_node = function(keylayout, Element('root'), *args) Args: function (Callable): The function under test. keylayout (Keylayout): The Keylayout, or mock object, being used to test <function>. expected_tag (str): The name that the sub-elem created by <function> is expected to have. expected_attributes (Dict[str, str]): A map of the expected attributes which the sub-elem created by <function> is expected to have. Raises: AssertionError: If the child is incorrectly created, or if either the expected tag or attributes are different to their actual values created by <function>.<|endoftext|>
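The contract asserted above can be illustrated with a self-contained ElementTree sketch; _example_builder below is a made-up stand-in, not Symboard's real code, but it shows the expected behaviour: append exactly one sub-element to the given parent and return it.

from xml.etree.ElementTree import Element, SubElement

def _example_builder(keylayout, parent):
    # SubElement both attaches the new child to `parent` and returns it.
    return SubElement(parent, 'layouts')

root = Element('root')
child = _example_builder(None, root)
assert list(root) == [child]
assert child.tag == 'layouts' and child.attrib == {}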
f67d3c5fdb71920cbc660cc032b5bd618505e1d0f348d48337702d44fd5897ef
def test_layouts_creates_a_well_formed_sub_elem(self): ' Asserts that the sub_elem created by «_layouts» meets the\n required specification.\n ' mock_keylayout = MagicMock(layouts=[]) self._assert_properties_of_XML_tag_returned_from(self.file_writer._layouts, keylayout=mock_keylayout, expected_tag='layouts', expected_attributes={})
Asserts that the sub_elem created by «_layouts» meets the required specification.
test/units/test_file_writers.py
test_layouts_creates_a_well_formed_sub_elem
andrewjunyoung/symboard
5
python
def test_layouts_creates_a_well_formed_sub_elem(self): ' Asserts that the sub_elem created by «_layouts» meets the\n required specification.\n ' mock_keylayout = MagicMock(layouts=[]) self._assert_properties_of_XML_tag_returned_from(self.file_writer._layouts, keylayout=mock_keylayout, expected_tag='layouts', expected_attributes={})
def test_layouts_creates_a_well_formed_sub_elem(self): ' Asserts that the sub_elem created by «_layouts» meets the\n required specification.\n ' mock_keylayout = MagicMock(layouts=[]) self._assert_properties_of_XML_tag_returned_from(self.file_writer._layouts, keylayout=mock_keylayout, expected_tag='layouts', expected_attributes={})<|docstring|>Asserts that the sub_elem created by «_layouts» meets the required specification.<|endoftext|>
873de6d7436f47c57cc17cabd2b1b38b10b38704ce7638258a82e7e8ebf944bb
def test_modifier_map_creates_a_well_formed_sub_elem(self): ' Asserts that the sub_elem created by «_modifier_map» meets the\n required specification.\n ' EXPECTED_DEFAULT_INDEX = 3 modifiers = 'Modifiers' mock_keylayout = MagicMock(layouts=[{'modifiers': modifiers}], default_index=str(EXPECTED_DEFAULT_INDEX)) self._assert_properties_of_XML_tag_returned_from(self.file_writer._modifier_map, keylayout=mock_keylayout, expected_tag='modifierMap', expected_attributes={'id': modifiers, 'defaultIndex': str(EXPECTED_DEFAULT_INDEX)})
Asserts that the sub_elem created by «_modifier_map» meets the required specification.
test/units/test_file_writers.py
test_modifier_map_creates_a_well_formed_sub_elem
andrewjunyoung/symboard
5
python
def test_modifier_map_creates_a_well_formed_sub_elem(self): ' Asserts that the sub_elem created by «_modifier_map» meets the\n required specification.\n ' EXPECTED_DEFAULT_INDEX = 3 modifiers = 'Modifiers' mock_keylayout = MagicMock(layouts=[{'modifiers': modifiers}], default_index=str(EXPECTED_DEFAULT_INDEX)) self._assert_properties_of_XML_tag_returned_from(self.file_writer._modifier_map, keylayout=mock_keylayout, expected_tag='modifierMap', expected_attributes={'id': modifiers, 'defaultIndex': str(EXPECTED_DEFAULT_INDEX)})
def test_modifier_map_creates_a_well_formed_sub_elem(self): ' Asserts that the sub_elem created by «_modifier_map» meets the\n required specification.\n ' EXPECTED_DEFAULT_INDEX = 3 modifiers = 'Modifiers' mock_keylayout = MagicMock(layouts=[{'modifiers': modifiers}], default_index=str(EXPECTED_DEFAULT_INDEX)) self._assert_properties_of_XML_tag_returned_from(self.file_writer._modifier_map, keylayout=mock_keylayout, expected_tag='modifierMap', expected_attributes={'id': modifiers, 'defaultIndex': str(EXPECTED_DEFAULT_INDEX)})<|docstring|>Asserts that the sub_elem created by «_modifier_map» meets the required specification.<|endoftext|>
ebd235f6e3f566b04405cf1b894f53e144d380e3053303979db713dfed7c8698
def test_key_map_set_creates_a_well_formed_sub_elem(self): ' Asserts that the sub_elem created by «_key_map_set» meets the\n required specification.\n ' mock_keylayout = MagicMock(key_map={}) self._assert_properties_of_XML_tag_returned_from(self.file_writer._key_map_set, keylayout=mock_keylayout, expected_tag='keyMapSet', expected_attributes={'id': 'ANSI'})
Asserts that the sub_elem created by «_key_map_set» meets the required specification.
test/units/test_file_writers.py
test_key_map_set_creates_a_well_formed_sub_elem
andrewjunyoung/symboard
5
python
def test_key_map_set_creates_a_well_formed_sub_elem(self): ' Asserts that the sub_elem created by «_key_map_set» meets the\n required specification.\n ' mock_keylayout = MagicMock(key_map={}) self._assert_properties_of_XML_tag_returned_from(self.file_writer._key_map_set, keylayout=mock_keylayout, expected_tag='keyMapSet', expected_attributes={'id': 'ANSI'})
def test_key_map_set_creates_a_well_formed_sub_elem(self): ' Asserts that the sub_elem created by «_key_map_set» meets the\n required specification.\n ' mock_keylayout = MagicMock(key_map={}) self._assert_properties_of_XML_tag_returned_from(self.file_writer._key_map_set, keylayout=mock_keylayout, expected_tag='keyMapSet', expected_attributes={'id': 'ANSI'})<|docstring|>Asserts that the sub_elem created by «_key_map_set» meets the required specification.<|endoftext|>
a1ef4057c363f0e719f64a019025d55a0674acc509fdd6ed1cb1eaaf08e5ec38
def test_action_creates_well_formed_sub_elem(self): ' Asserts that the sub_elem created by «_action» meets the\n required specification.\n ' self._assert_properties_of_XML_tag_returned_from(self.file_writer._action, keylayout=self.states_mocker, args=[self.action], expected_tag='action', expected_attributes={'id': self.action.id_})
Asserts that the sub_elem created by «_action» meets the required specification.
test/units/test_file_writers.py
test_action_creates_well_formed_sub_elem
andrewjunyoung/symboard
5
python
def test_action_creates_well_formed_sub_elem(self): ' Asserts that the sub_elem created by «_action» meets the\n required specification.\n ' self._assert_properties_of_XML_tag_returned_from(self.file_writer._action, keylayout=self.states_mocker, args=[self.action], expected_tag='action', expected_attributes={'id': self.action.id_})
def test_action_creates_well_formed_sub_elem(self): ' Asserts that the sub_elem created by «_action» meets the\n required specification.\n ' self._assert_properties_of_XML_tag_returned_from(self.file_writer._action, keylayout=self.states_mocker, args=[self.action], expected_tag='action', expected_attributes={'id': self.action.id_})<|docstring|>Asserts that the sub_elem created by «_action» meets the required specification.<|endoftext|>
88031ca833029e8a6f1f277905a9842d02637c99bd963cf28a9805cf31c814ac
def test_terminators_creates_well_formed_sub_elem(self): ' Asserts that the sub_elem created by «_terminators» meets the\n required specification.\n ' mock_keylayout = MagicMock(states=[]) self._assert_properties_of_XML_tag_returned_from(self.file_writer._terminators, keylayout=mock_keylayout, expected_tag='terminators', expected_attributes={})
Asserts that the sub_elem created by «_terminators» meets the required specification.
test/units/test_file_writers.py
test_terminators_creates_well_formed_sub_elem
andrewjunyoung/symboard
5
python
def test_terminators_creates_well_formed_sub_elem(self): ' Asserts that the sub_elem created by «_terminators» meets the\n required specification.\n ' mock_keylayout = MagicMock(states=[]) self._assert_properties_of_XML_tag_returned_from(self.file_writer._terminators, keylayout=mock_keylayout, expected_tag='terminators', expected_attributes={})
def test_terminators_creates_well_formed_sub_elem(self): ' Asserts that the sub_elem created by «_terminators» meets the\n required specification.\n ' mock_keylayout = MagicMock(states=[]) self._assert_properties_of_XML_tag_returned_from(self.file_writer._terminators, keylayout=mock_keylayout, expected_tag='terminators', expected_attributes={})<|docstring|>Asserts that the sub_elem created by «_terminators» meets the required specification.<|endoftext|>
f0650151f4c7ffcc91c6da37cd836b9dff66e2a3c2c543430bba25dace49f525
def rename_permissions(apps, schema_editor): '\n This fixes the typo in delete_associated_propositiondocument and renames delete_propositiondocument to\n delete_all_propositiondocument so it is consistent with corresponding permission of evidencedocument.\n It also removed _investmentproject_ from proposition permissions to make it consistent.\n ' permission_model = apps.get_model('auth', 'Permission') old_new_codename = (('deleted_associated_propositiondocument', 'delete_associated_propositiondocument'), ('delete_propositiondocument', 'delete_all_propositiondocument'), ('view_associated_investmentproject_proposition', 'view_associated_proposition'), ('change_associated_investmentproject_proposition', 'change_associated_proposition'), ('add_associated_investmentproject_proposition', 'add_associated_proposition')) for (old_codename, new_codename) in old_new_codename: permission_model.objects.filter(codename=old_codename).update(codename=new_codename)
This fixes the typo in delete_associated_propositiondocument and renames delete_propositiondocument to delete_all_propositiondocument so it is consistent with the corresponding permission of evidencedocument. It also removes _investmentproject_ from proposition permissions to make them consistent.
datahub/investment/project/proposition/migrations/0006_rename_permissions.py
rename_permissions
Staberinde/data-hub-api
6
python
def rename_permissions(apps, schema_editor): '\n This fixes the typo in delete_associated_propositiondocument and renames delete_propositiondocument to\n delete_all_propositiondocument so it is consistent with corresponding permission of evidencedocument.\n It also removed _investmentproject_ from proposition permissions to make it consistent.\n ' permission_model = apps.get_model('auth', 'Permission') old_new_codename = (('deleted_associated_propositiondocument', 'delete_associated_propositiondocument'), ('delete_propositiondocument', 'delete_all_propositiondocument'), ('view_associated_investmentproject_proposition', 'view_associated_proposition'), ('change_associated_investmentproject_proposition', 'change_associated_proposition'), ('add_associated_investmentproject_proposition', 'add_associated_proposition')) for (old_codename, new_codename) in old_new_codename: permission_model.objects.filter(codename=old_codename).update(codename=new_codename)
def rename_permissions(apps, schema_editor): '\n This fixes the typo in delete_associated_propositiondocument and renames delete_propositiondocument to\n delete_all_propositiondocument so it is consistent with corresponding permission of evidencedocument.\n It also removed _investmentproject_ from proposition permissions to make it consistent.\n ' permission_model = apps.get_model('auth', 'Permission') old_new_codename = (('deleted_associated_propositiondocument', 'delete_associated_propositiondocument'), ('delete_propositiondocument', 'delete_all_propositiondocument'), ('view_associated_investmentproject_proposition', 'view_associated_proposition'), ('change_associated_investmentproject_proposition', 'change_associated_proposition'), ('add_associated_investmentproject_proposition', 'add_associated_proposition')) for (old_codename, new_codename) in old_new_codename: permission_model.objects.filter(codename=old_codename).update(codename=new_codename)<|docstring|>This fixes the typo in delete_associated_propositiondocument and renames delete_propositiondocument to delete_all_propositiondocument so it is consistent with corresponding permission of evidencedocument. It also removed _investmentproject_ from proposition permissions to make it consistent.<|endoftext|>
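A hedged sketch of how a data-migration function like rename_permissions is typically wired into a Django migration with RunPython; the app label and dependency name below are illustrative, not the real ones, and the function itself would live in the same migration module.

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('proposition', '0005_previous_migration'),  # hypothetical predecessor
    ]

    operations = [
        # Forward: rename the permission codenames; reverse: do nothing.
        migrations.RunPython(rename_permissions, migrations.RunPython.noop),
    ]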
6065f05dc71b6e8a6814cc124a63545ff7fe974f8a632aaea748e37ed1526295
def user_by_login(rstate: Rstate, login: str) -> (User | None): 'Return a user identified by login.' return _get_unique_x_by_y(rstate, users, 'login', login)
Return a user identified by login.
src/ghaudit/schema.py
user_by_login
DistantThunder/ghaudit
1
python
def user_by_login(rstate: Rstate, login: str) -> (User | None): return _get_unique_x_by_y(rstate, users, 'login', login)
def user_by_login(rstate: Rstate, login: str) -> (User | None): return _get_unique_x_by_y(rstate, users, 'login', login)<|docstring|>Return a user identified by login.<|endoftext|>
e7c5e771f7cfdc6f23c5e709fdd437a9a596e13eb6ac77fd3c2d011b5401a1f9
def user_by_id(rstate: Rstate, user_id: UserID) -> User: "Return a user identified by ID.\n\n :raises: RuntimeError if the identifier `id' is not found.\n " user = _user_by_id_noexcept(rstate, user_id) if (not user): raise RuntimeError('User not found from ID: "{}"'.format(user_id)) return user
Return a user identified by ID. :raises: RuntimeError if the identifier `id' is not found.
src/ghaudit/schema.py
user_by_id
DistantThunder/ghaudit
1
python
def user_by_id(rstate: Rstate, user_id: UserID) -> User: "Return a user identified by ID.\n\n :raises: RuntimeError if the identifier `id' is not found.\n " user = _user_by_id_noexcept(rstate, user_id) if (not user): raise RuntimeError('User not found from ID: "{}"'.format(user_id)) return user
def user_by_id(rstate: Rstate, user_id: UserID) -> User: "Return a user identified by ID.\n\n :raises: RuntimeError if the identifier `id' is not found.\n " user = _user_by_id_noexcept(rstate, user_id) if (not user): raise RuntimeError('User not found from ID: "{}"'.format(user_id)) return user<|docstring|>Return a user identified by ID. :raises: RuntimeError if the identifier `id' is not found.<|endoftext|>
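Note the difference in failure modes between the two lookups: user_by_login returns None when nothing matches, while user_by_id raises RuntimeError. A hedged sketch, assuming the module is importable as ghaudit.schema and that rstate is a previously loaded remote-state snapshot; the login and node ID below are placeholders.

from ghaudit import schema  # assumed import path

user = schema.user_by_login(rstate, 'octocat')       # placeholder login
if user is None:
    print('login not present in the cached state')

try:
    schema.user_by_id(rstate, 'MDQ6VXNlcjE=')        # placeholder node ID
except RuntimeError as exc:
    print(exc)                                       # 'User not found from ID: ...'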
93d56d38de8e70a4ac006d0d7e71ead16e2fb7b451ae533c697dcc020a760bad
def users(rstate: Rstate) -> Collection[User]: 'Return the list of all known users from the remote state.' return rstate['data']['users'].values()
Return the list of all known users from the remote state.
src/ghaudit/schema.py
users
DistantThunder/ghaudit
1
python
def users(rstate: Rstate) -> Collection[User]: return rstate['data']['users'].values()
def users(rstate: Rstate) -> Collection[User]: return rstate['data']['users'].values()<|docstring|>Return the list of all known users from the remote state.<|endoftext|>
b2b9884a1c157cc49beddf53978e14bf993dae2e3f1e18d764d828070c00d9ba
def org_repositories(rstate: Rstate) -> List[Repo]: 'Return the list of repositories in the organisation.' return _get_org_repos(rstate)
Return the list of repositories in the organisation.
src/ghaudit/schema.py
org_repositories
DistantThunder/ghaudit
1
python
def org_repositories(rstate: Rstate) -> List[Repo]: return _get_org_repos(rstate)
def org_repositories(rstate: Rstate) -> List[Repo]: return _get_org_repos(rstate)<|docstring|>Return the list of repositories in the organisation.<|endoftext|>
df3f08b6c0926150b27d1ccae683ba7b09d2690ba8802e88984541915a2e6991
def org_teams(rstate: Rstate) -> List[Team]: 'Return the list of teams in the organisation.' return _get_org_teams(rstate)
Return the list of teams in the organisation.
src/ghaudit/schema.py
org_teams
DistantThunder/ghaudit
1
python
def org_teams(rstate: Rstate) -> List[Team]: return _get_org_teams(rstate)
def org_teams(rstate: Rstate) -> List[Team]: return _get_org_teams(rstate)<|docstring|>Return the list of teams in the organisation.<|endoftext|>
11b6e7f4f668313dda492f3adf1ee52f20ef8c2633c7b7bebe8c01261432e6f3
def org_members(rstate: Rstate) -> List[UserWithOrgRole]: 'Return the list of members of the organisation.' return [cast(UserWithOrgRole, user_by_id(rstate, x)) for x in _get_org_members(rstate)]
Return the list of members of the organisation.
src/ghaudit/schema.py
org_members
DistantThunder/ghaudit
1
python
def org_members(rstate: Rstate) -> List[UserWithOrgRole]: return [cast(UserWithOrgRole, user_by_id(rstate, x)) for x in _get_org_members(rstate)]
def org_members(rstate: Rstate) -> List[UserWithOrgRole]: return [cast(UserWithOrgRole, user_by_id(rstate, x)) for x in _get_org_members(rstate)]<|docstring|>Return the list of members of the organisation.<|endoftext|>
cb9dbaecaeaf2f328fad835c5f46322e2340c7e3da8e062ad2fd67758eb8a0b0
def org_team_by_id(rstate: Rstate, team_id: TeamID) -> Team: 'Return a team from the organisation identified by ID.' return _get_unique_x_by_y(rstate, _get_org_teams, 'id', team_id)
Return a team from the organisation identified by ID.
src/ghaudit/schema.py
org_team_by_id
DistantThunder/ghaudit
1
python
def org_team_by_id(rstate: Rstate, team_id: TeamID) -> Team: return _get_unique_x_by_y(rstate, _get_org_teams, 'id', team_id)
def org_team_by_id(rstate: Rstate, team_id: TeamID) -> Team: return _get_unique_x_by_y(rstate, _get_org_teams, 'id', team_id)<|docstring|>Return a team from the organisation identified by ID.<|endoftext|>
def5a8982ec807d7d2288382b1ba8de4b40eae7ff6e0add5c911cace85297a4e
def org_team_by_name(rstate: Rstate, name: str) -> Team: 'Return a team from the organisation identified by name.' return _get_unique_x_by_y(rstate, _get_org_teams, 'name', name)
Return a team from the organisation identified by name.
src/ghaudit/schema.py
org_team_by_name
DistantThunder/ghaudit
1
python
def org_team_by_name(rstate: Rstate, name: str) -> Team: return _get_unique_x_by_y(rstate, _get_org_teams, 'name', name)
def org_team_by_name(rstate: Rstate, name: str) -> Team: return _get_unique_x_by_y(rstate, _get_org_teams, 'name', name)<|docstring|>Return a team from the organisation identified by name.<|endoftext|>
9edea6327f4b054256019591740852f30a0934bd036f334b9f190095cf41a8fb
def org_repo_by_id(rstate: Rstate, repo_id: RepoID) -> Repo: 'Return a repository from the organisation identified by ID.' return _get_unique_x_by_y(rstate, _get_org_repos, 'id', repo_id)
Return a repository from the organisation identified by ID.
src/ghaudit/schema.py
org_repo_by_id
DistantThunder/ghaudit
1
python
def org_repo_by_id(rstate: Rstate, repo_id: RepoID) -> Repo: return _get_unique_x_by_y(rstate, _get_org_repos, 'id', repo_id)
def org_repo_by_id(rstate: Rstate, repo_id: RepoID) -> Repo: return _get_unique_x_by_y(rstate, _get_org_repos, 'id', repo_id)<|docstring|>Return a repository from the organisation identified by ID.<|endoftext|>
15d3ef6897281130cd6c54538156199f106d8eb37151ec383abf2c9323692cb7
def org_repo_by_name(rstate: Rstate, name: str) -> Repo: 'Return a repository from the organisation identified by name.' return _get_unique_x_by_y(rstate, _get_org_repos, 'name', name)
Return a repository from the organisation identified by name.
src/ghaudit/schema.py
org_repo_by_name
DistantThunder/ghaudit
1
python
def org_repo_by_name(rstate: Rstate, name: str) -> Repo: return _get_unique_x_by_y(rstate, _get_org_repos, 'name', name)
def org_repo_by_name(rstate: Rstate, name: str) -> Repo: return _get_unique_x_by_y(rstate, _get_org_repos, 'name', name)<|docstring|>Return a repository from the organisation identified by name.<|endoftext|>
269bad07e5c6bbe248c32f105a433ac32a10b8f9c260828c36a9524f9a9b01e7
def repo_archived(repo: Repo) -> bool: 'Return whether the given repository is archived.' return repo['node']['isArchived']
Return whether the given repository is archived.
src/ghaudit/schema.py
repo_archived
DistantThunder/ghaudit
1
python
def repo_archived(repo: Repo) -> bool: return repo['node']['isArchived']
def repo_archived(repo: Repo) -> bool: return repo['node']['isArchived']<|docstring|>Return whether the given repository is archived.<|endoftext|>
a222a6cdec7e1e3df0c85a4635e2a1041e8c7063b02a2f48498e4a496876d81b
def repo_forked(repo: Repo) -> bool: 'Return whether the given repository is a fork.' return repo['node']['isFork']
Return whether the given repository is a fork.
src/ghaudit/schema.py
repo_forked
DistantThunder/ghaudit
1
python
def repo_forked(repo: Repo) -> bool: return repo['node']['isFork']
def repo_forked(repo: Repo) -> bool: return repo['node']['isFork']<|docstring|>Return whether the given repository is a fork.<|endoftext|>
fba2e1bd669dccfcadb5db3fcbf2c98abea8cc0e7329edbe4351e8a187d27958
def repo_private(repo: Repo) -> bool: 'Return whether the given repository has the visibility to private.' return repo['node']['isPrivate']
Return whether the given repository has the visibility to private.
src/ghaudit/schema.py
repo_private
DistantThunder/ghaudit
1
python
def repo_private(repo: Repo) -> bool: return repo['node']['isPrivate']
def repo_private(repo: Repo) -> bool: return repo['node']['isPrivate']<|docstring|>Return whether the given repository has its visibility set to private.<|endoftext|>
267eb460479835e0499ef05fc8bc638c2f07266506c77e8f72c8b284ec0ca611
def repo_name(repo: Repo) -> str: 'Return the name of a given repository.' return repo['node']['name']
Return the name of a given repository.
src/ghaudit/schema.py
repo_name
DistantThunder/ghaudit
1
python
def repo_name(repo: Repo) -> str: return repo['node']['name']
def repo_name(repo: Repo) -> str: return repo['node']['name']<|docstring|>Return the name of a given repository.<|endoftext|>
50636417f04697988ca43b429bdbfb1d4e7638f08f0f25456f6057aa11f20fbd
def repo_description(repo: Repo) -> str: 'Return the description of a given repository.' return repo['node']['description']
Return the description of a given repository.
src/ghaudit/schema.py
repo_description
DistantThunder/ghaudit
1
python
def repo_description(repo: Repo) -> str: return repo['node']['description']
def repo_description(repo: Repo) -> str: return repo['node']['description']<|docstring|>Return the description of a given repository.<|endoftext|>
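The repository accessors above each read a single attribute off the nested GraphQL node. A hedged sketch combining them into a one-line summary; it assumes rstate has already been populated by the rest of the tool.

from ghaudit import schema

def summarise_repo(rstate: "schema.Rstate", name: str) -> str:
    # Look the repository up by name, then format its name, description
    # and visibility/lifecycle flags using the accessors defined above.
    repo = schema.org_repo_by_name(rstate, name)
    flags = []
    if schema.repo_private(repo):
        flags.append("private")
    if schema.repo_archived(repo):
        flags.append("archived")
    if schema.repo_forked(repo):
        flags.append("fork")
    summary = "{}: {}".format(schema.repo_name(repo), schema.repo_description(repo))
    return "{} [{}]".format(summary, ", ".join(flags)) if flags else summary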
d430bdcaf4324f6b8875ddae8c4989b8c3be91f49257fba7ef585a085fded52a
def repo_collaborators(rstate: Rstate, repo: Repo) -> List[RepoCollaborator]: 'Return the list of collaborators to the given repository.\n\n The collaborators may be organisation members or external collaborators\n with access to the repository.\n ' def mkobj(rstate: Rstate, edge: RepoCollaboratorEdge) -> RepoCollaborator: return {'role': edge['permission'], 'node': user_by_id(rstate, edge['node']['id'])['node']} if (('collaborators' in repo['node']) and repo['node']['collaborators']): collaborators = repo['node']['collaborators']['edges'] return [mkobj(rstate, x) for x in collaborators if (x is not None)] return []
Return the list of collaborators to the given repository. The collaborators may be organisation members or external collaborators with access to the repository.
src/ghaudit/schema.py
repo_collaborators
DistantThunder/ghaudit
1
python
def repo_collaborators(rstate: Rstate, repo: Repo) -> List[RepoCollaborator]: 'Return the list of collaborators to the given repository.\n\n The collaborators may be organisation members or external collaborators\n with access to the repository.\n ' def mkobj(rstate: Rstate, edge: RepoCollaboratorEdge) -> RepoCollaborator: return {'role': edge['permission'], 'node': user_by_id(rstate, edge['node']['id'])['node']} if (('collaborators' in repo['node']) and repo['node']['collaborators']): collaborators = repo['node']['collaborators']['edges'] return [mkobj(rstate, x) for x in collaborators if (x is not None)] return []
def repo_collaborators(rstate: Rstate, repo: Repo) -> List[RepoCollaborator]: 'Return the list of collaborators to the given repository.\n\n The collaborators may be organisation members or external collaborators\n with access to the repository.\n ' def mkobj(rstate: Rstate, edge: RepoCollaboratorEdge) -> RepoCollaborator: return {'role': edge['permission'], 'node': user_by_id(rstate, edge['node']['id'])['node']} if (('collaborators' in repo['node']) and repo['node']['collaborators']): collaborators = repo['node']['collaborators']['edges'] return [mkobj(rstate, x) for x in collaborators if (x is not None)] return []<|docstring|>Return the list of collaborators to the given repository. The collaborators may be organisation members or external collaborators with access to the repository.<|endoftext|>
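repo_collaborators resolves each collaborator edge back to a full user node through user_by_id, so every returned mapping carries the permission under 'role' and the resolved user under 'node'. A usage sketch, assuming a populated rstate:

from ghaudit import schema

def print_repo_access(rstate: "schema.Rstate", name: str) -> None:
    # List every collaborator of a repository together with their permission.
    repo = schema.org_repo_by_name(rstate, name)
    for collaborator in schema.repo_collaborators(rstate, repo):
        # Each entry is a mapping of the form {'role': ..., 'node': ...};
        # the node has the same shape the user_* accessors below read from.
        print("{}: {}".format(collaborator["node"]["login"], collaborator["role"]))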
9a5e4ab1ed03390fb64d8cfd66fcaf9fd64fe9d5965ed94a9669a8cbc4effcca
def repo_branch_protection_rules(repo: Repo) -> List[BranchProtectionRuleNode]: 'Return the list of branch protection rules from a given repository.' return repo['node']['branchProtectionRules']['nodes']
Return the list of branch protection rules from a given repository.
src/ghaudit/schema.py
repo_branch_protection_rules
DistantThunder/ghaudit
1
python
def repo_branch_protection_rules(repo: Repo) -> List[BranchProtectionRuleNode]: return repo['node']['branchProtectionRules']['nodes']
def repo_branch_protection_rules(repo: Repo) -> List[BranchProtectionRuleNode]: return repo['node']['branchProtectionRules']['nodes']<|docstring|>Return the list of branch protection rules from a given repository.<|endoftext|>
4012fe08e165577b330a860238acc27cdc2e3ab4f2ce66aad1f919f828e7d77c
def repo_branch_protection_rule(repo: Repo, pattern: str) -> Optional[BranchProtectionRuleNode]: 'Return a branch protection rule given a branch name pattern.' rules = repo_branch_protection_rules(repo) elems = [x for x in rules if (branch_protection_pattern(x) == pattern)] assert (len(elems) <= 1) if elems: return elems[0] return None
Return a branch protection rule given a branch name pattern.
src/ghaudit/schema.py
repo_branch_protection_rule
DistantThunder/ghaudit
1
python
def repo_branch_protection_rule(repo: Repo, pattern: str) -> Optional[BranchProtectionRuleNode]: rules = repo_branch_protection_rules(repo) elems = [x for x in rules if (branch_protection_pattern(x) == pattern)] assert (len(elems) <= 1) if elems: return elems[0] return None
def repo_branch_protection_rule(repo: Repo, pattern: str) -> Optional[BranchProtectionRuleNode]: rules = repo_branch_protection_rules(repo) elems = [x for x in rules if (branch_protection_pattern(x) == pattern)] assert (len(elems) <= 1) if elems: return elems[0] return None<|docstring|>Return a branch protection rule given a branch name pattern.<|endoftext|>
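repo_branch_protection_rule filters the repository's rule list by branch name pattern, asserts that at most one rule matches, and returns None when there is none. A sketch of a simple check, assuming rstate is populated and the repository data includes its branchProtectionRules; the branch_protection_approvals accessor used here is defined further down in this module.

from ghaudit import schema

def main_requires_review(rstate: "schema.Rstate", name: str) -> bool:
    # Report whether the rule protecting the "main" pattern (if any)
    # requires at least one approving review.
    repo = schema.org_repo_by_name(rstate, name)
    rule = schema.repo_branch_protection_rule(repo, "main")
    return rule is not None and schema.branch_protection_approvals(rule) > 0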
46122bcc0cfdf77f926833a2ac374c8f92fd23f24a50b4bf239ad17c85057558
def team_name(team: Team) -> str: 'Return the name of a given team.' return team['node']['name']
Return the name of a given team.
src/ghaudit/schema.py
team_name
DistantThunder/ghaudit
1
python
def team_name(team: Team) -> str: return team['node']['name']
def team_name(team: Team) -> str: return team['node']['name']<|docstring|>Return the name of a given team.<|endoftext|>
3f88ec11eed3839accf122041c88ae260a4be44c27c6c8f28d50be9978238ece
def team_description(team: Team) -> str: 'Return the description of a given team.' return team['node']['description']
Return the description of a given team.
src/ghaudit/schema.py
team_description
DistantThunder/ghaudit
1
python
def team_description(team: Team) -> str: return team['node']['description']
def team_description(team: Team) -> str: return team['node']['description']<|docstring|>Return the description of a given team.<|endoftext|>
5414667584dac73dab1a29d88c4aca1bac7a86ecb9741020dcdb9fcea31cbba9
def team_repos(rstate: Rstate, team: Team) -> List[RepoWithPerms]: 'Return the list of repositories a team has effective access to.' def mkobj(rstate: Rstate, edge: TeamRepoEdge) -> RepoWithPerms: return {'permission': edge['permission'], 'node': org_repo_by_id(rstate, edge['node']['id'])['node']} if (('repositories' in team['node']) and team['node']['repositories']): repositories = team['node']['repositories']['edges'] return [mkobj(rstate, x) for x in repositories if (x is not None)] return []
Return the list of repositories a team has effective access to.
src/ghaudit/schema.py
team_repos
DistantThunder/ghaudit
1
python
def team_repos(rstate: Rstate, team: Team) -> List[RepoWithPerms]: def mkobj(rstate: Rstate, edge: TeamRepoEdge) -> RepoWithPerms: return {'permission': edge['permission'], 'node': org_repo_by_id(rstate, edge['node']['id'])['node']} if (('repositories' in team['node']) and team['node']['repositories']): repositories = team['node']['repositories']['edges'] return [mkobj(rstate, x) for x in repositories if (x is not None)] return []
def team_repos(rstate: Rstate, team: Team) -> List[RepoWithPerms]: def mkobj(rstate: Rstate, edge: TeamRepoEdge) -> RepoWithPerms: return {'permission': edge['permission'], 'node': org_repo_by_id(rstate, edge['node']['id'])['node']} if (('repositories' in team['node']) and team['node']['repositories']): repositories = team['node']['repositories']['edges'] return [mkobj(rstate, x) for x in repositories if (x is not None)] return []<|docstring|>Return the list of repositories a team has effective access to.<|endoftext|>
0d67906cbb5600711eef93ffd671e0b17e8345f6d9b5de915a73a2e9c290f6b0
def team_members(rstate: Rstate, team: Team) -> List[UserWithRole]: 'Return the list of members of a given team.' def mkobj(rstate: Rstate, edge: TeamMemberEdge) -> UserWithRole: return {'role': edge['role'], 'node': user_by_id(rstate, edge['node']['id'])['node']} if (('members' in team['node']) and team['node']['members']): members = team['node']['members']['edges'] return [mkobj(rstate, x) for x in members if (x is not None)] return []
Return the list of members of a given team.
src/ghaudit/schema.py
team_members
DistantThunder/ghaudit
1
python
def team_members(rstate: Rstate, team: Team) -> List[UserWithRole]: def mkobj(rstate: Rstate, edge: TeamMemberEdge) -> UserWithRole: return {'role': edge['role'], 'node': user_by_id(rstate, edge['node']['id'])['node']} if (('members' in team['node']) and team['node']['members']): members = team['node']['members']['edges'] return [mkobj(rstate, x) for x in members if (x is not None)] return []
def team_members(rstate: Rstate, team: Team) -> List[UserWithRole]: def mkobj(rstate: Rstate, edge: TeamMemberEdge) -> UserWithRole: return {'role': edge['role'], 'node': user_by_id(rstate, edge['node']['id'])['node']} if (('members' in team['node']) and team['node']['members']): members = team['node']['members']['edges'] return [mkobj(rstate, x) for x in members if (x is not None)] return []<|docstring|>Return the list of members of a given team.<|endoftext|>
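team_repos and team_members both walk GraphQL edges and re-resolve the target nodes through the ID lookups above, so each entry is a plain mapping carrying a permission or role plus the resolved node. A hedged sketch, assuming a populated rstate:

from ghaudit import schema

def describe_team(rstate: "schema.Rstate", name: str) -> None:
    # Print the repositories a team can access and the members it contains.
    team = schema.org_team_by_name(rstate, name)
    for repo in schema.team_repos(rstate, team):
        print("repo   {} ({})".format(repo["node"]["name"], repo["permission"]))
    for member in schema.team_members(rstate, team):
        print("member {} ({})".format(member["node"]["login"], member["role"]))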
1b070c58d5690257082f16411619a2a966d7a3088e9f9a3c03450b272c14685f
def team_parent(rstate: Rstate, team: Team) -> Optional[Team]: 'Return the parent of a given team if it exists.' parent_team = team['node']['parentTeam'] if parent_team: return org_team_by_id(rstate, parent_team) return None
Return the parent of a given team if it exists.
src/ghaudit/schema.py
team_parent
DistantThunder/ghaudit
1
python
def team_parent(rstate: Rstate, team: Team) -> Optional[Team]: parent_team = team['node']['parentTeam'] if parent_team: return org_team_by_id(rstate, parent_team) return None
def team_parent(rstate: Rstate, team: Team) -> Optional[Team]: parent_team = team['node']['parentTeam'] if parent_team: return org_team_by_id(rstate, parent_team) return None<|docstring|>Return the parent of a given team if it exists.<|endoftext|>
56072288ff50da94f6feb451b2a441cd3c8de4f4c9899fff0c44c86f4762821a
def team_children(rstate: Rstate, team: Team) -> List[Team]: 'Return the list of direct child teams of a given team.' def mkobj(rstate: Rstate, edge: ChildTeam) -> Team: return {'node': org_team_by_id(rstate, edge['node']['id'])['node']} if (('childTeams' in team['node']) and team['node']['childTeams']): children = team['node']['childTeams']['edges'] return [mkobj(rstate, x) for x in children if (x is not None)] return []
Return the list of direct child teams of a given team.
src/ghaudit/schema.py
team_children
DistantThunder/ghaudit
1
python
def team_children(rstate: Rstate, team: Team) -> List[Team]: def mkobj(rstate: Rstate, edge: ChildTeam) -> Team: return {'node': org_team_by_id(rstate, edge['node']['id'])['node']} if (('childTeams' in team['node']) and team['node']['childTeams']): children = team['node']['childTeams']['edges'] return [mkobj(rstate, x) for x in children if (x is not None)] return []
def team_children(rstate: Rstate, team: Team) -> List[Team]: def mkobj(rstate: Rstate, edge: ChildTeam) -> Team: return {'node': org_team_by_id(rstate, edge['node']['id'])['node']} if (('childTeams' in team['node']) and team['node']['childTeams']): children = team['node']['childTeams']['edges'] return [mkobj(rstate, x) for x in children if (x is not None)] return []<|docstring|>Return the list of direct child teams of a given team.<|endoftext|>
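team_parent and team_children expose the team hierarchy; children are re-resolved through org_team_by_id, so the returned values can be fed straight back into the other team accessors. A recursive traversal sketch, assuming a populated rstate; the "engineering" team name is only an example.

from ghaudit import schema

def print_team_tree(rstate: "schema.Rstate", team: "schema.Team", depth: int = 0) -> None:
    # Depth-first print of a team and all of its descendant teams.
    print("{}{}".format("  " * depth, schema.team_name(team)))
    for child in schema.team_children(rstate, team):
        print_team_tree(rstate, child, depth + 1)

# Example entry point:
# print_team_tree(rstate, schema.org_team_by_name(rstate, "engineering"))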
13072a2686e5fe678bd808370720ce1179729bdb0fb5571fdda33a9e33ccdb54
def user_name(user: User) -> Optional[str]: 'Return the name of a given user.' return user['node']['name']
Return the name of a given user.
src/ghaudit/schema.py
user_name
DistantThunder/ghaudit
1
python
def user_name(user: User) -> Optional[str]: return user['node']['name']
def user_name(user: User) -> Optional[str]: return user['node']['name']<|docstring|>Return the name of a given user.<|endoftext|>
a9f344d61d2b2a8946520c6d0f75bb06a350a7819b88d7fa5716a926648c7419
def user_login(user: User) -> str: 'Return the login of a given user.' return user['node']['login']
Return the login of a given user.
src/ghaudit/schema.py
user_login
DistantThunder/ghaudit
1
python
def user_login(user: User) -> str: return user['node']['login']
def user_login(user: User) -> str: return user['node']['login']<|docstring|>Return the login of a given user.<|endoftext|>
1fdfaa300d7e1046f7583203daa36fedb09cfe2b0bf711c25ce8ff98886b42df
def user_email(user: User) -> str: 'Return the email of a given user, as set in their account.' return user['node']['email']
Return the email of a given user, as set in their account.
src/ghaudit/schema.py
user_email
DistantThunder/ghaudit
1
python
def user_email(user: User) -> str: return user['node']['email']
def user_email(user: User) -> str: return user['node']['email']<|docstring|>Return the email of a given user, as set in their account.<|endoftext|>
aaec58022297c4b79fd51eaf5325eb5b571ead1172d472c13412a03788cb82c5
def user_company(user: User) -> str: 'Return the company of a given user, as set in their account.' return user['node']['company']
Return the company of a given user, as set in their account.
src/ghaudit/schema.py
user_company
DistantThunder/ghaudit
1
python
def user_company(user: User) -> str: return user['node']['company']
def user_company(user: User) -> str: return user['node']['company']<|docstring|>Return the company of a given user, as set in their account.<|endoftext|>
aa52b5e216a72ded2e72344477acb53c5accad8bb648eb2b52a109f1f5171d3a
def user_is_owner(user: UserWithOrgRole) -> bool: 'Return whether a given user has the owner role in the organisation.' return (('role' in user) and (user['role'] == 'ADMIN'))
Return whether a given user has the owner role in the organisation.
src/ghaudit/schema.py
user_is_owner
DistantThunder/ghaudit
1
python
def user_is_owner(user: UserWithOrgRole) -> bool: return (('role' in user) and (user['role'] == 'ADMIN'))
def user_is_owner(user: UserWithOrgRole) -> bool: return (('role' in user) and (user['role'] == 'ADMIN'))<|docstring|>Return whether a given user has the owner role in the organisation.<|endoftext|>
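user_is_owner inspects the organisation-level role attached to a membership entry. This excerpt does not show how the organisation's member list is obtained, so the sketch below simply takes an iterable of UserWithOrgRole mappings as input and assumes they share the {'role', 'node'} shape used by the other user mappings.

from typing import Iterable, List

from ghaudit import schema

def owner_logins(members: "Iterable[schema.UserWithOrgRole]") -> List[str]:
    # Keep only the members whose organisation role is ADMIN (owners)
    # and return their logins.
    return [member["node"]["login"] for member in members if schema.user_is_owner(member)]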
fb52dc41d5d7103f29374d1dfb00099682495e26ba79f596b106e62d91b6a928
def branch_protection_id(rule: BranchProtectionRuleNode) -> Hashable: 'Return the ID of a given branch protection rule.' return rule['id']
Return the ID of a given branch protection rule.
src/ghaudit/schema.py
branch_protection_id
DistantThunder/ghaudit
1
python
def branch_protection_id(rule: BranchProtectionRuleNode) -> Hashable: return rule['id']
def branch_protection_id(rule: BranchProtectionRuleNode) -> Hashable: return rule['id']<|docstring|>Return the ID of a given branch protection rule.<|endoftext|>
468501b02a21b980d9ec3d35e8107e40480d4e8c025519ee547ec43afe7e67b2
def branch_protection_pattern(rule: BranchProtectionRuleNode) -> str: 'Return the branch pattern name of a given branch protection rule.' return rule['pattern']
Return the branch pattern name of a given branch protection rule.
src/ghaudit/schema.py
branch_protection_pattern
DistantThunder/ghaudit
1
python
def branch_protection_pattern(rule: BranchProtectionRuleNode) -> str: return rule['pattern']
def branch_protection_pattern(rule: BranchProtectionRuleNode) -> str: return rule['pattern']<|docstring|>Return the branch pattern name of a given branch protection rule.<|endoftext|>
1344070642ed964c57e009b0e4f2ecd089e7bc469724acdbd1054e359b5c9086
def branch_protection_admin_enforced(rule: BranchProtectionRuleNode) -> bool: 'Return whether an admin can overwrite branches.\n\n Return whether an admin can overwrite branches with a name matching the\n pattern of the given branch protection.\n ' return rule['isAdminEnforced']
Return whether an admin can overwrite branches. Return whether an admin can overwrite branches with a name matching the pattern of the given branch protection.
src/ghaudit/schema.py
branch_protection_admin_enforced
DistantThunder/ghaudit
1
python
def branch_protection_admin_enforced(rule: BranchProtectionRuleNode) -> bool: 'Return whether an admin can overwrite branches.\n\n Return whether an admin can overwrite branches with a name matching the\n pattern of the given branch protection.\n ' return rule['isAdminEnforced']
def branch_protection_admin_enforced(rule: BranchProtectionRuleNode) -> bool: 'Return whether an admin can overwrite branches.\n\n Return whether an admin can overwrite branches with a name matching the\n pattern of the given branch protection.\n ' return rule['isAdminEnforced']<|docstring|>Return whether an admin can overwrite branches. Return whether an admin can overwrite branches with a name matching the pattern of the given branch protection.<|endoftext|>
6d0387225039b999df6c9a01fb773fe244585339946e926b30084ae23a0be787
def branch_protection_approvals(rule: BranchProtectionRuleNode) -> int: 'Return the number of approving reviews required to update matching branches.' if rule['requiresApprovingReviews']: return rule['requiredApprovingReviewCount'] return 0
Return the number of approving reviews required to update matching branches.
src/ghaudit/schema.py
branch_protection_approvals
DistantThunder/ghaudit
1
python
def branch_protection_approvals(rule: BranchProtectionRuleNode) -> int: if rule['requiresApprovingReviews']: return rule['requiredApprovingReviewCount'] return 0
def branch_protection_approvals(rule: BranchProtectionRuleNode) -> int: if rule['requiresApprovingReviews']: return rule['requiredApprovingReviewCount'] return 0<|docstring|>Return the number of approving reviews required to update matching branches.<|endoftext|>
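branch_protection_approvals folds the requiresApprovingReviews flag and the review count into a single number, returning 0 when reviews are not required. A sketch that reports the required approvals for every rule of a repository, assuming a populated rstate:

from typing import Dict

from ghaudit import schema

def approvals_per_pattern(rstate: "schema.Rstate", name: str) -> Dict[str, int]:
    # Map each branch-name pattern of the repository's protection rules
    # to the number of approving reviews it requires (0 when none).
    repo = schema.org_repo_by_name(rstate, name)
    return {
        schema.branch_protection_pattern(rule): schema.branch_protection_approvals(rule)
        for rule in schema.repo_branch_protection_rules(repo)
    }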
fdcb93f832bf945bc9083674585ddf435b1bae74c48be36dd835aa07209bd2c1
def branch_protection_owner_approval(rule: BranchProtectionRuleNode) -> bool: 'Are code owner reviews required to update matching branches.' return rule['requiresCodeOwnerReviews']
Are code owner reviews required to update matching branches.
src/ghaudit/schema.py
branch_protection_owner_approval
DistantThunder/ghaudit
1
python
def branch_protection_owner_approval(rule: BranchProtectionRuleNode) -> bool: return rule['requiresCodeOwnerReviews']
def branch_protection_owner_approval(rule: BranchProtectionRuleNode) -> bool: return rule['requiresCodeOwnerReviews']<|docstring|>Are code owner reviews required to update matching branches.<|endoftext|>
29c53bd4057af9f8915af49d2a514aced04f75ab1c515d826acf06db765141bc
def branch_protection_commit_signatures(rule: BranchProtectionRuleNode) -> bool: 'Are commits required to be signed to update matching branches.' return rule['requiresCommitSignatures']
Are commits required to be signed to update matching branches.
src/ghaudit/schema.py
branch_protection_commit_signatures
DistantThunder/ghaudit
1
python
def branch_protection_commit_signatures(rule: BranchProtectionRuleNode) -> bool: return rule['requiresCommitSignatures']
def branch_protection_commit_signatures(rule: BranchProtectionRuleNode) -> bool: return rule['requiresCommitSignatures']<|docstring|>Are commits required to be signed to update matching branches.<|endoftext|>
b165a00572f71fdfe62fc3e952e93a0b21dae96096f25f9b8be130079d0dea55
def branch_protection_linear_history(rule: BranchProtectionRuleNode) -> bool: 'Are merge commits prohibited from being pushed to matching branches.' return rule['requiresLinearHistory']
Are merge commits prohibited from being pushed to matching branches.
src/ghaudit/schema.py
branch_protection_linear_history
DistantThunder/ghaudit
1
python
def branch_protection_linear_history(rule: BranchProtectionRuleNode) -> bool: return rule['requiresLinearHistory']
def branch_protection_linear_history(rule: BranchProtectionRuleNode) -> bool: return rule['requiresLinearHistory']<|docstring|>Are merge commits prohibited from being pushed to matching branches.<|endoftext|>
a6ca7871de44398e503f36ada5250b0b04848786cb7ccf77f0e72428663f2707
def branch_protection_restrict_pushes(rule: BranchProtectionRuleNode) -> bool: 'Is pushing to matching branches restricted.' return rule['restrictsPushes']
Is pushing to matching branches restricted.
src/ghaudit/schema.py
branch_protection_restrict_pushes
DistantThunder/ghaudit
1
python
def branch_protection_restrict_pushes(rule: BranchProtectionRuleNode) -> bool: return rule['restrictsPushes']
def branch_protection_restrict_pushes(rule: BranchProtectionRuleNode) -> bool: return rule['restrictsPushes']<|docstring|>Is pushing to matching branches restricted.<|endoftext|>
18a5af7dda4e32092cf67ea272473c258647268c95da198a5d106fc17565c133
def branch_protection_restrict_deletion(rule: BranchProtectionRuleNode) -> bool: 'Return whether deletion of matching branches is restricted.' return (not rule['allowsDeletions'])
Return whether deletion of matching branches is restricted.
src/ghaudit/schema.py
branch_protection_restrict_deletion
DistantThunder/ghaudit
1
python
def branch_protection_restrict_deletion(rule: BranchProtectionRuleNode) -> bool: return (not rule['allowsDeletions'])
def branch_protection_restrict_deletion(rule: BranchProtectionRuleNode) -> bool: return (not rule['allowsDeletions'])<|docstring|>Return whether deletion of matching branches is restricted.<|endoftext|>
3de1ea710bbfdd301be7b33be442957573a92a3a75b45959acf142fc3a80b61a
def branch_protection_creator(rule: BranchProtectionRuleNode) -> str: 'Return the login of the actor who created the given branch protection rule.' return rule['creator']['login']
Return the login of the actor who created the given branch protection rule.
src/ghaudit/schema.py
branch_protection_creator
DistantThunder/ghaudit
1
python
def branch_protection_creator(rule: BranchProtectionRuleNode) -> str: return rule['creator']['login']
def branch_protection_creator(rule: BranchProtectionRuleNode) -> str: return rule['creator']['login']<|docstring|>Return the login of the actor who created the given branch protection rule.<|endoftext|>
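Taken together, the branch_protection_* accessors cover the main knobs of a protection rule. A hedged audit sketch that flags rules falling short of an illustrative baseline; the thresholds below are examples chosen for the sketch, not part of the module.

from typing import List

from ghaudit import schema

def weak_protection_rules(rstate: "schema.Rstate", name: str) -> List[str]:
    # Collect the branch-name patterns of protection rules that do not meet
    # an illustrative baseline: enforced for admins, signed commits,
    # at least two approving reviews, and deletion restricted.
    repo = schema.org_repo_by_name(rstate, name)
    weak = []
    for rule in schema.repo_branch_protection_rules(repo):
        ok = (
            schema.branch_protection_admin_enforced(rule)
            and schema.branch_protection_commit_signatures(rule)
            and schema.branch_protection_approvals(rule) >= 2
            and schema.branch_protection_restrict_deletion(rule)
        )
        if not ok:
            weak.append(schema.branch_protection_pattern(rule))
    return weak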