Dataset schema (column, dtype, value range):

  body_hash               stringlengths   64 - 64
  body                    stringlengths   23 - 109k
  docstring               stringlengths   1 - 57k
  path                    stringlengths   4 - 198
  name                    stringlengths   1 - 115
  repository_name         stringlengths   7 - 111
  repository_stars        float64         0 - 191k
  lang                    stringclasses   1 value
  body_without_docstring  stringlengths   14 - 108k
  unified                 stringlengths   45 - 133k
6d844b363da15ba249605c4e46e5fabcc4073ff3bae56668561b0576c2bcc9b8
def __init__(self, message: str=None) -> None: 'The initialization of a normal exception.\n\n    Parameters\n    ----------\n    message : string\n        The message of the error.\n\n    Returns\n    -------\n    None\n    ' message = (message if (message is not None) else 'Error.') prefix = '(OpihiExarata) - ' self.message = (prefix + message)
The initialization of a normal exception. Parameters ---------- message : string The message of the error. Returns ------- None
src/opihiexarata/library/error.py
__init__
psmd-iberutaru/OpihiExarata
0
python
def __init__(self, message: str=None) -> None: 'The initialization of a normal exception.\n\n    Parameters\n    ----------\n    message : string\n        The message of the error.\n\n    Returns\n    -------\n    None\n    ' message = (message if (message is not None) else 'Error.') prefix = '(OpihiExarata) - ' self.message = (prefix + message)
def __init__(self, message: str=None) -> None: 'The initialization of a normal exception.\n\n    Parameters\n    ----------\n    message : string\n        The message of the error.\n\n    Returns\n    -------\n    None\n    ' message = (message if (message is not None) else 'Error.') prefix = '(OpihiExarata) - ' self.message = (prefix + message)<|docstring|>The initialization of a normal exception. Parameters ---------- message : string The message of the error. Returns ------- None<|endoftext|>
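For illustration, a minimal self-contained sketch of how the initializer in the row above behaves; the class name ExarataError and the call to super().__init__ are additions of this sketch, not shown in the row.

class ExarataError(Exception):
    def __init__(self, message: str = None) -> None:
        # Fall back to a default message, then prepend the project prefix.
        message = message if message is not None else 'Error.'
        prefix = '(OpihiExarata) - '
        self.message = prefix + message
        super().__init__(self.message)  # not in the original; added so str() works

print(ExarataError('File missing.').message)  # (OpihiExarata) - File missing.
print(ExarataError().message)                 # (OpihiExarata) - Error.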
791f5e2dad2e2d2b49d5681422ab39c5c55c2587af199b79cce397f0f4966dcf
def __init__(self, TStart=None, TEnd=None, IPPStart=None, IPPEnd=None, IPPPoly=None, index=None, **kwargs): '\n\n Parameters\n ----------\n TStart : float\n TEnd : float\n IPPStart : int\n IPPEnd : int\n IPPPoly : Poly1DType|numpy.ndarray|list|tuple\n index : int\n kwargs : dict\n ' if ('_xml_ns' in kwargs): self._xml_ns = kwargs['_xml_ns'] if ('_xml_ns_key' in kwargs): self._xml_ns_key = kwargs['_xml_ns_key'] (self.TStart, self.TEnd) = (TStart, TEnd) (self.IPPStart, self.IPPEnd) = (IPPStart, IPPEnd) self.IPPPoly = IPPPoly self.index = index super(IPPSetType, self).__init__(**kwargs)
Parameters ---------- TStart : float TEnd : float IPPStart : int IPPEnd : int IPPPoly : Poly1DType|numpy.ndarray|list|tuple index : int kwargs : dict
sarpy/io/complex/sicd_elements/Timeline.py
__init__
pressler-vsc/sarpy
1
python
def __init__(self, TStart=None, TEnd=None, IPPStart=None, IPPEnd=None, IPPPoly=None, index=None, **kwargs): '\n\n Parameters\n ----------\n TStart : float\n TEnd : float\n IPPStart : int\n IPPEnd : int\n IPPPoly : Poly1DType|numpy.ndarray|list|tuple\n index : int\n kwargs : dict\n ' if ('_xml_ns' in kwargs): self._xml_ns = kwargs['_xml_ns'] if ('_xml_ns_key' in kwargs): self._xml_ns_key = kwargs['_xml_ns_key'] (self.TStart, self.TEnd) = (TStart, TEnd) (self.IPPStart, self.IPPEnd) = (IPPStart, IPPEnd) self.IPPPoly = IPPPoly self.index = index super(IPPSetType, self).__init__(**kwargs)
def __init__(self, TStart=None, TEnd=None, IPPStart=None, IPPEnd=None, IPPPoly=None, index=None, **kwargs): '\n\n Parameters\n ----------\n TStart : float\n TEnd : float\n IPPStart : int\n IPPEnd : int\n IPPPoly : Poly1DType|numpy.ndarray|list|tuple\n index : int\n kwargs : dict\n ' if ('_xml_ns' in kwargs): self._xml_ns = kwargs['_xml_ns'] if ('_xml_ns_key' in kwargs): self._xml_ns_key = kwargs['_xml_ns_key'] (self.TStart, self.TEnd) = (TStart, TEnd) (self.IPPStart, self.IPPEnd) = (IPPStart, IPPEnd) self.IPPPoly = IPPPoly self.index = index super(IPPSetType, self).__init__(**kwargs)<|docstring|>Parameters ---------- TStart : float TEnd : float IPPStart : int IPPEnd : int IPPPoly : Poly1DType|numpy.ndarray|list|tuple index : int kwargs : dict<|endoftext|>
1ffd49818668e56e6c5f3f4a1422747a6a70a5968b34a4042d5bbf073ff1ae3f
def __init__(self, CollectStart=None, CollectDuration=None, IPP=None, **kwargs): '\n\n Parameters\n ----------\n CollectStart : numpy.datetime64|datetime|date|str\n CollectDuration : float\n IPP : List[IPPSetType]\n kwargs : dict\n ' if ('_xml_ns' in kwargs): self._xml_ns = kwargs['_xml_ns'] if ('_xml_ns_key' in kwargs): self._xml_ns_key = kwargs['_xml_ns_key'] self.CollectStart = CollectStart self.CollectDuration = CollectDuration self.IPP = IPP super(TimelineType, self).__init__(**kwargs)
Parameters ---------- CollectStart : numpy.datetime64|datetime|date|str CollectDuration : float IPP : List[IPPSetType] kwargs : dict
sarpy/io/complex/sicd_elements/Timeline.py
__init__
pressler-vsc/sarpy
1
python
def __init__(self, CollectStart=None, CollectDuration=None, IPP=None, **kwargs): '\n\n Parameters\n ----------\n CollectStart : numpy.datetime64|datetime|date|str\n CollectDuration : float\n IPP : List[IPPSetType]\n kwargs : dict\n ' if ('_xml_ns' in kwargs): self._xml_ns = kwargs['_xml_ns'] if ('_xml_ns_key' in kwargs): self._xml_ns_key = kwargs['_xml_ns_key'] self.CollectStart = CollectStart self.CollectDuration = CollectDuration self.IPP = IPP super(TimelineType, self).__init__(**kwargs)
def __init__(self, CollectStart=None, CollectDuration=None, IPP=None, **kwargs): '\n\n Parameters\n ----------\n CollectStart : numpy.datetime64|datetime|date|str\n CollectDuration : float\n IPP : List[IPPSetType]\n kwargs : dict\n ' if ('_xml_ns' in kwargs): self._xml_ns = kwargs['_xml_ns'] if ('_xml_ns_key' in kwargs): self._xml_ns_key = kwargs['_xml_ns_key'] self.CollectStart = CollectStart self.CollectDuration = CollectDuration self.IPP = IPP super(TimelineType, self).__init__(**kwargs)<|docstring|>Parameters ---------- CollectStart : numpy.datetime64|datetime|date|str CollectDuration : float IPP : List[IPPSetType] kwargs : dict<|endoftext|>
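A hedged usage sketch for the two sarpy constructors in the rows above, assuming the file path in these rows maps directly to the import path and that sarpy is installed; the field values are illustrative only.

from sarpy.io.complex.sicd_elements.Timeline import IPPSetType, TimelineType

# One inter-pulse-period set covering the first second of the collection;
# IPPPoly takes polynomial coefficients, per the docstring's accepted types.
ipp = IPPSetType(TStart=0.0, TEnd=1.0, IPPStart=0, IPPEnd=999,
                 IPPPoly=[0.0, 1000.0], index=1)

timeline = TimelineType(CollectStart='2020-01-01T00:00:00',
                        CollectDuration=1.0, IPP=[ipp])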
ad562d39a94b4f7d0d978ab297dd40b1af24324d2b0fc898b488aaa627b4c0de
def __init__(self, address, cat, subcat, firmware=0, description='', model=''): 'Init the WindowCovering class.' super().__init__(address, cat, subcat, firmware, description, model, state_name=COVER)
Init the WindowCovering class.
pyinsteon/device_types/window_coverings.py
__init__
michaeldavie/pyinsteon
15
python
def __init__(self, address, cat, subcat, firmware=0, description='', model=''): super().__init__(address, cat, subcat, firmware, description, model, state_name=COVER)
def __init__(self, address, cat, subcat, firmware=0, description='', model=''): super().__init__(address, cat, subcat, firmware, description, model, state_name=COVER)<|docstring|>Init the WindowCovering class.<|endoftext|>
372450779fdb4e45790722f517bc1ad18784d793d063828ef7fdf1738adb78d5
def _register_operating_flags(self): 'Register the operating and properties.' super()._register_operating_flags() self._add_operating_flag(PROGRAM_LOCK_ON, 0, 0, 0, 1) self._add_operating_flag(LED_BLINK_ON_TX_ON, 0, 1, 2, 3) self._add_operating_flag(LED_OFF, 0, 4, 10, 11) self._add_operating_flag(KEY_BEEP_ON, 0, 5, 12, 13) self._add_operating_flag(LED_BLINK_ON_ERROR_OFF, 2, 3, 21, 22) self._add_operating_flag(DUAL_LINE_ON, 3, 0, 30, 31) self._add_operating_flag(MOMENTARY_LINE_ON, 3, 1, 32, 33) self._add_operating_flag(NOT_3_WAY, 3, 3, 34, 35) self._add_operating_flag(FORWARD_ON, 3, 4, 36, 37) self._add_property(X10_HOUSE, 5, None) self._add_property(X10_UNIT, 6, None) self._add_property(RAMP_RATE, 7, 5) self._add_property(ON_LEVEL, 8, 6) self._add_property(DURATION_HIGH, 9, None) self._add_property(DURATION_LOW, 10, None)
Register the operating and properties.
pyinsteon/device_types/window_coverings.py
_register_operating_flags
michaeldavie/pyinsteon
15
python
def _register_operating_flags(self): super()._register_operating_flags() self._add_operating_flag(PROGRAM_LOCK_ON, 0, 0, 0, 1) self._add_operating_flag(LED_BLINK_ON_TX_ON, 0, 1, 2, 3) self._add_operating_flag(LED_OFF, 0, 4, 10, 11) self._add_operating_flag(KEY_BEEP_ON, 0, 5, 12, 13) self._add_operating_flag(LED_BLINK_ON_ERROR_OFF, 2, 3, 21, 22) self._add_operating_flag(DUAL_LINE_ON, 3, 0, 30, 31) self._add_operating_flag(MOMENTARY_LINE_ON, 3, 1, 32, 33) self._add_operating_flag(NOT_3_WAY, 3, 3, 34, 35) self._add_operating_flag(FORWARD_ON, 3, 4, 36, 37) self._add_property(X10_HOUSE, 5, None) self._add_property(X10_UNIT, 6, None) self._add_property(RAMP_RATE, 7, 5) self._add_property(ON_LEVEL, 8, 6) self._add_property(DURATION_HIGH, 9, None) self._add_property(DURATION_LOW, 10, None)
def _register_operating_flags(self): super()._register_operating_flags() self._add_operating_flag(PROGRAM_LOCK_ON, 0, 0, 0, 1) self._add_operating_flag(LED_BLINK_ON_TX_ON, 0, 1, 2, 3) self._add_operating_flag(LED_OFF, 0, 4, 10, 11) self._add_operating_flag(KEY_BEEP_ON, 0, 5, 12, 13) self._add_operating_flag(LED_BLINK_ON_ERROR_OFF, 2, 3, 21, 22) self._add_operating_flag(DUAL_LINE_ON, 3, 0, 30, 31) self._add_operating_flag(MOMENTARY_LINE_ON, 3, 1, 32, 33) self._add_operating_flag(NOT_3_WAY, 3, 3, 34, 35) self._add_operating_flag(FORWARD_ON, 3, 4, 36, 37) self._add_property(X10_HOUSE, 5, None) self._add_property(X10_UNIT, 6, None) self._add_property(RAMP_RATE, 7, 5) self._add_property(ON_LEVEL, 8, 6) self._add_property(DURATION_HIGH, 9, None) self._add_property(DURATION_LOW, 10, None)<|docstring|>Register the operating and properties.<|endoftext|>
c970a9dc29c746bbdc4d911d02f0abfdbda26aa8a6fb9b280b86a464f4be17ae
@property def gcs_bucket(self) -> str: 'str: The name of the GCS bucket.' return self._gcs_bucket
str: The name of the GCS bucket.
python_modules/libraries/dagster-gcp/dagster_gcp/gcs/file_manager.py
gcs_bucket
leftrightleft/dagster
4,606
python
@property def gcs_bucket(self) -> str: return self._gcs_bucket
@property def gcs_bucket(self) -> str: return self._gcs_bucket<|docstring|>str: The name of the GCS bucket.<|endoftext|>
506cf9f7691ae7fb64da8232dddeca2cc4567f6608606370f64e654b91f2f827
@property def gcs_key(self) -> str: 'str: The GCS key.' return self._gcs_key
str: The GCS key.
python_modules/libraries/dagster-gcp/dagster_gcp/gcs/file_manager.py
gcs_key
leftrightleft/dagster
4,606
python
@property def gcs_key(self) -> str: return self._gcs_key
@property def gcs_key(self) -> str: return self._gcs_key<|docstring|>str: The GCS key.<|endoftext|>
8fe4820e189cc48c1e499b1327f5b4e6a8f8694cc2b9c6518f61bc6fdf31516c
@property def path_desc(self) -> str: "str: The file's GCS URL." return self.gcs_path
str: The file's GCS URL.
python_modules/libraries/dagster-gcp/dagster_gcp/gcs/file_manager.py
path_desc
leftrightleft/dagster
4,606
python
@property def path_desc(self) -> str: return self.gcs_path
@property def path_desc(self) -> str: return self.gcs_path<|docstring|>str: The file's GCS URL.<|endoftext|>
ea9871208f8650a1b277856853e6c8c09759ef48829d62a69cfa03322627f518
@property def gcs_path(self) -> str: "str: The file's GCS URL." return 'gs://{bucket}/{key}'.format(bucket=self.gcs_bucket, key=self.gcs_key)
str: The file's GCS URL.
python_modules/libraries/dagster-gcp/dagster_gcp/gcs/file_manager.py
gcs_path
leftrightleft/dagster
4,606
python
@property def gcs_path(self) -> str: return 'gs://{bucket}/{key}'.format(bucket=self.gcs_bucket, key=self.gcs_key)
@property def gcs_path(self) -> str: return 'gs://{bucket}/{key}'.format(bucket=self.gcs_bucket, key=self.gcs_key)<|docstring|>str: The file's GCS URL.<|endoftext|>
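Taken together, the four properties in the rows above form a small read-only interface. Below is a self-contained sketch; the class name and __init__ are hypothetical stand-ins for the dagster-gcp file handle these properties belong to.

class GCSFileHandle:
    def __init__(self, gcs_bucket: str, gcs_key: str):
        self._gcs_bucket = gcs_bucket
        self._gcs_key = gcs_key

    @property
    def gcs_bucket(self) -> str:
        return self._gcs_bucket

    @property
    def gcs_key(self) -> str:
        return self._gcs_key

    @property
    def gcs_path(self) -> str:
        # gcs_path composes the bucket and key into a gs:// URL...
        return 'gs://{bucket}/{key}'.format(bucket=self.gcs_bucket, key=self.gcs_key)

    @property
    def path_desc(self) -> str:
        # ...and path_desc is simply an alias for it.
        return self.gcs_path

print(GCSFileHandle('my-bucket', 'data/file.csv').path_desc)  # gs://my-bucket/data/file.csv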
b8d7375656bba7f72d377321b50d88bfca96b5abdb927d2120e8ad614f2417fa
def new(key, *args, **kwargs): 'Create a new DES cipher\n\n    :Parameters:\n      key : byte string\n        The secret key to use in the symmetric cipher.\n        It must be 8 bytes long. The parity bits will be ignored.\n    :Keywords:\n      mode : a *MODE_** constant\n        The chaining mode to use for encryption or decryption.\n        Default is `MODE_ECB`.\n      IV : byte string\n        The initialization vector to use for encryption or decryption.\n        \n        It is ignored for `MODE_ECB` and `MODE_CTR`.\n\n        For `MODE_OPENPGP`, IV must be `block_size` bytes long for encryption\n        and `block_size` +2 bytes for decryption (in the latter case, it is\n        actually the *encrypted* IV which was prefixed to the ciphertext).\n        It is mandatory.\n        \n        For all other modes, it must be `block_size` bytes long. It is optional and\n        when not present it will be given a default value of all zeroes.\n      counter : callable\n        (*Only* `MODE_CTR`). A stateful function that returns the next\n        *counter block*, which is a byte string of `block_size` bytes.\n        For better performance, use `Crypto.Util.Counter`.\n      segment_size : integer\n        (*Only* `MODE_CFB`). The number of bits the plaintext and ciphertext\n        are segmented in.\n        It must be a multiple of 8. If 0 or not specified, it will be assumed to be 8.\n\n    :Return: a `DESCipher` object\n    ' return DESCipher(key, *args, **kwargs)
Create a new DES cipher :Parameters: key : byte string The secret key to use in the symmetric cipher. It must be 8 bytes long. The parity bits will be ignored. :Keywords: mode : a *MODE_** constant The chaining mode to use for encryption or decryption. Default is `MODE_ECB`. IV : byte string The initialization vector to use for encryption or decryption. It is ignored for `MODE_ECB` and `MODE_CTR`. For `MODE_OPENPGP`, IV must be `block_size` bytes long for encryption and `block_size` +2 bytes for decryption (in the latter case, it is actually the *encrypted* IV which was prefixed to the ciphertext). It is mandatory. For all other modes, it must be `block_size` bytes long. It is optional and when not present it will be given a default value of all zeroes. counter : callable (*Only* `MODE_CTR`). A stateful function that returns the next *counter block*, which is a byte string of `block_size` bytes. For better performance, use `Crypto.Util.Counter`. segment_size : integer (*Only* `MODE_CFB`). The number of bits the plaintext and ciphertext are segmented in. It must be a multiple of 8. If 0 or not specified, it will be assumed to be 8. :Return: a `DESCipher` object
desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/Cipher/DES.py
new
hombin/hue
2,151
python
def new(key, *args, **kwargs): 'Create a new DES cipher\n\n    :Parameters:\n      key : byte string\n        The secret key to use in the symmetric cipher.\n        It must be 8 bytes long. The parity bits will be ignored.\n    :Keywords:\n      mode : a *MODE_** constant\n        The chaining mode to use for encryption or decryption.\n        Default is `MODE_ECB`.\n      IV : byte string\n        The initialization vector to use for encryption or decryption.\n        \n        It is ignored for `MODE_ECB` and `MODE_CTR`.\n\n        For `MODE_OPENPGP`, IV must be `block_size` bytes long for encryption\n        and `block_size` +2 bytes for decryption (in the latter case, it is\n        actually the *encrypted* IV which was prefixed to the ciphertext).\n        It is mandatory.\n        \n        For all other modes, it must be `block_size` bytes long. It is optional and\n        when not present it will be given a default value of all zeroes.\n      counter : callable\n        (*Only* `MODE_CTR`). A stateful function that returns the next\n        *counter block*, which is a byte string of `block_size` bytes.\n        For better performance, use `Crypto.Util.Counter`.\n      segment_size : integer\n        (*Only* `MODE_CFB`). The number of bits the plaintext and ciphertext\n        are segmented in.\n        It must be a multiple of 8. If 0 or not specified, it will be assumed to be 8.\n\n    :Return: a `DESCipher` object\n    ' return DESCipher(key, *args, **kwargs)
def new(key, *args, **kwargs): 'Create a new DES cipher\n\n    :Parameters:\n      key : byte string\n        The secret key to use in the symmetric cipher.\n        It must be 8 bytes long. The parity bits will be ignored.\n    :Keywords:\n      mode : a *MODE_** constant\n        The chaining mode to use for encryption or decryption.\n        Default is `MODE_ECB`.\n      IV : byte string\n        The initialization vector to use for encryption or decryption.\n        \n        It is ignored for `MODE_ECB` and `MODE_CTR`.\n\n        For `MODE_OPENPGP`, IV must be `block_size` bytes long for encryption\n        and `block_size` +2 bytes for decryption (in the latter case, it is\n        actually the *encrypted* IV which was prefixed to the ciphertext).\n        It is mandatory.\n        \n        For all other modes, it must be `block_size` bytes long. It is optional and\n        when not present it will be given a default value of all zeroes.\n      counter : callable\n        (*Only* `MODE_CTR`). A stateful function that returns the next\n        *counter block*, which is a byte string of `block_size` bytes.\n        For better performance, use `Crypto.Util.Counter`.\n      segment_size : integer\n        (*Only* `MODE_CFB`). The number of bits the plaintext and ciphertext\n        are segmented in.\n        It must be a multiple of 8. If 0 or not specified, it will be assumed to be 8.\n\n    :Return: a `DESCipher` object\n    ' return DESCipher(key, *args, **kwargs)<|docstring|>Create a new DES cipher :Parameters: key : byte string The secret key to use in the symmetric cipher. It must be 8 bytes long. The parity bits will be ignored. :Keywords: mode : a *MODE_** constant The chaining mode to use for encryption or decryption. Default is `MODE_ECB`. IV : byte string The initialization vector to use for encryption or decryption. It is ignored for `MODE_ECB` and `MODE_CTR`. For `MODE_OPENPGP`, IV must be `block_size` bytes long for encryption and `block_size` +2 bytes for decryption (in the latter case, it is actually the *encrypted* IV which was prefixed to the ciphertext). It is mandatory. For all other modes, it must be `block_size` bytes long. It is optional and when not present it will be given a default value of all zeroes. counter : callable (*Only* `MODE_CTR`). A stateful function that returns the next *counter block*, which is a byte string of `block_size` bytes. For better performance, use `Crypto.Util.Counter`. segment_size : integer (*Only* `MODE_CFB`). The number of bits the plaintext and ciphertext are segmented in. It must be a multiple of 8. If 0 or not specified, it will be assumed to be 8. :Return: a `DESCipher` object<|endoftext|>
d13533d78fcdfacd2db56fb75a6871b0cccf376b9c9a1feba11160585fffd072
def __init__(self, key, *args, **kwargs): 'Initialize a DES cipher object\n \n See also `new()` at the module level.' blockalgo.BlockAlgo.__init__(self, _DES, key, *args, **kwargs)
Initialize a DES cipher object See also `new()` at the module level.
desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/Cipher/DES.py
__init__
hombin/hue
2,151
python
def __init__(self, key, *args, **kwargs): 'Initialize a DES cipher object\n \n See also `new()` at the module level.' blockalgo.BlockAlgo.__init__(self, _DES, key, *args, **kwargs)
def __init__(self, key, *args, **kwargs): 'Initialize a DES cipher object\n \n See also `new()` at the module level.' blockalgo.BlockAlgo.__init__(self, _DES, key, *args, **kwargs)<|docstring|>Initialize a DES cipher object See also `new()` at the module level.<|endoftext|>
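A hedged usage sketch for new() in the rows above, following the PyCrypto 2.6.1 API the docstring describes; it requires the legacy pycrypto package, and the byte values chosen here are illustrative.

from Crypto.Cipher import DES

key = b'8bytekey'              # exactly 8 bytes; the parity bits are ignored
iv = b'\x00' * DES.block_size  # DES.block_size is 8

cipher = DES.new(key, DES.MODE_CBC, iv)
ciphertext = cipher.encrypt(b'sixteen byte msg')  # length must be a multiple of 8

decipher = DES.new(key, DES.MODE_CBC, iv)         # a fresh cipher object for decryption
assert decipher.decrypt(ciphertext) == b'sixteen byte msg'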
4388634d6c3ff197162d2aeb0beb93384840ddec16c6e25c52cf293dab34f2b9
def needs(self, record=None, **kwargs): 'Enabling Needs, Set of Needs granting permission.' if (record is None): return [] is_single_ip = (record.get('access', {}).get('access_right') == 'singleip') visible = self.check_permission() if (not is_single_ip): return [any_user] elif visible: return [any_user] else: return []
Enabling Needs, Set of Needs granting permission.
invenio_config_tugraz/generators.py
needs
rekt-hard/invenio-config-tugraz
2
python
def needs(self, record=None, **kwargs): if (record is None): return [] is_single_ip = (record.get('access', {}).get('access_right') == 'singleip') visible = self.check_permission() if (not is_single_ip): return [any_user] elif visible: return [any_user] else: return []
def needs(self, record=None, **kwargs): if (record is None): return [] is_single_ip = (record.get('access', {}).get('access_right') == 'singleip') visible = self.check_permission() if (not is_single_ip): return [any_user] elif visible: return [any_user] else: return []<|docstring|>Enabling Needs, Set of Needs granting permission.<|endoftext|>
e3a351d5f738e60ba2e2aaf87226b9257ac37f4351f38ac34b51698dd72913bb
def excludes(self, **kwargs): 'Preventing Needs, Set of Needs denying permission.\n\n If ANY of the Needs are matched, permission is revoked.\n\n .. note::\n\n ``_load_permissions()`` method from `Permission\n <https://invenio-access.readthedocs.io/en/latest/api.html\n #invenio_access.permissions.Permission>`_ adds by default the\n ``superuser_access`` Need (if tied to a User or Role) for us.\n\n It also expands ActionNeeds into the Users/Roles that\n provide them.\n\n If the same Need is returned by `needs` and `excludes`, then that\n Need provider is disallowed.\n ' return []
Preventing Needs, Set of Needs denying permission. If ANY of the Needs are matched, permission is revoked. .. note:: ``_load_permissions()`` method from `Permission <https://invenio-access.readthedocs.io/en/latest/api.html #invenio_access.permissions.Permission>`_ adds by default the ``superuser_access`` Need (if tied to a User or Role) for us. It also expands ActionNeeds into the Users/Roles that provide them. If the same Need is returned by `needs` and `excludes`, then that Need provider is disallowed.
invenio_config_tugraz/generators.py
excludes
rekt-hard/invenio-config-tugraz
2
python
def excludes(self, **kwargs): 'Preventing Needs, Set of Needs denying permission.\n\n If ANY of the Needs are matched, permission is revoked.\n\n .. note::\n\n ``_load_permissions()`` method from `Permission\n <https://invenio-access.readthedocs.io/en/latest/api.html\n #invenio_access.permissions.Permission>`_ adds by default the\n ``superuser_access`` Need (if tied to a User or Role) for us.\n\n It also expands ActionNeeds into the Users/Roles that\n provide them.\n\n If the same Need is returned by `needs` and `excludes`, then that\n Need provider is disallowed.\n ' return []
def excludes(self, **kwargs): 'Preventing Needs, Set of Needs denying permission.\n\n If ANY of the Needs are matched, permission is revoked.\n\n .. note::\n\n ``_load_permissions()`` method from `Permission\n <https://invenio-access.readthedocs.io/en/latest/api.html\n #invenio_access.permissions.Permission>`_ adds by default the\n ``superuser_access`` Need (if tied to a User or Role) for us.\n\n It also expands ActionNeeds into the Users/Roles that\n provide them.\n\n If the same Need is returned by `needs` and `excludes`, then that\n Need provider is disallowed.\n ' return []<|docstring|>Preventing Needs, Set of Needs denying permission. If ANY of the Needs are matched, permission is revoked. .. note:: ``_load_permissions()`` method from `Permission <https://invenio-access.readthedocs.io/en/latest/api.html #invenio_access.permissions.Permission>`_ adds by default the ``superuser_access`` Need (if tied to a User or Role) for us. It also expands ActionNeeds into the Users/Roles that provide them. If the same Need is returned by `needs` and `excludes`, then that Need provider is disallowed.<|endoftext|>
cf61132490b3bf01e2e2e9a1018467fa7ffeb29ac40215147337aac17627f46a
def query_filter(self, *args, **kwargs): 'Filters for singleip records.' visible = self.check_permission() if (not visible): return (~ Q('match', **{'access.access_right': 'singleip'})) return Q('match_all')
Filters for singleip records.
invenio_config_tugraz/generators.py
query_filter
rekt-hard/invenio-config-tugraz
2
python
def query_filter(self, *args, **kwargs): visible = self.check_permission() if (not visible): return (~ Q('match', **{'access.access_right': 'singleip'})) return Q('match_all')
def query_filter(self, *args, **kwargs): visible = self.check_permission() if (not visible): return (~ Q('match', **{'access.access_right': 'singleip'})) return Q('match_all')<|docstring|>Filters for singleip records.<|endoftext|>
6f7aeffe2f1de1831521ad7ec2e02f32522ebfd515e762a4a016bf93fcf9e7bd
def check_permission(self): 'Check for User IP address in config variable.' user_ip = request.remote_addr if (user_ip in current_app.config['INVENIO_CONFIG_TUGRAZ_SINGLE_IP']): return True return False
Check for User IP address in config variable.
invenio_config_tugraz/generators.py
check_permission
rekt-hard/invenio-config-tugraz
2
python
def check_permission(self): user_ip = request.remote_addr if (user_ip in current_app.config['INVENIO_CONFIG_TUGRAZ_SINGLE_IP']): return True return False
def check_permission(self): user_ip = request.remote_addr if (user_ip in current_app.config['INVENIO_CONFIG_TUGRAZ_SINGLE_IP']): return True return False<|docstring|>Check for User IP address in config variable.<|endoftext|>
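The four generator methods in the rows above implement one policy: records flagged singleip are visible only from allow-listed IP addresses. A self-contained sketch of that decision logic, with the Flask request and app config replaced by plain arguments so it runs standalone:

def singleip_visible(record, user_ip, allowed_ips):
    # Mirrors needs(): non-singleip records are visible to any user, and
    # singleip records only when the client IP passes check_permission().
    if record is None:
        return False
    is_single_ip = record.get('access', {}).get('access_right') == 'singleip'
    visible = user_ip in allowed_ips  # stands in for check_permission()
    return (not is_single_ip) or visible

record = {'access': {'access_right': 'singleip'}}
print(singleip_visible(record, '10.0.0.1', ['10.0.0.1']))  # True
print(singleip_visible(record, '10.0.0.2', ['10.0.0.1']))  # False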
ccbd25ab4b777061bfd1346e0fb6563401b6241b3258439732bc5aedceee6f03
def form_activation_vector(self, binary): '\n        Function forms an activation vector by swapping the first activation_vec_hd entries with random positions in\n        the vector. This results in a vector that has the length of the depth of the switch network and the\n        hamming distance as defined by the class.\n        ' for index in range(self.activation_vec_hd): self.swap(binary, index, np.random.randint(self.switch_network.depth)) return binary
Function forms an activation vector by swapping the first activation_vec_hd entries with random positions in the vector. This results in a vector that has the length of the depth of the switch network and the hamming distance as defined by the class.
switch_network_LQUBO/form_LQUBO/form_LQUBO.py
form_activation_vector
seangholson/lqubo
1
python
def form_activation_vector(self, binary): '\n        Function forms an activation vector by swapping the first activation_vec_hd entries with random positions in\n        the vector. This results in a vector that has the length of the depth of the switch network and the\n        hamming distance as defined by the class.\n        ' for index in range(self.activation_vec_hd): self.swap(binary, index, np.random.randint(self.switch_network.depth)) return binary
def form_activation_vector(self, binary): '\n        Function forms an activation vector by swapping the first activation_vec_hd entries with random positions in\n        the vector. This results in a vector that has the length of the depth of the switch network and the\n        hamming distance as defined by the class.\n        ' for index in range(self.activation_vec_hd): self.swap(binary, index, np.random.randint(self.switch_network.depth)) return binary<|docstring|>Function forms an activation vector by swapping the first activation_vec_hd entries with random positions in the vector. This results in a vector that has the length of the depth of the switch network and the hamming distance as defined by the class.<|endoftext|>
78ee746bb1a8a37c2075d9fbe62f21662302833acc6d797f3461b36910d7700d
def form_activation_matrix(self): '\n Function forms an activation matrix based on number and hamming distance of activation vectors.\n ' activation_matrix = [] for i in range(self.n_qubo): activation_matrix.append(np.zeros(self.switch_network.depth)) for i in range(len(activation_matrix)): for j in range(self.activation_vec_hd): activation_matrix[i][j] = 1 self.form_activation_vector(activation_matrix[i][:self.switch_network.depth]) return activation_matrix
Function forms an activation matrix based on number and hamming distance of activation vectors.
switch_network_LQUBO/form_LQUBO/form_LQUBO.py
form_activation_matrix
seangholson/lqubo
1
python
def form_activation_matrix(self): '\n \n ' activation_matrix = [] for i in range(self.n_qubo): activation_matrix.append(np.zeros(self.switch_network.depth)) for i in range(len(activation_matrix)): for j in range(self.activation_vec_hd): activation_matrix[i][j] = 1 self.form_activation_vector(activation_matrix[i][:self.switch_network.depth]) return activation_matrix
def form_activation_matrix(self): '\n \n ' activation_matrix = [] for i in range(self.n_qubo): activation_matrix.append(np.zeros(self.switch_network.depth)) for i in range(len(activation_matrix)): for j in range(self.activation_vec_hd): activation_matrix[i][j] = 1 self.form_activation_vector(activation_matrix[i][:self.switch_network.depth]) return activation_matrix<|docstring|>Function forms an activation matrix based on number and hamming distance of activation vectors.<|endoftext|>
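A runnable sketch of the activation-vector construction in the two rows above; swap(), the network depth, and the hamming distance are stand-ins for the class attributes the methods reference.

import numpy as np

def swap(vec, i, j):
    vec[i], vec[j] = vec[j], vec[i]

depth, activation_vec_hd = 8, 3
binary = np.zeros(depth)
binary[:activation_vec_hd] = 1  # set the first activation_vec_hd entries

# Scatter those entries to random positions; swapping preserves the number
# of ones, so the vector keeps its Hamming weight of activation_vec_hd.
for index in range(activation_vec_hd):
    swap(binary, index, np.random.randint(depth))

print(binary)  # e.g. [0. 1. 0. 1. 0. 0. 1. 0.]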
5e6461d0a91648b56870ac52b0ff0cd0d9ddee189bf985c163b02d7601877126
def export_account_history(self, account_id, export_format): 'GET /mobilews/accountExport/{id}/{exportFormat}\n\n :param str account_id: path parameter\n :param str export_format: path parameter\n :return: Response object\n :rtype: requests.Response\n ' endpoint = AccountExportEndpoint.ACCOUNT_EXPORT_FORMAT.value.format(id=account_id, exportFormat=export_format) return self._get(url=self._build_url(endpoint))
GET /mobilews/accountExport/{id}/{exportFormat} :param str account_id: path parameter :param str export_format: path parameter :return: Response object :rtype: requests.Response
q2_api_client/clients/mobile_ws/account_export_client.py
export_account_history
jcook00/q2-api-client
0
python
def export_account_history(self, account_id, export_format): 'GET /mobilews/accountExport/{id}/{exportFormat}\n\n :param str account_id: path parameter\n :param str export_format: path parameter\n :return: Response object\n :rtype: requests.Response\n ' endpoint = AccountExportEndpoint.ACCOUNT_EXPORT_FORMAT.value.format(id=account_id, exportFormat=export_format) return self._get(url=self._build_url(endpoint))
def export_account_history(self, account_id, export_format): 'GET /mobilews/accountExport/{id}/{exportFormat}\n\n :param str account_id: path parameter\n :param str export_format: path parameter\n :return: Response object\n :rtype: requests.Response\n ' endpoint = AccountExportEndpoint.ACCOUNT_EXPORT_FORMAT.value.format(id=account_id, exportFormat=export_format) return self._get(url=self._build_url(endpoint))<|docstring|>GET /mobilews/accountExport/{id}/{exportFormat} :param str account_id: path parameter :param str export_format: path parameter :return: Response object :rtype: requests.Response<|endoftext|>
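A minimal sketch of the URL construction this wrapper performs; the template string is inferred from the route in the docstring and the format() call in the body, so treat the constant name as hypothetical.

ACCOUNT_EXPORT_FORMAT = '/mobilews/accountExport/{id}/{exportFormat}'
print(ACCOUNT_EXPORT_FORMAT.format(id='12345', exportFormat='csv'))
# /mobilews/accountExport/12345/csv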
4f722d05af720a1e5bc306b99fc966efb1d3435366eb50d468496a0c4e894dd3
def __filter_tenants(chosen_tenants, excluded_tenants, tenants_in_results, progress_printer): '\n Filter tenants by either excluding or choosing them (both is not possible).\n\n :param chosen_tenants: The chosen tenants\n :type chosen_tenants: list\n :param excluded_tenants: The excluded tenants\n :type excluded_tenants: list\n :param tenants_in_results: The tenants for which results exist\n :type tenants_in_results: list\n :param progress_printer: The progress printer\n :type progress_printer: ProgressPrinter\n :return: A sorted list with tenants to fix\n :rtype list:\n ' if ((not chosen_tenants) and (not excluded_tenants)): tenants = tenants_in_results progress_printer.print_message('{} tenant(s) was/were found in results: {}\n'.format(len(tenants), ', '.join(tenants))) elif chosen_tenants: tenants = [tenant for tenant in chosen_tenants if (tenant in tenants_in_results)] tenants_not_found = [tenant for tenant in chosen_tenants if (tenant not in tenants_in_results)] if tenants_not_found: progress_printer.print_message('{} tenant(s) were chosen, but {} of those were not found in the results, {} remain: {}\n'.format(len(chosen_tenants), len(tenants_not_found), len(tenants), ', '.join(tenants))) else: progress_printer.print_message('{} tenant(s) were chosen: {}\n'.format(len(tenants), ', '.join(tenants))) elif excluded_tenants: tenants = [tenant for tenant in tenants_in_results if (tenant not in excluded_tenants)] progress_printer.print_message('{} tenant(s) were found in results, {} remain after filtering: {}\n'.format(len(tenants_in_results), len(tenants), ', '.join(tenants))) else: raise ValueError("chosen_tenants and excluded_tenants can't be both defined") tenants.sort() return tenants
Filter tenants by either excluding or choosing them (both is not possible). :param chosen_tenants: The chosen tenants :type chosen_tenants: list :param excluded_tenants: The excluded tenants :type excluded_tenants: list :param tenants_in_results: The tenants for which results exist :type tenants_in_results: list :param progress_printer: The progress printer :type progress_printer: ProgressPrinter :return: A sorted list with tenants to fix :rtype list:
check_data_integrity/fix.py
__filter_tenants
CGreweling/helper-scripts
4
python
def __filter_tenants(chosen_tenants, excluded_tenants, tenants_in_results, progress_printer): '\n Filter tenants by either excluding or choosing them (both is not possible).\n\n :param chosen_tenants: The chosen tenants\n :type chosen_tenants: list\n :param excluded_tenants: The excluded tenants\n :type excluded_tenants: list\n :param tenants_in_results: The tenants for which results exist\n :type tenants_in_results: list\n :param progress_printer: The progress printer\n :type progress_printer: ProgressPrinter\n :return: A sorted list with tenants to fix\n :rtype list:\n ' if ((not chosen_tenants) and (not excluded_tenants)): tenants = tenants_in_results progress_printer.print_message('{} tenant(s) was/were found in results: {}\n'.format(len(tenants), ', '.join(tenants))) elif chosen_tenants: tenants = [tenant for tenant in chosen_tenants if (tenant in tenants_in_results)] tenants_not_found = [tenant for tenant in chosen_tenants if (tenant not in tenants_in_results)] if tenants_not_found: progress_printer.print_message('{} tenant(s) were chosen, but {} of those were not found in the results, {} remain: {}\n'.format(len(chosen_tenants), len(tenants_not_found), len(tenants), ', '.join(tenants))) else: progress_printer.print_message('{} tenant(s) were chosen: {}\n'.format(len(tenants), ', '.join(tenants))) elif excluded_tenants: tenants = [tenant for tenant in tenants_in_results if (tenant not in excluded_tenants)] progress_printer.print_message('{} tenant(s) were found in results, {} remain after filtering: {}\n'.format(len(tenants_in_results), len(tenants), ', '.join(tenants))) else: raise ValueError("chosen_tenants and excluded_tenants can't be both defined") tenants.sort() return tenants
def __filter_tenants(chosen_tenants, excluded_tenants, tenants_in_results, progress_printer): '\n Filter tenants by either excluding or choosing them (both is not possible).\n\n :param chosen_tenants: The chosen tenants\n :type chosen_tenants: list\n :param excluded_tenants: The excluded tenants\n :type excluded_tenants: list\n :param tenants_in_results: The tenants for which results exist\n :type tenants_in_results: list\n :param progress_printer: The progress printer\n :type progress_printer: ProgressPrinter\n :return: A sorted list with tenants to fix\n :rtype list:\n ' if ((not chosen_tenants) and (not excluded_tenants)): tenants = tenants_in_results progress_printer.print_message('{} tenant(s) was/were found in results: {}\n'.format(len(tenants), ', '.join(tenants))) elif chosen_tenants: tenants = [tenant for tenant in chosen_tenants if (tenant in tenants_in_results)] tenants_not_found = [tenant for tenant in chosen_tenants if (tenant not in tenants_in_results)] if tenants_not_found: progress_printer.print_message('{} tenant(s) were chosen, but {} of those were not found in the results, {} remain: {}\n'.format(len(chosen_tenants), len(tenants_not_found), len(tenants), ', '.join(tenants))) else: progress_printer.print_message('{} tenant(s) were chosen: {}\n'.format(len(tenants), ', '.join(tenants))) elif excluded_tenants: tenants = [tenant for tenant in tenants_in_results if (tenant not in excluded_tenants)] progress_printer.print_message('{} tenant(s) were found in results, {} remain after filtering: {}\n'.format(len(tenants_in_results), len(tenants), ', '.join(tenants))) else: raise ValueError("chosen_tenants and excluded_tenants can't be both defined") tenants.sort() return tenants<|docstring|>Filter tenants by either excluding or choosing them (both is not possible). :param chosen_tenants: The chosen tenants :type chosen_tenants: list :param excluded_tenants: The excluded tenants :type excluded_tenants: list :param tenants_in_results: The tenants for which results exist :type tenants_in_results: list :param progress_printer: The progress printer :type progress_printer: ProgressPrinter :return: A sorted list with tenants to fix :rtype list:<|endoftext|>
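A hedged usage sketch for __filter_tenants above; a stub stands in for ProgressPrinter, and the calls assume the same module scope (the double-underscore name is not mangled at module level).

class StubPrinter:
    def print_message(self, msg, *args, **kwargs):
        print(msg, end='')

tenants_in_results = ['alpha', 'beta', 'gamma']

# Choosing mode: keep only the chosen tenants that actually have results.
print(__filter_tenants(['beta', 'delta'], [], tenants_in_results, StubPrinter()))
# -> ['beta']

# Excluding mode: drop the excluded tenants from the results.
print(__filter_tenants([], ['gamma'], tenants_in_results, StubPrinter()))
# -> ['alpha', 'beta']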
d98b51a5b85a7eb71c5cf2875a02f3445af9d75ef0da5c3cad593a6f7dc95cd8
def main(): '\n    Iterate over each tenant, each fixer, each error that fixer can fix, each event belonging to that tenant with that\n    error and fix them if possible, log results.\n    ' (opencast, https, chosen_tenants, excluded_tenants, digest_login, waiting_period, batch_size, silent, no_fancy_output, results_dir) = parse_args() url_builder = URLBuilder(opencast, https) progress_printer = ProgressPrinter(silent, no_fancy_output) progress_printer.print_empty_line() log_writer = LogWriter('fix_log', 'media package', 'tenant', 'error', 'fix') try: progress_printer.print_message('Parsing results... ', 0, False, True) results_parser = ResultsParser(results_dir) progress_printer.print_message('finished.\n', 0, True, False) tenants_in_results = results_parser.get_tenants() tenants = __filter_tenants(chosen_tenants, excluded_tenants, tenants_in_results, progress_printer) fixers = [SeriesDCOfEventFixer()] state = FixAnswer.NEXT workflows_started = 0 for tenant in tenants: progress_printer.print_message('Starting with tenant {}...\n'.format(tenant), 0) base_url = url_builder.get_base_url(tenant) for fixer in fixers: errors = fixer.get_errors() for error in errors: if ((state == FixAnswer.SKIP) or (state == FixAnswer.REST)): state = FixAnswer.NEXT progress_printer.print_message('Looking for {}...'.format(error), 1, False, True) events_to_be_fixed = results_parser.get_events_with_error(tenant, error) end = (':' if len(events_to_be_fixed) else '.') progress_printer.print_message(' {} found{}\n'.format(len(events_to_be_fixed), end), 1, True, False) if (len(events_to_be_fixed) == 0): continue print_events_to_be_fixed(events_to_be_fixed, progress_printer, 2) fixed_events = 0 for event_id in events_to_be_fixed: if ((waiting_period != 0) and (workflows_started != 0) and ((workflows_started % batch_size) == 0)): progress_printer.print_time(waiting_period, 'Waiting for {} second(s) to not overflow the system...') try: if (state == FixAnswer.NEXT): state = fix_question(2) if (state == FixAnswer.SKIP): break elif (state == FixAnswer.QUIT): progress_printer.print_message('...aborted.') return progress_printer.print_message('Fixing event {}... '.format(event_id), 2, False, True) fixer.fix(base_url, digest_login, event_id) log_writer.write_to_log(event_id, tenant, error, fixer.get_fix_description()) progress_printer.print_message('fixed.\n', 2, True, False) fixed_events = (fixed_events + 1) workflows_started = (workflows_started + 1) except RequestError as e: progress_printer.print_message('could not be fixed: {}\n'.format(e.error), 2, True, False) log_writer.write_to_log(event_id, tenant, error, 'could not be fixed: {}'.format(e.error)) progress_printer.print_message('{} of {} {} fixed for tenant {}.\n'.format(fixed_events, len(events_to_be_fixed), error, tenant), 1) progress_printer.print_message('...finished.\n'.format(tenant)) finally: log_writer.close_log()
Iterate over each tenant, each fixer, each error that fixer can fix, each event belonging to that tenant with that error and fix them if possible, log results.
check_data_integrity/fix.py
main
CGreweling/helper-scripts
4
python
def main(): '\n    Iterate over each tenant, each fixer, each error that fixer can fix, each event belonging to that tenant with that\n    error and fix them if possible, log results.\n    ' (opencast, https, chosen_tenants, excluded_tenants, digest_login, waiting_period, batch_size, silent, no_fancy_output, results_dir) = parse_args() url_builder = URLBuilder(opencast, https) progress_printer = ProgressPrinter(silent, no_fancy_output) progress_printer.print_empty_line() log_writer = LogWriter('fix_log', 'media package', 'tenant', 'error', 'fix') try: progress_printer.print_message('Parsing results... ', 0, False, True) results_parser = ResultsParser(results_dir) progress_printer.print_message('finished.\n', 0, True, False) tenants_in_results = results_parser.get_tenants() tenants = __filter_tenants(chosen_tenants, excluded_tenants, tenants_in_results, progress_printer) fixers = [SeriesDCOfEventFixer()] state = FixAnswer.NEXT workflows_started = 0 for tenant in tenants: progress_printer.print_message('Starting with tenant {}...\n'.format(tenant), 0) base_url = url_builder.get_base_url(tenant) for fixer in fixers: errors = fixer.get_errors() for error in errors: if ((state == FixAnswer.SKIP) or (state == FixAnswer.REST)): state = FixAnswer.NEXT progress_printer.print_message('Looking for {}...'.format(error), 1, False, True) events_to_be_fixed = results_parser.get_events_with_error(tenant, error) end = (':' if len(events_to_be_fixed) else '.') progress_printer.print_message(' {} found{}\n'.format(len(events_to_be_fixed), end), 1, True, False) if (len(events_to_be_fixed) == 0): continue print_events_to_be_fixed(events_to_be_fixed, progress_printer, 2) fixed_events = 0 for event_id in events_to_be_fixed: if ((waiting_period != 0) and (workflows_started != 0) and ((workflows_started % batch_size) == 0)): progress_printer.print_time(waiting_period, 'Waiting for {} second(s) to not overflow the system...') try: if (state == FixAnswer.NEXT): state = fix_question(2) if (state == FixAnswer.SKIP): break elif (state == FixAnswer.QUIT): progress_printer.print_message('...aborted.') return progress_printer.print_message('Fixing event {}... '.format(event_id), 2, False, True) fixer.fix(base_url, digest_login, event_id) log_writer.write_to_log(event_id, tenant, error, fixer.get_fix_description()) progress_printer.print_message('fixed.\n', 2, True, False) fixed_events = (fixed_events + 1) workflows_started = (workflows_started + 1) except RequestError as e: progress_printer.print_message('could not be fixed: {}\n'.format(e.error), 2, True, False) log_writer.write_to_log(event_id, tenant, error, 'could not be fixed: {}'.format(e.error)) progress_printer.print_message('{} of {} {} fixed for tenant {}.\n'.format(fixed_events, len(events_to_be_fixed), error, tenant), 1) progress_printer.print_message('...finished.\n'.format(tenant)) finally: log_writer.close_log()
def main(): '\n    Iterate over each tenant, each fixer, each error that fixer can fix, each event belonging to that tenant with that\n    error and fix them if possible, log results.\n    ' (opencast, https, chosen_tenants, excluded_tenants, digest_login, waiting_period, batch_size, silent, no_fancy_output, results_dir) = parse_args() url_builder = URLBuilder(opencast, https) progress_printer = ProgressPrinter(silent, no_fancy_output) progress_printer.print_empty_line() log_writer = LogWriter('fix_log', 'media package', 'tenant', 'error', 'fix') try: progress_printer.print_message('Parsing results... ', 0, False, True) results_parser = ResultsParser(results_dir) progress_printer.print_message('finished.\n', 0, True, False) tenants_in_results = results_parser.get_tenants() tenants = __filter_tenants(chosen_tenants, excluded_tenants, tenants_in_results, progress_printer) fixers = [SeriesDCOfEventFixer()] state = FixAnswer.NEXT workflows_started = 0 for tenant in tenants: progress_printer.print_message('Starting with tenant {}...\n'.format(tenant), 0) base_url = url_builder.get_base_url(tenant) for fixer in fixers: errors = fixer.get_errors() for error in errors: if ((state == FixAnswer.SKIP) or (state == FixAnswer.REST)): state = FixAnswer.NEXT progress_printer.print_message('Looking for {}...'.format(error), 1, False, True) events_to_be_fixed = results_parser.get_events_with_error(tenant, error) end = (':' if len(events_to_be_fixed) else '.') progress_printer.print_message(' {} found{}\n'.format(len(events_to_be_fixed), end), 1, True, False) if (len(events_to_be_fixed) == 0): continue print_events_to_be_fixed(events_to_be_fixed, progress_printer, 2) fixed_events = 0 for event_id in events_to_be_fixed: if ((waiting_period != 0) and (workflows_started != 0) and ((workflows_started % batch_size) == 0)): progress_printer.print_time(waiting_period, 'Waiting for {} second(s) to not overflow the system...') try: if (state == FixAnswer.NEXT): state = fix_question(2) if (state == FixAnswer.SKIP): break elif (state == FixAnswer.QUIT): progress_printer.print_message('...aborted.') return progress_printer.print_message('Fixing event {}... '.format(event_id), 2, False, True) fixer.fix(base_url, digest_login, event_id) log_writer.write_to_log(event_id, tenant, error, fixer.get_fix_description()) progress_printer.print_message('fixed.\n', 2, True, False) fixed_events = (fixed_events + 1) workflows_started = (workflows_started + 1) except RequestError as e: progress_printer.print_message('could not be fixed: {}\n'.format(e.error), 2, True, False) log_writer.write_to_log(event_id, tenant, error, 'could not be fixed: {}'.format(e.error)) progress_printer.print_message('{} of {} {} fixed for tenant {}.\n'.format(fixed_events, len(events_to_be_fixed), error, tenant), 1) progress_printer.print_message('...finished.\n'.format(tenant)) finally: log_writer.close_log()<|docstring|>Iterate over each tenant, each fixer, each error that fixer can fix, each event belonging to that tenant with that error and fix them if possible, log results.<|endoftext|>
cf0e475b99ffbd2403d06b477b39f14d68b7a549a24f25e849653d06d75ed738
@pytest.mark.import_star def test_imported_modules(): ' Test that `from dateutil import *` adds modules in __all__ locally ' import dateutil.easter import dateutil.parser import dateutil.relativedelta import dateutil.rrule import dateutil.tz import dateutil.utils import dateutil.zoneinfo assert (dateutil.easter == new_locals.pop('easter')) assert (dateutil.parser == new_locals.pop('parser')) assert (dateutil.relativedelta == new_locals.pop('relativedelta')) assert (dateutil.rrule == new_locals.pop('rrule')) assert (dateutil.tz == new_locals.pop('tz')) assert (dateutil.utils == new_locals.pop('utils')) assert (dateutil.zoneinfo == new_locals.pop('zoneinfo')) assert (not new_locals)
Test that `from dateutil import *` adds modules in __all__ locally
dateutil/test/test_import_star.py
test_imported_modules
jackzhenguo/dateutil
6,989
python
@pytest.mark.import_star def test_imported_modules(): ' ' import dateutil.easter import dateutil.parser import dateutil.relativedelta import dateutil.rrule import dateutil.tz import dateutil.utils import dateutil.zoneinfo assert (dateutil.easter == new_locals.pop('easter')) assert (dateutil.parser == new_locals.pop('parser')) assert (dateutil.relativedelta == new_locals.pop('relativedelta')) assert (dateutil.rrule == new_locals.pop('rrule')) assert (dateutil.tz == new_locals.pop('tz')) assert (dateutil.utils == new_locals.pop('utils')) assert (dateutil.zoneinfo == new_locals.pop('zoneinfo')) assert (not new_locals)
@pytest.mark.import_star def test_imported_modules(): ' ' import dateutil.easter import dateutil.parser import dateutil.relativedelta import dateutil.rrule import dateutil.tz import dateutil.utils import dateutil.zoneinfo assert (dateutil.easter == new_locals.pop('easter')) assert (dateutil.parser == new_locals.pop('parser')) assert (dateutil.relativedelta == new_locals.pop('relativedelta')) assert (dateutil.rrule == new_locals.pop('rrule')) assert (dateutil.tz == new_locals.pop('tz')) assert (dateutil.utils == new_locals.pop('utils')) assert (dateutil.zoneinfo == new_locals.pop('zoneinfo')) assert (not new_locals)<|docstring|>Test that `from dateutil import *` adds modules in __all__ locally<|endoftext|>
956a17e350de6e1ad4dbdcdf7c05fe41082f916262ff0c40fee7386cc821edf5
def get_localizer(language='English'): 'The factory method' languages = dict(English=EnglishLanguage, Hindi=HindiLanguage) return languages[language]()
The factory method
Creational/Factory/python/factory.py
get_localizer
phkors/design-patterns
294
python
def get_localizer(language='English'): languages = dict(English=EnglishLanguage, Hindi=HindiLanguage) return languages[language]()
def get_localizer(language='English'): languages = dict(English=EnglishLanguage, Hindi=HindiLanguage) return languages[language]()<|docstring|>The factory method<|endoftext|>
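A self-contained sketch of the factory in the row above; the two language classes are minimal stand-ins for the EnglishLanguage and HindiLanguage classes in the source file.

class EnglishLanguage:
    def get(self, msgid):
        return str(msgid)

class HindiLanguage:
    def __init__(self):
        self.trans = dict(dog='कुत्ता', cat='बिल्ली')
    def get(self, msgid):
        return self.trans.get(msgid, str(msgid))

def get_localizer(language='English'):
    languages = dict(English=EnglishLanguage, Hindi=HindiLanguage)
    return languages[language]()

for lang in ('English', 'Hindi'):
    print(get_localizer(lang).get('dog'))  # dog / कुत्ता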
209c972d193357c2c8de4da2de76698501dc5ed13877dc110b5ee48c06e45e3e
def get(self, msgid): "We'll punt if we don't have a translation" return self.trans.get(msgid, str(msgid))
We'll punt if we don't have a translation
Creational/Factory/python/factory.py
get
phkors/design-patterns
294
python
def get(self, msgid): return self.trans.get(msgid, str(msgid))
def get(self, msgid): return self.trans.get(msgid, str(msgid))<|docstring|>We'll punt if we don't have a translation<|endoftext|>
3b804525c1b7deccae57d9413e2c42878d788eca3400e456e60b2a4a2e9dcf55
def print_usage(show_help_line=False): ' Prints the short help card for the program.\n ' print('Usage: python optimalguess.py [-hc] [-o <outfile>] <targetfile>') print('Guesses passwords in a dataset optimally.') if show_help_line: print("For extended help use '-h' option.")
Prints the short help card for the program.
src/optimalguess.py
print_usage
sr-lab/pyrrho
0
python
def print_usage(show_help_line=False): ' \n ' print('Usage: python optimalguess.py [-hc] [-o <outfile>] <targetfile>') print('Guesses passwords in a dataset optimally.') if show_help_line: print("For extended help use '-h' option.")
def print_usage(show_help_line=False): ' \n ' print('Usage: python optimalguess.py [-hc] [-o <outfile>] <targetfile>') print('Guesses passwords in a dataset optimally.') if show_help_line: print("For extended help use '-h' option.")<|docstring|>Prints the short help card for the program.<|endoftext|>
63248cd3ec54d7a9a336c0c6ede95915ec6d6a12c2b276a2f974580a22d65b0c
def print_help(): ' Prints the full help card for the program.\n ' print_usage() print('Arguments:') print('\ttargetfile: The target file for the guessing attack') print('Options:') print('\t-h: Show this help screen') print('\t-c: Output 100 cumulative probabilities only (percentile mode)') print('\t-o <path>: Output to file instead of stdout')
Prints the full help card for the program.
src/optimalguess.py
print_help
sr-lab/pyrrho
0
python
def print_help(): ' \n ' print_usage() print('Arguments:') print('\ttargetfile: The target file for the guessing attack') print('Options:') print('\t-h: Show this help screen') print('\t-c: Output 100 cumulative probabilities only (percentile mode)') print('\t-o <path>: Output to file instead of stdout')
def print_help(): ' \n ' print_usage() print('Arguments:') print('\ttargetfile: The target file for the guessing attack') print('Options:') print('\t-h: Show this help screen') print('\t-c: Output 100 cumulative probabilities only (percentile mode)') print('\t-o <path>: Output to file instead of stdout')<|docstring|>Prints the full help card for the program.<|endoftext|>
0e0ef89002630490095c571b990d1c109fb7cd146929d756efec33f493087ec4
def __init__(self, transformer, keepdims=False, **kwargs): '\n @param transformer: (batch_size, seq_len, d_model) -> (batch_size, seq_len, d_model)\n @param keepdims: If True then keep the output dimension of the transformer,\n otherwise squeeze the output.\n ' super().__init__(**kwargs) self.transformer = transformer self.keepdims = keepdims initial_pool_token_embedding = tf.random.truncated_normal((transformer.d_model,)) self.pool_token_embedding = tf.Variable(initial_pool_token_embedding, trainable=True)
@param transformer: (batch_size, seq_len, d_model) -> (batch_size, seq_len, d_model) @param keepdims: If True then keep the output dimension of the transformer, otherwise squeeze the output.
transplant/modules/attn_pool.py
__init__
tiagocri/ecg-transfer-learning-master
28
python
def __init__(self, transformer, keepdims=False, **kwargs): '\n @param transformer: (batch_size, seq_len, d_model) -> (batch_size, seq_len, d_model)\n @param keepdims: If True then keep the output dimension of the transformer,\n otherwise squeeze the output.\n ' super().__init__(**kwargs) self.transformer = transformer self.keepdims = keepdims initial_pool_token_embedding = tf.random.truncated_normal((transformer.d_model,)) self.pool_token_embedding = tf.Variable(initial_pool_token_embedding, trainable=True)
def __init__(self, transformer, keepdims=False, **kwargs): '\n @param transformer: (batch_size, seq_len, d_model) -> (batch_size, seq_len, d_model)\n @param keepdims: If True then keep the output dimension of the transformer,\n otherwise squeeze the output.\n ' super().__init__(**kwargs) self.transformer = transformer self.keepdims = keepdims initial_pool_token_embedding = tf.random.truncated_normal((transformer.d_model,)) self.pool_token_embedding = tf.Variable(initial_pool_token_embedding, trainable=True)<|docstring|>@param transformer: (batch_size, seq_len, d_model) -> (batch_size, seq_len, d_model) @param keepdims: If True then keep the output dimension of the transformer, otherwise squeeze the output.<|endoftext|>
eb315aaee9ffedc7398ba66d4b4c191d153378e0682f01740fdc73219b011215
def call(self, x, training=None, mask=None): '\n        @param x: (batch_size, seq_len, d_model)\n        @param training: training mode\n        @param mask: (batch_size, seq_len)\n        @return: (batch_size, d_model) or (batch_size, 1, d_model) if keepdims is True\n        ' batch_size = tf.shape(x)[0] pool_token_embedding = tf.tile(self.pool_token_embedding, (batch_size,)) pool_token_embedding = tf.reshape(pool_token_embedding, (batch_size, 1, self.transformer.d_model)) x = tf.concat([pool_token_embedding, x], axis=1) if (mask is not None): pool_token_embedding_mask = tf.zeros((batch_size, 1)) mask = tf.concat([pool_token_embedding_mask, mask], axis=1) x = self.transformer(x, training=training, mask=mask) if self.keepdims: x = x[:, :1, :] else: x = x[:, 0, :] return x
@param x: (batch_size, seq_len, d_model) @param training: training mode @param mask: (batch_size, seq_len) @return: (batch_size, d_model) or (batch_size, 1, d_model) if keepdims is True
transplant/modules/attn_pool.py
call
tiagocri/ecg-transfer-learning-master
28
python
def call(self, x, training=None, mask=None): '\n        @param x: (batch_size, seq_len, d_model)\n        @param training: training mode\n        @param mask: (batch_size, seq_len)\n        @return: (batch_size, d_model) or (batch_size, 1, d_model) if keepdims is True\n        ' batch_size = tf.shape(x)[0] pool_token_embedding = tf.tile(self.pool_token_embedding, (batch_size,)) pool_token_embedding = tf.reshape(pool_token_embedding, (batch_size, 1, self.transformer.d_model)) x = tf.concat([pool_token_embedding, x], axis=1) if (mask is not None): pool_token_embedding_mask = tf.zeros((batch_size, 1)) mask = tf.concat([pool_token_embedding_mask, mask], axis=1) x = self.transformer(x, training=training, mask=mask) if self.keepdims: x = x[:, :1, :] else: x = x[:, 0, :] return x
def call(self, x, training=None, mask=None): '\n        @param x: (batch_size, seq_len, d_model)\n        @param training: training mode\n        @param mask: (batch_size, seq_len)\n        @return: (batch_size, d_model) or (batch_size, 1, d_model) if keepdims is True\n        ' batch_size = tf.shape(x)[0] pool_token_embedding = tf.tile(self.pool_token_embedding, (batch_size,)) pool_token_embedding = tf.reshape(pool_token_embedding, (batch_size, 1, self.transformer.d_model)) x = tf.concat([pool_token_embedding, x], axis=1) if (mask is not None): pool_token_embedding_mask = tf.zeros((batch_size, 1)) mask = tf.concat([pool_token_embedding_mask, mask], axis=1) x = self.transformer(x, training=training, mask=mask) if self.keepdims: x = x[:, :1, :] else: x = x[:, 0, :] return x<|docstring|>@param x: (batch_size, seq_len, d_model) @param training: training mode @param mask: (batch_size, seq_len) @return: (batch_size, d_model) or (batch_size, 1, d_model) if keepdims is True<|endoftext|>
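A hedged shape check for the pool-token trick in call() above; the transformer itself is left out, so only the tile/reshape/concat bookkeeping is exercised.

import tensorflow as tf

batch_size, seq_len, d_model = 2, 5, 8
x = tf.random.normal((batch_size, seq_len, d_model))

pool_token = tf.random.truncated_normal((d_model,))
token = tf.reshape(tf.tile(pool_token, (batch_size,)), (batch_size, 1, d_model))

x = tf.concat([token, x], axis=1)  # the learned pool token sits at position 0
print(x.shape)           # (2, 6, 8)
print(x[:, 0, :].shape)  # (2, 8) -- what call() returns when keepdims is False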
4be2a648d116fc3b3cc828ff61f9db6e71581c6a982f10a1ea6152a932d42c46
def heuristic(env, s): '\n The heuristic for\n 1. Testing\n 2. Demonstration rollout.\n Args:\n env: The environment\n s (list): The state. Attributes:\n s[0] is the horizontal coordinate\n s[1] is the vertical coordinate\n s[2] is the horizontal speed\n s[3] is the vertical speed\n s[4] is the angle\n s[5] is the angular speed\n s[6] 1 if first leg has contact, else 0\n s[7] 1 if second leg has contact, else 0\n returns:\n a: The heuristic to be fed into the step function defined above to determine the next step and reward.\n ' angle_targ = ((s[0] * 0.5) + (s[2] * 1.0)) if (angle_targ > 0.4): angle_targ = 0.4 if (angle_targ < (- 0.4)): angle_targ = (- 0.4) hover_targ = (0.55 * np.abs(s[0])) angle_todo = (((angle_targ - s[4]) * 0.5) - (s[5] * 1.0)) hover_todo = (((hover_targ - s[1]) * 0.5) - (s[3] * 0.5)) if (s[6] or s[7]): angle_todo = 0 hover_todo = ((- s[3]) * 0.5) if env.continuous: a = np.array([((hover_todo * 20) - 1), ((- angle_todo) * 20)]) a = np.clip(a, (- 1), (+ 1)) else: a = 0 if ((hover_todo > np.abs(angle_todo)) and (hover_todo > 0.05)): a = 2 elif (angle_todo < (- 0.05)): a = 3 elif (angle_todo > (+ 0.05)): a = 1 return a
The heuristic for 1. Testing 2. Demonstration rollout. Args: env: The environment s (list): The state. Attributes: s[0] is the horizontal coordinate s[1] is the vertical coordinate s[2] is the horizontal speed s[3] is the vertical speed s[4] is the angle s[5] is the angular speed s[6] 1 if first leg has contact, else 0 s[7] 1 if second leg has contact, else 0 returns: a: The heuristic to be fed into the step function defined above to determine the next step and reward.
complex_functions/lunar12d/lunar_lander.py
heuristic
xunzhang/CobBO
4
python
def heuristic(env, s): '\n The heuristic for\n 1. Testing\n 2. Demonstration rollout.\n Args:\n env: The environment\n s (list): The state. Attributes:\n s[0] is the horizontal coordinate\n s[1] is the vertical coordinate\n s[2] is the horizontal speed\n s[3] is the vertical speed\n s[4] is the angle\n s[5] is the angular speed\n s[6] 1 if first leg has contact, else 0\n s[7] 1 if second leg has contact, else 0\n returns:\n a: The heuristic to be fed into the step function defined above to determine the next step and reward.\n ' angle_targ = ((s[0] * 0.5) + (s[2] * 1.0)) if (angle_targ > 0.4): angle_targ = 0.4 if (angle_targ < (- 0.4)): angle_targ = (- 0.4) hover_targ = (0.55 * np.abs(s[0])) angle_todo = (((angle_targ - s[4]) * 0.5) - (s[5] * 1.0)) hover_todo = (((hover_targ - s[1]) * 0.5) - (s[3] * 0.5)) if (s[6] or s[7]): angle_todo = 0 hover_todo = ((- s[3]) * 0.5) if env.continuous: a = np.array([((hover_todo * 20) - 1), ((- angle_todo) * 20)]) a = np.clip(a, (- 1), (+ 1)) else: a = 0 if ((hover_todo > np.abs(angle_todo)) and (hover_todo > 0.05)): a = 2 elif (angle_todo < (- 0.05)): a = 3 elif (angle_todo > (+ 0.05)): a = 1 return a
def heuristic(env, s): '\n The heuristic for\n 1. Testing\n 2. Demonstration rollout.\n Args:\n env: The environment\n s (list): The state. Attributes:\n s[0] is the horizontal coordinate\n s[1] is the vertical coordinate\n s[2] is the horizontal speed\n s[3] is the vertical speed\n s[4] is the angle\n s[5] is the angular speed\n s[6] 1 if first leg has contact, else 0\n s[7] 1 if second leg has contact, else 0\n returns:\n a: The heuristic to be fed into the step function defined above to determine the next step and reward.\n ' angle_targ = ((s[0] * 0.5) + (s[2] * 1.0)) if (angle_targ > 0.4): angle_targ = 0.4 if (angle_targ < (- 0.4)): angle_targ = (- 0.4) hover_targ = (0.55 * np.abs(s[0])) angle_todo = (((angle_targ - s[4]) * 0.5) - (s[5] * 1.0)) hover_todo = (((hover_targ - s[1]) * 0.5) - (s[3] * 0.5)) if (s[6] or s[7]): angle_todo = 0 hover_todo = ((- s[3]) * 0.5) if env.continuous: a = np.array([((hover_todo * 20) - 1), ((- angle_todo) * 20)]) a = np.clip(a, (- 1), (+ 1)) else: a = 0 if ((hover_todo > np.abs(angle_todo)) and (hover_todo > 0.05)): a = 2 elif (angle_todo < (- 0.05)): a = 3 elif (angle_todo > (+ 0.05)): a = 1 return a<|docstring|>The heuristic for 1. Testing 2. Demonstration rollout. Args: env: The environment s (list): The state. Attributes: s[0] is the horizontal coordinate s[1] is the vertical coordinate s[2] is the horizontal speed s[3] is the vertical speed s[4] is the angle s[5] is the angular speed s[6] 1 if first leg has contact, else 0 s[7] 1 if second leg has contact, else 0 returns: a: The heuristic to be fed into the step function defined above to determine the next step and reward.<|endoftext|>
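A hedged rollout sketch driving the heuristic above. It assumes the registered Gym environment 'LunarLander-v2' and the older four-tuple step() API this code targets; nothing outside the record is guaranteed.
import gym
env = gym.make('LunarLander-v2')
s = env.reset()
total_reward, done = 0.0, False
while not done:
    a = heuristic(env, s)  # the function defined above
    s, r, done, _ = env.step(a)
    total_reward += r
print('episode return:', total_reward)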
ea9fc7fb8fbb00b3c1d658caf84d747db5227f23cec07768b21e035321353b02
def arange_objs_according_to_IDs(self, num_analysis_frames_from_present=100): 'Returns list of dicts of objects with their properties as key value pairs ' all_frames = self.frame_no curr_frame = np.max(all_frames) frames_to_consider_logical = (all_frames >= (curr_frame - num_analysis_frames_from_present)) intersting_ID_list = self.ID[frames_to_consider_logical] intersting_x_pos_list = self.x_pos[frames_to_consider_logical] intersting_y_pos_list = self.y_pos[frames_to_consider_logical] frame_no_list = self.frame_no[frames_to_consider_logical] loop_interval_list = self.loop_interval[frames_to_consider_logical] unique_intersting_ID_list = np.unique(intersting_ID_list) list_of_objs = [] for unq_id in unique_intersting_ID_list: x_pos_current_id = intersting_x_pos_list[(intersting_ID_list == unq_id)] y_pos_current_id = intersting_y_pos_list[(intersting_ID_list == unq_id)] frame_no_current_id = frame_no_list[(intersting_ID_list == unq_id)] loop_interval_current_id = loop_interval_list[(intersting_ID_list == unq_id)] curr_obj = dict([('ID', unq_id), ('x_pos', x_pos_current_id), ('y_pos', y_pos_current_id), ('frame_no', frame_no_current_id), ('loop_interval', loop_interval_current_id)]) list_of_objs.append(curr_obj) return list_of_objs
Returns list of dicts of objects with their properties as key value pairs
Tracking_Objs.py
arange_objs_according_to_IDs
tanstar5/ObjectTrackingKilobotsPublicV0p0
0
python
def arange_objs_according_to_IDs(self, num_analysis_frames_from_present=100): ' ' all_frames = self.frame_no curr_frame = np.max(all_frames) frames_to_consider_logical = (all_frames >= (curr_frame - num_analysis_frames_from_present)) intersting_ID_list = self.ID[frames_to_consider_logical] intersting_x_pos_list = self.x_pos[frames_to_consider_logical] intersting_y_pos_list = self.y_pos[frames_to_consider_logical] frame_no_list = self.frame_no[frames_to_consider_logical] loop_interval_list = self.loop_interval[frames_to_consider_logical] unique_intersting_ID_list = np.unique(intersting_ID_list) list_of_objs = [] for unq_id in unique_intersting_ID_list: x_pos_current_id = intersting_x_pos_list[(intersting_ID_list == unq_id)] y_pos_current_id = intersting_y_pos_list[(intersting_ID_list == unq_id)] frame_no_current_id = frame_no_list[(intersting_ID_list == unq_id)] loop_interval_current_id = loop_interval_list[(intersting_ID_list == unq_id)] curr_obj = dict([('ID', unq_id), ('x_pos', x_pos_current_id), ('y_pos', y_pos_current_id), ('frame_no', frame_no_current_id), ('loop_interval', loop_interval_current_id)]) list_of_objs.append(curr_obj) return list_of_objs
def arange_objs_according_to_IDs(self, num_analysis_frames_from_present=100): ' ' all_frames = self.frame_no curr_frame = np.max(all_frames) frames_to_consider_logical = (all_frames >= (curr_frame - num_analysis_frames_from_present)) intersting_ID_list = self.ID[frames_to_consider_logical] intersting_x_pos_list = self.x_pos[frames_to_consider_logical] intersting_y_pos_list = self.y_pos[frames_to_consider_logical] frame_no_list = self.frame_no[frames_to_consider_logical] loop_interval_list = self.loop_interval[frames_to_consider_logical] unique_intersting_ID_list = np.unique(intersting_ID_list) list_of_objs = [] for unq_id in unique_intersting_ID_list: x_pos_current_id = intersting_x_pos_list[(intersting_ID_list == unq_id)] y_pos_current_id = intersting_y_pos_list[(intersting_ID_list == unq_id)] frame_no_current_id = frame_no_list[(intersting_ID_list == unq_id)] loop_interval_current_id = loop_interval_list[(intersting_ID_list == unq_id)] curr_obj = dict([('ID', unq_id), ('x_pos', x_pos_current_id), ('y_pos', y_pos_current_id), ('frame_no', frame_no_current_id), ('loop_interval', loop_interval_current_id)]) list_of_objs.append(curr_obj) return list_of_objs<|docstring|>Returns list of dicts of objects with their properties as key value pairs<|endoftext|>
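A standalone numpy sketch of the grouping pattern above: boolean-mask each per-frame array by object ID to build one dict per unique ID (data made up for illustration).
import numpy as np
ID = np.array([1, 2, 1, 2, 1])
x_pos = np.array([0.0, 5.0, 0.1, 5.2, 0.2])
objs = [dict(ID=u, x_pos=x_pos[ID == u]) for u in np.unique(ID)]
print(objs)  # ID 1 keeps [0.0, 0.1, 0.2], ID 2 keeps [5.0, 5.2]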
5c615064453037f6d549d5f0de2734d048f51dabc8947488759683f4777f724f
def track_analysis(self, num_analysis_frames_from_present=100): 'generates plot of neighbours vs velocity histogram analysis' list_of_objs = self.arange_objs_according_to_IDs(num_analysis_frames_from_present=100) area = np.empty((0, 0)) displacement = np.empty((0, 0)) distance = np.empty((0, 0)) for curr_obj in list_of_objs: (area1, displacement1, distance1) = calculate_trace_parameters(curr_obj) area = np.append(area, area1) displacement = np.append(displacement, displacement1) distance = np.append(distance, distance1) return (area, displacement, distance)
generates plot of neighbours vs velocity histogram analysis
Tracking_Objs.py
track_analysis
tanstar5/ObjectTrackingKilobotsPublicV0p0
0
python
def track_analysis(self, num_analysis_frames_from_present=100): list_of_objs = self.arange_objs_according_to_IDs(num_analysis_frames_from_present=100) area = np.empty((0, 0)) displacement = np.empty((0, 0)) distance = np.empty((0, 0)) for curr_obj in list_of_objs: (area1, displacement1, distance1) = calculate_trace_parameters(curr_obj) area = np.append(area, area1) displacement = np.append(displacement, displacement1) distance = np.append(distance, distance1) return (area, displacement, distance)
def track_analysis(self, num_analysis_frames_from_present=100): list_of_objs = self.arange_objs_according_to_IDs(num_analysis_frames_from_present=100) area = np.empty((0, 0)) displacement = np.empty((0, 0)) distance = np.empty((0, 0)) for curr_obj in list_of_objs: (area1, displacement1, distance1) = calculate_trace_parameters(curr_obj) area = np.append(area, area1) displacement = np.append(displacement, displacement1) distance = np.append(distance, distance1) return (area, displacement, distance)<|docstring|>generates plot of neighbours vs velocity histogram analysis<|endoftext|>
034c4bbc500179cd6546f63c066b1a3a48af0b3a4ce29efc56f76de616f4dc03
@staticmethod def check_connectivity(ping_count=1, test_ip_address=None): '\n Perform a health check.\n\n :return: percentage of connectivity\n ' network_access = 0 if (test_ip_address is None): try: test_ip_address = vpn.vpn_dns_servers[0] except IndexError: return network_access ping_command = f"ping -c {ping_count} -i 1 -W 1 {test_ip_address} | grep -oE '\d+\.\d%'" try: network_access = (100 - int(mac_utils.run_command(ping_command)[:(- 3)])) if (network_access < 100): logger.debug(f'Send ping to {test_ip_address} with result {network_access}%') except: network_access = 0 return network_access
Perform a health check. :return: percentage of connectivity
vpn.py
check_connectivity
maikelvallinga/vpn_split_tunnel
0
python
@staticmethod def check_connectivity(ping_count=1, test_ip_address=None): '\n Perform a health check.\n\n :return: percentage of connectivity\n ' network_access = 0 if (test_ip_address is None): try: test_ip_address = vpn.vpn_dns_servers[0] except IndexError: return network_access ping_command = f"ping -c {ping_count} -i 1 -W 1 {test_ip_address} | grep -oE '\d+\.\d%'" try: network_access = (100 - int(mac_utils.run_command(ping_command)[:(- 3)])) if (network_access < 100): logger.debug(f'Send ping to {test_ip_address} with result {network_access}%') except: network_access = 0 return network_access
@staticmethod def check_connectivity(ping_count=1, test_ip_address=None): '\n Perform a health check.\n\n :return: percentage of connectivity\n ' network_access = 0 if (test_ip_address is None): try: test_ip_address = vpn.vpn_dns_servers[0] except IndexError: return network_access ping_command = f"ping -c {ping_count} -i 1 -W 1 {test_ip_address} | grep -oE '\d+\.\d%'" try: network_access = (100 - int(mac_utils.run_command(ping_command)[:(- 3)])) if (network_access < 100): logger.debug(f'Send ping to {test_ip_address} with result {network_access}%') except: network_access = 0 return network_access<|docstring|>Perform a health check. :return: percentage of connectivity<|endoftext|>
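A standalone sketch of the loss parse in check_connectivity() above: the grep pattern yields a string such as '23.0%', and slicing off the last three characters leaves the integer loss.
packet_loss = '23.0%'  # what grep -oE '\d+\.\d%' would return
connectivity = 100 - int(packet_loss[:-3])
print(connectivity)  # 77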
028031ff7ee1dff334a5c3939fa2688525d95f6bd659a0af24476f50d78d1e58
def check_internet_access(self): '\n Check for internet access\n :return:\n ' logger.debug('Checking for internet access...') internet_access = 0 timeout = (time() + self.CONNECT_TIMEOUT) while ((internet_access < 100) and (time() < timeout)): internet_access = self.check_connectivity(ping_count=2, test_ip_address='8.8.8.8') if (internet_access == 0): logger.debug('Waiting for internet connectivity...') mac_utils.set_default_route(self.active_interface, ip_address=self.active_interface_gateway) if (internet_access == 0): logger.error('Timeout while waiting for internet connectivity...') return False return True
Check for internet access :return:
vpn.py
check_internet_access
maikelvallinga/vpn_split_tunnel
0
python
def check_internet_access(self): '\n Check for internet access\n :return:\n ' logger.debug('Checking for internet access...') internet_access = 0 timeout = (time() + self.CONNECT_TIMEOUT) while ((internet_access < 100) and (time() < timeout)): internet_access = self.check_connectivity(ping_count=2, test_ip_address='8.8.8.8') if (internet_access == 0): logger.debug('Waiting for internet connectivity...') mac_utils.set_default_route(self.active_interface, ip_address=self.active_interface_gateway) if (internet_access == 0): logger.error('Timeout while waiting for internet connectivity...') return False return True
def check_internet_access(self): '\n Check for internet access\n :return:\n ' logger.debug('Checking for internet access...') internet_access = 0 timeout = (time() + self.CONNECT_TIMEOUT) while ((internet_access < 100) and (time() < timeout)): internet_access = self.check_connectivity(ping_count=2, test_ip_address='8.8.8.8') if (internet_access == 0): logger.debug('Waiting for internet connectivity...') mac_utils.set_default_route(self.active_interface, ip_address=self.active_interface_gateway) if (internet_access == 0): logger.error('Timeout while waiting for internet connectivity...') return False return True<|docstring|>Check for internet access :return:<|endoftext|>
8a434bc2ac88f7268b9ad9968cae5d2bce5df6ec919d966a81fe56688b69694c
def get_progress_bar(self): '\n Create the progress bar for in the logging.\n\n :return: Progress bar\n :rtype: str\n ' progress_bar_width = self.CONNECTION_BAR_WIDTH percentage_down = (round(((self.failures / vpn.MAX_FAILURES) * 100)) if (self.failures != 0) else 0) percentage_up = round((100 - percentage_down)) connection = ' {percentage_up}% '.format(percentage_up=percentage_up) up = (round((percentage_up * (progress_bar_width / 100))) * '#') down = (round((percentage_down * (progress_bar_width / 100))) * ' ') progress = '{up}{down}'.format(up=up, down=down) if (len(connection) == 6): progress = ((progress[:(round((len(progress) / 2)) - 3)] + connection) + progress[(round((len(progress) / 2)) + 3):]) else: progress = ((progress[:(round((len(progress) / 2)) - 3)] + connection) + progress[(round((len(progress) / 2)) + 2):]) bar = 'Failed [{progress}] Success '.format(progress=progress) return bar
Create the progress bar for the logging. :return: Progress bar :rtype: str
vpn.py
get_progress_bar
maikelvallinga/vpn_split_tunnel
0
python
def get_progress_bar(self): '\n Create the progress bar for the logging.\n\n :return: Progress bar\n :rtype: str\n ' progress_bar_width = self.CONNECTION_BAR_WIDTH percentage_down = (round(((self.failures / vpn.MAX_FAILURES) * 100)) if (self.failures != 0) else 0) percentage_up = round((100 - percentage_down)) connection = ' {percentage_up}% '.format(percentage_up=percentage_up) up = (round((percentage_up * (progress_bar_width / 100))) * '#') down = (round((percentage_down * (progress_bar_width / 100))) * ' ') progress = '{up}{down}'.format(up=up, down=down) if (len(connection) == 6): progress = ((progress[:(round((len(progress) / 2)) - 3)] + connection) + progress[(round((len(progress) / 2)) + 3):]) else: progress = ((progress[:(round((len(progress) / 2)) - 3)] + connection) + progress[(round((len(progress) / 2)) + 2):]) bar = 'Failed [{progress}] Success '.format(progress=progress) return bar
def get_progress_bar(self): '\n Create the progress bar for the logging.\n\n :return: Progress bar\n :rtype: str\n ' progress_bar_width = self.CONNECTION_BAR_WIDTH percentage_down = (round(((self.failures / vpn.MAX_FAILURES) * 100)) if (self.failures != 0) else 0) percentage_up = round((100 - percentage_down)) connection = ' {percentage_up}% '.format(percentage_up=percentage_up) up = (round((percentage_up * (progress_bar_width / 100))) * '#') down = (round((percentage_down * (progress_bar_width / 100))) * ' ') progress = '{up}{down}'.format(up=up, down=down) if (len(connection) == 6): progress = ((progress[:(round((len(progress) / 2)) - 3)] + connection) + progress[(round((len(progress) / 2)) + 3):]) else: progress = ((progress[:(round((len(progress) / 2)) - 3)] + connection) + progress[(round((len(progress) / 2)) + 2):]) bar = 'Failed [{progress}] Success '.format(progress=progress) return bar<|docstring|>Create the progress bar for the logging. :return: Progress bar :rtype: str<|endoftext|>
2f3657c199ab5fcf5b08adfd2574574e64a0048c8b8829a7d256965341a15c33
def start_tunnel(self): '\n Start the actual tunnel.\n\n :return:\n ' logger.info(f'Ready to start VPN: {self.VPN_NAME}') if (self.EXCLUDED_SSID.lower() in mac_utils.connected_ssid().lower()): logger.error('You are connected with an excluded ssid. Switch ssid manually...') sys.exit(1) if (not self.check_internet_access()): self.active = False return start_time = datetime.now() logger.debug(f'Start time: {start_time}') vpn_command = f'{self.vpn_util} start "{self.VPN_NAME}"' logger.debug(f'VPN command: "{vpn_command}"') result = mac_utils.run_command(vpn_command, as_user=True) if (('has been started' not in result) and ('Connected' not in result)): logger.info('Unable to start VPN, check your network connection.') logger.info(f'vpnutil responded: {result}') self.active = False return logger.info('Starting VPN Tunnel') timeout = (time() + self.CONNECT_TIMEOUT) while True: self.tunnel_interface = mac_utils.run_command(f'{mac_utils.IFCONFIG} | grep ipsec').split(':')[0] interface_details = mac_utils.run_command(f'{mac_utils.IFCONFIG} {self.tunnel_interface}') logger.debug('Checking if vpn interface is up...') if (self.tunnel_interface and ('inet' in interface_details)): break if (time() > timeout): logger.info('VPN Interface not coming active ...') sleep(5) self.active = False return sleep(2) logger.info('Successfully connected to VPN...') if self.MACOS_NOTIFICATION_ENABLED: pync.notify('Successfully connected to VPN...', title='VPN') logger.debug(f'Found tunnel interface {self.tunnel_interface}') logger.debug(f'Setting default route towards: {self.active_interface}') mac_utils.set_default_route(self.active_interface, ip_address=self.active_interface_gateway) for network in self.NETWORKS_TO_TUNNEL: mac_utils.add_route(network, interface=self.tunnel_interface) for address in self.ADDRESS_TO_TUNNEL: mac_utils.add_route(address, interface=self.tunnel_interface) self.vpn_dns_servers = mac_utils.current_nameservers logger.info('Add routes for DNS servers') for dns in self.vpn_dns_servers: mac_utils.add_route(dns, interface=self.tunnel_interface) logger.debug(mac_utils.parsed_routing_table()) self.active = True if self.MOUNT_FOLDERS: self.mount_folders()
Start the actual tunnel. :return:
vpn.py
start_tunnel
maikelvallinga/vpn_split_tunnel
0
python
def start_tunnel(self): '\n Start the actual tunnel.\n\n :return:\n ' logger.info(f'Ready to start VPN: {self.VPN_NAME}') if (self.EXCLUDED_SSID.lower() in mac_utils.connected_ssid().lower()): logger.error('You are connected with an excluded ssid. Switch ssid manually...') sys.exit(1) if (not self.check_internet_access()): self.active = False return start_time = datetime.now() logger.debug(f'Start time: {start_time}') vpn_command = f'{self.vpn_util} start "{self.VPN_NAME}"' logger.debug(f'VPN command: "{vpn_command}"') result = mac_utils.run_command(vpn_command, as_user=True) if (('has been started' not in result) and ('Connected' not in result)): logger.info('Unable to start VPN, check your network connection.') logger.info(f'vpnutil responded: {result}') self.active = False return logger.info('Starting VPN Tunnel') timeout = (time() + self.CONNECT_TIMEOUT) while True: self.tunnel_interface = mac_utils.run_command(f'{mac_utils.IFCONFIG} | grep ipsec').split(':')[0] interface_details = mac_utils.run_command(f'{mac_utils.IFCONFIG} {self.tunnel_interface}') logger.debug('Checking if vpn interface is up...') if (self.tunnel_interface and ('inet' in interface_details)): break if (time() > timeout): logger.info('VPN Interface not coming active ...') sleep(5) self.active = False return sleep(2) logger.info('Successfully connected to VPN...') if self.MACOS_NOTIFICATION_ENABLED: pync.notify('Successfully connected to VPN...', title='VPN') logger.debug(f'Found tunnel interface {self.tunnel_interface}') logger.debug(f'Setting default route towards: {self.active_interface}') mac_utils.set_default_route(self.active_interface, ip_address=self.active_interface_gateway) for network in self.NETWORKS_TO_TUNNEL: mac_utils.add_route(network, interface=self.tunnel_interface) for address in self.ADDRESS_TO_TUNNEL: mac_utils.add_route(address, interface=self.tunnel_interface) self.vpn_dns_servers = mac_utils.current_nameservers logger.info('Add routes for DNS servers') for dns in self.vpn_dns_servers: mac_utils.add_route(dns, interface=self.tunnel_interface) logger.debug(mac_utils.parsed_routing_table()) self.active = True if self.MOUNT_FOLDERS: self.mount_folders()
def start_tunnel(self): '\n Start the actual tunnel.\n\n :return:\n ' logger.info(f'Ready to start VPN: {self.VPN_NAME}') if (self.EXCLUDED_SSID.lower() in mac_utils.connected_ssid().lower()): logger.error('You are connected with an excluded ssid. Switch ssid manually...') sys.exit(1) if (not self.check_internet_access()): self.active = False return start_time = datetime.now() logger.debug(f'Start time: {start_time}') vpn_command = f'{self.vpn_util} start "{self.VPN_NAME}"' logger.debug(f'VPN command: "{vpn_command}"') result = mac_utils.run_command(vpn_command, as_user=True) if (('has been started' not in result) and ('Connected' not in result)): logger.info('Unable to start VPN, check your network connection.') logger.info(f'vpnutil responded: {result}') self.active = False return logger.info('Starting VPN Tunnel') timeout = (time() + self.CONNECT_TIMEOUT) while True: self.tunnel_interface = mac_utils.run_command(f'{mac_utils.IFCONFIG} | grep ipsec').split(':')[0] interface_details = mac_utils.run_command(f'{mac_utils.IFCONFIG} {self.tunnel_interface}') logger.debug('Checking if vpn interface is up...') if (self.tunnel_interface and ('inet' in interface_details)): break if (time() > timeout): logger.info('VPN Interface not coming active ...') sleep(5) self.active = False return sleep(2) logger.info('Successfully connected to VPN...') if self.MACOS_NOTIFICATION_ENABLED: pync.notify('Successfully connected to VPN...', title='VPN') logger.debug(f'Found tunnel interface {self.tunnel_interface}') logger.debug(f'Setting default route towards: {self.active_interface}') mac_utils.set_default_route(self.active_interface, ip_address=self.active_interface_gateway) for network in self.NETWORKS_TO_TUNNEL: mac_utils.add_route(network, interface=self.tunnel_interface) for address in self.ADDRESS_TO_TUNNEL: mac_utils.add_route(address, interface=self.tunnel_interface) self.vpn_dns_servers = mac_utils.current_nameservers logger.info('Add routes for DNS servers') for dns in self.vpn_dns_servers: mac_utils.add_route(dns, interface=self.tunnel_interface) logger.debug(mac_utils.parsed_routing_table()) self.active = True if self.MOUNT_FOLDERS: self.mount_folders()<|docstring|>Start the actual tunnel. :return:<|endoftext|>
d9f8ebf484b2b8a3f738d72344c279a988e6414b5339ae1f0a161487210f92fe
def stop_tunnel(self): '\n Stop the VPN tunnel.\n\n :return:\n ' logger.info('Stopping VPN...') if (self.MOUNT_FOLDERS and (not self.lost_connectivity)): self.unmount_folders() vpn_command = f'{self.vpn_util} stop "{self.VPN_NAME}"' logger.debug(f'VPN command: "{vpn_command}"') result = mac_utils.run_command(vpn_command, as_user=True) if (('has been stopped' not in result) and ('Disconnected' not in result)): logger.info('Unable to stop VPN. Please do this manually') logger.info(f'vpnutil responded: {result}') self.active = False mac_utils.flush_routing_table(reset_interfaces=False) network_interface = mac_utils.get_active_network_interface() gateway_address = mac_utils.gateway_for_interface(network_interface) mac_utils.set_default_route(self.active_interface, ip_address=gateway_address) if self.MACOS_NOTIFICATION_ENABLED: pync.notify(f'Tunnel is stopped.')
Stop the VPN tunnel. :return:
vpn.py
stop_tunnel
maikelvallinga/vpn_split_tunnel
0
python
def stop_tunnel(self): '\n Stop the VPN tunnel.\n\n :return:\n ' logger.info('Stopping VPN...') if (self.MOUNT_FOLDERS and (not self.lost_connectivity)): self.unmount_folders() vpn_command = f'{self.vpn_util} stop "{self.VPN_NAME}"' logger.debug(f'VPN command: "{vpn_command}"') result = mac_utils.run_command(vpn_command, as_user=True) if (('has been stopped' not in result) and ('Disconnected' not in result)): logger.info('Unable to stop VPN. Please do this manually') logger.info(f'vpnutil responded: {result}') self.active = False mac_utils.flush_routing_table(reset_interfaces=False) network_interface = mac_utils.get_active_network_interface() gateway_address = mac_utils.gateway_for_interface(network_interface) mac_utils.set_default_route(self.active_interface, ip_address=gateway_address) if self.MACOS_NOTIFICATION_ENABLED: pync.notify(f'Tunnel is stopped.')
def stop_tunnel(self): '\n Stop the VPN tunnel.\n\n :return:\n ' logger.info('Stopping VPN...') if (self.MOUNT_FOLDERS and (not self.lost_connectivity)): self.unmount_folders() vpn_command = f'{self.vpn_util} stop "{self.VPN_NAME}"' logger.debug(f'VPN command: "{vpn_command}"') result = mac_utils.run_command(vpn_command, as_user=True) if (('has been stopped' not in result) and ('Disconnected' not in result)): logger.info('Unable to stop VPN. Please do this manually') logger.info(f'vpnutil responded: {result}') self.active = False mac_utils.flush_routing_table(reset_interfaces=False) network_interface = mac_utils.get_active_network_interface() gateway_address = mac_utils.gateway_for_interface(network_interface) mac_utils.set_default_route(self.active_interface, ip_address=gateway_address) if self.MACOS_NOTIFICATION_ENABLED: pync.notify(f'Tunnel is stopped.')<|docstring|>Stop the VPN tunnel. :return:<|endoftext|>
03068c74954c6a58f8813fec2dfd9f2c83a1316aa2292f6dae7d91b5f0a392ff
def quit(self, *args): '\n Quit VPN tunnel.\n ' self.stop_tunnel() sys.exit(0)
Quit VPN tunnel.
vpn.py
quit
maikelvallinga/vpn_split_tunnel
0
python
def quit(self, *args): '\n \n ' self.stop_tunnel() sys.exit(0)
def quit(self, *args): '\n \n ' self.stop_tunnel() sys.exit(0)<|docstring|>Quit VPN tunnel.<|endoftext|>
563e9251c5e8acfa00675e0706fb4ea21de023439d1b5ac35b460bb401da60d3
def forward(self, x): '\n inputs :\n x : input feature maps( B X C X W X H)\n returns :\n out : self attention value + input feature\n attention: B X N X N (N is Width*Height)\n ' (m_batchsize, C, width, height) = x.size() proj_query = self.query_conv(x).view(m_batchsize, (- 1), (width * height)).permute(0, 2, 1) proj_key = self.key_conv(x).view(m_batchsize, (- 1), (width * height)) energy = torch.bmm(proj_query, proj_key) attention = self.softmax(energy) proj_value = self.value_conv(x).view(m_batchsize, (- 1), (width * height)) out = torch.bmm(proj_value, attention.permute(0, 2, 1)) out = out.view(m_batchsize, C, width, height) out = ((self.gamma * out) + x) return out
inputs : x : input feature maps( B X C X W X H) returns : out : self attention value + input feature attention: B X N X N (N is Width*Height)
evolution/genes/self_attention.py
forward
vfcosta/coegan-trained
0
python
def forward(self, x): '\n inputs :\n x : input feature maps( B X C X W X H)\n returns :\n out : self attention value + input feature\n attention: B X N X N (N is Width*Height)\n ' (m_batchsize, C, width, height) = x.size() proj_query = self.query_conv(x).view(m_batchsize, (- 1), (width * height)).permute(0, 2, 1) proj_key = self.key_conv(x).view(m_batchsize, (- 1), (width * height)) energy = torch.bmm(proj_query, proj_key) attention = self.softmax(energy) proj_value = self.value_conv(x).view(m_batchsize, (- 1), (width * height)) out = torch.bmm(proj_value, attention.permute(0, 2, 1)) out = out.view(m_batchsize, C, width, height) out = ((self.gamma * out) + x) return out
def forward(self, x): '\n inputs :\n x : input feature maps( B X C X W X H)\n returns :\n out : self attention value + input feature\n attention: B X N X N (N is Width*Height)\n ' (m_batchsize, C, width, height) = x.size() proj_query = self.query_conv(x).view(m_batchsize, (- 1), (width * height)).permute(0, 2, 1) proj_key = self.key_conv(x).view(m_batchsize, (- 1), (width * height)) energy = torch.bmm(proj_query, proj_key) attention = self.softmax(energy) proj_value = self.value_conv(x).view(m_batchsize, (- 1), (width * height)) out = torch.bmm(proj_value, attention.permute(0, 2, 1)) out = out.view(m_batchsize, C, width, height) out = ((self.gamma * out) + x) return out<|docstring|>inputs : x : input feature maps( B X C X W X H) returns : out : self attention value + input feature attention: B X N X N (N is Width*Height)<|endoftext|>
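A standalone sketch of the attention arithmetic in forward() above: flatten W x H into N positions, form B x N x N energies with bmm, then softmax. The softmax axis (and the usual C//8 query/key reduction) are conventional SAGAN choices assumed here, since the layer's __init__ is not shown.
import torch
B, C, W, H = 2, 8, 4, 4
x = torch.randn(B, C, W, H)
proj_query = x.view(B, -1, W * H).permute(0, 2, 1)  # B x N x C
proj_key = x.view(B, -1, W * H)  # B x C x N
attention = torch.softmax(torch.bmm(proj_query, proj_key), dim=-1)  # B x N x N
out = torch.bmm(x.view(B, -1, W * H), attention.permute(0, 2, 1)).view(B, C, W, H)
print(out.shape)  # torch.Size([2, 8, 4, 4])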
59a6b44989d4bd87369995adfa6784caa5dd211e92f4ac92bccb00e665cec681
def squeeze(high, low, close, bb_length=None, bb_std=None, kc_length=None, kc_scalar=None, mom_length=None, mom_smooth=None, use_tr=None, mamode=None, offset=None, **kwargs): 'Indicator: Squeeze Momentum (SQZ)' bb_length = (int(bb_length) if (bb_length and (bb_length > 0)) else 20) bb_std = (float(bb_std) if (bb_std and (bb_std > 0)) else 2.0) kc_length = (int(kc_length) if (kc_length and (kc_length > 0)) else 20) kc_scalar = (float(kc_scalar) if (kc_scalar and (kc_scalar > 0)) else 1.5) mom_length = (int(mom_length) if (mom_length and (mom_length > 0)) else 12) mom_smooth = (int(mom_smooth) if (mom_smooth and (mom_smooth > 0)) else 6) _length = max(bb_length, kc_length, mom_length, mom_smooth) high = verify_series(high, _length) low = verify_series(low, _length) close = verify_series(close, _length) offset = get_offset(offset) if ((high is None) or (low is None) or (close is None)): return use_tr = kwargs.setdefault('tr', True) asint = kwargs.pop('asint', True) detailed = kwargs.pop('detailed', False) lazybear = kwargs.pop('lazybear', False) mamode = (mamode if isinstance(mamode, str) else 'sma') def simplify_columns(df, n=3): df.columns = df.columns.str.lower() return [c.split('_')[0][(n - 1):n] for c in df.columns] bbd = bbands(close, length=bb_length, std=bb_std, mamode=mamode) kch = kc(high, low, close, length=kc_length, scalar=kc_scalar, mamode=mamode, tr=use_tr) bbd.columns = simplify_columns(bbd) kch.columns = simplify_columns(kch) if lazybear: highest_high = high.rolling(kc_length).max() lowest_low = low.rolling(kc_length).min() avg_ = ((0.25 * (highest_high + lowest_low)) + (0.5 * kch.b)) squeeze = linreg((close - avg_), length=kc_length) else: momo = mom(close, length=mom_length) if (mamode.lower() == 'ema'): squeeze = ema(momo, length=mom_smooth) else: squeeze = sma(momo, length=mom_smooth) squeeze_on = ((bbd.l > kch.l) & (bbd.u < kch.u)) squeeze_off = ((bbd.l < kch.l) & (bbd.u > kch.u)) no_squeeze = ((~ squeeze_on) & (~ squeeze_off)) if (offset != 0): squeeze = squeeze.shift(offset) squeeze_on = squeeze_on.shift(offset) squeeze_off = squeeze_off.shift(offset) no_squeeze = no_squeeze.shift(offset) if ('fillna' in kwargs): squeeze.fillna(kwargs['fillna'], inplace=True) squeeze_on.fillna(kwargs['fillna'], inplace=True) squeeze_off.fillna(kwargs['fillna'], inplace=True) no_squeeze.fillna(kwargs['fillna'], inplace=True) if ('fill_method' in kwargs): squeeze.fillna(method=kwargs['fill_method'], inplace=True) squeeze_on.fillna(method=kwargs['fill_method'], inplace=True) squeeze_off.fillna(method=kwargs['fill_method'], inplace=True) no_squeeze.fillna(method=kwargs['fill_method'], inplace=True) _props = ('' if use_tr else 'hlr') _props += f'_{bb_length}_{bb_std}_{kc_length}_{kc_scalar}' _props += ('_LB' if lazybear else '') squeeze.name = f'SQZ{_props}' data = {squeeze.name: squeeze, f'SQZ_ON': (squeeze_on.astype(int) if asint else squeeze_on), f'SQZ_OFF': (squeeze_off.astype(int) if asint else squeeze_off), f'SQZ_NO': (no_squeeze.astype(int) if asint else no_squeeze)} df = DataFrame(data) df.name = squeeze.name df.category = squeeze.category = 'momentum' if detailed: pos_squeeze = squeeze[(squeeze >= 0)] neg_squeeze = squeeze[(squeeze < 0)] (pos_inc, pos_dec) = unsigned_differences(pos_squeeze, asint=True) (neg_inc, neg_dec) = unsigned_differences(neg_squeeze, asint=True) pos_inc *= squeeze pos_dec *= squeeze neg_dec *= squeeze neg_inc *= squeeze pos_inc.replace(0, npNaN, inplace=True) pos_dec.replace(0, npNaN, inplace=True) neg_dec.replace(0, npNaN, inplace=True) neg_inc.replace(0, npNaN, inplace=True) sqz_inc = (squeeze * increasing(squeeze)) sqz_dec = (squeeze * decreasing(squeeze)) sqz_inc.replace(0, npNaN, inplace=True) sqz_dec.replace(0, npNaN, inplace=True) if ('fillna' in kwargs): sqz_inc.fillna(kwargs['fillna'], inplace=True) sqz_dec.fillna(kwargs['fillna'], inplace=True) pos_inc.fillna(kwargs['fillna'], inplace=True) pos_dec.fillna(kwargs['fillna'], inplace=True) neg_dec.fillna(kwargs['fillna'], inplace=True) neg_inc.fillna(kwargs['fillna'], inplace=True) if ('fill_method' in kwargs): sqz_inc.fillna(method=kwargs['fill_method'], inplace=True) sqz_dec.fillna(method=kwargs['fill_method'], inplace=True) pos_inc.fillna(method=kwargs['fill_method'], inplace=True) pos_dec.fillna(method=kwargs['fill_method'], inplace=True) neg_dec.fillna(method=kwargs['fill_method'], inplace=True) neg_inc.fillna(method=kwargs['fill_method'], inplace=True) df[f'SQZ_INC'] = sqz_inc df[f'SQZ_DEC'] = sqz_dec df[f'SQZ_PINC'] = pos_inc df[f'SQZ_PDEC'] = pos_dec df[f'SQZ_NDEC'] = neg_dec df[f'SQZ_NINC'] = neg_inc return df
Indicator: Squeeze Momentum (SQZ)
pandas_ta/momentum/squeeze.py
squeeze
ryanrussell/pandas-ta
2298
python
def squeeze(high, low, close, bb_length=None, bb_std=None, kc_length=None, kc_scalar=None, mom_length=None, mom_smooth=None, use_tr=None, mamode=None, offset=None, **kwargs): bb_length = (int(bb_length) if (bb_length and (bb_length > 0)) else 20) bb_std = (float(bb_std) if (bb_std and (bb_std > 0)) else 2.0) kc_length = (int(kc_length) if (kc_length and (kc_length > 0)) else 20) kc_scalar = (float(kc_scalar) if (kc_scalar and (kc_scalar > 0)) else 1.5) mom_length = (int(mom_length) if (mom_length and (mom_length > 0)) else 12) mom_smooth = (int(mom_smooth) if (mom_smooth and (mom_smooth > 0)) else 6) _length = max(bb_length, kc_length, mom_length, mom_smooth) high = verify_series(high, _length) low = verify_series(low, _length) close = verify_series(close, _length) offset = get_offset(offset) if ((high is None) or (low is None) or (close is None)): return use_tr = kwargs.setdefault('tr', True) asint = kwargs.pop('asint', True) detailed = kwargs.pop('detailed', False) lazybear = kwargs.pop('lazybear', False) mamode = (mamode if isinstance(mamode, str) else 'sma') def simplify_columns(df, n=3): df.columns = df.columns.str.lower() return [c.split('_')[0][(n - 1):n] for c in df.columns] bbd = bbands(close, length=bb_length, std=bb_std, mamode=mamode) kch = kc(high, low, close, length=kc_length, scalar=kc_scalar, mamode=mamode, tr=use_tr) bbd.columns = simplify_columns(bbd) kch.columns = simplify_columns(kch) if lazybear: highest_high = high.rolling(kc_length).max() lowest_low = low.rolling(kc_length).min() avg_ = ((0.25 * (highest_high + lowest_low)) + (0.5 * kch.b)) squeeze = linreg((close - avg_), length=kc_length) else: momo = mom(close, length=mom_length) if (mamode.lower() == 'ema'): squeeze = ema(momo, length=mom_smooth) else: squeeze = sma(momo, length=mom_smooth) squeeze_on = ((bbd.l > kch.l) & (bbd.u < kch.u)) squeeze_off = ((bbd.l < kch.l) & (bbd.u > kch.u)) no_squeeze = ((~ squeeze_on) & (~ squeeze_off)) if (offset != 0): squeeze = squeeze.shift(offset) squeeze_on = squeeze_on.shift(offset) squeeze_off = squeeze_off.shift(offset) no_squeeze = no_squeeze.shift(offset) if ('fillna' in kwargs): squeeze.fillna(kwargs['fillna'], inplace=True) squeeze_on.fillna(kwargs['fillna'], inplace=True) squeeze_off.fillna(kwargs['fillna'], inplace=True) no_squeeze.fillna(kwargs['fillna'], inplace=True) if ('fill_method' in kwargs): squeeze.fillna(method=kwargs['fill_method'], inplace=True) squeeze_on.fillna(method=kwargs['fill_method'], inplace=True) squeeze_off.fillna(method=kwargs['fill_method'], inplace=True) no_squeeze.fillna(method=kwargs['fill_method'], inplace=True) _props = ('' if use_tr else 'hlr') _props += f'_{bb_length}_{bb_std}_{kc_length}_{kc_scalar}' _props += ('_LB' if lazybear else '') squeeze.name = f'SQZ{_props}' data = {squeeze.name: squeeze, f'SQZ_ON': (squeeze_on.astype(int) if asint else squeeze_on), f'SQZ_OFF': (squeeze_off.astype(int) if asint else squeeze_off), f'SQZ_NO': (no_squeeze.astype(int) if asint else no_squeeze)} df = DataFrame(data) df.name = squeeze.name df.category = squeeze.category = 'momentum' if detailed: pos_squeeze = squeeze[(squeeze >= 0)] neg_squeeze = squeeze[(squeeze < 0)] (pos_inc, pos_dec) = unsigned_differences(pos_squeeze, asint=True) (neg_inc, neg_dec) = unsigned_differences(neg_squeeze, asint=True) pos_inc *= squeeze pos_dec *= squeeze neg_dec *= squeeze neg_inc *= squeeze pos_inc.replace(0, npNaN, inplace=True) pos_dec.replace(0, npNaN, inplace=True) neg_dec.replace(0, npNaN, inplace=True) neg_inc.replace(0, npNaN, inplace=True) sqz_inc = (squeeze * increasing(squeeze)) sqz_dec = (squeeze * decreasing(squeeze)) sqz_inc.replace(0, npNaN, inplace=True) sqz_dec.replace(0, npNaN, inplace=True) if ('fillna' in kwargs): sqz_inc.fillna(kwargs['fillna'], inplace=True) sqz_dec.fillna(kwargs['fillna'], inplace=True) pos_inc.fillna(kwargs['fillna'], inplace=True) pos_dec.fillna(kwargs['fillna'], inplace=True) neg_dec.fillna(kwargs['fillna'], inplace=True) neg_inc.fillna(kwargs['fillna'], inplace=True) if ('fill_method' in kwargs): sqz_inc.fillna(method=kwargs['fill_method'], inplace=True) sqz_dec.fillna(method=kwargs['fill_method'], inplace=True) pos_inc.fillna(method=kwargs['fill_method'], inplace=True) pos_dec.fillna(method=kwargs['fill_method'], inplace=True) neg_dec.fillna(method=kwargs['fill_method'], inplace=True) neg_inc.fillna(method=kwargs['fill_method'], inplace=True) df[f'SQZ_INC'] = sqz_inc df[f'SQZ_DEC'] = sqz_dec df[f'SQZ_PINC'] = pos_inc df[f'SQZ_PDEC'] = pos_dec df[f'SQZ_NDEC'] = neg_dec df[f'SQZ_NINC'] = neg_inc return df
def squeeze(high, low, close, bb_length=None, bb_std=None, kc_length=None, kc_scalar=None, mom_length=None, mom_smooth=None, use_tr=None, mamode=None, offset=None, **kwargs): bb_length = (int(bb_length) if (bb_length and (bb_length > 0)) else 20) bb_std = (float(bb_std) if (bb_std and (bb_std > 0)) else 2.0) kc_length = (int(kc_length) if (kc_length and (kc_length > 0)) else 20) kc_scalar = (float(kc_scalar) if (kc_scalar and (kc_scalar > 0)) else 1.5) mom_length = (int(mom_length) if (mom_length and (mom_length > 0)) else 12) mom_smooth = (int(mom_smooth) if (mom_smooth and (mom_smooth > 0)) else 6) _length = max(bb_length, kc_length, mom_length, mom_smooth) high = verify_series(high, _length) low = verify_series(low, _length) close = verify_series(close, _length) offset = get_offset(offset) if ((high is None) or (low is None) or (close is None)): return use_tr = kwargs.setdefault('tr', True) asint = kwargs.pop('asint', True) detailed = kwargs.pop('detailed', False) lazybear = kwargs.pop('lazybear', False) mamode = (mamode if isinstance(mamode, str) else 'sma') def simplify_columns(df, n=3): df.columns = df.columns.str.lower() return [c.split('_')[0][(n - 1):n] for c in df.columns] bbd = bbands(close, length=bb_length, std=bb_std, mamode=mamode) kch = kc(high, low, close, length=kc_length, scalar=kc_scalar, mamode=mamode, tr=use_tr) bbd.columns = simplify_columns(bbd) kch.columns = simplify_columns(kch) if lazybear: highest_high = high.rolling(kc_length).max() lowest_low = low.rolling(kc_length).min() avg_ = ((0.25 * (highest_high + lowest_low)) + (0.5 * kch.b)) squeeze = linreg((close - avg_), length=kc_length) else: momo = mom(close, length=mom_length) if (mamode.lower() == 'ema'): squeeze = ema(momo, length=mom_smooth) else: squeeze = sma(momo, length=mom_smooth) squeeze_on = ((bbd.l > kch.l) & (bbd.u < kch.u)) squeeze_off = ((bbd.l < kch.l) & (bbd.u > kch.u)) no_squeeze = ((~ squeeze_on) & (~ squeeze_off)) if (offset != 0): squeeze = squeeze.shift(offset) squeeze_on = squeeze_on.shift(offset) squeeze_off = squeeze_off.shift(offset) no_squeeze = no_squeeze.shift(offset) if ('fillna' in kwargs): squeeze.fillna(kwargs['fillna'], inplace=True) squeeze_on.fillna(kwargs['fillna'], inplace=True) squeeze_off.fillna(kwargs['fillna'], inplace=True) no_squeeze.fillna(kwargs['fillna'], inplace=True) if ('fill_method' in kwargs): squeeze.fillna(method=kwargs['fill_method'], inplace=True) squeeze_on.fillna(method=kwargs['fill_method'], inplace=True) squeeze_off.fillna(method=kwargs['fill_method'], inplace=True) no_squeeze.fillna(method=kwargs['fill_method'], inplace=True) _props = ('' if use_tr else 'hlr') _props += f'_{bb_length}_{bb_std}_{kc_length}_{kc_scalar}' _props += ('_LB' if lazybear else '') squeeze.name = f'SQZ{_props}' data = {squeeze.name: squeeze, f'SQZ_ON': (squeeze_on.astype(int) if asint else squeeze_on), f'SQZ_OFF': (squeeze_off.astype(int) if asint else squeeze_off), f'SQZ_NO': (no_squeeze.astype(int) if asint else no_squeeze)} df = DataFrame(data) df.name = squeeze.name df.category = squeeze.category = 'momentum' if detailed: pos_squeeze = squeeze[(squeeze >= 0)] neg_squeeze = squeeze[(squeeze < 0)] (pos_inc, pos_dec) = unsigned_differences(pos_squeeze, asint=True) (neg_inc, neg_dec) = unsigned_differences(neg_squeeze, asint=True) pos_inc *= squeeze pos_dec *= squeeze neg_dec *= squeeze neg_inc *= squeeze pos_inc.replace(0, npNaN, inplace=True) pos_dec.replace(0, npNaN, inplace=True) neg_dec.replace(0, npNaN, inplace=True) neg_inc.replace(0, npNaN, inplace=True) sqz_inc = (squeeze * increasing(squeeze)) sqz_dec = (squeeze * decreasing(squeeze)) sqz_inc.replace(0, npNaN, inplace=True) sqz_dec.replace(0, npNaN, inplace=True) if ('fillna' in kwargs): sqz_inc.fillna(kwargs['fillna'], inplace=True) sqz_dec.fillna(kwargs['fillna'], inplace=True) pos_inc.fillna(kwargs['fillna'], inplace=True) pos_dec.fillna(kwargs['fillna'], inplace=True) neg_dec.fillna(kwargs['fillna'], inplace=True) neg_inc.fillna(kwargs['fillna'], inplace=True) if ('fill_method' in kwargs): sqz_inc.fillna(method=kwargs['fill_method'], inplace=True) sqz_dec.fillna(method=kwargs['fill_method'], inplace=True) pos_inc.fillna(method=kwargs['fill_method'], inplace=True) pos_dec.fillna(method=kwargs['fill_method'], inplace=True) neg_dec.fillna(method=kwargs['fill_method'], inplace=True) neg_inc.fillna(method=kwargs['fill_method'], inplace=True) df[f'SQZ_INC'] = sqz_inc df[f'SQZ_DEC'] = sqz_dec df[f'SQZ_PINC'] = pos_inc df[f'SQZ_PDEC'] = pos_dec df[f'SQZ_NDEC'] = neg_dec df[f'SQZ_NINC'] = neg_inc return df<|docstring|>Indicator: Squeeze Momentum (SQZ)<|endoftext|>
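A hedged usage sketch of squeeze() above on synthetic OHLC series, calling it with the signature shown in the record; the top-level pandas_ta export is assumed from the module path.
import numpy as np
import pandas as pd
import pandas_ta as ta
close = pd.Series(100 + np.cumsum(np.random.randn(250)))
high, low = close + 1.0, close - 1.0
out = ta.squeeze(high, low, close, lazybear=True)
print(out.columns.tolist())  # ['SQZ_20_2.0_20_1.5_LB', 'SQZ_ON', 'SQZ_OFF', 'SQZ_NO']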
ab73520afa7e00f70becf0ae70435676e4b24c2e5e297543c56b13eef53c296a
def simulation_activation(model, parcel_df, aerosols_panel): " Given the DataFrame output from a parcel model simulation, compute\n activation kinetic limitation diagnostics.\n\n Parameters\n ----------\n model : ParcelModel\n The ParcelModel\n parcel_df : DataFrame used to generate the results to be analyzed\n The DataFrame containing the parcel's thermodynamic trajectory\n aerosols_panel : Panel\n A Panel collection of DataFrames containing the aerosol size evolution\n\n Returns\n -------\n act_stats : DataFrame\n A DataFrame containing the activation statistics\n\n " initial_row = parcel_df.iloc[0] (Smax_i, T_i) = (initial_row['S'], initial_row['T']) acts = {'eq': [], 'kn': [], 'alpha': [], 'phi': []} initial_aerosols = model.aerosols N_all_modes = np.sum([aer.total_N for aer in initial_aerosols]) N_fracs = {aer.species: (aer.total_N / N_all_modes) for aer in initial_aerosols} for i in range(len(parcel_df)): row_par = parcel_df.iloc[i] rows_aer = {key: aerosols_panel[key].iloc[i] for key in aerosols_panel} T_i = row_par['T'] if (row_par['S'] > Smax_i): Smax_i = row_par['S'] (eq_tot, kn_tot, alpha_tot, phi_tot) = (0.0, 0.0, 0.0, 0.0) for aerosol in initial_aerosols: N_frac = N_fracs[aerosol.species] rs = rows_aer[aerosol.species] (eq, kn, alpha, phi) = binned_activation(Smax_i, T_i, rs, aerosol) eq_tot += (eq * N_frac) kn_tot += (kn * N_frac) alpha_tot += (alpha * N_frac) phi_tot += (phi * N_frac) acts['kn'].append(kn_tot) acts['eq'].append(eq_tot) acts['alpha'].append(alpha_tot) acts['phi'].append(phi_tot) acts_total = pd.DataFrame(acts, index=parcel_df.index) return acts_total
Given the DataFrame output from a parcel model simulation, compute activation kinetic limitation diagnostics. Parameters ---------- model : ParcelModel The ParcelModel parcel_df : DataFrame used to generate the results to be analyzed The DataFrame containing the parcel's thermodynamic trajectory aerosols_panel : Panel A Panel collection of DataFrames containing the aerosol size evolution Returns ------- act_stats : DataFrame A DataFrame containing the activation statistics
pyrcel/postprocess.py
simulation_activation
Gorkowski/pyrcel
14
python
def simulation_activation(model, parcel_df, aerosols_panel): " Given the DataFrame output from a parcel model simulation, compute\n activation kinetic limitation diagnostics.\n\n Parameters\n ----------\n model : ParcelModel\n The ParcelModel\n parcel_df : DataFrame used to generate the results to be analyzed\n The DataFrame containing the parcel's thermodynamic trajectory\n aerosols_panel : Panel\n A Panel collection of DataFrames containing the aerosol size evolution\n\n Returns\n -------\n act_stats : DataFrame\n A DataFrame containing the activation statistics\n\n " initial_row = parcel_df.iloc[0] (Smax_i, T_i) = (initial_row['S'], initial_row['T']) acts = {'eq': [], 'kn': [], 'alpha': [], 'phi': []} initial_aerosols = model.aerosols N_all_modes = np.sum([aer.total_N for aer in initial_aerosols]) N_fracs = {aer.species: (aer.total_N / N_all_modes) for aer in initial_aerosols} for i in range(len(parcel_df)): row_par = parcel_df.iloc[i] rows_aer = {key: aerosols_panel[key].iloc[i] for key in aerosols_panel} T_i = row_par['T'] if (row_par['S'] > Smax_i): Smax_i = row_par['S'] (eq_tot, kn_tot, alpha_tot, phi_tot) = (0.0, 0.0, 0.0, 0.0) for aerosol in initial_aerosols: N_frac = N_fracs[aerosol.species] rs = rows_aer[aerosol.species] (eq, kn, alpha, phi) = binned_activation(Smax_i, T_i, rs, aerosol) eq_tot += (eq * N_frac) kn_tot += (kn * N_frac) alpha_tot += (alpha * N_frac) phi_tot += (phi * N_frac) acts['kn'].append(kn_tot) acts['eq'].append(eq_tot) acts['alpha'].append(alpha_tot) acts['phi'].append(phi_tot) acts_total = pd.DataFrame(acts, index=parcel_df.index) return acts_total
def simulation_activation(model, parcel_df, aerosols_panel): " Given the DataFrame output from a parcel model simulation, compute\n activation kinetic limitation diagnostics.\n\n Parameters\n ----------\n model : ParcelModel\n The ParcelModel\n parcel_df : DataFrame used to generate the results to be analyzed\n The DataFrame containing the parcel's thermodynamic trajectory\n aerosols_panel : Panel\n A Panel collection of DataFrames containing the aerosol size evolution\n\n Returns\n -------\n act_stats : DataFrame\n A DataFrame containing the activation statistics\n\n " initial_row = parcel_df.iloc[0] (Smax_i, T_i) = (initial_row['S'], initial_row['T']) acts = {'eq': [], 'kn': [], 'alpha': [], 'phi': []} initial_aerosols = model.aerosols N_all_modes = np.sum([aer.total_N for aer in initial_aerosols]) N_fracs = {aer.species: (aer.total_N / N_all_modes) for aer in initial_aerosols} for i in range(len(parcel_df)): row_par = parcel_df.iloc[i] rows_aer = {key: aerosols_panel[key].iloc[i] for key in aerosols_panel} T_i = row_par['T'] if (row_par['S'] > Smax_i): Smax_i = row_par['S'] (eq_tot, kn_tot, alpha_tot, phi_tot) = (0.0, 0.0, 0.0, 0.0) for aerosol in initial_aerosols: N_frac = N_fracs[aerosol.species] rs = rows_aer[aerosol.species] (eq, kn, alpha, phi) = binned_activation(Smax_i, T_i, rs, aerosol) eq_tot += (eq * N_frac) kn_tot += (kn * N_frac) alpha_tot += (alpha * N_frac) phi_tot += (phi * N_frac) acts['kn'].append(kn_tot) acts['eq'].append(eq_tot) acts['alpha'].append(alpha_tot) acts['phi'].append(phi_tot) acts_total = pd.DataFrame(acts, index=parcel_df.index) return acts_total<|docstring|>Given the DataFrame output from a parcel model simulation, compute activation kinetic limitation diagnostics. Parameters ---------- model : ParcelModel The ParcelModel parcel_df : DataFrame used to generate the results to be analyzed The DataFrame containing the parcel's thermodynamic trajectory aerosols_panel : Panel A Panel collection of DataFrames containing the aerosol size evolution Returns ------- act_stats : DataFrame A DataFrame containing the activation statistics<|endoftext|>
2121b4cb88f30d8473337af474efb84207217dbda1644de825328b19ebc3efe1
def get_dataset(self, extended=False): '\n Returns a pandas dataframe containing domestic transactions from the input-output table \n ' domestic_transaction_coefficients = [] industry_count = self.industry_header.count('Industries') print(industry_count) for i in range(0, industry_count): domestic_transaction_coefficients.append(self.file.readline().strip().split('\t')[4:(4 + industry_count)]) numpy_data = np.array(domestic_transaction_coefficients) df = pd.DataFrame(data=numpy_data, index=self.industries[0:industry_count]) df.columns = self.industries[0:industry_count] if extended: df.loc[:, 'year'] = self.year df.loc[:, 'country'] = self.country self.df = df self.extended = extended return df
Returns a pandas dataframe containing domestic transactions from the input-output table
src/DomesticTransactions.py
get_dataset
elastacloud/input-output-tables
0
python
def get_dataset(self, extended=False): '\n \n ' domestic_transaction_coefficients = [] industry_count = self.industry_header.count('Industries') print(industry_count) for i in range(0, industry_count): domestic_transaction_coefficients.append(self.file.readline().strip().split('\t')[4:(4 + industry_count)]) numpy_data = np.array(domestic_transaction_coefficients) df = pd.DataFrame(data=numpy_data, index=self.industries[0:industry_count]) df.columns = self.industries[0:industry_count] if extended: df.loc[:, 'year'] = self.year df.loc[:, 'country'] = self.country self.df = df self.extended = extended return df
def get_dataset(self, extended=False): '\n \n ' domestic_transaction_coefficients = [] industry_count = self.industry_header.count('Industries') print(industry_count) for i in range(0, industry_count): domestic_transaction_coefficients.append(self.file.readline().strip().split('\t')[4:(4 + industry_count)]) numpy_data = np.array(domestic_transaction_coefficients) df = pd.DataFrame(data=numpy_data, index=self.industries[0:industry_count]) df.columns = self.industries[0:industry_count] if extended: df.loc[:, 'year'] = self.year df.loc[:, 'country'] = self.country self.df = df self.extended = extended return df<|docstring|>Returns a pandas dataframe containing domestic transactions from the input-output table<|endoftext|>
233f12a6177cd4517c807e163a1bc67f67b19960793bf11db8ad09c73fd55cf9
def plus_one(self, digits): '\n :param digits: List[int]\n :return: List[int]\n ' num = 0 for i in range(len(digits)): num += (digits[i] * pow(10, ((len(digits) - i) - 1))) return [int(i) for i in str((num + 1))]
:param digits: List[int] :return: List[int]
Basic Data Structures/array/leet_066_PlusOne.py
plus_one
rush2catch/algorithms-leetcode
0
python
def plus_one(self, digits): '\n :param digits: List[int]\n :return: List[int]\n ' num = 0 for i in range(len(digits)): num += (digits[i] * pow(10, ((len(digits) - i) - 1))) return [int(i) for i in str((num + 1))]
def plus_one(self, digits): '\n :param digits: List[int]\n :return: List[int]\n ' num = 0 for i in range(len(digits)): num += (digits[i] * pow(10, ((len(digits) - i) - 1))) return [int(i) for i in str((num + 1))]<|docstring|>:param digits: List[int] :return: List[int]<|endoftext|>
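A standalone check of the digit arithmetic in plus_one() above (the self parameter is dropped for the sketch): rebuild the integer, add one, split back into digits.
digits = [1, 2, 9]
num = sum(d * 10 ** (len(digits) - i - 1) for i, d in enumerate(digits))
print([int(c) for c in str(num + 1)])  # [1, 3, 0]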
85a2966efbc2593280a8f95d7588bdbbd11dd192d9aefc22acdad74974e45f70
def update_bed_availability_data(**kwargs): '\n The actual python callable that Airflow schedules.\n ' filename = kwargs.get('filename') workbook = kwargs.get('workbook') get_data(filename, workbook) arcconnection = BaseHook.get_connection('arcgis') arcuser = arcconnection.login arcpassword = arcconnection.password arcfeatureid = kwargs.get('arcfeatureid') update_arcgis(arcuser, arcpassword, arcfeatureid, filename)
The actual python callable that Airflow schedules.
dags/public-health/covid19/sync-bed-availability-data.py
update_bed_availability_data
snassef/aqueduct
0
python
def update_bed_availability_data(**kwargs): '\n \n ' filename = kwargs.get('filename') workbook = kwargs.get('workbook') get_data(filename, workbook) arcconnection = BaseHook.get_connection('arcgis') arcuser = arcconnection.login arcpassword = arcconnection.password arcfeatureid = kwargs.get('arcfeatureid') update_arcgis(arcuser, arcpassword, arcfeatureid, filename)
def update_bed_availability_data(**kwargs): '\n \n ' filename = kwargs.get('filename') workbook = kwargs.get('workbook') get_data(filename, workbook) arcconnection = BaseHook.get_connection('arcgis') arcuser = arcconnection.login arcpassword = arcconnection.password arcfeatureid = kwargs.get('arcfeatureid') update_arcgis(arcuser, arcpassword, arcfeatureid, filename)<|docstring|>The actual python callable that Airflow schedules.<|endoftext|>
46764a1e78046301d4550f438492bcd26e3aee7588914607930120645bf07671
def __init__(self, conf, storage_protocol, db): 'Initialize instance variables.' conf.append_config_values(COMMON_VOLUME_OPTS) conf.append_config_values(REST_VOLUME_OPTS) conf.append_config_values(FC_VOLUME_OPTS) super(VStorageRESTFC, self).__init__(conf, storage_protocol, db) self.conf = update_conf(self.conf) self.conf.hitachi_zoning_request = self.conf.nec_v_zoning_request
Initialize instance variables.
cinder/volume/drivers/nec/v/nec_v_rest.py
__init__
arunvinodqmco/cinder
0
python
def __init__(self, conf, storage_protocol, db): conf.append_config_values(COMMON_VOLUME_OPTS) conf.append_config_values(REST_VOLUME_OPTS) conf.append_config_values(FC_VOLUME_OPTS) super(VStorageRESTFC, self).__init__(conf, storage_protocol, db) self.conf = update_conf(self.conf) self.conf.hitachi_zoning_request = self.conf.nec_v_zoning_request
def __init__(self, conf, storage_protocol, db): conf.append_config_values(COMMON_VOLUME_OPTS) conf.append_config_values(REST_VOLUME_OPTS) conf.append_config_values(FC_VOLUME_OPTS) super(VStorageRESTFC, self).__init__(conf, storage_protocol, db) self.conf = update_conf(self.conf) self.conf.hitachi_zoning_request = self.conf.nec_v_zoning_request<|docstring|>Initialize instance variables.<|endoftext|>
ed444c332ccf70c95011a90144d9b6b3fffb46b418c0ac06f983e1dc7a804943
def __init__(self, conf, storage_protocol, db): 'Initialize instance variables.' conf.append_config_values(COMMON_VOLUME_OPTS) conf.append_config_values(REST_VOLUME_OPTS) super(VStorageRESTISCSI, self).__init__(conf, storage_protocol, db) self.conf = update_conf(self.conf)
Initialize instance variables.
cinder/volume/drivers/nec/v/nec_v_rest.py
__init__
arunvinodqmco/cinder
0
python
def __init__(self, conf, storage_protocol, db): conf.append_config_values(COMMON_VOLUME_OPTS) conf.append_config_values(REST_VOLUME_OPTS) super(VStorageRESTISCSI, self).__init__(conf, storage_protocol, db) self.conf = update_conf(self.conf)
def __init__(self, conf, storage_protocol, db): conf.append_config_values(COMMON_VOLUME_OPTS) conf.append_config_values(REST_VOLUME_OPTS) super(VStorageRESTISCSI, self).__init__(conf, storage_protocol, db) self.conf = update_conf(self.conf)<|docstring|>Initialize instance variables.<|endoftext|>
1ed44bdf5541c950559ee86946147eae708c0941170ca31cdb9a6f1d6faac405
def discover_subnets(region, account_id, account_name, credentials): ' get subnets in account/region, returns nx.Graph object of subnets nodes' G = nx.Graph() vpcids = [] vpcs = aws_describe_vpcs(region, credentials, account_id) for vpc in vpcs: if (vpc['OwnerId'] != account_id): continue if vpc['IsDefault']: continue vpcids.append(vpc['VpcId']) for subnet in aws_describe_subnets(region, account_id, credentials): if (subnet['VpcId'] not in vpcids): continue subnet_name = subnet['SubnetId'] if ('Tags' in subnet): for tag in subnet['Tags']: if (tag['Key'] == 'Name'): subnet_name = tag['Value'] break if (subnet_name == f"{os.environ['Prefix']}carve-imagebuilder-public-subnet"): continue G.add_node(subnet['SubnetId'], Name=subnet_name, Account=account_id, AccountName=account_name, Region=region, AvailabilityZone=subnet['AvailabilityZone'], AvailabilityZoneId=subnet['AvailabilityZoneId'], CidrBlock=vpc['CidrBlock'], VpcId=subnet['VpcId'], Type='managed') return G
get subnets in account/region, returns nx.Graph object of subnets nodes
src/sf_network_discovery_account.py
discover_subnets
SPSCommerce/carve
1
python
def discover_subnets(region, account_id, account_name, credentials): ' ' G = nx.Graph() vpcids = [] vpcs = aws_describe_vpcs(region, credentials, account_id) for vpc in vpcs: if (vpc['OwnerId'] != account_id): continue if vpc['IsDefault']: continue vpcids.append(vpc['VpcId']) for subnet in aws_describe_subnets(region, account_id, credentials): if (subnet['VpcId'] not in vpcids): continue subnet_name = subnet['SubnetId'] if ('Tags' in subnet): for tag in subnet['Tags']: if (tag['Key'] == 'Name'): subnet_name = tag['Value'] break if (subnet_name == f"{os.environ['Prefix']}carve-imagebuilder-public-subnet"): continue G.add_node(subnet['SubnetId'], Name=subnet_name, Account=account_id, AccountName=account_name, Region=region, AvailabilityZone=subnet['AvailabilityZone'], AvailabilityZoneId=subnet['AvailabilityZoneId'], CidrBlock=vpc['CidrBlock'], VpcId=subnet['VpcId'], Type='managed') return G
def discover_subnets(region, account_id, account_name, credentials): ' ' G = nx.Graph() vpcids = [] vpcs = aws_describe_vpcs(region, credentials, account_id) for vpc in vpcs: if (vpc['OwnerId'] != account_id): continue if vpc['IsDefault']: continue vpcids.append(vpc['VpcId']) for subnet in aws_describe_subnets(region, account_id, credentials): if (subnet['VpcId'] not in vpcids): continue subnet_name = subnet['SubnetId'] if ('Tags' in subnet): for tag in subnet['Tags']: if (tag['Key'] == 'Name'): subnet_name = tag['Value'] break if (subnet_name == f"{os.environ['Prefix']}carve-imagebuilder-public-subnet"): continue G.add_node(subnet['SubnetId'], Name=subnet_name, Account=account_id, AccountName=account_name, Region=region, AvailabilityZone=subnet['AvailabilityZone'], AvailabilityZoneId=subnet['AvailabilityZoneId'], CidrBlock=vpc['CidrBlock'], VpcId=subnet['VpcId'], Type='managed') return G<|docstring|>get subnets in account/region, returns nx.Graph object of subnets nodes<|endoftext|>
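The discover_subnets record above attaches AWS metadata to networkx nodes keyed by subnet ID. A minimal sketch of that node structure; the subnet, VPC, and account identifiers below are made up for illustration, not real AWS data:

import networkx as nx

G = nx.Graph()
G.add_node(
    'subnet-0abc123',               # hypothetical subnet ID
    Name='app-subnet-a',
    Account='123456789012',
    AccountName='example-account',
    Region='us-east-1',
    AvailabilityZone='us-east-1a',
    AvailabilityZoneId='use1-az1',
    CidrBlock='10.0.0.0/16',
    VpcId='vpc-0def456',
    Type='managed',
)

# each node carries its metadata as an attribute dict
for subnet_id, attrs in G.nodes.data():
    print(subnet_id, attrs['Region'], attrs['VpcId'])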
9f9afee54407efc6ace55f636d777a82cda1edb9f91906b4f9ddeb355b0af5be
def lambda_handler(event, context): "\n this lambda discovers all subnets in the regions and accounts defined in the event\n the results are uploaded to the carve managed S3 bucket in the discovery directory\n\n event = {'regions': ['us-east-1', ...], 'account': {'account_id': '123456789012', 'account_name': 'awsaccountname'}}\n \n " print(event) account_id = event['account']['account_id'] account_name = event['account']['account_name'] regions = event['regions'] credentials = aws_assume_role(carve_role_arn(account_id), f'carve-discovery') A = nx.Graph() A.graph['Name'] = f'subnets_{account_id}_{account_name}' for region in regions: if (not aws_active_region(region, credentials, account_id)): continue R = discover_subnets(region, account_id, account_name, credentials) A.add_nodes_from(R.nodes.data()) if (len(A.nodes) > 0): save_graph(A, f"/tmp/{A.graph['Name']}.json") aws_upload_file_s3(f"discovery/{A.graph['Name']}.json", f"/tmp/{A.graph['Name']}.json") print(f'discovered {len(A.nodes)} subnets in {account_id} {account_name}: {A.nodes.data()}')
this lambda discovers all subnets in the regions and accounts defined in the event the results are uploaded to the carve managed S3 bucket in the discovery directory event = {'regions': ['us-east-1', ...], 'account': {'account_id': '123456789012', 'account_name': 'awsaccountname'}}
src/sf_network_discovery_account.py
lambda_handler
SPSCommerce/carve
1
python
def lambda_handler(event, context): "\n this lambda discovers all subnets in the regions and accounts defined in the event\n the results are uploaded to the carve managed S3 bucket in the discovery directory\n\n event = {'regions': ['us-east-1', ...], 'account': {'account_id': '123456789012', 'account_name': 'awsaccountname'}}\n \n " print(event) account_id = event['account']['account_id'] account_name = event['account']['account_name'] regions = event['regions'] credentials = aws_assume_role(carve_role_arn(account_id), f'carve-discovery') A = nx.Graph() A.graph['Name'] = f'subnets_{account_id}_{account_name}' for region in regions: if (not aws_active_region(region, credentials, account_id)): continue R = discover_subnets(region, account_id, account_name, credentials) A.add_nodes_from(R.nodes.data()) if (len(A.nodes) > 0): save_graph(A, f"/tmp/{A.graph['Name']}.json") aws_upload_file_s3(f"discovery/{A.graph['Name']}.json", f"/tmp/{A.graph['Name']}.json") print(f'discovered {len(A.nodes)} subnets in {account_id} {account_name}: {A.nodes.data()}')
def lambda_handler(event, context): "\n this lambda discovers all subnets in the regions and accounts defined in the event\n the results are uploaded to the carve managed S3 bucket in the discovery directory\n\n event = {'regions': ['us-east-1', ...], 'account': {'account_id': '123456789012', 'account_name': 'awsaccountname'}}\n \n " print(event) account_id = event['account']['account_id'] account_name = event['account']['account_name'] regions = event['regions'] credentials = aws_assume_role(carve_role_arn(account_id), f'carve-discovery') A = nx.Graph() A.graph['Name'] = f'subnets_{account_id}_{account_name}' for region in regions: if (not aws_active_region(region, credentials, account_id)): continue R = discover_subnets(region, account_id, account_name, credentials) A.add_nodes_from(R.nodes.data()) if (len(A.nodes) > 0): save_graph(A, f"/tmp/{A.graph['Name']}.json") aws_upload_file_s3(f"discovery/{A.graph['Name']}.json", f"/tmp/{A.graph['Name']}.json") print(f'discovered {len(A.nodes)} subnets in {account_id} {account_name}: {A.nodes.data()}')<|docstring|>this lambda discovers all subnets in the regions and accounts defined in the event the results are uploaded to the carve managed S3 bucket in the discovery directory event = {'regions': ['us-east-1', ...], 'account': {'account_id': '123456789012', 'account_name': 'awsaccountname'}}<|endoftext|>
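The handler's input contract and output naming can be traced without any AWS calls. A small sketch; the account values are placeholders matching the docstring's example:

event = {
    'regions': ['us-east-1', 'us-west-2'],
    'account': {'account_id': '123456789012', 'account_name': 'awsaccountname'},
}

# the handler derives the graph name and the S3 key from these fields
graph_name = f"subnets_{event['account']['account_id']}_{event['account']['account_name']}"
s3_key = f"discovery/{graph_name}.json"
print(s3_key)  # discovery/subnets_123456789012_awsaccountname.json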
8c2bec8620c47f36c7751890056c882e6bfce386422c19e264bd707d16d6df00
def dice(im1, im2, empty_score=1.0): '\n Computes the Dice coefficient, a measure of set similarity.\n Parameters\n ----------\n im1 : array-like, bool\n Any array of arbitrary size. If not boolean, will be converted.\n im2 : array-like, bool\n Any other array of identical size. If not boolean, will be converted.\n Returns\n -------\n dice : float\n Dice coefficient as a float on range [0,1].\n Maximum similarity = 1\n No similarity = 0\n Both are empty (sum eq to zero) = empty_score\n\n Notes\n -----\n The order of inputs for `dice` is irrelevant. The result will be\n identical if `im1` and `im2` are switched.\n ' im1 = np.asarray(im1).astype(np.bool) im2 = np.asarray(im2).astype(np.bool) if (im1.shape != im2.shape): raise ValueError('Shape mismatch: im1 and im2 must have the same shape.') im_sum = (im1.sum() + im2.sum()) if (im_sum == 0): return empty_score intersection = np.logical_and(im1, im2) return ((2.0 * intersection.sum()) / im_sum)
Computes the Dice coefficient, a measure of set similarity. Parameters ---------- im1 : array-like, bool Any array of arbitrary size. If not boolean, will be converted. im2 : array-like, bool Any other array of identical size. If not boolean, will be converted. Returns ------- dice : float Dice coefficient as a float on range [0,1]. Maximum similarity = 1 No similarity = 0 Both are empty (sum eq to zero) = empty_score Notes ----- The order of inputs for `dice` is irrelevant. The result will be identical if `im1` and `im2` are switched.
2-cannab/code/utils.py
dice
remtav/SpaceNet7_Multi-Temporal_Solutions
38
python
def dice(im1, im2, empty_score=1.0): '\n Computes the Dice coefficient, a measure of set similarity.\n Parameters\n ----------\n im1 : array-like, bool\n Any array of arbitrary size. If not boolean, will be converted.\n im2 : array-like, bool\n Any other array of identical size. If not boolean, will be converted.\n Returns\n -------\n dice : float\n Dice coefficient as a float on range [0,1].\n Maximum similarity = 1\n No similarity = 0\n Both are empty (sum eq to zero) = empty_score\n\n Notes\n -----\n The order of inputs for `dice` is irrelevant. The result will be\n identical if `im1` and `im2` are switched.\n ' im1 = np.asarray(im1).astype(np.bool) im2 = np.asarray(im2).astype(np.bool) if (im1.shape != im2.shape): raise ValueError('Shape mismatch: im1 and im2 must have the same shape.') im_sum = (im1.sum() + im2.sum()) if (im_sum == 0): return empty_score intersection = np.logical_and(im1, im2) return ((2.0 * intersection.sum()) / im_sum)
def dice(im1, im2, empty_score=1.0): '\n Computes the Dice coefficient, a measure of set similarity.\n Parameters\n ----------\n im1 : array-like, bool\n Any array of arbitrary size. If not boolean, will be converted.\n im2 : array-like, bool\n Any other array of identical size. If not boolean, will be converted.\n Returns\n -------\n dice : float\n Dice coefficient as a float on range [0,1].\n Maximum similarity = 1\n No similarity = 0\n Both are empty (sum eq to zero) = empty_score\n\n Notes\n -----\n The order of inputs for `dice` is irrelevant. The result will be\n identical if `im1` and `im2` are switched.\n ' im1 = np.asarray(im1).astype(np.bool) im2 = np.asarray(im2).astype(np.bool) if (im1.shape != im2.shape): raise ValueError('Shape mismatch: im1 and im2 must have the same shape.') im_sum = (im1.sum() + im2.sum()) if (im_sum == 0): return empty_score intersection = np.logical_and(im1, im2) return ((2.0 * intersection.sum()) / im_sum)<|docstring|>Computes the Dice coefficient, a measure of set similarity. Parameters ---------- im1 : array-like, bool Any array of arbitrary size. If not boolean, will be converted. im2 : array-like, bool Any other array of identical size. If not boolean, will be converted. Returns ------- dice : float Dice coefficient as a float on range [0,1]. Maximum similarity = 1 No similarity = 0 Both are empty (sum eq to zero) = empty_score Notes ----- The order of inputs for `dice` is irrelevant. The result will be identical if `im1` and `im2` are switched.<|endoftext|>
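A worked check of the Dice formula on two small masks. Note that dice() above casts with the np.bool alias, which newer NumPy releases have removed; the sketch below computes the same quantity using the builtin bool dtype instead:

import numpy as np

im1 = np.array([[1, 1, 0], [0, 1, 0]], dtype=bool)
im2 = np.array([[1, 0, 0], [0, 1, 1]], dtype=bool)

# the intersection has 2 pixels and im1.sum() + im2.sum() == 6,
# so the coefficient is 2 * 2 / 6
score = (2.0 * np.logical_and(im1, im2).sum()) / (im1.sum() + im2.sum())
print(score)  # 0.666...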
94501142144ef49117ff96db11f0cfb83818dea69c30f880cb0b35cbb2e02924
def run_sorter_by_property(sorter_name, recording, grouping_property, working_folder, mode_if_folder_exists='raise', engine='loop', engine_kwargs={}, verbose=False, docker_image=None, **sorter_params): '\n Generic function to run a sorter on a recording after splitting by a \'grouping_property\' (e.g. \'group\').\n\n Internally, the function works as follows:\n * the recording is split based on the provided \'grouping_property\' (using the \'split_by\' function)\n * the \'run_sorters\' function is run on the split recordings\n * sorting outputs are aggregated using the \'aggregate_units\' function\n * the \'grouping_property\' is added as a property to the SortingExtractor\n\n Parameters\n ----------\n\n sorter_name: str\n The sorter name\n\n recording: BaseRecording\n The recording to be sorted\n\n grouping_property: object\n Property to split by before sorting\n\n working_folder: str\n The working directory.\n\n sorter_params: dict of dict with sorter_name as key\n This allows overwriting default params per sorter.\n\n mode_if_folder_exists: \'raise\' or \'overwrite\' or \'keep\'\n The mode when the subfolder of recording/sorter already exists.\n * \'raise\' : raise error if subfolder exists\n * \'overwrite\' : delete and force recompute\n * \'keep\' : do not compute again if subfolder exists and log is OK\n\n engine: str\n \'loop\', \'joblib\', or \'dask\'\n\n engine_kwargs: dict\n This contains kwargs specific to the launcher engine:\n * \'loop\' : no kwargs\n * \'joblib\' : {\'n_jobs\' : } number of processes\n * \'dask\' : {\'client\':} the dask client for submitting tasks\n\n verbose: bool\n default False\n\n docker_image: None or str\n If str, run the sorter inside a container (docker) using the docker package.\n\n **sorter_params: keyword args\n Spike sorter specific arguments (they can be retrieved with \'get_default_params(sorter_name_or_class)\')\n\n Returns\n -------\n\n sorting : UnitsAggregationSorting\n The aggregated SortingExtractor.\n\n Examples\n --------\n\n This example shows how to run spike sorting split by group using the \'joblib\' backend with 4 jobs for parallel\n processing.\n\n >>> sorting = si.run_sorter_by_property("tridesclous", recording, grouping_property="group",\n working_folder="sort_by_group", engine="joblib",\n engine_kwargs={"n_jobs": 4})\n\n ' assert (grouping_property in recording.get_property_keys()), f"The 'grouping_property' {grouping_property} is not a recording property!" recording_dict = recording.split_by(grouping_property) sorting_output = run_sorters([sorter_name], recording_dict, working_folder, mode_if_folder_exists=mode_if_folder_exists, engine=engine, engine_kwargs=engine_kwargs, verbose=verbose, with_output=True, docker_images={sorter_name: docker_image}, sorter_params={sorter_name: sorter_params}) grouping_property_values = np.array([]) sorting_list = [] for (output_name, sorting) in sorting_output.items(): (prop_name, sorter_name) = output_name sorting_list.append(sorting) grouping_property_values = np.concatenate((grouping_property_values, ([prop_name] * len(sorting.get_unit_ids())))) aggregate_sorting = aggregate_units(sorting_list) aggregate_sorting.set_property(key=grouping_property, values=grouping_property_values) return aggregate_sorting
Generic function to run a sorter on a recording after splitting by a 'grouping_property' (e.g. 'group'). Internally, the function works as follows: * the recording is split based on the provided 'grouping_property' (using the 'split_by' function) * the 'run_sorters' function is run on the split recordings * sorting outputs are aggregated using the 'aggregate_units' function * the 'grouping_property' is added as a property to the SortingExtractor Parameters ---------- sorter_name: str The sorter name recording: BaseRecording The recording to be sorted grouping_property: object Property to split by before sorting working_folder: str The working directory. sorter_params: dict of dict with sorter_name as key This allows overwriting default params per sorter. mode_if_folder_exists: 'raise' or 'overwrite' or 'keep' The mode when the subfolder of recording/sorter already exists. * 'raise' : raise error if subfolder exists * 'overwrite' : delete and force recompute * 'keep' : do not compute again if subfolder exists and log is OK engine: str 'loop', 'joblib', or 'dask' engine_kwargs: dict This contains kwargs specific to the launcher engine: * 'loop' : no kwargs * 'joblib' : {'n_jobs' : } number of processes * 'dask' : {'client':} the dask client for submitting tasks verbose: bool default False docker_image: None or str If str, run the sorter inside a container (docker) using the docker package. **sorter_params: keyword args Spike sorter specific arguments (they can be retrieved with 'get_default_params(sorter_name_or_class)') Returns ------- sorting : UnitsAggregationSorting The aggregated SortingExtractor. Examples -------- This example shows how to run spike sorting split by group using the 'joblib' backend with 4 jobs for parallel processing. >>> sorting = si.run_sorter_by_property("tridesclous", recording, grouping_property="group", working_folder="sort_by_group", engine="joblib", engine_kwargs={"n_jobs": 4})
spikeinterface/sorters/launcher.py
run_sorter_by_property
Dradeliomecus/spikeinterface
116
python
def run_sorter_by_property(sorter_name, recording, grouping_property, working_folder, mode_if_folder_exists='raise', engine='loop', engine_kwargs={}, verbose=False, docker_image=None, **sorter_params): '\n Generic function to run a sorter on a recording after splitting by a \'goruping_property\' (e.g. \'group\').\n\n Internally, the function works as follows:\n * the recording is split based on the provided \'grouping_property\' (using the \'split_by\' function)\n * the \'run_sorters\' function is run on the split recordings\n * sorting outputs are aggregated using the \'aggregate_units\' function\n * the \'grouping_property\' is added as a property to the SortingExtractor\n\n Parameters\n ----------\n\n sorter_name: str\n The sorter name\n\n recording: BaseRecording\n The recording to be sorted\n\n grouping_property: object\n Property to split by before sorting\n\n working_folder: str\n The working directory.\n\n sorter_params: dict of dict with sorter_name as key\n This allow to overwrite default params for sorter.\n\n mode_if_folder_exists: \'raise_if_exists\' or \'overwrite\' or \'keep\'\n The mode when the subfolder of recording/sorter already exists.\n * \'raise\' : raise error if subfolder exists\n * \'overwrite\' : delete and force recompute\n * \'keep\' : do not compute again if f=subfolder exists and log is OK\n\n engine: str\n \'loop\', \'joblib\', or \'dask\'\n\n engine_kwargs: dict\n This contains kwargs specific to the launcher engine:\n * \'loop\' : no kwargs\n * \'joblib\' : {\'n_jobs\' : } number of processes\n * \'dask\' : {\'client\':} the dask client for submiting task\n\n verbose: bool\n default True\n\n docker_image: None or str\n If str run the sorter inside a container (docker) using the docker package.\n\n **sorter_params: keyword args\n Spike sorter specific arguments (they can be retrieved with \'get_default_params(sorter_name_or_class)\'\n\n Returns\n -------\n\n sorting : UnitsAggregationSorting\n The aggregated SortingExtractor.\n\n Examples\n --------\n\n This example shows how to run spike sorting split by group using the \'joblib\' backend with 4 jobs for parallel\n processing.\n\n >>> sorting = si.run_sorter_by_property("tridesclous", recording, grouping_property="group",\n working_folder="sort_by_group", engine="joblib",\n engine_kwargs={"n_jobs": 4})\n\n ' assert (grouping_property in recording.get_property_keys()), f"The 'grouping_property' {grouping_property} is not a recording property!" recording_dict = recording.split_by(grouping_property) sorting_output = run_sorters([sorter_name], recording_dict, working_folder, mode_if_folder_exists=mode_if_folder_exists, engine=engine, engine_kwargs=engine_kwargs, verbose=verbose, with_output=True, docker_images={sorter_name: docker_image}, sorter_params={sorter_name: sorter_params}) grouping_property_values = np.array([]) sorting_list = [] for (output_name, sorting) in sorting_output.items(): (prop_name, sorter_name) = output_name sorting_list.append(sorting) grouping_property_values = np.concatenate((grouping_property_values, ([prop_name] * len(sorting.get_unit_ids())))) aggregate_sorting = aggregate_units(sorting_list) aggregate_sorting.set_property(key=grouping_property, values=grouping_property_values) return aggregate_sorting
def run_sorter_by_property(sorter_name, recording, grouping_property, working_folder, mode_if_folder_exists='raise', engine='loop', engine_kwargs={}, verbose=False, docker_image=None, **sorter_params): '\n Generic function to run a sorter on a recording after splitting by a \'grouping_property\' (e.g. \'group\').\n\n Internally, the function works as follows:\n * the recording is split based on the provided \'grouping_property\' (using the \'split_by\' function)\n * the \'run_sorters\' function is run on the split recordings\n * sorting outputs are aggregated using the \'aggregate_units\' function\n * the \'grouping_property\' is added as a property to the SortingExtractor\n\n Parameters\n ----------\n\n sorter_name: str\n The sorter name\n\n recording: BaseRecording\n The recording to be sorted\n\n grouping_property: object\n Property to split by before sorting\n\n working_folder: str\n The working directory.\n\n sorter_params: dict of dict with sorter_name as key\n This allows overwriting default params per sorter.\n\n mode_if_folder_exists: \'raise\' or \'overwrite\' or \'keep\'\n The mode when the subfolder of recording/sorter already exists.\n * \'raise\' : raise error if subfolder exists\n * \'overwrite\' : delete and force recompute\n * \'keep\' : do not compute again if subfolder exists and log is OK\n\n engine: str\n \'loop\', \'joblib\', or \'dask\'\n\n engine_kwargs: dict\n This contains kwargs specific to the launcher engine:\n * \'loop\' : no kwargs\n * \'joblib\' : {\'n_jobs\' : } number of processes\n * \'dask\' : {\'client\':} the dask client for submitting tasks\n\n verbose: bool\n default False\n\n docker_image: None or str\n If str, run the sorter inside a container (docker) using the docker package.\n\n **sorter_params: keyword args\n Spike sorter specific arguments (they can be retrieved with \'get_default_params(sorter_name_or_class)\')\n\n Returns\n -------\n\n sorting : UnitsAggregationSorting\n The aggregated SortingExtractor.\n\n Examples\n --------\n\n This example shows how to run spike sorting split by group using the \'joblib\' backend with 4 jobs for parallel\n processing.\n\n >>> sorting = si.run_sorter_by_property("tridesclous", recording, grouping_property="group",\n working_folder="sort_by_group", engine="joblib",\n engine_kwargs={"n_jobs": 4})\n\n ' assert (grouping_property in recording.get_property_keys()), f"The 'grouping_property' {grouping_property} is not a recording property!" recording_dict = recording.split_by(grouping_property) sorting_output = run_sorters([sorter_name], recording_dict, working_folder, mode_if_folder_exists=mode_if_folder_exists, engine=engine, engine_kwargs=engine_kwargs, verbose=verbose, with_output=True, docker_images={sorter_name: docker_image}, sorter_params={sorter_name: sorter_params}) grouping_property_values = np.array([]) sorting_list = [] for (output_name, sorting) in sorting_output.items(): (prop_name, sorter_name) = output_name sorting_list.append(sorting) grouping_property_values = np.concatenate((grouping_property_values, ([prop_name] * len(sorting.get_unit_ids())))) aggregate_sorting = aggregate_units(sorting_list) aggregate_sorting.set_property(key=grouping_property, values=grouping_property_values) return aggregate_sorting<|docstring|>Generic function to run a sorter on a recording after splitting by a 'grouping_property' (e.g. 'group'). Internally, the function works as follows: * the recording is split based on the provided 'grouping_property' (using the 'split_by' function) * the 'run_sorters' function is run on the split recordings * sorting outputs are aggregated using the 'aggregate_units' function * the 'grouping_property' is added as a property to the SortingExtractor Parameters ---------- sorter_name: str The sorter name recording: BaseRecording The recording to be sorted grouping_property: object Property to split by before sorting working_folder: str The working directory. sorter_params: dict of dict with sorter_name as key This allows overwriting default params per sorter. mode_if_folder_exists: 'raise' or 'overwrite' or 'keep' The mode when the subfolder of recording/sorter already exists. * 'raise' : raise error if subfolder exists * 'overwrite' : delete and force recompute * 'keep' : do not compute again if subfolder exists and log is OK engine: str 'loop', 'joblib', or 'dask' engine_kwargs: dict This contains kwargs specific to the launcher engine: * 'loop' : no kwargs * 'joblib' : {'n_jobs' : } number of processes * 'dask' : {'client':} the dask client for submitting tasks verbose: bool default False docker_image: None or str If str, run the sorter inside a container (docker) using the docker package. **sorter_params: keyword args Spike sorter specific arguments (they can be retrieved with 'get_default_params(sorter_name_or_class)') Returns ------- sorting : UnitsAggregationSorting The aggregated SortingExtractor. Examples -------- This example shows how to run spike sorting split by group using the 'joblib' backend with 4 jobs for parallel processing. >>> sorting = si.run_sorter_by_property("tridesclous", recording, grouping_property="group", working_folder="sort_by_group", engine="joblib", engine_kwargs={"n_jobs": 4})<|endoftext|>
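The split/sort/aggregate pipeline the docstring describes can also be written out by hand. A hedged sketch, assuming a dumpable recording with a 'group' property and the module paths used by recent spikeinterface releases:

import spikeinterface as si
import spikeinterface.sorters as ss

recording_by_group = recording.split_by('group')      # dict: group value -> sub-recording
outputs = ss.run_sorters(['tridesclous'], recording_by_group, 'sort_by_group')
sorting = si.aggregate_units(list(outputs.values()))  # one SortingExtractor over all groups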
eff6f8179e4faaf34eb6724221aaa0595f8790786e25179ffd7cee4b8a80b6f5
def run_sorters(sorter_list, recording_dict_or_list, working_folder, sorter_params={}, mode_if_folder_exists='raise', engine='loop', engine_kwargs={}, verbose=False, with_output=True, docker_images={}): "\n This runs several sorters on several recordings.\n Simple implementations are nested loops or multiprocessing.\n\n sorter_list: list of str (sorter names)\n recording_dict_or_list: a dict (or a list) of recordings\n working_folder : str\n\n engine = None ( = 'loop') or 'multiprocessing'\n processes = only if 'multiprocessing' if None then processes=os.cpu_count()\n verbose=True/False to control sorter verbosity\n\n Note: engine='multiprocessing' uses the python multiprocessing module.\n This does not allow having subprocesses within subprocesses.\n So sorters that already use multiprocessing internally will fail.\n\n Parameters\n ----------\n\n sorter_list: list of str\n List of sorter names.\n\n recording_dict_or_list: dict or list\n A dict of recordings. The key will be the name of the recording.\n If a list is given, the names will be recording_0, recording_1, ...\n\n working_folder: str\n The working directory.\n\n sorter_params: dict of dict with sorter_name as key\n This allows overwriting default params per sorter.\n\n mode_if_folder_exists: 'raise' or 'overwrite' or 'keep'\n The mode when the subfolder of recording/sorter already exists.\n * 'raise' : raise error if subfolder exists\n * 'overwrite' : delete and force recompute\n * 'keep' : do not compute again if subfolder exists and log is OK\n\n engine: str\n 'loop', 'joblib', or 'dask'\n\n engine_kwargs: dict\n This contains kwargs specific to the launcher engine:\n * 'loop' : no kwargs\n * 'joblib' : {'n_jobs' : } number of processes\n * 'dask' : {'client':} the dask client for submitting tasks\n \n verbose: bool\n default False\n\n with_output: bool\n return the output.\n\n docker_images: dict\n A dictionary {sorter_name : docker_image} to specify if some sorters\n should use docker images\n\n run_sorter_kwargs: dict\n This contains kwargs specific to the run_sorter function: * 'raise_error' : bool\n * 'parallel' : bool\n * 'n_jobs' : int\n * 'joblib_backend' : 'loky' / 'multiprocessing' / 'threading'\n\n Returns\n -------\n\n results : dict\n The output is a nested dict[(rec_name, sorter_name)] of SortingExtractor.\n\n " working_folder = Path(working_folder) assert (mode_if_folder_exists in ('raise', 'keep', 'overwrite')) if ((mode_if_folder_exists == 'raise') and working_folder.is_dir()): raise Exception('working_folder already exists, please remove it') assert (engine in _implemented_engine), f'engine must be in {_implemented_engine}' if isinstance(sorter_list, str): sorter_list = [sorter_list] for sorter_name in sorter_list: assert (sorter_name in sorter_dict), f'{sorter_name} is not in sorter list' if isinstance(recording_dict_or_list, list): recording_dict = {'recording_{}'.format(i): rec for (i, rec) in enumerate(recording_dict_or_list)} elif isinstance(recording_dict_or_list, dict): recording_dict = recording_dict_or_list else: raise ValueError('bad recording dict') need_dump = (engine != 'loop') task_args_list = [] for (rec_name, recording) in recording_dict.items(): for sorter_name in sorter_list: output_folder = ((working_folder / str(rec_name)) / sorter_name) if output_folder.is_dir(): if (mode_if_folder_exists == 'raise'): raise Exception('output folder already exists for {} {}'.format(rec_name, sorter_name)) elif (mode_if_folder_exists == 'overwrite'): shutil.rmtree(str(output_folder)) elif (mode_if_folder_exists == 'keep'): if is_log_ok(output_folder): continue else: shutil.rmtree(str(output_folder)) params = sorter_params.get(sorter_name, {}) docker_image = docker_images.get(sorter_name, None) if need_dump: if (not recording.is_dumpable): raise Exception('recording not dumpable call recording.save() before') recording_arg = recording.to_dict() else: recording_arg = recording task_args = (sorter_name, recording_arg, output_folder, verbose, params, docker_image, with_output) task_args_list.append(task_args) if (engine == 'loop'): for task_args in task_args_list: _run_one(task_args) elif (engine == 'joblib'): from joblib import Parallel, delayed n_jobs = engine_kwargs.get('n_jobs', (- 1)) backend = engine_kwargs.get('backend', 'loky') Parallel(n_jobs=n_jobs, backend=backend)((delayed(_run_one)(task_args) for task_args in task_args_list)) elif (engine == 'dask'): client = engine_kwargs.get('client', None) assert (client is not None), 'For dask engine you have to provide : client = dask.distributed.Client(...)' tasks = [] for task_args in task_args_list: task = client.submit(_run_one, task_args) tasks.append(task) for task in tasks: task.result() if with_output: if (engine == 'dask'): print('Warning!! With engine="dask" you cannot have directly output results\nUse : run_sorters(..., with_output=False)\nAnd then: results = collect_sorting_outputs(output_folders)') return results = collect_sorting_outputs(working_folder) return results
This runs several sorters on several recordings. Simple implementations are nested loops or multiprocessing. sorter_list: list of str (sorter names) recording_dict_or_list: a dict (or a list) of recordings working_folder : str engine = None ( = 'loop') or 'multiprocessing' processes = only if 'multiprocessing' if None then processes=os.cpu_count() verbose=True/False to control sorter verbosity Note: engine='multiprocessing' uses the python multiprocessing module. This does not allow having subprocesses within subprocesses. So sorters that already use multiprocessing internally will fail. Parameters ---------- sorter_list: list of str List of sorter names. recording_dict_or_list: dict or list A dict of recordings. The key will be the name of the recording. If a list is given, the names will be recording_0, recording_1, ... working_folder: str The working directory. sorter_params: dict of dict with sorter_name as key This allows overwriting default params per sorter. mode_if_folder_exists: 'raise' or 'overwrite' or 'keep' The mode when the subfolder of recording/sorter already exists. * 'raise' : raise error if subfolder exists * 'overwrite' : delete and force recompute * 'keep' : do not compute again if subfolder exists and log is OK engine: str 'loop', 'joblib', or 'dask' engine_kwargs: dict This contains kwargs specific to the launcher engine: * 'loop' : no kwargs * 'joblib' : {'n_jobs' : } number of processes * 'dask' : {'client':} the dask client for submitting tasks verbose: bool default False with_output: bool return the output. docker_images: dict A dictionary {sorter_name : docker_image} to specify if some sorters should use docker images run_sorter_kwargs: dict This contains kwargs specific to the run_sorter function: * 'raise_error' : bool * 'parallel' : bool * 'n_jobs' : int * 'joblib_backend' : 'loky' / 'multiprocessing' / 'threading' Returns ------- results : dict The output is a nested dict[(rec_name, sorter_name)] of SortingExtractor.
spikeinterface/sorters/launcher.py
run_sorters
Dradeliomecus/spikeinterface
116
python
def run_sorters(sorter_list, recording_dict_or_list, working_folder, sorter_params={}, mode_if_folder_exists='raise', engine='loop', engine_kwargs={}, verbose=False, with_output=True, docker_images={}): "\n This runs several sorters on several recordings.\n Simple implementations are nested loops or multiprocessing.\n\n sorter_list: list of str (sorter names)\n recording_dict_or_list: a dict (or a list) of recordings\n working_folder : str\n\n engine = None ( = 'loop') or 'multiprocessing'\n processes = only if 'multiprocessing' if None then processes=os.cpu_count()\n verbose=True/False to control sorter verbosity\n\n Note: engine='multiprocessing' uses the python multiprocessing module.\n This does not allow having subprocesses within subprocesses.\n So sorters that already use multiprocessing internally will fail.\n\n Parameters\n ----------\n\n sorter_list: list of str\n List of sorter names.\n\n recording_dict_or_list: dict or list\n A dict of recordings. The key will be the name of the recording.\n If a list is given, the names will be recording_0, recording_1, ...\n\n working_folder: str\n The working directory.\n\n sorter_params: dict of dict with sorter_name as key\n This allows overwriting default params per sorter.\n\n mode_if_folder_exists: 'raise' or 'overwrite' or 'keep'\n The mode when the subfolder of recording/sorter already exists.\n * 'raise' : raise error if subfolder exists\n * 'overwrite' : delete and force recompute\n * 'keep' : do not compute again if subfolder exists and log is OK\n\n engine: str\n 'loop', 'joblib', or 'dask'\n\n engine_kwargs: dict\n This contains kwargs specific to the launcher engine:\n * 'loop' : no kwargs\n * 'joblib' : {'n_jobs' : } number of processes\n * 'dask' : {'client':} the dask client for submitting tasks\n \n verbose: bool\n default False\n\n with_output: bool\n return the output.\n\n docker_images: dict\n A dictionary {sorter_name : docker_image} to specify if some sorters\n should use docker images\n\n run_sorter_kwargs: dict\n This contains kwargs specific to the run_sorter function: * 'raise_error' : bool\n * 'parallel' : bool\n * 'n_jobs' : int\n * 'joblib_backend' : 'loky' / 'multiprocessing' / 'threading'\n\n Returns\n -------\n\n results : dict\n The output is a nested dict[(rec_name, sorter_name)] of SortingExtractor.\n\n " working_folder = Path(working_folder) assert (mode_if_folder_exists in ('raise', 'keep', 'overwrite')) if ((mode_if_folder_exists == 'raise') and working_folder.is_dir()): raise Exception('working_folder already exists, please remove it') assert (engine in _implemented_engine), f'engine must be in {_implemented_engine}' if isinstance(sorter_list, str): sorter_list = [sorter_list] for sorter_name in sorter_list: assert (sorter_name in sorter_dict), f'{sorter_name} is not in sorter list' if isinstance(recording_dict_or_list, list): recording_dict = {'recording_{}'.format(i): rec for (i, rec) in enumerate(recording_dict_or_list)} elif isinstance(recording_dict_or_list, dict): recording_dict = recording_dict_or_list else: raise ValueError('bad recording dict') need_dump = (engine != 'loop') task_args_list = [] for (rec_name, recording) in recording_dict.items(): for sorter_name in sorter_list: output_folder = ((working_folder / str(rec_name)) / sorter_name) if output_folder.is_dir(): if (mode_if_folder_exists == 'raise'): raise Exception('output folder already exists for {} {}'.format(rec_name, sorter_name)) elif (mode_if_folder_exists == 'overwrite'): shutil.rmtree(str(output_folder)) elif (mode_if_folder_exists == 'keep'): if is_log_ok(output_folder): continue else: shutil.rmtree(str(output_folder)) params = sorter_params.get(sorter_name, {}) docker_image = docker_images.get(sorter_name, None) if need_dump: if (not recording.is_dumpable): raise Exception('recording not dumpable call recording.save() before') recording_arg = recording.to_dict() else: recording_arg = recording task_args = (sorter_name, recording_arg, output_folder, verbose, params, docker_image, with_output) task_args_list.append(task_args) if (engine == 'loop'): for task_args in task_args_list: _run_one(task_args) elif (engine == 'joblib'): from joblib import Parallel, delayed n_jobs = engine_kwargs.get('n_jobs', (- 1)) backend = engine_kwargs.get('backend', 'loky') Parallel(n_jobs=n_jobs, backend=backend)((delayed(_run_one)(task_args) for task_args in task_args_list)) elif (engine == 'dask'): client = engine_kwargs.get('client', None) assert (client is not None), 'For dask engine you have to provide : client = dask.distributed.Client(...)' tasks = [] for task_args in task_args_list: task = client.submit(_run_one, task_args) tasks.append(task) for task in tasks: task.result() if with_output: if (engine == 'dask'): print('Warning!! With engine="dask" you cannot have directly output results\nUse : run_sorters(..., with_output=False)\nAnd then: results = collect_sorting_outputs(output_folders)') return results = collect_sorting_outputs(working_folder) return results
def run_sorters(sorter_list, recording_dict_or_list, working_folder, sorter_params={}, mode_if_folder_exists='raise', engine='loop', engine_kwargs={}, verbose=False, with_output=True, docker_images={}): "\n This runs several sorters on several recordings.\n Simple implementations are nested loops or multiprocessing.\n\n sorter_list: list of str (sorter names)\n recording_dict_or_list: a dict (or a list) of recordings\n working_folder : str\n\n engine = None ( = 'loop') or 'multiprocessing'\n processes = only if 'multiprocessing' if None then processes=os.cpu_count()\n verbose=True/False to control sorter verbosity\n\n Note: engine='multiprocessing' uses the python multiprocessing module.\n This does not allow having subprocesses within subprocesses.\n So sorters that already use multiprocessing internally will fail.\n\n Parameters\n ----------\n\n sorter_list: list of str\n List of sorter names.\n\n recording_dict_or_list: dict or list\n A dict of recordings. The key will be the name of the recording.\n If a list is given, the names will be recording_0, recording_1, ...\n\n working_folder: str\n The working directory.\n\n sorter_params: dict of dict with sorter_name as key\n This allows overwriting default params per sorter.\n\n mode_if_folder_exists: 'raise' or 'overwrite' or 'keep'\n The mode when the subfolder of recording/sorter already exists.\n * 'raise' : raise error if subfolder exists\n * 'overwrite' : delete and force recompute\n * 'keep' : do not compute again if subfolder exists and log is OK\n\n engine: str\n 'loop', 'joblib', or 'dask'\n\n engine_kwargs: dict\n This contains kwargs specific to the launcher engine:\n * 'loop' : no kwargs\n * 'joblib' : {'n_jobs' : } number of processes\n * 'dask' : {'client':} the dask client for submitting tasks\n \n verbose: bool\n default False\n\n with_output: bool\n return the output.\n\n docker_images: dict\n A dictionary {sorter_name : docker_image} to specify if some sorters\n should use docker images\n\n run_sorter_kwargs: dict\n This contains kwargs specific to the run_sorter function: * 'raise_error' : bool\n * 'parallel' : bool\n * 'n_jobs' : int\n * 'joblib_backend' : 'loky' / 'multiprocessing' / 'threading'\n\n Returns\n -------\n\n results : dict\n The output is a nested dict[(rec_name, sorter_name)] of SortingExtractor.\n\n " working_folder = Path(working_folder) assert (mode_if_folder_exists in ('raise', 'keep', 'overwrite')) if ((mode_if_folder_exists == 'raise') and working_folder.is_dir()): raise Exception('working_folder already exists, please remove it') assert (engine in _implemented_engine), f'engine must be in {_implemented_engine}' if isinstance(sorter_list, str): sorter_list = [sorter_list] for sorter_name in sorter_list: assert (sorter_name in sorter_dict), f'{sorter_name} is not in sorter list' if isinstance(recording_dict_or_list, list): recording_dict = {'recording_{}'.format(i): rec for (i, rec) in enumerate(recording_dict_or_list)} elif isinstance(recording_dict_or_list, dict): recording_dict = recording_dict_or_list else: raise ValueError('bad recording dict') need_dump = (engine != 'loop') task_args_list = [] for (rec_name, recording) in recording_dict.items(): for sorter_name in sorter_list: output_folder = ((working_folder / str(rec_name)) / sorter_name) if output_folder.is_dir(): if (mode_if_folder_exists == 'raise'): raise Exception('output folder already exists for {} {}'.format(rec_name, sorter_name)) elif (mode_if_folder_exists == 'overwrite'): shutil.rmtree(str(output_folder)) elif (mode_if_folder_exists == 'keep'): if is_log_ok(output_folder): continue else: shutil.rmtree(str(output_folder)) params = sorter_params.get(sorter_name, {}) docker_image = docker_images.get(sorter_name, None) if need_dump: if (not recording.is_dumpable): raise Exception('recording not dumpable call recording.save() before') recording_arg = recording.to_dict() else: recording_arg = recording task_args = (sorter_name, recording_arg, output_folder, verbose, params, docker_image, with_output) task_args_list.append(task_args) if (engine == 'loop'): for task_args in task_args_list: _run_one(task_args) elif (engine == 'joblib'): from joblib import Parallel, delayed n_jobs = engine_kwargs.get('n_jobs', (- 1)) backend = engine_kwargs.get('backend', 'loky') Parallel(n_jobs=n_jobs, backend=backend)((delayed(_run_one)(task_args) for task_args in task_args_list)) elif (engine == 'dask'): client = engine_kwargs.get('client', None) assert (client is not None), 'For dask engine you have to provide : client = dask.distributed.Client(...)' tasks = [] for task_args in task_args_list: task = client.submit(_run_one, task_args) tasks.append(task) for task in tasks: task.result() if with_output: if (engine == 'dask'): print('Warning!! With engine="dask" you cannot have directly output results\nUse : run_sorters(..., with_output=False)\nAnd then: results = collect_sorting_outputs(output_folders)') return results = collect_sorting_outputs(working_folder) return results<|docstring|>This runs several sorters on several recordings. Simple implementations are nested loops or multiprocessing. sorter_list: list of str (sorter names) recording_dict_or_list: a dict (or a list) of recordings working_folder : str engine = None ( = 'loop') or 'multiprocessing' processes = only if 'multiprocessing' if None then processes=os.cpu_count() verbose=True/False to control sorter verbosity Note: engine='multiprocessing' uses the python multiprocessing module. This does not allow having subprocesses within subprocesses. So sorters that already use multiprocessing internally will fail. Parameters ---------- sorter_list: list of str List of sorter names. recording_dict_or_list: dict or list A dict of recordings. The key will be the name of the recording. If a list is given, the names will be recording_0, recording_1, ... working_folder: str The working directory. sorter_params: dict of dict with sorter_name as key This allows overwriting default params per sorter. mode_if_folder_exists: 'raise' or 'overwrite' or 'keep' The mode when the subfolder of recording/sorter already exists. * 'raise' : raise error if subfolder exists * 'overwrite' : delete and force recompute * 'keep' : do not compute again if subfolder exists and log is OK engine: str 'loop', 'joblib', or 'dask' engine_kwargs: dict This contains kwargs specific to the launcher engine: * 'loop' : no kwargs * 'joblib' : {'n_jobs' : } number of processes * 'dask' : {'client':} the dask client for submitting tasks verbose: bool default False with_output: bool return the output. docker_images: dict A dictionary {sorter_name : docker_image} to specify if some sorters should use docker images run_sorter_kwargs: dict This contains kwargs specific to the run_sorter function: * 'raise_error' : bool * 'parallel' : bool * 'n_jobs' : int * 'joblib_backend' : 'loky' / 'multiprocessing' / 'threading' Returns ------- results : dict The output is a nested dict[(rec_name, sorter_name)] of SortingExtractor.<|endoftext|>
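How the three engines are selected in practice; a hedged sketch assuming installed sorters and a saved recording (the body above requires a dumpable recording for the non-loop engines):

# serial
results = run_sorters(['tridesclous'], {'rec0': recording}, 'out_loop', engine='loop')

# parallel via joblib
results = run_sorters(['tridesclous'], {'rec0': recording}, 'out_joblib',
                      engine='joblib', engine_kwargs={'n_jobs': 4})

# dask cannot hand results back directly; collect them afterwards
from dask.distributed import Client
client = Client()
run_sorters(['tridesclous'], {'rec0': recording}, 'out_dask',
            engine='dask', engine_kwargs={'client': client}, with_output=False)
results = collect_sorting_outputs('out_dask')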
28ca647a912bb5dd6c6c81faee3a0e4a236c315b5b141a9185777ca781b03383
def iter_sorting_output(output_folders): '\n Iterator over output_folder to retrieve all triplets\n (rec_name, sorter_name, sorting)\n ' for (rec_name, sorter_name, output_folder) in iter_output_folders(output_folders): SorterClass = sorter_dict[sorter_name] sorting = SorterClass.get_result_from_folder(output_folder) (yield (rec_name, sorter_name, sorting))
Iterator over output_folder to retrieve all triplets (rec_name, sorter_name, sorting)
spikeinterface/sorters/launcher.py
iter_sorting_output
Dradeliomecus/spikeinterface
116
python
def iter_sorting_output(output_folders): '\n Iterator over output_folder to retrieve all triplets\n (rec_name, sorter_name, sorting)\n ' for (rec_name, sorter_name, output_folder) in iter_output_folders(output_folders): SorterClass = sorter_dict[sorter_name] sorting = SorterClass.get_result_from_folder(output_folder) (yield (rec_name, sorter_name, sorting))
def iter_sorting_output(output_folders): '\n Iterator over output_folder to retrieve all triplets\n (rec_name, sorter_name, sorting)\n ' for (rec_name, sorter_name, output_folder) in iter_output_folders(output_folders): SorterClass = sorter_dict[sorter_name] sorting = SorterClass.get_result_from_folder(output_folder) (yield (rec_name, sorter_name, sorting))<|docstring|>Iterator over output_folder to retrieve all triplets (rec_name, sorter_name, sorting)<|endoftext|>
9ff4efbaec6bddbec88393f37d0aa1042bdbbfca055a93ae88ee350392fec3f3
def collect_sorting_outputs(output_folders): '\n Collect results from the output_folders directory.\n\n The output is a dict with double key access results[(rec_name, sorter_name)] of SortingExtractor.\n ' results = {} for (rec_name, sorter_name, sorting) in iter_sorting_output(output_folders): results[(rec_name, sorter_name)] = sorting return results
Collect results from the output_folders directory. The output is a dict with double key access results[(rec_name, sorter_name)] of SortingExtractor.
spikeinterface/sorters/launcher.py
collect_sorting_outputs
Dradeliomecus/spikeinterface
116
python
def collect_sorting_outputs(output_folders): '\n Collect results from the output_folders directory.\n\n The output is a dict with double key access results[(rec_name, sorter_name)] of SortingExtractor.\n ' results = {} for (rec_name, sorter_name, sorting) in iter_sorting_output(output_folders): results[(rec_name, sorter_name)] = sorting return results
def collect_sorting_outputs(output_folders): '\n Collect results from the output_folders directory.\n\n The output is a dict with double key access results[(rec_name, sorter_name)] of SortingExtractor.\n ' results = {} for (rec_name, sorter_name, sorting) in iter_sorting_output(output_folders): results[(rec_name, sorter_name)] = sorting return results<|docstring|>Collect results from the output_folders directory. The output is a dict with double key access results[(rec_name, sorter_name)] of SortingExtractor.<|endoftext|>
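The double-key access pattern the returned dict supports, with 'working_folder' standing in for a real output directory:

results = collect_sorting_outputs('working_folder')
for (rec_name, sorter_name), sorting in results.items():
    print(rec_name, sorter_name, len(sorting.get_unit_ids()))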
cfeb7c943786aa5d6bb1cd2875a6c49c36ff8fdf12667b49dca0a9f03569067e
def encode_shift(s: str): '\n returns encoded string by shifting every character by 5 in the alphabet.\n ' return ''.join([chr(((((ord(ch) + 5) - ord('a')) % 26) + ord('a'))) for ch in s])
returns encoded string by shifting every character by 5 in the alphabet.
alignment/find_bug/decode_shift.py
encode_shift
LaudateCorpus1/code-align-evals-data
3
python
def encode_shift(s: str): '\n \n ' return ''.join([chr(((((ord(ch) + 5) - ord('a')) % 26) + ord('a'))) for ch in s])
def encode_shift(s: str): '\n \n ' return ''.join([chr(((((ord(ch) + 5) - ord('a')) % 26) + ord('a'))) for ch in s])<|docstring|>returns encoded string by shifting every character by 5 in the alphabet.<|endoftext|>
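A round-trip check of the shift-by-5 cipher; only lowercase a-z is handled, since the arithmetic is anchored at ord('a'). Note that the decode_shift record that follows is an intentionally buggy find_bug sample, so the decoder below is the reference behaviour its docstring describes, not the record's code:

def encode_shift(s: str) -> str:
    return ''.join(chr((ord(ch) + 5 - ord('a')) % 26 + ord('a')) for ch in s)

def decode_shift(s: str) -> str:
    # inverse: subtract 5 instead of adding it
    return ''.join(chr((ord(ch) - 5 - ord('a')) % 26 + ord('a')) for ch in s)

assert encode_shift('hello') == 'mjqqt'
assert decode_shift(encode_shift('zebra')) == 'zebra'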
1ae07f28c6fd07b9f0e09b5cad1576f14603192115e7b085f5e2c64e55141535
def decode_shift(s: str): "\n takes as input string encoded with encode_shift function. Returns decoded string.\n \n Example solution:\n # line 1\n decoded_str = ''\n # line 2\n for ch in s:\n # line 3\n v = (ord(ch) - 5 - ord('a'))\n # line 4\n v = (v + ord('a'))\n # line 5\n decoded_str += chr(v)\n # line 6\n return decoded_str\n \n " print('3')
takes as input string encoded with encode_shift function. Returns decoded string. Example solution: # line 1 decoded_str = '' # line 2 for ch in s: # line 3 v = (ord(ch) - 5 - ord('a')) # line 4 v = (v + ord('a')) # line 5 decoded_str += chr(v) # line 6 return decoded_str
alignment/find_bug/decode_shift.py
decode_shift
LaudateCorpus1/code-align-evals-data
3
python
def decode_shift(s: str): "\n takes as input string encoded with encode_shift function. Returns decoded string.\n \n Example solution:\n # line 1\n decoded_str = ''\n # line 2\n for ch in s:\n # line 3\n v = (ord(ch) - 5 - ord('a'))\n # line 4\n v = (v + ord('a'))\n # line 5\n decoded_str += chr(v)\n # line 6\n return decoded_str\n \n " print('3')
def decode_shift(s: str): "\n takes as input string encoded with encode_shift function. Returns decoded string.\n \n Example solution:\n # line 1\n decoded_str = ''\n # line 2\n for ch in s:\n # line 3\n v = (ord(ch) - 5 - ord('a'))\n # line 4\n v = (v + ord('a'))\n # line 5\n decoded_str += chr(v)\n # line 6\n return decoded_str\n \n " print('3')<|docstring|>takes as input string encoded with encode_shift function. Returns decoded string. Example solution: # line 1 decoded_str = '' # line 2 for ch in s: # line 3 v = (ord(ch) - 5 - ord('a')) # line 4 v = (v + ord('a')) # line 5 decoded_str += chr(v) # line 6 return decoded_str<|endoftext|>
3331a591880027df354f9de974f0c89b4297ab29187b7af6748ed51d40327ef2
@pytest.fixture(scope='function') def session(db, request): 'Creates a new database session for a test.' connection = db.engine.connect() transaction = connection.begin() options = dict(bind=connection, binds={}) session = db.create_scoped_session(options=options) db.session = session def teardown(): transaction.rollback() connection.close() session.remove() request.addfinalizer(teardown) return session
Creates a new database session for a test.
flask_user/tests/conftest.py
session
timgates42/Flask-User
2
python
@pytest.fixture(scope='function') def session(db, request): connection = db.engine.connect() transaction = connection.begin() options = dict(bind=connection, binds={}) session = db.create_scoped_session(options=options) db.session = session def teardown(): transaction.rollback() connection.close() session.remove() request.addfinalizer(teardown) return session
@pytest.fixture(scope='function') def session(db, request): connection = db.engine.connect() transaction = connection.begin() options = dict(bind=connection, binds={}) session = db.create_scoped_session(options=options) db.session = session def teardown(): transaction.rollback() connection.close() session.remove() request.addfinalizer(teardown) return session<|docstring|>Creates a new database session for a test.<|endoftext|>
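A hedged sketch of a test consuming the fixture above; User is a placeholder model, not part of the fixture. Because the session is bound to a connection whose outer transaction is rolled back in teardown, nothing the test writes survives it:

def test_add_user(session):
    user = User(email='[email protected]')   # placeholder model
    session.add(user)
    session.commit()
    assert session.query(User).count() == 1
# teardown: transaction.rollback() discards the inserted row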
3e996a10010c3515a444bdd8da1829473088e13050a52407dba307dc92f3894b
def feature(self, a, b): 'Returns the product of two numbers without interacting with the mock instrument.' return float((a * b))
Returns the product of two numbers without interacting with the mock instrument.
visadore/mock_instrument/feature.py
feature
cwstryker/visadore
0
python
def feature(self, a, b): return float((a * b))
def feature(self, a, b): return float((a * b))<|docstring|>Returns the product of two numbers without interacting with the mock instrument.<|endoftext|>
dd7573556ec1f2378f1200ff2b22bc13cd7dab68e87d9317f75a6893d966cc51
def feature(self, a, b): 'Returns the product of two numbers after storing the values in the mock instrument.' with self.resource_manager.open_resource(self.resource_name) as inst: inst.write('A {:.2f}'.format(a)) inst.write('B {:.2f}'.format(b)) return (float(inst.query('A?')) * float(inst.query('B?')))
Returns the product of two numbers after storing the values in the mock instrument.
visadore/mock_instrument/feature.py
feature
cwstryker/visadore
0
python
def feature(self, a, b): with self.resource_manager.open_resource(self.resource_name) as inst: inst.write('A {:.2f}'.format(a)) inst.write('B {:.2f}'.format(b)) return (float(inst.query('A?')) * float(inst.query('B?')))
def feature(self, a, b): with self.resource_manager.open_resource(self.resource_name) as inst: inst.write('A {:.2f}'.format(a)) inst.write('B {:.2f}'.format(b)) return (float(inst.query('A?')) * float(inst.query('B?')))<|docstring|>Returns the product of two numbers after storing the values in the mock instrument.<|endoftext|>
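The write/query exchange the method above performs, written against pyvisa directly. The resource name and the pyvisa-sim backend are assumptions for illustration:

import pyvisa

rm = pyvisa.ResourceManager('@sim')              # requires the pyvisa-sim backend
with rm.open_resource('MOCK0::INSTR') as inst:   # placeholder resource name
    inst.write('A 2.00')
    inst.write('B 3.00')
    product = float(inst.query('A?')) * float(inst.query('B?'))
    print(product)  # 6.0, if the instrument echoes the stored values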
5630309a78d03f2ed2e4ef949a88e4288c17adec7e842f09dc2163f93ab13468
def feature(self): 'Returns the resource name of the mock instrument.' return self.resource_name
Returns the resource name of the mock instrument.
visadore/mock_instrument/feature.py
feature
cwstryker/visadore
0
python
def feature(self): return self.resource_name
def feature(self): return self.resource_name<|docstring|>Returns the resource name of the mock instrument.<|endoftext|>
6ab2bcc0ff1417fed63a00d48bf93cd85bf72528a97d0059719bac4c3cc277f0
def as_operator(self): ' Returns a CustomOperator that generates the input and (optionally)\n label, to streamline data serving.\n ' raise NotImplementedError
Returns a CustomOperator that generates the input and (optionally) label, to streamline data serving.
deep500/lv2/sampler.py
as_operator
sashkboos/deep500
90
python
def as_operator(self): ' Returns a CustomOperator that generates the input and (optionally)\n label, to streamline data serving.\n ' raise NotImplementedError
def as_operator(self): ' Returns a CustomOperator that generates the input and (optionally)\n label, to streamline data serving.\n ' raise NotImplementedError<|docstring|>Returns a CustomOperator that generates the input and (optionally) label, to streamline data serving.<|endoftext|>
1bea406ce44a69ad3f10211515d06fd0e4b721500de8cad8b8f33825a4016c3c
def __len__(self): ' Defines the length of an epoch, or 0 for running until a \n StopIteration exception is raised. ' return (len(self.dataset) // self.batch_size)
Defines the length of an epoch, or 0 for running until a StopIteration exception is raised.
deep500/lv2/sampler.py
__len__
sashkboos/deep500
90
python
def __len__(self): ' Defines the length of an epoch, or 0 for running until a \n StopIteration exception is raised. ' return (len(self.dataset) // self.batch_size)
def __len__(self): ' Defines the length of an epoch, or 0 for running until a \n StopIteration exception is raised. ' return (len(self.dataset) // self.batch_size)<|docstring|>Defines the length of an epoch, or 0 for running until a StopIteration exception is raised.<|endoftext|>
c823dfcb9b6320bbce3ef0e072f13fa017ee29bd1f292ffc97e8505a37c18e00
def add_transformation(self, transform: Callable[([Dict[(str, Any)]], Any)]): ' Apply a transformation (e.g., data augmentation) on an input after\n it is sampled.\n @param transform: The transformation to apply on a minibatch.\n ' if hasattr(transform, 'set_dataset_nodes'): transform.set_dataset_nodes(self.dataset.input_node, self.dataset.label_node) self.transformations.append(transform)
Apply a transformation (e.g., data augmentation) on an input after it is sampled. @param transform: The transformation to apply on a minibatch.
deep500/lv2/sampler.py
add_transformation
sashkboos/deep500
90
python
def add_transformation(self, transform: Callable[([Dict[(str, Any)]], Any)]): ' Apply a transformation (e.g., data augmentation) on an input after\n it is sampled.\n @param transform: The transformation to apply on a minibatch.\n ' if hasattr(transform, 'set_dataset_nodes'): transform.set_dataset_nodes(self.dataset.input_node, self.dataset.label_node) self.transformations.append(transform)
def add_transformation(self, transform: Callable[([Dict[(str, Any)]], Any)]): ' Apply a transformation (e.g., data augmentation) on an input after\n it is sampled.\n @param transform: The transformation to apply on a minibatch.\n ' if hasattr(transform, 'set_dataset_nodes'): transform.set_dataset_nodes(self.dataset.input_node, self.dataset.label_node) self.transformations.append(transform)<|docstring|>Apply a transformation (e.g., data augmentation) on an input after it is sampled. @param transform: The transformation to apply on a minibatch.<|endoftext|>
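The hook above expects a callable over the sampled minibatch, optionally exposing set_dataset_nodes so the sampler can tell it which keys hold inputs and labels. A minimal sketch; the sampler instance and node names are placeholders, and returning the batch (rather than relying on in-place mutation) is an assumption:

import numpy as np

class NormalizeInput:
    def set_dataset_nodes(self, input_node, label_node):
        self.input_node = input_node    # called by add_transformation when present

    def __call__(self, batch):
        batch[self.input_node] = batch[self.input_node].astype(np.float32) / 255.0
        return batch

sampler.add_transformation(NormalizeInput())   # 'sampler' assumed to exist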
f997bd68da9324f579693575af9fd3bbc3a96900ac3ded5c2c60c9826900127d
def create_api_tags_v1(): '\n\tSwagger API documentation tags.\n\tThe order of the array determines their order in Swagger.\n\t' return [token.api.tag, user.api.tag, address.api.tag, client.api.tag, banner.api.tag, theme.api.tag, product.api.tag, category.api.tag, order.api.tag, pay.api.tag]
Swagger API documentation tags. The order of the array determines their order in Swagger.
app/api/v1/__init__.py
create_api_tags_v1
HuaiGuang10/mini-shop-server
1
python
def create_api_tags_v1(): '\n\tSwagger API documentation tags.\n\tThe order of the array determines their order in Swagger.\n\t' return [token.api.tag, user.api.tag, address.api.tag, client.api.tag, banner.api.tag, theme.api.tag, product.api.tag, category.api.tag, order.api.tag, pay.api.tag]
def create_api_tags_v1(): '\n\tSwagger API documentation tags.\n\tThe order of the array determines their order in Swagger.\n\t' return [token.api.tag, user.api.tag, address.api.tag, client.api.tag, banner.api.tag, theme.api.tag, product.api.tag, category.api.tag, order.api.tag, pay.api.tag]<|docstring|>Swagger API documentation tags. The order of the array determines their order in Swagger.<|endoftext|>
21ea355c76199a87a26c4daafb56855014603b2835261098595249158c293a95
def hbox(*args, margin=2, widget=None, add_spacer=False): "\n Automatically lays out the widgets passed in as *args in a QHBoxLayout. Optionally\n adds a spacer at the end.\n :param QWidget *args: the widgets to arrange in a horizontal layout\n :param int margin: the margin for the layout\n :param bool add_spacer: if True a spacer is added at the end\n\n Example:\n >>> qapp = QtWidgets.QApplication(['hbox layout test'])\n >>> label = QtWidgets.QLabel('A label')\n >>> btn = QtWidgets.QPushButton('Push me')\n >>> editor = QtWidgets.QLineEdit('editing')\n >>> w = hbox(label, btn, editor)\n >>> mw = QtWidgets.QMainWindow()\n >>> mw.setCentralWidget(w)\n >>> mw.setWindowTitle('hbox Example')\n >>> mw.show()\n >>> qapp.exec_()\n 0\n " if (widget is None): widget = QtWidgets.QWidget() layout = QtWidgets.QHBoxLayout(widget) __set_margin(layout, margin) widget.setLayout(layout) for w in args: try: w.setParent(widget) except AttributeError: pass try: layout.addWidget(w) except TypeError: layout.addItem(w) if add_spacer: spacer = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) layout.addItem(spacer) return widget
Automatically lays out the widgets passed in as *args in a QHBoxLayout. Optionally adds a spacer at the end. :param QWidget *args: the widgets to arrange in a horizontal layout :param int margin: the margin for the layout :param bool add_spacer: if True a spacer is added at the end Example: >>> qapp = QtWidgets.QApplication(['hbox layout test']) >>> label = QtWidgets.QLabel('A label') >>> btn = QtWidgets.QPushButton('Push me') >>> editor = QtWidgets.QLineEdit('editing') >>> w = hbox(label, btn, editor) >>> mw = QtWidgets.QMainWindow() >>> mw.setCentralWidget(w) >>> mw.setWindowTitle('hbox Example') >>> mw.show() >>> qapp.exec_() 0
qtlayout/layouts.py
hbox
brakedust/qtlayout
1
python
def hbox(*args, margin=2, widget=None, add_spacer=False): "\n    Automatically lays out the widgets passed in as *args in a QHBoxLayout. Optionally\n    adds a spacer at the end.\n    :param QWidget *args: the widgets to arrange in a horizontal layout\n    :param int margin: the margin for the layout\n    :param bool add_spacer: if True a spacer is added at the end\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['hbox layout test'])\n    >>> label = QtWidgets.QLabel('A label')\n    >>> btn = QtWidgets.QPushButton('Push me')\n    >>> editor = QtWidgets.QLineEdit('editing')\n    >>> w = hbox(label, btn, editor)\n    >>> mw = QtWidgets.QMainWindow()\n    >>> mw.setCentralWidget(w)\n    >>> mw.setWindowTitle('hbox Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n    " if (widget is None): widget = QtWidgets.QWidget() layout = QtWidgets.QHBoxLayout(widget) __set_margin(layout, margin) widget.setLayout(layout) for w in args: try: w.setParent(widget) except AttributeError: pass try: layout.addWidget(w) except TypeError: layout.addItem(w) if add_spacer: spacer = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) layout.addItem(spacer) return widget
def hbox(*args, margin=2, widget=None, add_spacer=False): "\n    Automatically lays out the widgets passed in as *args in a QHBoxLayout. Optionally\n    adds a spacer at the end.\n    :param QWidget *args: the widgets to arrange in a horizontal layout\n    :param int margin: the margin for the layout\n    :param bool add_spacer: if True a spacer is added at the end\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['hbox layout test'])\n    >>> label = QtWidgets.QLabel('A label')\n    >>> btn = QtWidgets.QPushButton('Push me')\n    >>> editor = QtWidgets.QLineEdit('editing')\n    >>> w = hbox(label, btn, editor)\n    >>> mw = QtWidgets.QMainWindow()\n    >>> mw.setCentralWidget(w)\n    >>> mw.setWindowTitle('hbox Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n    " if (widget is None): widget = QtWidgets.QWidget() layout = QtWidgets.QHBoxLayout(widget) __set_margin(layout, margin) widget.setLayout(layout) for w in args: try: w.setParent(widget) except AttributeError: pass try: layout.addWidget(w) except TypeError: layout.addItem(w) if add_spacer: spacer = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) layout.addItem(spacer) return widget<|docstring|>Automatically lays out the widgets passed in as *args in a QHBoxLayout. Optionally
adds a spacer at the end.
:param QWidget *args: the widgets to arrange in a horizontal layout
:param int margin: the margin for the layout
:param bool add_spacer: if True a spacer is added at the end

Example:
>>> qapp = QtWidgets.QApplication(['hbox layout test'])
>>> label = QtWidgets.QLabel('A label')
>>> btn = QtWidgets.QPushButton('Push me')
>>> editor = QtWidgets.QLineEdit('editing')
>>> w = hbox(label, btn, editor)
>>> mw = QtWidgets.QMainWindow()
>>> mw.setCentralWidget(w)
>>> mw.setWindowTitle('hbox Example')
>>> mw.show()
>>> qapp.exec_()
0<|endoftext|>
312d0e50ed6d2d3c4c2424d6a0b0b51810471e854e64a6f32bfcead5bbea5d96
def vbox(*args, margin=2, widget=None, add_spacer=False): "\n    Automatically lays out the widgets passed in as *args in a QVBoxLayout. Optionally\n    adds a spacer at the end\n    :param QWidget *args: the widgets to arrange in a vertical layout\n    :param bool add_spacer: if True a spacer is added at the bottom\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['vbox layout test'])\n    >>> label = QtWidgets.QLabel('A label')\n    >>> btn = QtWidgets.QPushButton('Push me')\n    >>> editor = QtWidgets.QLineEdit('editing')\n    >>> w = vbox(label, btn, editor)\n    >>> mw = QtWidgets.QMainWindow()\n    >>> mw.setCentralWidget(w)\n    >>> mw.setWindowTitle('vbox Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n    " if (widget is None): widget = QtWidgets.QWidget() layout = QtWidgets.QVBoxLayout(widget) __set_margin(layout, margin=margin) widget.setLayout(layout) for w in args: try: w.setParent(widget) except AttributeError: pass try: layout.addWidget(w) except TypeError: layout.addItem(w) if add_spacer: spacer = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) layout.addItem(spacer) setattr(widget, 'items', args) return widget
Automatically lays out the widgets passed in as *args in a QVBoxLayout. Optionally
adds a spacer at the end
:param QWidget *args: the widgets to arrange in a vertical layout
:param bool add_spacer: if True a spacer is added at the bottom

Example:
>>> qapp = QtWidgets.QApplication(['vbox layout test'])
>>> label = QtWidgets.QLabel('A label')
>>> btn = QtWidgets.QPushButton('Push me')
>>> editor = QtWidgets.QLineEdit('editing')
>>> w = vbox(label, btn, editor)
>>> mw = QtWidgets.QMainWindow()
>>> mw.setCentralWidget(w)
>>> mw.setWindowTitle('vbox Example')
>>> mw.show()
>>> qapp.exec_()
0
qtlayout/layouts.py
vbox
brakedust/qtlayout
1
python
def vbox(*args, margin=2, widget=None, add_spacer=False): "\n    Automatically lays out the widgets passed in as *args in a QVBoxLayout. Optionally\n    adds a spacer at the end\n    :param QWidget *args: the widgets to arrange in a vertical layout\n    :param bool add_spacer: if True a spacer is added at the bottom\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['vbox layout test'])\n    >>> label = QtWidgets.QLabel('A label')\n    >>> btn = QtWidgets.QPushButton('Push me')\n    >>> editor = QtWidgets.QLineEdit('editing')\n    >>> w = vbox(label, btn, editor)\n    >>> mw = QtWidgets.QMainWindow()\n    >>> mw.setCentralWidget(w)\n    >>> mw.setWindowTitle('vbox Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n    " if (widget is None): widget = QtWidgets.QWidget() layout = QtWidgets.QVBoxLayout(widget) __set_margin(layout, margin=margin) widget.setLayout(layout) for w in args: try: w.setParent(widget) except AttributeError: pass try: layout.addWidget(w) except TypeError: layout.addItem(w) if add_spacer: spacer = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) layout.addItem(spacer) setattr(widget, 'items', args) return widget
def vbox(*args, margin=2, widget=None, add_spacer=False): "\n    Automatically lays out the widgets passed in as *args in a QVBoxLayout. Optionally\n    adds a spacer at the end\n    :param QWidget *args: the widgets to arrange in a vertical layout\n    :param bool add_spacer: if True a spacer is added at the bottom\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['vbox layout test'])\n    >>> label = QtWidgets.QLabel('A label')\n    >>> btn = QtWidgets.QPushButton('Push me')\n    >>> editor = QtWidgets.QLineEdit('editing')\n    >>> w = vbox(label, btn, editor)\n    >>> mw = QtWidgets.QMainWindow()\n    >>> mw.setCentralWidget(w)\n    >>> mw.setWindowTitle('vbox Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n    " if (widget is None): widget = QtWidgets.QWidget() layout = QtWidgets.QVBoxLayout(widget) __set_margin(layout, margin=margin) widget.setLayout(layout) for w in args: try: w.setParent(widget) except AttributeError: pass try: layout.addWidget(w) except TypeError: layout.addItem(w) if add_spacer: spacer = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) layout.addItem(spacer) setattr(widget, 'items', args) return widget<|docstring|>Automatically lays out the widgets passed in as *args in a QVBoxLayout. Optionally
adds a spacer at the end
:param QWidget *args: the widgets to arrange in a vertical layout
:param bool add_spacer: if True a spacer is added at the bottom

Example:
>>> qapp = QtWidgets.QApplication(['vbox layout test'])
>>> label = QtWidgets.QLabel('A label')
>>> btn = QtWidgets.QPushButton('Push me')
>>> editor = QtWidgets.QLineEdit('editing')
>>> w = vbox(label, btn, editor)
>>> mw = QtWidgets.QMainWindow()
>>> mw.setCentralWidget(w)
>>> mw.setWindowTitle('vbox Example')
>>> mw.show()
>>> qapp.exec_()
0<|endoftext|>
5942778b3021516a29b5e7f27005116c47199261099801f9f3762028f32f7815
def stack(*args): "\n    Arranges the passed in QWidgets in a stacked layout. This is similar to a\n    tab widget, but the switching must be done using another widget which calls\n    the QStackedWidget's setCurrentIndex slot\n    :param *args: the widgets to place in a stacked layout\n    :returns: the stacked widget; call its setCurrentIndex slot to change the displayed item\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['stack layout test'])\n    >>> label = QtWidgets.QLabel('A label')\n    >>> btn = QtWidgets.QPushButton('Push me')\n    >>> editor = QtWidgets.QLineEdit('editing')\n    >>> w = stack(label, btn, editor)\n\n    >>> cb = QtWidgets.QComboBox() # the combo box will control which widget is viewable\n    >>> cb.addItems(['Label', 'Button', 'Line Edit'])\n    >>> cb.activated.connect(w.setCurrentIndex)\n\n    >>> w_outer = vbox(cb, w)\n    >>> mw = QtWidgets.QMainWindow()\n    >>> mw.setCentralWidget(w_outer)\n    >>> mw.setWindowTitle('stack Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n    " stack_widget = QtWidgets.QStackedWidget() for w in args: stack_widget.addWidget(w) return stack_widget
Arranges the passed in QWidgets in a stacked layout. This is similar to a
tab widget, but the switching must be done using another widget which calls
the QStackedWidget's setCurrentIndex slot
:param *args: the widgets to place in a stacked layout
:returns: the stacked widget; call its setCurrentIndex slot to change the displayed item

Example:
>>> qapp = QtWidgets.QApplication(['stack layout test'])
>>> label = QtWidgets.QLabel('A label')
>>> btn = QtWidgets.QPushButton('Push me')
>>> editor = QtWidgets.QLineEdit('editing')
>>> w = stack(label, btn, editor)

>>> cb = QtWidgets.QComboBox() # the combo box will control which widget is viewable
>>> cb.addItems(['Label', 'Button', 'Line Edit'])
>>> cb.activated.connect(w.setCurrentIndex)

>>> w_outer = vbox(cb, w)
>>> mw = QtWidgets.QMainWindow()
>>> mw.setCentralWidget(w_outer)
>>> mw.setWindowTitle('stack Example')
>>> mw.show()
>>> qapp.exec_()
0
qtlayout/layouts.py
stack
brakedust/qtlayout
1
python
def stack(*args): "\n    Arranges the passed in QWidgets in a stacked layout. This is similar to a\n    tab widget, but the switching must be done using another widget which calls\n    the QStackedWidget's setCurrentIndex slot\n    :param *args: the widgets to place in a stacked layout\n    :returns: the stacked widget; call its setCurrentIndex slot to change the displayed item\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['stack layout test'])\n    >>> label = QtWidgets.QLabel('A label')\n    >>> btn = QtWidgets.QPushButton('Push me')\n    >>> editor = QtWidgets.QLineEdit('editing')\n    >>> w = stack(label, btn, editor)\n\n    >>> cb = QtWidgets.QComboBox() # the combo box will control which widget is viewable\n    >>> cb.addItems(['Label', 'Button', 'Line Edit'])\n    >>> cb.activated.connect(w.setCurrentIndex)\n\n    >>> w_outer = vbox(cb, w)\n    >>> mw = QtWidgets.QMainWindow()\n    >>> mw.setCentralWidget(w_outer)\n    >>> mw.setWindowTitle('stack Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n    " stack_widget = QtWidgets.QStackedWidget() for w in args: stack_widget.addWidget(w) return stack_widget
def stack(*args): "\n    Arranges the passed in QWidgets in a stacked layout. This is similar to a\n    tab widget, but the switching must be done using another widget which calls\n    the QStackedWidget's setCurrentIndex slot\n    :param *args: the widgets to place in a stacked layout\n    :returns: the stacked widget; call its setCurrentIndex slot to change the displayed item\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['stack layout test'])\n    >>> label = QtWidgets.QLabel('A label')\n    >>> btn = QtWidgets.QPushButton('Push me')\n    >>> editor = QtWidgets.QLineEdit('editing')\n    >>> w = stack(label, btn, editor)\n\n    >>> cb = QtWidgets.QComboBox() # the combo box will control which widget is viewable\n    >>> cb.addItems(['Label', 'Button', 'Line Edit'])\n    >>> cb.activated.connect(w.setCurrentIndex)\n\n    >>> w_outer = vbox(cb, w)\n    >>> mw = QtWidgets.QMainWindow()\n    >>> mw.setCentralWidget(w_outer)\n    >>> mw.setWindowTitle('stack Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n    " stack_widget = QtWidgets.QStackedWidget() for w in args: stack_widget.addWidget(w) return stack_widget<|docstring|>Arranges the passed in QWidgets in a stacked layout. This is similar to a
tab widget, but the switching must be done using another widget which calls
the QStackedWidget's setCurrentIndex slot
:param *args: the widgets to place in a stacked layout
:returns: the stacked widget; call its setCurrentIndex slot to change the displayed item

Example:
>>> qapp = QtWidgets.QApplication(['stack layout test'])
>>> label = QtWidgets.QLabel('A label')
>>> btn = QtWidgets.QPushButton('Push me')
>>> editor = QtWidgets.QLineEdit('editing')
>>> w = stack(label, btn, editor)

>>> cb = QtWidgets.QComboBox() # the combo box will control which widget is viewable
>>> cb.addItems(['Label', 'Button', 'Line Edit'])
>>> cb.activated.connect(w.setCurrentIndex)

>>> w_outer = vbox(cb, w)
>>> mw = QtWidgets.QMainWindow()
>>> mw.setCentralWidget(w_outer)
>>> mw.setWindowTitle('stack Example')
>>> mw.show()
>>> qapp.exec_()
0<|endoftext|>
32cf02758ef60c2adf2ca895984ad5286e8620c40567dde0a8108da2ebf36e02
def grid(grid_items, margin=2): "\n    Arranges the items in grid_items in a QGridLayout. grid_items should be a\n    nested list where each inner list is a row of QWidgets\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['grid layout test'])\n    >>> mw = QtWidgets.QMainWindow()\n    >>> #\n    >>> label = QtWidgets.QLabel('A label')\n    >>> btn = QtWidgets.QPushButton('Push me')\n    >>> editor = QtWidgets.QLineEdit('editing')\n    >>> #\n    >>> label2 = QtWidgets.QLabel('A label 2')\n    >>> btn2 = QtWidgets.QPushButton('Push me 2')\n    >>> editor2 = QtWidgets.QLineEdit('editing 2')\n    >>> #\n    >>> label3 = QtWidgets.QLabel('A label 3')\n    >>> btn3 = QtWidgets.QPushButton('Push me 3')\n    >>> editor3 = QtWidgets.QLineEdit('editing 3')\n    >>> #\n    >>> w3 = grid([[label, btn, editor],\n    ...            [label2, btn2, editor2],\n    ...            [label3, btn3, editor3]])\n    >>> #\n    >>> mw.setCentralWidget(w3)\n    >>> mw.setWindowTitle('Grid Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n\n    " widget = QtWidgets.QWidget() layout = QtWidgets.QGridLayout(widget) __set_margin(layout, margin) widget.setLayout(layout) for (irow, row) in enumerate(grid_items): for (icol, item) in enumerate(row): layout.addWidget(item, irow, icol) return widget
Arranges the items in grid_items in a QGridLayout. grid_items should be a
nested list where each inner list is a row of QWidgets

Example:
>>> qapp = QtWidgets.QApplication(['grid layout test'])
>>> mw = QtWidgets.QMainWindow()
>>> #
>>> label = QtWidgets.QLabel('A label')
>>> btn = QtWidgets.QPushButton('Push me')
>>> editor = QtWidgets.QLineEdit('editing')
>>> #
>>> label2 = QtWidgets.QLabel('A label 2')
>>> btn2 = QtWidgets.QPushButton('Push me 2')
>>> editor2 = QtWidgets.QLineEdit('editing 2')
>>> #
>>> label3 = QtWidgets.QLabel('A label 3')
>>> btn3 = QtWidgets.QPushButton('Push me 3')
>>> editor3 = QtWidgets.QLineEdit('editing 3')
>>> #
>>> w3 = grid([[label, btn, editor],
...            [label2, btn2, editor2],
...            [label3, btn3, editor3]])
>>> #
>>> mw.setCentralWidget(w3)
>>> mw.setWindowTitle('Grid Example')
>>> mw.show()
>>> qapp.exec_()
0
qtlayout/layouts.py
grid
brakedust/qtlayout
1
python
def grid(grid_items, margin=2): "\n    Arranges the items in grid_items in a QGridLayout. grid_items should be a\n    nested list where each inner list is a row of QWidgets\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['grid layout test'])\n    >>> mw = QtWidgets.QMainWindow()\n    >>> #\n    >>> label = QtWidgets.QLabel('A label')\n    >>> btn = QtWidgets.QPushButton('Push me')\n    >>> editor = QtWidgets.QLineEdit('editing')\n    >>> #\n    >>> label2 = QtWidgets.QLabel('A label 2')\n    >>> btn2 = QtWidgets.QPushButton('Push me 2')\n    >>> editor2 = QtWidgets.QLineEdit('editing 2')\n    >>> #\n    >>> label3 = QtWidgets.QLabel('A label 3')\n    >>> btn3 = QtWidgets.QPushButton('Push me 3')\n    >>> editor3 = QtWidgets.QLineEdit('editing 3')\n    >>> #\n    >>> w3 = grid([[label, btn, editor],\n    ...            [label2, btn2, editor2],\n    ...            [label3, btn3, editor3]])\n    >>> #\n    >>> mw.setCentralWidget(w3)\n    >>> mw.setWindowTitle('Grid Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n\n    " widget = QtWidgets.QWidget() layout = QtWidgets.QGridLayout(widget) __set_margin(layout, margin) widget.setLayout(layout) for (irow, row) in enumerate(grid_items): for (icol, item) in enumerate(row): layout.addWidget(item, irow, icol) return widget
def grid(grid_items, margin=2): "\n    Arranges the items in grid_items in a QGridLayout. grid_items should be a\n    nested list where each inner list is a row of QWidgets\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['grid layout test'])\n    >>> mw = QtWidgets.QMainWindow()\n    >>> #\n    >>> label = QtWidgets.QLabel('A label')\n    >>> btn = QtWidgets.QPushButton('Push me')\n    >>> editor = QtWidgets.QLineEdit('editing')\n    >>> #\n    >>> label2 = QtWidgets.QLabel('A label 2')\n    >>> btn2 = QtWidgets.QPushButton('Push me 2')\n    >>> editor2 = QtWidgets.QLineEdit('editing 2')\n    >>> #\n    >>> label3 = QtWidgets.QLabel('A label 3')\n    >>> btn3 = QtWidgets.QPushButton('Push me 3')\n    >>> editor3 = QtWidgets.QLineEdit('editing 3')\n    >>> #\n    >>> w3 = grid([[label, btn, editor],\n    ...            [label2, btn2, editor2],\n    ...            [label3, btn3, editor3]])\n    >>> #\n    >>> mw.setCentralWidget(w3)\n    >>> mw.setWindowTitle('Grid Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n\n    " widget = QtWidgets.QWidget() layout = QtWidgets.QGridLayout(widget) __set_margin(layout, margin) widget.setLayout(layout) for (irow, row) in enumerate(grid_items): for (icol, item) in enumerate(row): layout.addWidget(item, irow, icol) return widget<|docstring|>Arranges the items in grid_items in a QGridLayout. grid_items should be a
nested list where each inner list is a row of QWidgets

Example:
>>> qapp = QtWidgets.QApplication(['grid layout test'])
>>> mw = QtWidgets.QMainWindow()
>>> #
>>> label = QtWidgets.QLabel('A label')
>>> btn = QtWidgets.QPushButton('Push me')
>>> editor = QtWidgets.QLineEdit('editing')
>>> #
>>> label2 = QtWidgets.QLabel('A label 2')
>>> btn2 = QtWidgets.QPushButton('Push me 2')
>>> editor2 = QtWidgets.QLineEdit('editing 2')
>>> #
>>> label3 = QtWidgets.QLabel('A label 3')
>>> btn3 = QtWidgets.QPushButton('Push me 3')
>>> editor3 = QtWidgets.QLineEdit('editing 3')
>>> #
>>> w3 = grid([[label, btn, editor],
...            [label2, btn2, editor2],
...            [label3, btn3, editor3]])
>>> #
>>> mw.setCentralWidget(w3)
>>> mw.setWindowTitle('Grid Example')
>>> mw.show()
>>> qapp.exec_()
0<|endoftext|>
a452f82e87f38841bef6ab6d4f2e34f4cd6b2e975a02e22ddd133ae5b7508ef3
def hsplit(left, right, parent=None, left_percent=0.5): '\n Arranges the left and right widgets in a horizontal splitter\n ' splitter = QtWidgets.QSplitter(Qt.Horizontal) left.setParent(splitter) right.setParent(splitter) splitter.addWidget(left) splitter.addWidget(right) splitter.setSizes([int((left_percent * 100)), (100 - int((left_percent * 100)))]) return splitter
Arranges the left and right widgets in a horizontal splitter
qtlayout/layouts.py
hsplit
brakedust/qtlayout
1
python
def hsplit(left, right, parent=None, left_percent=0.5): '\n \n ' splitter = QtWidgets.QSplitter(Qt.Horizontal) left.setParent(splitter) right.setParent(splitter) splitter.addWidget(left) splitter.addWidget(right) splitter.setSizes([int((left_percent * 100)), (100 - int((left_percent * 100)))]) return splitter
def hsplit(left, right, parent=None, left_percent=0.5): '\n \n ' splitter = QtWidgets.QSplitter(Qt.Horizontal) left.setParent(splitter) right.setParent(splitter) splitter.addWidget(left) splitter.addWidget(right) splitter.setSizes([int((left_percent * 100)), (100 - int((left_percent * 100)))]) return splitter<|docstring|>Arranges the left and right widgets in a horizontal splitter<|endoftext|>
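hsplit has no doctest like its siblings above, so here is a hedged usage sketch (it assumes a running QApplication, as in the other examples). Note that QSplitter.setSizes takes relative sizes, which is why scaling left_percent to parts of 100 works:

# Usage sketch for hsplit; left_percent=0.3 gives the left pane ~30% of the width.
qapp = QtWidgets.QApplication(['hsplit test'])
left = QtWidgets.QListWidget()
right = QtWidgets.QTextEdit()
splitter = hsplit(left, right, left_percent=0.3)
mw = QtWidgets.QMainWindow()
mw.setCentralWidget(splitter)
mw.show()
qapp.exec_()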
33f54daa0f623d647562c040ce4e197c061443ff1c6c72bacd11b89bed5a01a4
def vsplit(top, bottom, parent=None, splitter=None): '\n Arranges the top and bottom widgets in a vertical splitter\n ' if (splitter is None): splitter = QtWidgets.QSplitter(Qt.Vertical) top.setParent(splitter) bottom.setParent(splitter) splitter.addWidget(top) splitter.addWidget(bottom) return splitter
Arranges the top and bottom widgets in a vertical splitter
qtlayout/layouts.py
vsplit
brakedust/qtlayout
1
python
def vsplit(top, bottom, parent=None, splitter=None): '\n \n ' if (splitter is None): splitter = QtWidgets.QSplitter(Qt.Vertical) top.setParent(splitter) bottom.setParent(splitter) splitter.addWidget(top) splitter.addWidget(bottom) return splitter
def vsplit(top, bottom, parent=None, splitter=None): '\n \n ' if (splitter is None): splitter = QtWidgets.QSplitter(Qt.Vertical) top.setParent(splitter) bottom.setParent(splitter) splitter.addWidget(top) splitter.addWidget(bottom) return splitter<|docstring|>Arranges the top and bottom widgets in a vertical splitter<|endoftext|>
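Likewise for vsplit; the splitter argument lets a caller supply an existing QSplitter instead of having a new vertical one created. A short sketch:

# Usage sketch for vsplit (assumes a running QApplication, as above).
top = QtWidgets.QTableWidget()
bottom = QtWidgets.QPlainTextEdit()
splitter = vsplit(top, bottom)  # creates a Qt.Vertical QSplitter by default
splitter.setSizes([70, 30])     # optional: give the top pane ~70% of the height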
fe2a1331278beac48f9ef8ded42b3f526f7257180ed4d6007eb47e0df04d999e
def flow(*args, margin=2): "\n    Automatically lays out the widgets passed in as *args in a FlowLayout, which\n    wraps widgets onto new rows as needed\n    :param QWidget *args: the widgets to arrange in a flow layout\n    :param int margin: the margin for the layout\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['flow layout test'])\n    >>> label = QtWidgets.QLabel('A label')\n    >>> btn = QtWidgets.QPushButton('Push me')\n    >>> editor = QtWidgets.QLineEdit('editing')\n    >>> w = flow(label, btn, editor)\n    >>> mw = QtWidgets.QMainWindow()\n    >>> mw.setCentralWidget(w)\n    >>> mw.setWindowTitle('flow Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n    " widget = QtWidgets.QWidget() layout = FlowLayout(widget) __set_margin(layout, margin) widget.setLayout(layout) for w in args: layout.addWidget(w) return widget
Automatically lays out the widgets passed in as *args in a FlowLayout, which
wraps widgets onto new rows as needed
:param QWidget *args: the widgets to arrange in a flow layout
:param int margin: the margin for the layout

Example:
>>> qapp = QtWidgets.QApplication(['flow layout test'])
>>> label = QtWidgets.QLabel('A label')
>>> btn = QtWidgets.QPushButton('Push me')
>>> editor = QtWidgets.QLineEdit('editing')
>>> w = flow(label, btn, editor)
>>> mw = QtWidgets.QMainWindow()
>>> mw.setCentralWidget(w)
>>> mw.setWindowTitle('flow Example')
>>> mw.show()
>>> qapp.exec_()
0
qtlayout/layouts.py
flow
brakedust/qtlayout
1
python
def flow(*args, margin=2): "\n    Automatically lays out the widgets passed in as *args in a FlowLayout, which\n    wraps widgets onto new rows as needed\n    :param QWidget *args: the widgets to arrange in a flow layout\n    :param int margin: the margin for the layout\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['flow layout test'])\n    >>> label = QtWidgets.QLabel('A label')\n    >>> btn = QtWidgets.QPushButton('Push me')\n    >>> editor = QtWidgets.QLineEdit('editing')\n    >>> w = flow(label, btn, editor)\n    >>> mw = QtWidgets.QMainWindow()\n    >>> mw.setCentralWidget(w)\n    >>> mw.setWindowTitle('flow Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n    " widget = QtWidgets.QWidget() layout = FlowLayout(widget) __set_margin(layout, margin) widget.setLayout(layout) for w in args: layout.addWidget(w) return widget
def flow(*args, margin=2): "\n    Automatically lays out the widgets passed in as *args in a FlowLayout, which\n    wraps widgets onto new rows as needed\n    :param QWidget *args: the widgets to arrange in a flow layout\n    :param int margin: the margin for the layout\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['flow layout test'])\n    >>> label = QtWidgets.QLabel('A label')\n    >>> btn = QtWidgets.QPushButton('Push me')\n    >>> editor = QtWidgets.QLineEdit('editing')\n    >>> w = flow(label, btn, editor)\n    >>> mw = QtWidgets.QMainWindow()\n    >>> mw.setCentralWidget(w)\n    >>> mw.setWindowTitle('flow Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n    " widget = QtWidgets.QWidget() layout = FlowLayout(widget) __set_margin(layout, margin) widget.setLayout(layout) for w in args: layout.addWidget(w) return widget<|docstring|>Automatically lays out the widgets passed in as *args in a FlowLayout, which
wraps widgets onto new rows as needed
:param QWidget *args: the widgets to arrange in a flow layout
:param int margin: the margin for the layout

Example:
>>> qapp = QtWidgets.QApplication(['flow layout test'])
>>> label = QtWidgets.QLabel('A label')
>>> btn = QtWidgets.QPushButton('Push me')
>>> editor = QtWidgets.QLineEdit('editing')
>>> w = flow(label, btn, editor)
>>> mw = QtWidgets.QMainWindow()
>>> mw.setCentralWidget(w)
>>> mw.setWindowTitle('flow Example')
>>> mw.show()
>>> qapp.exec_()
0<|endoftext|>
08ad9274f6c8fc078644aa3b771ae8ae300dd9d0f2383400d03f7f94acc886c4
def tabs(*args): "\n    Puts each widget into a tab page of a new QTabWidget.\n    Each argument should be a (widget, title) tuple\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['tabs layout test'])\n    >>> t = tabs((QtWidgets.QPushButton(),'A Button Page'),(QtWidgets.QListWidget(), 'A list Page') )\n    >>> mw = QtWidgets.QMainWindow()\n    >>> mw.setCentralWidget(t)\n    >>> mw.setWindowTitle('tabs Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n    " widget = QtWidgets.QTabWidget() for (child_widget, title) in args: widget.addTab(child_widget, title) return widget
Puts each widget into a tab page of a new QTabWidget.
Each argument should be a (widget, title) tuple

Example:
>>> qapp = QtWidgets.QApplication(['tabs layout test'])
>>> t = tabs((QtWidgets.QPushButton(),'A Button Page'),(QtWidgets.QListWidget(), 'A list Page') )
>>> mw = QtWidgets.QMainWindow()
>>> mw.setCentralWidget(t)
>>> mw.setWindowTitle('tabs Example')
>>> mw.show()
>>> qapp.exec_()
0
qtlayout/layouts.py
tabs
brakedust/qtlayout
1
python
def tabs(*args): "\n    Puts each widget into a tab page of a new QTabWidget.\n    Each argument should be a (widget, title) tuple\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['tabs layout test'])\n    >>> t = tabs((QtWidgets.QPushButton(),'A Button Page'),(QtWidgets.QListWidget(), 'A list Page') )\n    >>> mw = QtWidgets.QMainWindow()\n    >>> mw.setCentralWidget(t)\n    >>> mw.setWindowTitle('tabs Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n    " widget = QtWidgets.QTabWidget() for (child_widget, title) in args: widget.addTab(child_widget, title) return widget
def tabs(*args): "\n    Puts each widget into a tab page of a new QTabWidget.\n    Each argument should be a (widget, title) tuple\n\n    Example:\n    >>> qapp = QtWidgets.QApplication(['tabs layout test'])\n    >>> t = tabs((QtWidgets.QPushButton(),'A Button Page'),(QtWidgets.QListWidget(), 'A list Page') )\n    >>> mw = QtWidgets.QMainWindow()\n    >>> mw.setCentralWidget(t)\n    >>> mw.setWindowTitle('tabs Example')\n    >>> mw.show()\n    >>> qapp.exec_()\n    0\n    " widget = QtWidgets.QTabWidget() for (child_widget, title) in args: widget.addTab(child_widget, title) return widget<|docstring|>Puts each widget into a tab page of a new QTabWidget.
Each argument should be a (widget, title) tuple

Example:
>>> qapp = QtWidgets.QApplication(['tabs layout test'])
>>> t = tabs((QtWidgets.QPushButton(),'A Button Page'),(QtWidgets.QListWidget(), 'A list Page') )
>>> mw = QtWidgets.QMainWindow()
>>> mw.setCentralWidget(t)
>>> mw.setWindowTitle('tabs Example')
>>> mw.show()
>>> qapp.exec_()
0<|endoftext|>
e75e3f8f0c731997f163b411769be82f76c4b2e0520e62d2098dcc587735a7a0
def INT2STR(val): '\n    Convert a value (float or int) into an integer string\n    by rounding to the nearest integer and returning the \n    string. Used to handle the case where the value is nan or \n    masked.\n    \n    Parameters\n    ----------\n    val: float or int\n    \n    Returns\n    -------\n    Val rounded to the nearest int and converted to a string.\n    \n    ' if np.isnan(val): return '--' try: return str(int(np.round(val, 0))) except: return str(val)
Convert a value (float or int) into an integer string
by rounding to the nearest integer and returning the
string. Used to handle the case where the value is nan or
masked.

Parameters
----------
val: float or int

Returns
-------
Val rounded to the nearest int and converted to a string.
sharppy/sharptab/utils.py
INT2STR
skovic/SHARPpy
163
python
def INT2STR(val): '\n    Convert a value (float or int) into an integer string\n    by rounding to the nearest integer and returning the \n    string. Used to handle the case where the value is nan or \n    masked.\n    \n    Parameters\n    ----------\n    val: float or int\n    \n    Returns\n    -------\n    Val rounded to the nearest int and converted to a string.\n    \n    ' if np.isnan(val): return '--' try: return str(int(np.round(val, 0))) except: return str(val)
def INT2STR(val): '\n    Convert a value (float or int) into an integer string\n    by rounding to the nearest integer and returning the \n    string. Used to handle the case where the value is nan or \n    masked.\n    \n    Parameters\n    ----------\n    val: float or int\n    \n    Returns\n    -------\n    Val rounded to the nearest int and converted to a string.\n    \n    ' if np.isnan(val): return '--' try: return str(int(np.round(val, 0))) except: return str(val)<|docstring|>Convert a value (float or int) into an integer string
by rounding to the nearest integer and returning the
string. Used to handle the case where the value is nan or
masked.

Parameters
----------
val: float or int

Returns
-------
Val rounded to the nearest int and converted to a string.<|endoftext|>
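A sketch of the masked/NaN handling described above. np.isnan catches plain NaNs; for a masked value, int() raises inside the try block and the function falls back to str(val), and numpy happens to render ma.masked as '--' as well (exact behavior may vary slightly across numpy versions):

import numpy as np
import numpy.ma as ma

print(INT2STR(12.7))       # '13'  -- rounded to the nearest integer
print(INT2STR(np.nan))     # '--'
print(INT2STR(ma.masked))  # '--'  -- str(ma.masked) is '--'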
3ee901447e154836ad6baf538735450575257076a46b44bc9dc8222c1f1db8f4
def FLOAT2STR(val, precision): '\n Convert a value (float or int) into a float string to the \n decimal place of a specified precision. Used to handle the\n case where the value is nan or masked.\n \n Parameters\n ----------\n val: float or int\n precision: int\n used to specify the precision of the resulting rounded value\n \n Returns\n -------\n val rounded to the nearest specified decimal place and converted\n to a string.\n ' try: new_val = str(np.round(val, precision)) except: new_val = str(val) if (new_val.strip() == 'nan'): return '--' else: return new_val
Convert a value (float or int) into a float string to the decimal place of a specified precision. Used to handle the case where the value is nan or masked. Parameters ---------- val: float or int precision: int used to specify the precision of the resulting rounded value Returns ------- val rounded to the nearest specified decimal place and converted to a string.
sharppy/sharptab/utils.py
FLOAT2STR
skovic/SHARPpy
163
python
def FLOAT2STR(val, precision): '\n Convert a value (float or int) into a float string to the \n decimal place of a specified precision. Used to handle the\n case where the value is nan or masked.\n \n Parameters\n ----------\n val: float or int\n precision: int\n used to specify the precision of the resulting rounded value\n \n Returns\n -------\n val rounded to the nearest specified decimal place and converted\n to a string.\n ' try: new_val = str(np.round(val, precision)) except: new_val = str(val) if (new_val.strip() == 'nan'): return '--' else: return new_val
def FLOAT2STR(val, precision): '\n Convert a value (float or int) into a float string to the \n decimal place of a specified precision. Used to handle the\n case where the value is nan or masked.\n \n Parameters\n ----------\n val: float or int\n precision: int\n used to specify the precision of the resulting rounded value\n \n Returns\n -------\n val rounded to the nearest specified decimal place and converted\n to a string.\n ' try: new_val = str(np.round(val, precision)) except: new_val = str(val) if (new_val.strip() == 'nan'): return '--' else: return new_val<|docstring|>Convert a value (float or int) into a float string to the decimal place of a specified precision. Used to handle the case where the value is nan or masked. Parameters ---------- val: float or int precision: int used to specify the precision of the resulting rounded value Returns ------- val rounded to the nearest specified decimal place and converted to a string.<|endoftext|>
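And the corresponding sketch for FLOAT2STR, reusing the numpy/ma imports above:

print(FLOAT2STR(12.3456, 2))    # '12.35'
print(FLOAT2STR(np.nan, 2))     # '--'  -- str(np.round(nan, 2)) is 'nan'
print(FLOAT2STR(ma.masked, 2))  # '--'  -- masked values also render as '--'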
b68d8493cd242d23e1850b13d482d0b395de57ade073319ff7f3622900b391f0
def MS2KTS(val): '\n Convert meters per second to knots\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (m/s)\n\n Returns\n -------\n Val converted to knots (float)\n\n ' return (val * 1.94384449)
Convert meters per second to knots Parameters ---------- val : float, numpy_array Speed (m/s) Returns ------- Val converted to knots (float)
sharppy/sharptab/utils.py
MS2KTS
skovic/SHARPpy
163
python
def MS2KTS(val): '\n Convert meters per second to knots\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (m/s)\n\n Returns\n -------\n Val converted to knots (float)\n\n ' return (val * 1.94384449)
def MS2KTS(val): '\n Convert meters per second to knots\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (m/s)\n\n Returns\n -------\n Val converted to knots (float)\n\n ' return (val * 1.94384449)<|docstring|>Convert meters per second to knots Parameters ---------- val : float, numpy_array Speed (m/s) Returns ------- Val converted to knots (float)<|endoftext|>
1c08c3988dd9c0348c902670ce61d1f04116ff648621be10af4d14482d26f65c
def KTS2MS(val): '\n Convert knots to meters per second\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (kts)\n\n Returns\n -------\n Val converted to meters per second (float)\n\n ' return (val * 0.514444)
Convert knots to meters per second Parameters ---------- val : float, numpy_array Speed (kts) Returns ------- Val converted to meters per second (float)
sharppy/sharptab/utils.py
KTS2MS
skovic/SHARPpy
163
python
def KTS2MS(val): '\n Convert knots to meters per second\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (kts)\n\n Returns\n -------\n Val converted to meters per second (float)\n\n ' return (val * 0.514444)
def KTS2MS(val): '\n Convert knots to meters per second\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (kts)\n\n Returns\n -------\n Val converted to meters per second (float)\n\n ' return (val * 0.514444)<|docstring|>Convert knots to meters per second Parameters ---------- val : float, numpy_array Speed (kts) Returns ------- Val converted to meters per second (float)<|endoftext|>
43a8665686c1486b20a24b86a7e4b0f8e390c5e4c83c2087ae647dba840f4472
def MS2MPH(val): '\n Convert meters per second to miles per hour\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (m/s)\n\n Returns\n -------\n Val converted to miles per hour (float)\n\n ' return (val * 2.23694)
Convert meters per second to miles per hour Parameters ---------- val : float, numpy_array Speed (m/s) Returns ------- Val converted to miles per hour (float)
sharppy/sharptab/utils.py
MS2MPH
skovic/SHARPpy
163
python
def MS2MPH(val): '\n Convert meters per second to miles per hour\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (m/s)\n\n Returns\n -------\n Val converted to miles per hour (float)\n\n ' return (val * 2.23694)
def MS2MPH(val): '\n Convert meters per second to miles per hour\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (m/s)\n\n Returns\n -------\n Val converted to miles per hour (float)\n\n ' return (val * 2.23694)<|docstring|>Convert meters per second to miles per hour Parameters ---------- val : float, numpy_array Speed (m/s) Returns ------- Val converted to miles per hour (float)<|endoftext|>
15da512455baf62a3b665563b2ca3f96a1724afddae93e0c3d247df4f36f1710
def MPH2MS(val): '\n Convert miles per hour to meters per second\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (mph)\n\n Returns\n -------\n Val converted to meters per second (float)\n\n ' return (val * 0.44704)
Convert miles per hour to meters per second Parameters ---------- val : float, numpy_array Speed (mph) Returns ------- Val converted to meters per second (float)
sharppy/sharptab/utils.py
MPH2MS
skovic/SHARPpy
163
python
def MPH2MS(val): '\n Convert miles per hour to meters per second\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (mph)\n\n Returns\n -------\n Val converted to meters per second (float)\n\n ' return (val * 0.44704)
def MPH2MS(val): '\n Convert miles per hour to meters per second\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (mph)\n\n Returns\n -------\n Val converted to meters per second (float)\n\n ' return (val * 0.44704)<|docstring|>Convert miles per hour to meters per second Parameters ---------- val : float, numpy_array Speed (mph) Returns ------- Val converted to meters per second (float)<|endoftext|>
4e7ece72323914bd6b8c88226911782736b0aeb2f7905cc327b0cfd505a1fced
def MPH2KTS(val): '\n Convert miles per hour to knots\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (mph)\n\n Returns\n -------\n Val converted to knots (float)\n\n ' return (val * 0.868976)
Convert miles per hour to knots Parameters ---------- val : float, numpy_array Speed (mph) Returns ------- Val converted to knots (float)
sharppy/sharptab/utils.py
MPH2KTS
skovic/SHARPpy
163
python
def MPH2KTS(val): '\n Convert miles per hour to knots\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (mph)\n\n Returns\n -------\n Val converted to knots (float)\n\n ' return (val * 0.868976)
def MPH2KTS(val): '\n Convert miles per hour to knots\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (mph)\n\n Returns\n -------\n Val converted to knots (float)\n\n ' return (val * 0.868976)<|docstring|>Convert miles per hour to knots Parameters ---------- val : float, numpy_array Speed (mph) Returns ------- Val converted to knots (float)<|endoftext|>
9988bcfe39e922fc2bf9a8e74fb993bf8afc73b932387d3a2f43071acbbadc0e
def KTS2MPH(val): '\n Convert knots to miles per hour\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (kts)\n\n Returns\n -------\n Val converted to miles per hour (float)\n\n ' return (val * 1.15078)
Convert knots to miles per hour Parameters ---------- val : float, numpy_array Speed (kts) Returns ------- Val converted to miles per hour (float)
sharppy/sharptab/utils.py
KTS2MPH
skovic/SHARPpy
163
python
def KTS2MPH(val): '\n Convert knots to miles per hour\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (kts)\n\n Returns\n -------\n Val converted to miles per hour (float)\n\n ' return (val * 1.15078)
def KTS2MPH(val): '\n Convert knots to miles per hour\n\n Parameters\n ----------\n val : float, numpy_array\n Speed (kts)\n\n Returns\n -------\n Val converted to miles per hour (float)\n\n ' return (val * 1.15078)<|docstring|>Convert knots to miles per hour Parameters ---------- val : float, numpy_array Speed (kts) Returns ------- Val converted to miles per hour (float)<|endoftext|>
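The speed-conversion factors above (MS2KTS through KTS2MPH) are mutual inverses to within floating-point rounding; a quick sanity-check sketch:

import numpy as np

v = 10.0  # m/s
assert np.isclose(KTS2MS(MS2KTS(v)), v)           # m/s -> kts -> m/s
assert np.isclose(MPH2MS(MS2MPH(v)), v)           # m/s -> mph -> m/s
assert np.isclose(MPH2KTS(MS2MPH(v)), MS2KTS(v))  # via mph agrees with direct kts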
d5cadfbfc40fe332571a5b1f5cd21f88ca19a4cf6c1dcf1539b7a3c22296b92a
def M2FT(val): '\n Convert meters to feet\n\n Parameters\n ----------\n val : float, numpy_array\n Distance (m)\n\n Returns\n -------\n Val converted to feet (float)\n\n ' return (val * 3.2808399)
Convert meters to feet Parameters ---------- val : float, numpy_array Distance (m) Returns ------- Val converted to feet (float)
sharppy/sharptab/utils.py
M2FT
skovic/SHARPpy
163
python
def M2FT(val): '\n Convert meters to feet\n\n Parameters\n ----------\n val : float, numpy_array\n Distance (m)\n\n Returns\n -------\n Val converted to feet (float)\n\n ' return (val * 3.2808399)
def M2FT(val): '\n Convert meters to feet\n\n Parameters\n ----------\n val : float, numpy_array\n Distance (m)\n\n Returns\n -------\n Val converted to feet (float)\n\n ' return (val * 3.2808399)<|docstring|>Convert meters to feet Parameters ---------- val : float, numpy_array Distance (m) Returns ------- Val converted to feet (float)<|endoftext|>
d94be1afc2a7d7ffe708da3bb72bb20b25f897a7fff36519492a391c7bfcd704
def FT2M(val): '\n Convert feet to meters\n\n Parameters\n ----------\n val : float, numpy_array\n Distance (ft)\n\n Returns\n -------\n Val converted to meters (float)\n\n ' return (val * 0.3048)
Convert feet to meters Parameters ---------- val : float, numpy_array Distance (ft) Returns ------- Val converted to meters (float)
sharppy/sharptab/utils.py
FT2M
skovic/SHARPpy
163
python
def FT2M(val): '\n Convert feet to meters\n\n Parameters\n ----------\n val : float, numpy_array\n Distance (ft)\n\n Returns\n -------\n Val converted to meters (float)\n\n ' return (val * 0.3048)
def FT2M(val): '\n Convert feet to meters\n\n Parameters\n ----------\n val : float, numpy_array\n Distance (ft)\n\n Returns\n -------\n Val converted to meters (float)\n\n ' return (val * 0.3048)<|docstring|>Convert feet to meters Parameters ---------- val : float, numpy_array Distance (ft) Returns ------- Val converted to meters (float)<|endoftext|>
f11a38a6a5fe83ce58064276cbfb4608e91df274cacd2936a63750f5b967bb7e
def IN2CM(val): '\n Convert inches to centimeters\n\n Parameters\n ----------\n val : float, numpy_array\n Distance (inches)\n\n Returns\n -------\n Val converted to centimeters (float)\n ' return (val * 2.54)
Convert inches to centimeters Parameters ---------- val : float, numpy_array Distance (inches) Returns ------- Val converted to centimeters (float)
sharppy/sharptab/utils.py
IN2CM
skovic/SHARPpy
163
python
def IN2CM(val): '\n Convert inches to centimeters\n\n Parameters\n ----------\n val : float, numpy_array\n Distance (inches)\n\n Returns\n -------\n Val converted to centimeters (float)\n ' return (val * 2.54)
def IN2CM(val): '\n Convert inches to centimeters\n\n Parameters\n ----------\n val : float, numpy_array\n Distance (inches)\n\n Returns\n -------\n Val converted to centimeters (float)\n ' return (val * 2.54)<|docstring|>Convert inches to centimeters Parameters ---------- val : float, numpy_array Distance (inches) Returns ------- Val converted to centimeters (float)<|endoftext|>
10d8e51f46f6f21e9a45852dd01b148a84e3df5af8a480cf4b8869838296949b
def CM2IN(val): '\n Convert centimeters to inches\n\n Parameters\n ----------\n val : float, numpy_array\n Distance (centimeters)\n\n Returns\n -------\n Val converted to inches (float)\n ' return (val / 2.54)
Convert centimeters to inches Parameters ---------- val : float, numpy_array Distance (centimeters) Returns ------- Val converted to inches (float)
sharppy/sharptab/utils.py
CM2IN
skovic/SHARPpy
163
python
def CM2IN(val): '\n Convert centimeters to inches\n\n Parameters\n ----------\n val : float, numpy_array\n Distance (centimeters)\n\n Returns\n -------\n Val converted to inches (float)\n ' return (val / 2.54)
def CM2IN(val): '\n Convert centimeters to inches\n\n Parameters\n ----------\n val : float, numpy_array\n Distance (centimeters)\n\n Returns\n -------\n Val converted to inches (float)\n ' return (val / 2.54)<|docstring|>Convert centimeters to inches Parameters ---------- val : float, numpy_array Distance (centimeters) Returns ------- Val converted to inches (float)<|endoftext|>
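The same holds for the length conversions: M2FT/FT2M and IN2CM/CM2IN are inverse pairs (2.54 cm per inch is exact by definition):

assert np.isclose(FT2M(M2FT(1.0)), 1.0)  # 3.2808399 * 0.3048 ~= 1
assert CM2IN(IN2CM(1.0)) == 1.0          # (1.0 * 2.54) / 2.54 is exact in floats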
a3e8d2df9eae469f14cb8ab1d2e41e1aa15bfbd2d4847b226a34c0a263d81cff
def _vec2comp(wdir, wspd): '\n Underlying function that converts a vector to its components\n\n Parameters\n ----------\n wdir : number, masked_array\n Angle in meteorological degrees\n wspd : number, masked_array\n Magnitudes of wind vector\n\n Returns\n -------\n u : number, masked_array (same as input)\n U-component of the wind\n v : number, masked_array (same as input)\n V-component of the wind\n\n ' u = ((wspd * ma.sin(np.radians(wdir))) * (- 1)) v = ((wspd * ma.cos(np.radians(wdir))) * (- 1)) return (u, v)
Underlying function that converts a vector to its components Parameters ---------- wdir : number, masked_array Angle in meteorological degrees wspd : number, masked_array Magnitudes of wind vector Returns ------- u : number, masked_array (same as input) U-component of the wind v : number, masked_array (same as input) V-component of the wind
sharppy/sharptab/utils.py
_vec2comp
skovic/SHARPpy
163
python
def _vec2comp(wdir, wspd): '\n Underlying function that converts a vector to its components\n\n Parameters\n ----------\n wdir : number, masked_array\n Angle in meteorological degrees\n wspd : number, masked_array\n Magnitudes of wind vector\n\n Returns\n -------\n u : number, masked_array (same as input)\n U-component of the wind\n v : number, masked_array (same as input)\n V-component of the wind\n\n ' u = ((wspd * ma.sin(np.radians(wdir))) * (- 1)) v = ((wspd * ma.cos(np.radians(wdir))) * (- 1)) return (u, v)
def _vec2comp(wdir, wspd): '\n Underlying function that converts a vector to its components\n\n Parameters\n ----------\n wdir : number, masked_array\n Angle in meteorological degrees\n wspd : number, masked_array\n Magnitudes of wind vector\n\n Returns\n -------\n u : number, masked_array (same as input)\n U-component of the wind\n v : number, masked_array (same as input)\n V-component of the wind\n\n ' u = ((wspd * ma.sin(np.radians(wdir))) * (- 1)) v = ((wspd * ma.cos(np.radians(wdir))) * (- 1)) return (u, v)<|docstring|>Underlying function that converts a vector to its components Parameters ---------- wdir : number, masked_array Angle in meteorological degrees wspd : number, masked_array Magnitudes of wind vector Returns ------- u : number, masked_array (same as input) U-component of the wind v : number, masked_array (same as input) V-component of the wind<|endoftext|>
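The sign convention in _vec2comp is the standard meteorological one: wdir is the direction the wind blows *from*, so a north wind (wdir = 0) has v = -wspd, i.e. air moving southward. A worked check of the four cardinal directions:

import numpy as np

u, v = _vec2comp(np.array([0.0, 90.0, 180.0, 270.0]), np.full(4, 10.0))
# north wind (0):   u ~  0,  v ~ -10
# east wind (90):   u ~ -10, v ~  0
# south wind (180): u ~  0,  v ~ +10
# west wind (270):  u ~ +10, v ~  0
print(np.round(u, 6), np.round(v, 6))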
8171990f8b510802effcce47645f07e8559bec65fe8d49a21048a85e59069549
def vec2comp(wdir, wspd, missing=MISSING): '\n Convert direction and magnitude into U, V components\n\n Parameters\n ----------\n wdir : number, array_like\n Angle in meteorological degrees\n wspd : number, array_like\n Magnitudes of wind vector (input units == output units)\n missing : number (optional)\n Optional missing parameter. If not given, assume default missing\n value from sharppy.sharptab.constants.MISSING\n\n Returns\n -------\n u : number, array_like (same as input)\n U-component of the wind (units are the same as those of input speed)\n v : number, array_like (same as input)\n V-component of the wind (units are the same as those of input speed)\n\n ' if ((not QC(wdir)) or (not QC(wspd))): return (ma.masked, ma.masked) wdir = ma.asanyarray(wdir).astype(np.float64) wspd = ma.asanyarray(wspd).astype(np.float64) wdir.set_fill_value(missing) wspd.set_fill_value(missing) assert (wdir.shape == wspd.shape), 'wdir and wspd have different shapes' if wdir.shape: wdir[(wdir == missing)] = ma.masked wspd[(wspd == missing)] = ma.masked wdir[wspd.mask] = ma.masked wspd[wdir.mask] = ma.masked (u, v) = _vec2comp(wdir, wspd) u[(np.fabs(u) < TOL)] = 0.0 v[(np.fabs(v) < TOL)] = 0.0 else: if (wdir == missing): wdir = ma.masked wspd = ma.masked elif (wspd == missing): wdir = ma.masked wspd = ma.masked (u, v) = _vec2comp(wdir, wspd) if (ma.fabs(u) < TOL): u = 0.0 if (ma.fabs(v) < TOL): v = 0.0 return (u, v)
Convert direction and magnitude into U, V components Parameters ---------- wdir : number, array_like Angle in meteorological degrees wspd : number, array_like Magnitudes of wind vector (input units == output units) missing : number (optional) Optional missing parameter. If not given, assume default missing value from sharppy.sharptab.constants.MISSING Returns ------- u : number, array_like (same as input) U-component of the wind (units are the same as those of input speed) v : number, array_like (same as input) V-component of the wind (units are the same as those of input speed)
sharppy/sharptab/utils.py
vec2comp
skovic/SHARPpy
163
python
def vec2comp(wdir, wspd, missing=MISSING): '\n Convert direction and magnitude into U, V components\n\n Parameters\n ----------\n wdir : number, array_like\n Angle in meteorological degrees\n wspd : number, array_like\n Magnitudes of wind vector (input units == output units)\n missing : number (optional)\n Optional missing parameter. If not given, assume default missing\n value from sharppy.sharptab.constants.MISSING\n\n Returns\n -------\n u : number, array_like (same as input)\n U-component of the wind (units are the same as those of input speed)\n v : number, array_like (same as input)\n V-component of the wind (units are the same as those of input speed)\n\n ' if ((not QC(wdir)) or (not QC(wspd))): return (ma.masked, ma.masked) wdir = ma.asanyarray(wdir).astype(np.float64) wspd = ma.asanyarray(wspd).astype(np.float64) wdir.set_fill_value(missing) wspd.set_fill_value(missing) assert (wdir.shape == wspd.shape), 'wdir and wspd have different shapes' if wdir.shape: wdir[(wdir == missing)] = ma.masked wspd[(wspd == missing)] = ma.masked wdir[wspd.mask] = ma.masked wspd[wdir.mask] = ma.masked (u, v) = _vec2comp(wdir, wspd) u[(np.fabs(u) < TOL)] = 0.0 v[(np.fabs(v) < TOL)] = 0.0 else: if (wdir == missing): wdir = ma.masked wspd = ma.masked elif (wspd == missing): wdir = ma.masked wspd = ma.masked (u, v) = _vec2comp(wdir, wspd) if (ma.fabs(u) < TOL): u = 0.0 if (ma.fabs(v) < TOL): v = 0.0 return (u, v)
def vec2comp(wdir, wspd, missing=MISSING): '\n Convert direction and magnitude into U, V components\n\n Parameters\n ----------\n wdir : number, array_like\n Angle in meteorological degrees\n wspd : number, array_like\n Magnitudes of wind vector (input units == output units)\n missing : number (optional)\n Optional missing parameter. If not given, assume default missing\n value from sharppy.sharptab.constants.MISSING\n\n Returns\n -------\n u : number, array_like (same as input)\n U-component of the wind (units are the same as those of input speed)\n v : number, array_like (same as input)\n V-component of the wind (units are the same as those of input speed)\n\n ' if ((not QC(wdir)) or (not QC(wspd))): return (ma.masked, ma.masked) wdir = ma.asanyarray(wdir).astype(np.float64) wspd = ma.asanyarray(wspd).astype(np.float64) wdir.set_fill_value(missing) wspd.set_fill_value(missing) assert (wdir.shape == wspd.shape), 'wdir and wspd have different shapes' if wdir.shape: wdir[(wdir == missing)] = ma.masked wspd[(wspd == missing)] = ma.masked wdir[wspd.mask] = ma.masked wspd[wdir.mask] = ma.masked (u, v) = _vec2comp(wdir, wspd) u[(np.fabs(u) < TOL)] = 0.0 v[(np.fabs(v) < TOL)] = 0.0 else: if (wdir == missing): wdir = ma.masked wspd = ma.masked elif (wspd == missing): wdir = ma.masked wspd = ma.masked (u, v) = _vec2comp(wdir, wspd) if (ma.fabs(u) < TOL): u = 0.0 if (ma.fabs(v) < TOL): v = 0.0 return (u, v)<|docstring|>Convert direction and magnitude into U, V components Parameters ---------- wdir : number, array_like Angle in meteorological degrees wspd : number, array_like Magnitudes of wind vector (input units == output units) missing : number (optional) Optional missing parameter. If not given, assume default missing value from sharppy.sharptab.constants.MISSING Returns ------- u : number, array_like (same as input) U-component of the wind (units are the same as those of input speed) v : number, array_like (same as input) V-component of the wind (units are the same as those of input speed)<|endoftext|>
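A sketch of vec2comp's missing-data handling; it assumes SHARPpy's default MISSING sentinel is -9999 (check sharppy.sharptab.constants if in doubt). A missing direction or speed masks both output components:

import numpy.ma as ma

wdir = ma.array([270.0, -9999.0, 90.0])
wspd = ma.array([10.0, 5.0, -9999.0])
u, v = vec2comp(wdir, wspd)
print(u)  # [10.0 -- --]  -- west wind: u = +10
print(v)  # [0.0 -- --]   -- v snapped to 0.0 by the TOL check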
9d6f3640a3133ee78d7e88c987de9fecdbb8769be447ba75933178cd0c3aebee
def comp2vec(u, v, missing=MISSING): '\n Convert U, V components into direction and magnitude\n\n Parameters\n ----------\n u : number, array_like\n U-component of the wind\n v : number, array_like\n V-component of the wind\n missing : number (optional)\n Optional missing parameter. If not given, assume default missing\n value from sharppy.sharptab.constants.MISSING\n\n Returns\n -------\n wdir : number, array_like (same as input)\n Angle in meteorological degrees\n wspd : number, array_like (same as input)\n Magnitudes of wind vector (input units == output units)\n\n ' if ((not QC(u)) or (not QC(v))): return (ma.masked, ma.masked) u = ma.asanyarray(u).astype(np.float64) v = ma.asanyarray(v).astype(np.float64) u.set_fill_value(missing) v.set_fill_value(missing) wdir = np.degrees(np.arctan2((- u), (- v))) if wdir.shape: u[(u == missing)] = ma.masked v[(v == missing)] = ma.masked wdir[u.mask] = ma.masked wdir[v.mask] = ma.masked wdir[(wdir < 0)] += 360 wdir[(np.fabs(wdir) < TOL)] = 0.0 else: if ((u == missing) or (v == missing)): return (ma.masked, ma.masked) if (wdir < 0): wdir += 360 if (np.fabs(wdir) < TOL): wdir = 0.0 return (wdir, mag(u, v))
Convert U, V components into direction and magnitude Parameters ---------- u : number, array_like U-component of the wind v : number, array_like V-component of the wind missing : number (optional) Optional missing parameter. If not given, assume default missing value from sharppy.sharptab.constants.MISSING Returns ------- wdir : number, array_like (same as input) Angle in meteorological degrees wspd : number, array_like (same as input) Magnitudes of wind vector (input units == output units)
sharppy/sharptab/utils.py
comp2vec
skovic/SHARPpy
163
python
def comp2vec(u, v, missing=MISSING): '\n Convert U, V components into direction and magnitude\n\n Parameters\n ----------\n u : number, array_like\n U-component of the wind\n v : number, array_like\n V-component of the wind\n missing : number (optional)\n Optional missing parameter. If not given, assume default missing\n value from sharppy.sharptab.constants.MISSING\n\n Returns\n -------\n wdir : number, array_like (same as input)\n Angle in meteorological degrees\n wspd : number, array_like (same as input)\n Magnitudes of wind vector (input units == output units)\n\n ' if ((not QC(u)) or (not QC(v))): return (ma.masked, ma.masked) u = ma.asanyarray(u).astype(np.float64) v = ma.asanyarray(v).astype(np.float64) u.set_fill_value(missing) v.set_fill_value(missing) wdir = np.degrees(np.arctan2((- u), (- v))) if wdir.shape: u[(u == missing)] = ma.masked v[(v == missing)] = ma.masked wdir[u.mask] = ma.masked wdir[v.mask] = ma.masked wdir[(wdir < 0)] += 360 wdir[(np.fabs(wdir) < TOL)] = 0.0 else: if ((u == missing) or (v == missing)): return (ma.masked, ma.masked) if (wdir < 0): wdir += 360 if (np.fabs(wdir) < TOL): wdir = 0.0 return (wdir, mag(u, v))
def comp2vec(u, v, missing=MISSING): '\n Convert U, V components into direction and magnitude\n\n Parameters\n ----------\n u : number, array_like\n U-component of the wind\n v : number, array_like\n V-component of the wind\n missing : number (optional)\n Optional missing parameter. If not given, assume default missing\n value from sharppy.sharptab.constants.MISSING\n\n Returns\n -------\n wdir : number, array_like (same as input)\n Angle in meteorological degrees\n wspd : number, array_like (same as input)\n Magnitudes of wind vector (input units == output units)\n\n ' if ((not QC(u)) or (not QC(v))): return (ma.masked, ma.masked) u = ma.asanyarray(u).astype(np.float64) v = ma.asanyarray(v).astype(np.float64) u.set_fill_value(missing) v.set_fill_value(missing) wdir = np.degrees(np.arctan2((- u), (- v))) if wdir.shape: u[(u == missing)] = ma.masked v[(v == missing)] = ma.masked wdir[u.mask] = ma.masked wdir[v.mask] = ma.masked wdir[(wdir < 0)] += 360 wdir[(np.fabs(wdir) < TOL)] = 0.0 else: if ((u == missing) or (v == missing)): return (ma.masked, ma.masked) if (wdir < 0): wdir += 360 if (np.fabs(wdir) < TOL): wdir = 0.0 return (wdir, mag(u, v))<|docstring|>Convert U, V components into direction and magnitude Parameters ---------- u : number, array_like U-component of the wind v : number, array_like V-component of the wind missing : number (optional) Optional missing parameter. If not given, assume default missing value from sharppy.sharptab.constants.MISSING Returns ------- wdir : number, array_like (same as input) Angle in meteorological degrees wspd : number, array_like (same as input) Magnitudes of wind vector (input units == output units)<|endoftext|>
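comp2vec inverts vec2comp up to the 0-360 wrap, so a round trip recovers the original vector:

wdir, wspd = comp2vec(*vec2comp(225.0, 20.0))
print(wdir, wspd)  # ~225.0 ~20.0  -- a southwest wind round-trips cleanly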
8a557882dc8fa5933fdb4c2b4a7c0882e569104d2ffc5224f2f7a572947f7b22
def mag(u, v, missing=MISSING): '\n Compute the magnitude of a vector from its components\n\n Parameters\n ----------\n u : number, array_like\n U-component of the wind\n v : number, array_like\n V-component of the wind\n missing : number (optional)\n Optional missing parameter. If not given, assume default missing\n value from sharppy.sharptab.constants.MISSING\n\n Returns\n -------\n mag : number, array_like\n The magnitude of the vector (units are the same as input)\n\n ' if ((not QC(u)) or (not QC(v))): return ma.masked u = np.ma.asanyarray(u).astype(np.float64) v = np.ma.asanyarray(v).astype(np.float64) u.set_fill_value(missing) v.set_fill_value(missing) if u.shape: u[(u == missing)] = ma.masked v[(v == missing)] = ma.masked elif ((u == missing) or (v == missing)): return ma.masked return ma.sqrt(((u ** 2) + (v ** 2)))
Compute the magnitude of a vector from its components Parameters ---------- u : number, array_like U-component of the wind v : number, array_like V-component of the wind missing : number (optional) Optional missing parameter. If not given, assume default missing value from sharppy.sharptab.constants.MISSING Returns ------- mag : number, array_like The magnitude of the vector (units are the same as input)
sharppy/sharptab/utils.py
mag
skovic/SHARPpy
163
python
def mag(u, v, missing=MISSING): '\n Compute the magnitude of a vector from its components\n\n Parameters\n ----------\n u : number, array_like\n U-component of the wind\n v : number, array_like\n V-component of the wind\n missing : number (optional)\n Optional missing parameter. If not given, assume default missing\n value from sharppy.sharptab.constants.MISSING\n\n Returns\n -------\n mag : number, array_like\n The magnitude of the vector (units are the same as input)\n\n ' if ((not QC(u)) or (not QC(v))): return ma.masked u = np.ma.asanyarray(u).astype(np.float64) v = np.ma.asanyarray(v).astype(np.float64) u.set_fill_value(missing) v.set_fill_value(missing) if u.shape: u[(u == missing)] = ma.masked v[(v == missing)] = ma.masked elif ((u == missing) or (v == missing)): return ma.masked return ma.sqrt(((u ** 2) + (v ** 2)))
def mag(u, v, missing=MISSING): '\n Compute the magnitude of a vector from its components\n\n Parameters\n ----------\n u : number, array_like\n U-component of the wind\n v : number, array_like\n V-component of the wind\n missing : number (optional)\n Optional missing parameter. If not given, assume default missing\n value from sharppy.sharptab.constants.MISSING\n\n Returns\n -------\n mag : number, array_like\n The magnitude of the vector (units are the same as input)\n\n ' if ((not QC(u)) or (not QC(v))): return ma.masked u = np.ma.asanyarray(u).astype(np.float64) v = np.ma.asanyarray(v).astype(np.float64) u.set_fill_value(missing) v.set_fill_value(missing) if u.shape: u[(u == missing)] = ma.masked v[(v == missing)] = ma.masked elif ((u == missing) or (v == missing)): return ma.masked return ma.sqrt(((u ** 2) + (v ** 2)))<|docstring|>Compute the magnitude of a vector from its components Parameters ---------- u : number, array_like U-component of the wind v : number, array_like V-component of the wind missing : number (optional) Optional missing parameter. If not given, assume default missing value from sharppy.sharptab.constants.MISSING Returns ------- mag : number, array_like The magnitude of the vector (units are the same as input)<|endoftext|>
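The masked-array handling in mag can be seen with a stand-in sentinel (a sketch; -9999.0 standing in for sharppy.sharptab.constants.MISSING is an assumption here):

import numpy as np
import numpy.ma as ma

MISSING = -9999.0  # assumed stand-in for sharppy.sharptab.constants.MISSING
u = ma.asanyarray([3.0, MISSING]).astype(np.float64)
v = ma.asanyarray([4.0, 4.0]).astype(np.float64)
u[u == MISSING] = ma.masked  # the same masking step mag() performs
print(ma.sqrt((u ** 2) + (v ** 2)))  # [5.0 --]: the element with a missing component stays masked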
d8161e47f8b12ce1063350f10fc467110f01a00540b9d9c04de0bf0cea41823c
def QC(val): '\n Tests if a value is masked.\n \n ' if (type(val) == type(ma.masked)): return False return True
Tests if a value is masked.
sharppy/sharptab/utils.py
QC
skovic/SHARPpy
163
python
def QC(val): '\n \n \n ' if (type(val) == type(ma.masked)): return False return True
def QC(val): '\n \n \n ' if (type(val) == type(ma.masked)): return False return True<|docstring|>Tests if a value is masked.<|endoftext|>
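QC works because the masked singleton ma.masked has its own type; a minimal equivalent sketch:

import numpy.ma as ma

def QC(val):  # minimal equivalent of the record above
    return type(val) != type(ma.masked)

print(QC(ma.masked))  # False: the value failed quality control
print(QC(5.0))        # True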
cc21fc11ec38471d2998f02ca9a465de44c32e98d9b67dc90d64c4b2a8bf3f5d
def f(x, y): 'f documentation' return 'x is {}'.format(x)
f documentation
clickutil/tests/test_call.py
f
stroxler/clickutil
6
python
def f(x, y): return 'x is {}'.format(x)
def f(x, y): return 'x is {}'.format(x)<|docstring|>f documentation<|endoftext|>
fc8dd47871d3c6ed729482de7769579e214631bc26c630be86a10fbdb1d44339
def f(x, y): 'f documentation' return (x + y)
f documentation
clickutil/tests/test_call.py
f
stroxler/clickutil
6
python
def f(x, y): return (x + y)
def f(x, y): return (x + y)<|docstring|>f documentation<|endoftext|>
e5f7633cc8b131d46e9ac831b8bfb620fdf79546021ce7648d32476330ebdbe6
def similarity(self, code_vec, desc_vec): '\n https://arxiv.org/pdf/1508.01585.pdf \n ' assert (self.conf['sim_measure'] in ['cos', 'poly', 'euc', 'sigmoid', 'gesd', 'aesd']), 'invalid similarity measure' if (self.conf['sim_measure'] == 'cos'): return F.cosine_similarity(code_vec, desc_vec) elif (self.conf['sim_measure'] == 'poly'): return (((0.5 * torch.matmul(code_vec, desc_vec.t()).diag()) + 1) ** 2) elif (self.conf['sim_measure'] == 'sigmoid'): return torch.tanh((torch.matmul(code_vec, desc_vec.t()).diag() + 1)) elif (self.conf['sim_measure'] in ['euc', 'gesd', 'aesd']): euc_dist = torch.dist(code_vec, desc_vec, 2) euc_sim = (1 / (1 + euc_dist)) if (self.conf['sim_measure'] == 'euc'): return euc_sim sigmoid_sim = torch.sigmoid((torch.matmul(code_vec, desc_vec.t()).diag() + 1)) if (self.conf['sim_measure'] == 'gesd'): return (euc_sim * sigmoid_sim) elif (self.conf['sim_measure'] == 'aesd'): return (0.5 * (euc_sim + sigmoid_sim))
https://arxiv.org/pdf/1508.01585.pdf
pytorch/models/jointemb.py
similarity
mdvertola/VCENN
1
python
def similarity(self, code_vec, desc_vec): '\n \n ' assert (self.conf['sim_measure'] in ['cos', 'poly', 'euc', 'sigmoid', 'gesd', 'aesd']), 'invalid similarity measure' if (self.conf['sim_measure'] == 'cos'): return F.cosine_similarity(code_vec, desc_vec) elif (self.conf['sim_measure'] == 'poly'): return (((0.5 * torch.matmul(code_vec, desc_vec.t()).diag()) + 1) ** 2) elif (self.conf['sim_measure'] == 'sigmoid'): return torch.tanh((torch.matmul(code_vec, desc_vec.t()).diag() + 1)) elif (self.conf['sim_measure'] in ['euc', 'gesd', 'aesd']): euc_dist = torch.dist(code_vec, desc_vec, 2) euc_sim = (1 / (1 + euc_dist)) if (self.conf['sim_measure'] == 'euc'): return euc_sim sigmoid_sim = torch.sigmoid((torch.matmul(code_vec, desc_vec.t()).diag() + 1)) if (self.conf['sim_measure'] == 'gesd'): return (euc_sim * sigmoid_sim) elif (self.conf['sim_measure'] == 'aesd'): return (0.5 * (euc_sim + sigmoid_sim))
def similarity(self, code_vec, desc_vec): '\n \n ' assert (self.conf['sim_measure'] in ['cos', 'poly', 'euc', 'sigmoid', 'gesd', 'aesd']), 'invalid similarity measure' if (self.conf['sim_measure'] == 'cos'): return F.cosine_similarity(code_vec, desc_vec) elif (self.conf['sim_measure'] == 'poly'): return (((0.5 * torch.matmul(code_vec, desc_vec.t()).diag()) + 1) ** 2) elif (self.conf['sim_measure'] == 'sigmoid'): return torch.tanh((torch.matmul(code_vec, desc_vec.t()).diag() + 1)) elif (self.conf['sim_measure'] in ['euc', 'gesd', 'aesd']): euc_dist = torch.dist(code_vec, desc_vec, 2) euc_sim = (1 / (1 + euc_dist)) if (self.conf['sim_measure'] == 'euc'): return euc_sim sigmoid_sim = torch.sigmoid((torch.matmul(code_vec, desc_vec.t()).diag() + 1)) if (self.conf['sim_measure'] == 'gesd'): return (euc_sim * sigmoid_sim) elif (self.conf['sim_measure'] == 'aesd'): return (0.5 * (euc_sim + sigmoid_sim))<|docstring|>https://arxiv.org/pdf/1508.01585.pdf<|endoftext|>
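The 'cos' and 'poly' branches of similarity can be exercised in isolation on random embeddings (a sketch; torch is the only dependency and the batch size is arbitrary):

import torch
import torch.nn.functional as F

code_vec = torch.randn(4, 128)  # four fake code embeddings
desc_vec = torch.randn(4, 128)  # four fake description embeddings
cos = F.cosine_similarity(code_vec, desc_vec)  # the 'cos' branch, shape (4,)
poly = (((0.5 * torch.matmul(code_vec, desc_vec.t()).diag()) + 1) ** 2)  # the 'poly' branch
print(cos.shape, poly.shape)  # torch.Size([4]) torch.Size([4])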
4dbfa490dbb04dcb4d7bb32053b7231e95d95e27369b2714f0637bb79d886db6
def save_history(history, result_file='history.csv'): "Save history instance as file.\n\n # Arguments\n history: Returns of fit method.\n result_file: Path to save as csv file. End with '.csv'.\n\n # Returns\n Save as csv file.\n " df = pd.DataFrame(history.history) df.to_csv(result_file, sep=',', index_label='epoch')
Save history instance as file. # Arguments history: Returns of fit method. result_file: Path to save as csv file. End with '.csv'. # Returns Save as csv file.
farmer/ncc/history/history.py
save_history
tamahassam/farmer
10
python
def save_history(history, result_file='history.csv'): "Save history instance as file.\n\n # Arguments\n history: Returns of fit method.\n result_file: Path to save as csv file. End with '.csv'.\n\n # Returns\n Save as csv file.\n " df = pd.DataFrame(history.history) df.to_csv(result_file, sep=',', index_label='epoch')
def save_history(history, result_file='history.csv'): "Save history instance as file.\n\n # Arguments\n history: Returns of fit method.\n result_file: Path to save as csv file. End with '.csv'.\n\n # Returns\n Save as csv file.\n " df = pd.DataFrame(history.history) df.to_csv(result_file, sep=',', index_label='epoch')<|docstring|>Save history instance as file. # Arguments history: Returns of fit method. result_file: Path to save as csv file. End with '.csv'. # Returns Save as csv file.<|endoftext|>
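save_history only touches history.history, so a stand-in object is enough to try it out (sketch; FakeHistory is invented):

import pandas as pd

class FakeHistory:  # stand-in for the keras History object (invented)
    history = {'loss': [0.9, 0.5, 0.3], 'acc': [0.5, 0.7, 0.8]}

df = pd.DataFrame(FakeHistory.history)  # what save_history does internally
df.to_csv('history.csv', sep=',', index_label='epoch')
# get_array(['history.csv']) then yields columns ['epoch', 'loss', 'acc'] and a 3x3 value array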
757c8b5be502a33ac9c6309250fd2edec9fe0a65ddf4dfbdf13a3043966e0598
def get_array(files): 'Convert file to numpy array.\n\n # Arguments\n files: Path to file, saved by above save_history method.\n\n # Returns\n labels: Dictionary, Keys(file_path) and Values(metrics name).\n values: Dictionary, Keys(file_path) and Values(metrics value).\n ' (labels, values) = ({}, {}) for file in files: df = pd.read_csv(file) (labels[file], values[file]) = (df.columns.tolist(), df.values) return (labels, values)
Convert file to numpy array. # Arguments files: Path to file, saved by above save_history method. # Returns labels: Dictionary, Keys(file_path) and Values(metrics name). values: Dictionary, Keys(file_path) and Values(metrics value).
farmer/ncc/history/history.py
get_array
tamahassam/farmer
10
python
def get_array(files): 'Convert file to numpy array.\n\n # Arguments\n files: Path to file, saved by above save_history method.\n\n # Returns\n labels: Dictionary, Keys(file_path) and Values(metrics name).\n values: Dictionary, Keys(file_path) and Values(metrics value).\n ' (labels, values) = ({}, {}) for file in files: df = pd.read_csv(file) (labels[file], values[file]) = (df.columns.tolist(), df.values) return (labels, values)
def get_array(files): 'Convert file to numpy array.\n\n # Arguments\n files: Path to file, saved by above save_history method.\n\n # Returns\n labels: Dictionary, Keys(file_path) and Values(metrics name).\n values: Dictionary, Keys(file_path) and Values(metrics value).\n ' (labels, values) = ({}, {}) for file in files: df = pd.read_csv(file) (labels[file], values[file]) = (df.columns.tolist(), df.values) return (labels, values)<|docstring|>Convert file to numpy array. # Arguments files: Path to file, saved by above save_history method. # Returns labels: Dictionary, Keys(file_path) and Values(metrics name). values: Dictionary, Keys(file_path) and Values(metrics value).<|endoftext|>
c45956ef67fe351a2e70f3205d79c78523249b6cee15f3b31ca6f5d9e5078da6
def show_history(metrics='acc', average=False, *files): "Show history.\n\n # Arguments\n metrics: Metrics name. If 'acc', you can see 'acc' and 'val_acc'.\n average: Moving average. (e.g. 3 and 5)\n files: Path to file, saved by above save_history method. It receives multiple files.\n\n # Returns\n Show as integrated graph.\n " (labels, values) = get_array(files) colors = ['b', 'g', 'r', 'c', 'm', 'y', 'b', 'w'] plt.figure(figsize=(12, 8)) for (i, key) in enumerate(values.keys()): if average: for column in range(1, values[key].shape[1]): values[key][:, column] = np.convolve(values[key][:, column], (np.ones(average) / float(average)), 'same') values[key] = values[key][(average // 2):(- ((average // 2) + 1))] plt.plot(values[key][:, 0], values[key][:, labels[key].index(metrics)], colors[i], alpha=0.3, label=((key[:(- 4)] + ' ') + metrics)) if (('val_' + metrics) in labels[key]): plt.plot(values[key][:, 0], values[key][:, labels[key].index(('val_' + metrics))], colors[i], alpha=0.9, label=(((key[:(- 4)] + ' ') + 'val_') + metrics)) plt.title('History') plt.xlabel('Epochs') plt.ylabel(metrics) plt.legend(loc='upper right', bbox_to_anchor=(1.35, 1.0), fontsize=12) plt.grid(color='gray', alpha=0.3) plt.show()
Show history. # Arguments metrics: Metrics name. If 'acc', you can see 'acc' and 'val_acc'. average: Moving average. (e.g. 3 and 5) files: Path to file, saved by above save_history method. It receives multiple files. # Returns Show as integrated graph.
farmer/ncc/history/history.py
show_history
tamahassam/farmer
10
python
def show_history(metrics='acc', average=False, *files): "Show history.\n\n # Arguments\n metrics: Metrics name. If 'acc', you can see 'acc' and 'val_acc'.\n average: Moving average. (e.g. 3 and 5)\n files: Path to file, saved by above save_history method. It receives multiple files.\n\n # Returns\n Show as integrated graph.\n " (labels, values) = get_array(files) colors = ['b', 'g', 'r', 'c', 'm', 'y', 'b', 'w'] plt.figure(figsize=(12, 8)) for (i, key) in enumerate(values.keys()): if average: for column in range(1, values[key].shape[1]): values[key][:, column] = np.convolve(values[key][:, column], (np.ones(average) / float(average)), 'same') values[key] = values[key][(average // 2):(- ((average // 2) + 1))] plt.plot(values[key][:, 0], values[key][:, labels[key].index(metrics)], colors[i], alpha=0.3, label=((key[:(- 4)] + ' ') + metrics)) if (('val_' + metrics) in labels[key]): plt.plot(values[key][:, 0], values[key][:, labels[key].index(('val_' + metrics))], colors[i], alpha=0.9, label=(((key[:(- 4)] + ' ') + 'val_') + metrics)) plt.title('History') plt.xlabel('Epochs') plt.ylabel(metrics) plt.legend(loc='upper right', bbox_to_anchor=(1.35, 1.0), fontsize=12) plt.grid(color='gray', alpha=0.3) plt.show()
def show_history(metrics='acc', average=False, *files): "Show history.\n\n # Arguments\n metrics: Metrics name. If 'acc', you can see 'acc' and 'val_acc'.\n average: Moving average. (e.g. 3 and 5)\n files: Path to file, saved by above save_history method. It receives multiple files.\n\n # Returns\n Show as integrated graph.\n " (labels, values) = get_array(files) colors = ['b', 'g', 'r', 'c', 'm', 'y', 'b', 'w'] plt.figure(figsize=(12, 8)) for (i, key) in enumerate(values.keys()): if average: for column in range(1, values[key].shape[1]): values[key][:, column] = np.convolve(values[key][:, column], (np.ones(average) / float(average)), 'same') values[key] = values[key][(average // 2):(- ((average // 2) + 1))] plt.plot(values[key][:, 0], values[key][:, labels[key].index(metrics)], colors[i], alpha=0.3, label=((key[:(- 4)] + ' ') + metrics)) if (('val_' + metrics) in labels[key]): plt.plot(values[key][:, 0], values[key][:, labels[key].index(('val_' + metrics))], colors[i], alpha=0.9, label=(((key[:(- 4)] + ' ') + 'val_') + metrics)) plt.title('History') plt.xlabel('Epochs') plt.ylabel(metrics) plt.legend(loc='upper right', bbox_to_anchor=(1.35, 1.0), fontsize=12) plt.grid(color='gray', alpha=0.3) plt.show()<|docstring|>Show history. # Arguments metrics: Metrics name. If 'acc', you can see 'acc' and 'val_acc'. average: Moving average. (e.g. 3 and 5) files: Path to file, saved by above save_history method. It receives multiple files. # Returns Show as integrated graph.<|endoftext|>
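The average branch of show_history smooths each metric column with np.convolve in 'same' mode, whose edge values are biased toward zero; that is why rows at both ends are trimmed afterwards. A standalone illustration:

import numpy as np

y = np.array([1.0, 4.0, 7.0, 10.0, 13.0])
smooth = np.convolve(y, np.ones(3) / 3.0, 'same')  # the average=3 smoothing used above
print(smooth)  # [ 1.67  4.  7. 10.  7.67] -- biased edges, hence the trimming slice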
98c8c31cb35d201abf2d6f1e9a8fc19ef9bffa4bba68d7643b2012b0de54169c
def register_custom_op(is_ortmodule=False): '\n This function registers symbolic functions for\n custom ops that are implemented as part of ONNX Runtime\n ' def inverse(g, self): return g.op('com.microsoft::Inverse', self).setType(self.type()) def gelu(g, self): return g.op('com.microsoft::Gelu', self).setType(self.type()) def triu(g, self, diagonal): return g.op('com.microsoft::Trilu', self, diagonal, upper_i=1).setType(self.type()) def tril(g, self, diagonal): return g.op('com.microsoft::Trilu', self, diagonal, upper_i=0).setType(self.type()) register_custom_op_symbolic('::inverse', inverse, _onnx_opset_version) register_custom_op_symbolic('::gelu', gelu, _onnx_opset_version) register_custom_op_symbolic('::triu', triu, _onnx_opset_version) register_custom_op_symbolic('::tril', tril, _onnx_opset_version) if is_ortmodule: @parse_args('v', 'v', 'i', 'b', 'b') def embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse): custom_attributes_json = f'{{"padding_idx":{str(padding_idx)},"scale_grad_by_freq":{str(scale_grad_by_freq).lower()},"sparse":{str(sparse).lower()}}}' output = g.op('com.microsoft::ATenOp', weight, indices, name_s='aten::embedding', custom_attributes_json_s=custom_attributes_json) indices_shape = _get_tensor_sizes(indices) if ((indices_shape is not None) and hasattr(weight.type(), 'with_sizes')): output_type = weight.type().with_sizes((indices_shape + [_get_tensor_dim_size(weight, 1)])) output.setType(output_type) return output register_custom_op_symbolic('::embedding', embedding, _onnx_opset_version) @parse_args('v', 'v', 'v', 'i', 'v') def cross_entropy_loss(g, self, target, weight, reduction, ignore_index): reduction = sym_help._maybe_get_const(reduction, 'i') reduction_vals = ['none', 'mean', 'sum'] reduction = reduction_vals[reduction] (output, log_prob) = g.op('com.microsoft::SoftmaxCrossEntropyLossInternal', self, target, weight, ignore_index, reduction_s=reduction, outputs=2) output.setType(self.type()) log_prob.setType(self.type()) return output register_custom_op_symbolic('::cross_entropy_loss', cross_entropy_loss, _onnx_opset_version) @parse_args('v', 'v', 'v', 'i', 'v') def nll_loss(g, self, target, weight, reduction, ignore_index): reduction = sym_help._maybe_get_const(reduction, 'i') reduction_vals = ['none', 'mean', 'sum'] reduction = reduction_vals[reduction] output = g.op('com.microsoft::NegativeLogLikelihoodLossInternal', self, target, weight, ignore_index, reduction_s=reduction) output.setType(self.type()) return output register_custom_op_symbolic('::nll_loss', nll_loss, _onnx_opset_version) @parse_args('v', 'is', 'is', 'is', 'is', 'b') def max_pool2d(g, self, kernel_size, stride, padding, dilation, ceil_mode): custom_attributes_json = f'{{"kernel_size":{str(kernel_size)},"stride":{str(stride)},"padding":{str(padding)},"dilation":{str(dilation)},"ceil_mode":{str(ceil_mode).lower()}}}' return g.op('com.microsoft::ATenOp', self, name_s='aten::max_pool2d_with_indices', custom_attributes_json_s=custom_attributes_json, outputs=2)[0] register_custom_op_symbolic('::max_pool2d', max_pool2d, _onnx_opset_version) @parse_args('v', 'i', 'i', 'i') def unfold(g, input, dimension, size, step): custom_attributes_json = f'{{"dimension":{str(dimension)},"size":{str(size)},"step":{str(step)}}}' return g.op('com.microsoft::ATenOp', input, name_s='aten::unfold', custom_attributes_json_s=custom_attributes_json) register_custom_op_symbolic('::unfold', unfold, _onnx_opset_version)
This function registers symbolic functions for custom ops that are implemented as part of ONNX Runtime
tools/python/register_custom_ops_pytorch_exporter.py
register_custom_op
yutkin/onnxruntime
2
python
def register_custom_op(is_ortmodule=False): '\n This function registers symbolic functions for\n custom ops that are implemented as part of ONNX Runtime\n ' def inverse(g, self): return g.op('com.microsoft::Inverse', self).setType(self.type()) def gelu(g, self): return g.op('com.microsoft::Gelu', self).setType(self.type()) def triu(g, self, diagonal): return g.op('com.microsoft::Trilu', self, diagonal, upper_i=1).setType(self.type()) def tril(g, self, diagonal): return g.op('com.microsoft::Trilu', self, diagonal, upper_i=0).setType(self.type()) register_custom_op_symbolic('::inverse', inverse, _onnx_opset_version) register_custom_op_symbolic('::gelu', gelu, _onnx_opset_version) register_custom_op_symbolic('::triu', triu, _onnx_opset_version) register_custom_op_symbolic('::tril', tril, _onnx_opset_version) if is_ortmodule: @parse_args('v', 'v', 'i', 'b', 'b') def embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse): custom_attributes_json = f'{{"padding_idx":{str(padding_idx)},"scale_grad_by_freq":{str(scale_grad_by_freq).lower()},"sparse":{str(sparse).lower()}}}' output = g.op('com.microsoft::ATenOp', weight, indices, name_s='aten::embedding', custom_attributes_json_s=custom_attributes_json) indices_shape = _get_tensor_sizes(indices) if ((indices_shape is not None) and hasattr(weight.type(), 'with_sizes')): output_type = weight.type().with_sizes((indices_shape + [_get_tensor_dim_size(weight, 1)])) output.setType(output_type) return output register_custom_op_symbolic('::embedding', embedding, _onnx_opset_version) @parse_args('v', 'v', 'v', 'i', 'v') def cross_entropy_loss(g, self, target, weight, reduction, ignore_index): reduction = sym_help._maybe_get_const(reduction, 'i') reduction_vals = ['none', 'mean', 'sum'] reduction = reduction_vals[reduction] (output, log_prob) = g.op('com.microsoft::SoftmaxCrossEntropyLossInternal', self, target, weight, ignore_index, reduction_s=reduction, outputs=2) output.setType(self.type()) log_prob.setType(self.type()) return output register_custom_op_symbolic('::cross_entropy_loss', cross_entropy_loss, _onnx_opset_version) @parse_args('v', 'v', 'v', 'i', 'v') def nll_loss(g, self, target, weight, reduction, ignore_index): reduction = sym_help._maybe_get_const(reduction, 'i') reduction_vals = ['none', 'mean', 'sum'] reduction = reduction_vals[reduction] output = g.op('com.microsoft::NegativeLogLikelihoodLossInternal', self, target, weight, ignore_index, reduction_s=reduction) output.setType(self.type()) return output register_custom_op_symbolic('::nll_loss', nll_loss, _onnx_opset_version) @parse_args('v', 'is', 'is', 'is', 'is', 'b') def max_pool2d(g, self, kernel_size, stride, padding, dilation, ceil_mode): custom_attributes_json = f'{{"kernel_size":{str(kernel_size)},"stride":{str(stride)},"padding":{str(padding)},"dilation":{str(dilation)},"ceil_mode":{str(ceil_mode).lower()}}}' return g.op('com.microsoft::ATenOp', self, name_s='aten::max_pool2d_with_indices', custom_attributes_json_s=custom_attributes_json, outputs=2)[0] register_custom_op_symbolic('::max_pool2d', max_pool2d, _onnx_opset_version) @parse_args('v', 'i', 'i', 'i') def unfold(g, input, dimension, size, step): custom_attributes_json = f'{{"dimension":{str(dimension)},"size":{str(size)},"step":{str(step)}}}' return g.op('com.microsoft::ATenOp', input, name_s='aten::unfold', custom_attributes_json_s=custom_attributes_json) register_custom_op_symbolic('::unfold', unfold, _onnx_opset_version)
def register_custom_op(is_ortmodule=False): '\n This function registers symbolic functions for\n custom ops that are implemented as part of ONNX Runtime\n ' def inverse(g, self): return g.op('com.microsoft::Inverse', self).setType(self.type()) def gelu(g, self): return g.op('com.microsoft::Gelu', self).setType(self.type()) def triu(g, self, diagonal): return g.op('com.microsoft::Trilu', self, diagonal, upper_i=1).setType(self.type()) def tril(g, self, diagonal): return g.op('com.microsoft::Trilu', self, diagonal, upper_i=0).setType(self.type()) register_custom_op_symbolic('::inverse', inverse, _onnx_opset_version) register_custom_op_symbolic('::gelu', gelu, _onnx_opset_version) register_custom_op_symbolic('::triu', triu, _onnx_opset_version) register_custom_op_symbolic('::tril', tril, _onnx_opset_version) if is_ortmodule: @parse_args('v', 'v', 'i', 'b', 'b') def embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse): custom_attributes_json = f'{{"padding_idx":{str(padding_idx)},"scale_grad_by_freq":{str(scale_grad_by_freq).lower()},"sparse":{str(sparse).lower()}}}' output = g.op('com.microsoft::ATenOp', weight, indices, name_s='aten::embedding', custom_attributes_json_s=custom_attributes_json) indices_shape = _get_tensor_sizes(indices) if ((indices_shape is not None) and hasattr(weight.type(), 'with_sizes')): output_type = weight.type().with_sizes((indices_shape + [_get_tensor_dim_size(weight, 1)])) output.setType(output_type) return output register_custom_op_symbolic('::embedding', embedding, _onnx_opset_version) @parse_args('v', 'v', 'v', 'i', 'v') def cross_entropy_loss(g, self, target, weight, reduction, ignore_index): reduction = sym_help._maybe_get_const(reduction, 'i') reduction_vals = ['none', 'mean', 'sum'] reduction = reduction_vals[reduction] (output, log_prob) = g.op('com.microsoft::SoftmaxCrossEntropyLossInternal', self, target, weight, ignore_index, reduction_s=reduction, outputs=2) output.setType(self.type()) log_prob.setType(self.type()) return output register_custom_op_symbolic('::cross_entropy_loss', cross_entropy_loss, _onnx_opset_version) @parse_args('v', 'v', 'v', 'i', 'v') def nll_loss(g, self, target, weight, reduction, ignore_index): reduction = sym_help._maybe_get_const(reduction, 'i') reduction_vals = ['none', 'mean', 'sum'] reduction = reduction_vals[reduction] output = g.op('com.microsoft::NegativeLogLikelihoodLossInternal', self, target, weight, ignore_index, reduction_s=reduction) output.setType(self.type()) return output register_custom_op_symbolic('::nll_loss', nll_loss, _onnx_opset_version) @parse_args('v', 'is', 'is', 'is', 'is', 'b') def max_pool2d(g, self, kernel_size, stride, padding, dilation, ceil_mode): custom_attributes_json = f'{{"kernel_size":{str(kernel_size)},"stride":{str(stride)},"padding":{str(padding)},"dilation":{str(dilation)},"ceil_mode":{str(ceil_mode).lower()}}}' return g.op('com.microsoft::ATenOp', self, name_s='aten::max_pool2d_with_indices', custom_attributes_json_s=custom_attributes_json, outputs=2)[0] register_custom_op_symbolic('::max_pool2d', max_pool2d, _onnx_opset_version) @parse_args('v', 'i', 'i', 'i') def unfold(g, input, dimension, size, step): custom_attributes_json = f'{{"dimension":{str(dimension)},"size":{str(size)},"step":{str(step)}}}' return g.op('com.microsoft::ATenOp', input, name_s='aten::unfold', custom_attributes_json_s=custom_attributes_json) register_custom_op_symbolic('::unfold', unfold, _onnx_opset_version)<|docstring|>This function registers symbolic functions for custom ops that are implemented as part of ONNX Runtime<|endoftext|>
ae9fb536f482609ecdd43a420cd867c4a346ee9152fafd394654c89dcb3895f6
def unregister_custom_op(): '\n This function unregisters symbolic functions for\n custom ops that are implemented as part of ONNX Runtime\n ' import torch.onnx.symbolic_registry as sym_registry def unregister(name, opset_version): (ns, kind) = name.split('::') from torch.onnx.symbolic_helper import _onnx_stable_opsets for version in _onnx_stable_opsets: if ((version >= opset_version) and sym_registry.is_registered_op(kind, ns, version)): del sym_registry._registry[(ns, version)][kind] unregister('::inverse', _onnx_opset_version) unregister('::gelu', _onnx_opset_version) unregister('::triu', _onnx_opset_version) unregister('::tril', _onnx_opset_version)
This function unregisters symbolic functions for custom ops that are implemented as part of ONNX Runtime
tools/python/register_custom_ops_pytorch_exporter.py
unregister_custom_op
yutkin/onnxruntime
2
python
def unregister_custom_op(): '\n This function unregisters symbolic functions for\n custom ops that are implemented as part of ONNX Runtime\n ' import torch.onnx.symbolic_registry as sym_registry def unregister(name, opset_version): (ns, kind) = name.split('::') from torch.onnx.symbolic_helper import _onnx_stable_opsets for version in _onnx_stable_opsets: if ((version >= opset_version) and sym_registry.is_registered_op(kind, ns, version)): del sym_registry._registry[(ns, version)][kind] unregister('::inverse', _onnx_opset_version) unregister('::gelu', _onnx_opset_version) unregister('::triu', _onnx_opset_version) unregister('::tril', _onnx_opset_version)
def unregister_custom_op(): '\n This function unregisters symbolic functions for\n custom ops that are implemented as part of ONNX Runtime\n ' import torch.onnx.symbolic_registry as sym_registry def unregister(name, opset_version): (ns, kind) = name.split('::') from torch.onnx.symbolic_helper import _onnx_stable_opsets for version in _onnx_stable_opsets: if ((version >= opset_version) and sym_registry.is_registered_op(kind, ns, version)): del sym_registry._registry[(ns, version)][kind] unregister('::inverse', _onnx_opset_version) unregister('::gelu', _onnx_opset_version) unregister('::triu', _onnx_opset_version) unregister('::tril', _onnx_opset_version)<|docstring|>This function unregisters symbolic functions for custom ops that are implemented as part of ONNX Runtime<|endoftext|>
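Typical use of the two helpers above brackets an export call (a sketch; it assumes the tools/python directory is importable and a torch version whose symbolic registry these helpers target):

import torch
from register_custom_ops_pytorch_exporter import register_custom_op, unregister_custom_op  # module name taken from the path field above

register_custom_op(is_ortmodule=False)  # registers com.microsoft::Inverse, Gelu, Trilu symbolics
model = torch.nn.Linear(3, 3)
dummy = torch.randn(1, 3)
torch.onnx.export(model, dummy, 'model.onnx')
unregister_custom_op()  # restore the stock symbolic registry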
1364d80675dc1b0892c8ebf7ef97e1ba59d9cee57f6d6e34d3806e6833b81106
def _run_summary_op(self, val=False): '\n Run the summary operator: feed the placeholders with corresponding network outputs (activations)\n ' summaries = [] summaries.append(self._add_gt_image_summary()) for (key, var) in self._event_summaries.items(): summaries.append(tb.summary.scalar(key, var.item())) self._event_summaries = {} if (not val): for (key, var) in self._score_summaries.items(): summaries.append(self._add_score_summary(key, var)) self._score_summaries = {} for (key, var) in self._act_summaries.items(): summaries += self._add_act_summary(key, var) self._act_summaries = {} for (k, var) in dict(self.named_parameters()).items(): if var.requires_grad: summaries.append(self._add_train_summary(k, var)) self._image_gt_summaries = {} return summaries
Run the summary operator: feed the placeholders with corresponding network outputs (activations)
lib/nets/network.py
_run_summary_op
Yibin-Li/pytorch-faster-rcnn
2,038
python
def _run_summary_op(self, val=False): '\n \n ' summaries = [] summaries.append(self._add_gt_image_summary()) for (key, var) in self._event_summaries.items(): summaries.append(tb.summary.scalar(key, var.item())) self._event_summaries = {} if (not val): for (key, var) in self._score_summaries.items(): summaries.append(self._add_score_summary(key, var)) self._score_summaries = {} for (key, var) in self._act_summaries.items(): summaries += self._add_act_summary(key, var) self._act_summaries = {} for (k, var) in dict(self.named_parameters()).items(): if var.requires_grad: summaries.append(self._add_train_summary(k, var)) self._image_gt_summaries = {} return summaries
def _run_summary_op(self, val=False): '\n \n ' summaries = [] summaries.append(self._add_gt_image_summary()) for (key, var) in self._event_summaries.items(): summaries.append(tb.summary.scalar(key, var.item())) self._event_summaries = {} if (not val): for (key, var) in self._score_summaries.items(): summaries.append(self._add_score_summary(key, var)) self._score_summaries = {} for (key, var) in self._act_summaries.items(): summaries += self._add_act_summary(key, var) self._act_summaries = {} for (k, var) in dict(self.named_parameters()).items(): if var.requires_grad: summaries.append(self._add_train_summary(k, var)) self._image_gt_summaries = {} return summaries<|docstring|>Run the summary operator: feed the placeholders with corresponding network outputs (activations)<|endoftext|>
7fbc091f4f46f64c9e82b351631d0445ffe936d9bb5925c5cc5543a1584a8f35
def load_state_dict(self, state_dict): '\n Because we remove the definition of the fc layer in resnet now, it will fail when loading \n a model trained before.\n To provide backward compatibility, we override load_state_dict\n ' nn.Module.load_state_dict(self, {k: v for (k, v) in state_dict.items() if (k in self.state_dict())})
Because we remove the definition of the fc layer in resnet now, it will fail when loading a model trained before. To provide backward compatibility, we override load_state_dict
lib/nets/network.py
load_state_dict
Yibin-Li/pytorch-faster-rcnn
2,038
python
def load_state_dict(self, state_dict): '\n Because we remove the definition of the fc layer in resnet now, it will fail when loading \n a model trained before.\n To provide backward compatibility, we override load_state_dict\n ' nn.Module.load_state_dict(self, {k: v for (k, v) in state_dict.items() if (k in self.state_dict())})
def load_state_dict(self, state_dict): '\n Because we remove the definition of the fc layer in resnet now, it will fail when loading \n a model trained before.\n To provide backward compatibility, we override load_state_dict\n ' nn.Module.load_state_dict(self, {k: v for (k, v) in state_dict.items() if (k in self.state_dict())})<|docstring|>Because we remove the definition of the fc layer in resnet now, it will fail when loading a model trained before. To provide backward compatibility, we override load_state_dict<|endoftext|>
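The dict-comprehension filter above can be reproduced standalone: keys absent from the current model, such as the removed fc layer, are silently dropped instead of raising (minimal sketch; the stale key is invented):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
checkpoint = dict(model.state_dict())
checkpoint['fc.weight'] = torch.zeros(1)  # stale key from an older model definition (invented)
filtered = {k: v for (k, v) in checkpoint.items() if (k in model.state_dict())}
model.load_state_dict(filtered)  # succeeds; a plain load_state_dict(checkpoint) would raise on 'fc.weight'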
344a7b77be33771374171d1e7eb16212abbcd506c9bccb91e9ac361274229e92
def normal_init(m, mean, stddev, truncated=False): '\n weight initializer: truncated normal and random normal.\n ' if truncated: m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) else: m.weight.data.normal_(mean, stddev) m.bias.data.zero_()
weight initializer: truncated normal and random normal.
lib/nets/network.py
normal_init
Yibin-Li/pytorch-faster-rcnn
2,038
python
def normal_init(m, mean, stddev, truncated=False): '\n \n ' if truncated: m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) else: m.weight.data.normal_(mean, stddev) m.bias.data.zero_()
def normal_init(m, mean, stddev, truncated=False): '\n \n ' if truncated: m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) else: m.weight.data.normal_(mean, stddev) m.bias.data.zero_()<|docstring|>weight initializer: truncated normal and random normal.<|endoftext|>
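With normal_init from the record above in scope, the truncated branch can be checked: fmod_(2) folds draws into (-2, 2) before scaling, so weights stay within two standard deviations (sketch):

import torch
import torch.nn as nn

torch.manual_seed(0)
fc = nn.Linear(256, 84)
normal_init(fc, 0, 0.01, truncated=True)  # normal_init as defined in the record above
print(float(fc.weight.abs().max()))  # <= 0.02, i.e. within two standard deviations
print(float(fc.bias.abs().max()))    # 0.0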
08bcade8056833eadd7328717b990f73a85d5c9b2be350e0dea78b614529d39f
@abstractmethod def run(self, *args, **kwargs): 'Implementation required'
Implementation required
hfb/strategy/server.py
run
harshanarayana/hfb
1
python
@abstractmethod def run(self, *args, **kwargs):
@abstractmethod def run(self, *args, **kwargs): <|docstring|>Implementation required<|endoftext|>
26787630b29099f07da081ab71897814129f22555fc606fa55186dbf4e1b4810
def parse_search_page(html): 'Get all industrial value-added related URLs from the search results page' soup = bs(html, features='lxml') titles = soup.find_all('font', {'class': 'cont_tit03'}) target_urls = [re.search("urlstr = '(.*?)';", str(title.script)).group(1) for title in titles] next_page_url = soup.find('a', class_='next-page').get('href') return (target_urls, next_page_url)
Get all industrial value-added related URLs from the search results page
industry/industry.py
parse_search_page
linusqzdeng/macroind-crawler
0
python
def parse_search_page(html): soup = bs(html, features='lxml') titles = soup.find_all('font', {'class': 'cont_tit03'}) target_urls = [re.search("urlstr = '(.*?)';", str(title.script)).group(1) for title in titles] next_page_url = soup.find('a', class_='next-page').get('href') return (target_urls, next_page_url)
def parse_search_page(html): soup = bs(html, features='lxml') titles = soup.find_all('font', {'class': 'cont_tit03'}) target_urls = [re.search("urlstr = '(.*?)';", str(title.script)).group(1) for title in titles] next_page_url = soup.find('a', class_='next-page').get('href') return (target_urls, next_page_url)<|docstring|>Get all industrial value-added related URLs from the search results page<|endoftext|>
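With parse_search_page from the record above in scope (plus bs4 and lxml installed), the selectors can be checked against synthetic markup; the URL below is invented:

html = (
    "<font class='cont_tit03'><script>urlstr = './t0101_001.html';</script></font>"
    "<a class='next-page' href='./s?page=2'>next</a>"
)
target_urls, next_page_url = parse_search_page(html)
print(target_urls)    # ['./t0101_001.html']
print(next_page_url)  # ./s?page=2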
17fb4ea1b3737e8903a4ae60f92fc1fbd0bd7926f4dae9869c97812d914b708f
def parse_html(html): 'Extract tables and release date from the html' if (html is None): return soup = bs(html, features='lxml') table = pd.read_html(html, index_col=1, na_values='…')[1] table.columns = [table.iloc[0], table.iloc[1]] table = table.iloc[2:].reset_index(drop=False) table.set_index(1, inplace=True) table = table.iloc[:, 1:] print(table) title = soup.find('h2', class_='xilan_tit').get_text() date_info = soup.find_all('font', {'style': 'float:left;width:560px;text-align:right;margin-right:60px;'})[0].get_text() date_str = re.search('时间:(.*)', date_info, flags=re.DOTALL).group(1).strip() release_dt = datetime.strptime(date_str, '%Y-%m-%d %H:%M').date() report_dt = datetime((release_dt.year - (release_dt.month == 1)), ((release_dt.month - 1) or 12), 1).date() return (title, table, release_dt, report_dt)
Extract tables and release date from the html
industry/industry.py
parse_html
linusqzdeng/macroind-crawler
0
python
def parse_html(html): if (html is None): return soup = bs(html, features='lxml') table = pd.read_html(html, index_col=1, na_values='…')[1] table.columns = [table.iloc[0], table.iloc[1]] table = table.iloc[2:].reset_index(drop=False) table.set_index(1, inplace=True) table = table.iloc[:, 1:] print(table) title = soup.find('h2', class_='xilan_tit').get_text() date_info = soup.find_all('font', {'style': 'float:left;width:560px;text-align:right;margin-right:60px;'})[0].get_text() date_str = re.search('时间:(.*)', date_info, flags=re.DOTALL).group(1).strip() release_dt = datetime.strptime(date_str, '%Y-%m-%d %H:%M').date() report_dt = datetime((release_dt.year - (release_dt.month == 1)), ((release_dt.month - 1) or 12), 1).date() return (title, table, release_dt, report_dt)
def parse_html(html): if (html is None): return soup = bs(html, features='lxml') table = pd.read_html(html, index_col=1, na_values='…')[1] table.columns = [table.iloc[0], table.iloc[1]] table = table.iloc[2:].reset_index(drop=False) table.set_index(1, inplace=True) table = table.iloc[:, 1:] print(table) title = soup.find('h2', class_='xilan_tit').get_text() date_info = soup.find_all('font', {'style': 'float:left;width:560px;text-align:right;margin-right:60px;'})[0].get_text() date_str = re.search('时间:(.*)', date_info, flags=re.DOTALL).group(1).strip() release_dt = datetime.strptime(date_str, '%Y-%m-%d %H:%M').date() report_dt = datetime((release_dt.year - (release_dt.month == 1)), ((release_dt.month - 1) or 12), 1).date() return (title, table, release_dt, report_dt)<|docstring|>Extract tables and release date from the html<|endoftext|>