Dataset schema (each record below lists these fields in this order):
  body_hash               string, fixed 64 hex chars
  body                    string, 23 to 109k chars
  docstring               string, 1 to 57k chars
  path                    string, 4 to 198 chars
  name                    string, 1 to 115 chars
  repository_name         string, 7 to 111 chars
  repository_stars        float64, 0 to 191k
  lang                    string, single class ('python')
  body_without_docstring  string, 14 to 108k chars
  unified                 string, 45 to 133k chars
8cd5220b750084ed02c23b99d76c506a8c494fb7d97430b4ed562467b6ad4730
@storage_service_port.setter def storage_service_port(self, storage_service_port): 'Sets the storage_service_port of this VCloudRestCloud.\n\n\n :param storage_service_port: The storage_service_port of this VCloudRestCloud. # noqa: E501\n :type: int\n ' self._storage_service_port = storage_service_port
Sets the storage_service_port of this VCloudRestCloud. :param storage_service_port: The storage_service_port of this VCloudRestCloud. # noqa: E501 :type: int
cons3rt/models/v_cloud_rest_cloud.py
storage_service_port
cons3rt/cons3rt-python-sdk
0
python
@storage_service_port.setter def storage_service_port(self, storage_service_port): 'Sets the storage_service_port of this VCloudRestCloud.\n\n\n :param storage_service_port: The storage_service_port of this VCloudRestCloud. # noqa: E501\n :type: int\n ' self._storage_service_port = storage_service_port
@storage_service_port.setter def storage_service_port(self, storage_service_port): 'Sets the storage_service_port of this VCloudRestCloud.\n\n\n :param storage_service_port: The storage_service_port of this VCloudRestCloud. # noqa: E501\n :type: int\n ' self._storage_service_port = storage_service_port<|docstring|>Sets the storage_service_port of this VCloudRestCloud. :param storage_service_port: The storage_service_port of this VCloudRestCloud. # noqa: E501 :type: int<|endoftext|>
54ed451f6fdaea3a56baa9a71e784fe5f7f36ead57e0ad73b99f30d3da9e489b
@property def storage_service_protocol(self): 'Gets the storage_service_protocol of this VCloudRestCloud. # noqa: E501\n\n\n :return: The storage_service_protocol of this VCloudRestCloud. # noqa: E501\n :rtype: str\n ' return self._storage_service_protocol
Gets the storage_service_protocol of this VCloudRestCloud. # noqa: E501 :return: The storage_service_protocol of this VCloudRestCloud. # noqa: E501 :rtype: str
cons3rt/models/v_cloud_rest_cloud.py
storage_service_protocol
cons3rt/cons3rt-python-sdk
0
python
@property def storage_service_protocol(self): 'Gets the storage_service_protocol of this VCloudRestCloud. # noqa: E501\n\n\n :return: The storage_service_protocol of this VCloudRestCloud. # noqa: E501\n :rtype: str\n ' return self._storage_service_protocol
@property def storage_service_protocol(self): 'Gets the storage_service_protocol of this VCloudRestCloud. # noqa: E501\n\n\n :return: The storage_service_protocol of this VCloudRestCloud. # noqa: E501\n :rtype: str\n ' return self._storage_service_protocol<|docstring|>Gets the storage_service_protocol of this VCloudRestCloud. # noqa: E501 :return: The storage_service_protocol of this VCloudRestCloud. # noqa: E501 :rtype: str<|endoftext|>
19d520d08feabe96133f6cc7909c280f7e3ca23fd0f927fd7102c4311997bf5d
@storage_service_protocol.setter def storage_service_protocol(self, storage_service_protocol): 'Sets the storage_service_protocol of this VCloudRestCloud.\n\n\n :param storage_service_protocol: The storage_service_protocol of this VCloudRestCloud. # noqa: E501\n :type: str\n ' self._storage_service_protocol = storage_service_protocol
Sets the storage_service_protocol of this VCloudRestCloud. :param storage_service_protocol: The storage_service_protocol of this VCloudRestCloud. # noqa: E501 :type: str
cons3rt/models/v_cloud_rest_cloud.py
storage_service_protocol
cons3rt/cons3rt-python-sdk
0
python
@storage_service_protocol.setter def storage_service_protocol(self, storage_service_protocol): 'Sets the storage_service_protocol of this VCloudRestCloud.\n\n\n :param storage_service_protocol: The storage_service_protocol of this VCloudRestCloud. # noqa: E501\n :type: str\n ' self._storage_service_protocol = storage_service_protocol
@storage_service_protocol.setter def storage_service_protocol(self, storage_service_protocol): 'Sets the storage_service_protocol of this VCloudRestCloud.\n\n\n :param storage_service_protocol: The storage_service_protocol of this VCloudRestCloud. # noqa: E501\n :type: str\n ' self._storage_service_protocol = storage_service_protocol<|docstring|>Sets the storage_service_protocol of this VCloudRestCloud. :param storage_service_protocol: The storage_service_protocol of this VCloudRestCloud. # noqa: E501 :type: str<|endoftext|>
ed836ea8798d22a3f69bad9eb6ca1757ecf45264eb546cd151687a357173345c
@property def storage_service_username(self): 'Gets the storage_service_username of this VCloudRestCloud. # noqa: E501\n\n\n :return: The storage_service_username of this VCloudRestCloud. # noqa: E501\n :rtype: str\n ' return self._storage_service_username
Gets the storage_service_username of this VCloudRestCloud. # noqa: E501 :return: The storage_service_username of this VCloudRestCloud. # noqa: E501 :rtype: str
cons3rt/models/v_cloud_rest_cloud.py
storage_service_username
cons3rt/cons3rt-python-sdk
0
python
@property def storage_service_username(self): 'Gets the storage_service_username of this VCloudRestCloud. # noqa: E501\n\n\n :return: The storage_service_username of this VCloudRestCloud. # noqa: E501\n :rtype: str\n ' return self._storage_service_username
@property def storage_service_username(self): 'Gets the storage_service_username of this VCloudRestCloud. # noqa: E501\n\n\n :return: The storage_service_username of this VCloudRestCloud. # noqa: E501\n :rtype: str\n ' return self._storage_service_username<|docstring|>Gets the storage_service_username of this VCloudRestCloud. # noqa: E501 :return: The storage_service_username of this VCloudRestCloud. # noqa: E501 :rtype: str<|endoftext|>
ba164f63dff3262ced9798bb3026e3649a170d2ed18115f5708bb0be8c3808e8
@storage_service_username.setter def storage_service_username(self, storage_service_username): 'Sets the storage_service_username of this VCloudRestCloud.\n\n\n :param storage_service_username: The storage_service_username of this VCloudRestCloud. # noqa: E501\n :type: str\n ' self._storage_service_username = storage_service_username
Sets the storage_service_username of this VCloudRestCloud. :param storage_service_username: The storage_service_username of this VCloudRestCloud. # noqa: E501 :type: str
cons3rt/models/v_cloud_rest_cloud.py
storage_service_username
cons3rt/cons3rt-python-sdk
0
python
@storage_service_username.setter def storage_service_username(self, storage_service_username): 'Sets the storage_service_username of this VCloudRestCloud.\n\n\n :param storage_service_username: The storage_service_username of this VCloudRestCloud. # noqa: E501\n :type: str\n ' self._storage_service_username = storage_service_username
@storage_service_username.setter def storage_service_username(self, storage_service_username): 'Sets the storage_service_username of this VCloudRestCloud.\n\n\n :param storage_service_username: The storage_service_username of this VCloudRestCloud. # noqa: E501\n :type: str\n ' self._storage_service_username = storage_service_username<|docstring|>Sets the storage_service_username of this VCloudRestCloud. :param storage_service_username: The storage_service_username of this VCloudRestCloud. # noqa: E501 :type: str<|endoftext|>
512788ca1643ead00980389290fdcb5bad3fcce131125121ec8cf68e4a542313
@property def username(self): 'Gets the username of this VCloudRestCloud. # noqa: E501\n\n\n :return: The username of this VCloudRestCloud. # noqa: E501\n :rtype: str\n ' return self._username
Gets the username of this VCloudRestCloud. # noqa: E501 :return: The username of this VCloudRestCloud. # noqa: E501 :rtype: str
cons3rt/models/v_cloud_rest_cloud.py
username
cons3rt/cons3rt-python-sdk
0
python
@property def username(self): 'Gets the username of this VCloudRestCloud. # noqa: E501\n\n\n :return: The username of this VCloudRestCloud. # noqa: E501\n :rtype: str\n ' return self._username
@property def username(self): 'Gets the username of this VCloudRestCloud. # noqa: E501\n\n\n :return: The username of this VCloudRestCloud. # noqa: E501\n :rtype: str\n ' return self._username<|docstring|>Gets the username of this VCloudRestCloud. # noqa: E501 :return: The username of this VCloudRestCloud. # noqa: E501 :rtype: str<|endoftext|>
121f37305a7a886713f0d177769940f1dbe51b7799380b09fb710357424f632a
@username.setter def username(self, username): 'Sets the username of this VCloudRestCloud.\n\n\n :param username: The username of this VCloudRestCloud. # noqa: E501\n :type: str\n ' if (self.local_vars_configuration.client_side_validation and (username is None)): raise ValueError('Invalid value for `username`, must not be `None`') self._username = username
Sets the username of this VCloudRestCloud. :param username: The username of this VCloudRestCloud. # noqa: E501 :type: str
cons3rt/models/v_cloud_rest_cloud.py
username
cons3rt/cons3rt-python-sdk
0
python
@username.setter def username(self, username): 'Sets the username of this VCloudRestCloud.\n\n\n :param username: The username of this VCloudRestCloud. # noqa: E501\n :type: str\n ' if (self.local_vars_configuration.client_side_validation and (username is None)): raise ValueError('Invalid value for `username`, must not be `None`') self._username = username
@username.setter def username(self, username): 'Sets the username of this VCloudRestCloud.\n\n\n :param username: The username of this VCloudRestCloud. # noqa: E501\n :type: str\n ' if (self.local_vars_configuration.client_side_validation and (username is None)): raise ValueError('Invalid value for `username`, must not be `None`') self._username = username<|docstring|>Sets the username of this VCloudRestCloud. :param username: The username of this VCloudRestCloud. # noqa: E501 :type: str<|endoftext|>
4475fa8d40c383e06258c9e2f9c46ebdb40fd277495ced28dcdfe085cffacc48
@property def vsphere_api_version(self): 'Gets the vsphere_api_version of this VCloudRestCloud. # noqa: E501\n\n\n :return: The vsphere_api_version of this VCloudRestCloud. # noqa: E501\n :rtype: str\n ' return self._vsphere_api_version
Gets the vsphere_api_version of this VCloudRestCloud. # noqa: E501 :return: The vsphere_api_version of this VCloudRestCloud. # noqa: E501 :rtype: str
cons3rt/models/v_cloud_rest_cloud.py
vsphere_api_version
cons3rt/cons3rt-python-sdk
0
python
@property def vsphere_api_version(self): 'Gets the vsphere_api_version of this VCloudRestCloud. # noqa: E501\n\n\n :return: The vsphere_api_version of this VCloudRestCloud. # noqa: E501\n :rtype: str\n ' return self._vsphere_api_version
@property def vsphere_api_version(self): 'Gets the vsphere_api_version of this VCloudRestCloud. # noqa: E501\n\n\n :return: The vsphere_api_version of this VCloudRestCloud. # noqa: E501\n :rtype: str\n ' return self._vsphere_api_version<|docstring|>Gets the vsphere_api_version of this VCloudRestCloud. # noqa: E501 :return: The vsphere_api_version of this VCloudRestCloud. # noqa: E501 :rtype: str<|endoftext|>
24a89ada2df1ff4c788ac10bc381b4e600d318cd935e504cd4592e7c7c5d1a48
@vsphere_api_version.setter def vsphere_api_version(self, vsphere_api_version): 'Sets the vsphere_api_version of this VCloudRestCloud.\n\n\n :param vsphere_api_version: The vsphere_api_version of this VCloudRestCloud. # noqa: E501\n :type: str\n ' if (self.local_vars_configuration.client_side_validation and (vsphere_api_version is not None) and (len(vsphere_api_version) > 6)): raise ValueError('Invalid value for `vsphere_api_version`, length must be less than or equal to `6`') if (self.local_vars_configuration.client_side_validation and (vsphere_api_version is not None) and (len(vsphere_api_version) < 1)): raise ValueError('Invalid value for `vsphere_api_version`, length must be greater than or equal to `1`') self._vsphere_api_version = vsphere_api_version
Sets the vsphere_api_version of this VCloudRestCloud. :param vsphere_api_version: The vsphere_api_version of this VCloudRestCloud. # noqa: E501 :type: str
cons3rt/models/v_cloud_rest_cloud.py
vsphere_api_version
cons3rt/cons3rt-python-sdk
0
python
@vsphere_api_version.setter def vsphere_api_version(self, vsphere_api_version): 'Sets the vsphere_api_version of this VCloudRestCloud.\n\n\n :param vsphere_api_version: The vsphere_api_version of this VCloudRestCloud. # noqa: E501\n :type: str\n ' if (self.local_vars_configuration.client_side_validation and (vsphere_api_version is not None) and (len(vsphere_api_version) > 6)): raise ValueError('Invalid value for `vsphere_api_version`, length must be less than or equal to `6`') if (self.local_vars_configuration.client_side_validation and (vsphere_api_version is not None) and (len(vsphere_api_version) < 1)): raise ValueError('Invalid value for `vsphere_api_version`, length must be greater than or equal to `1`') self._vsphere_api_version = vsphere_api_version
@vsphere_api_version.setter def vsphere_api_version(self, vsphere_api_version): 'Sets the vsphere_api_version of this VCloudRestCloud.\n\n\n :param vsphere_api_version: The vsphere_api_version of this VCloudRestCloud. # noqa: E501\n :type: str\n ' if (self.local_vars_configuration.client_side_validation and (vsphere_api_version is not None) and (len(vsphere_api_version) > 6)): raise ValueError('Invalid value for `vsphere_api_version`, length must be less than or equal to `6`') if (self.local_vars_configuration.client_side_validation and (vsphere_api_version is not None) and (len(vsphere_api_version) < 1)): raise ValueError('Invalid value for `vsphere_api_version`, length must be greater than or equal to `1`') self._vsphere_api_version = vsphere_api_version<|docstring|>Sets the vsphere_api_version of this VCloudRestCloud. :param vsphere_api_version: The vsphere_api_version of this VCloudRestCloud. # noqa: E501 :type: str<|endoftext|>
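The vsphere_api_version setter above illustrates the client-side validation that OpenAPI Generator emits for constrained string fields (compare the username setter, which rejects None). A minimal self-contained sketch of the same pattern; the class and field names here are illustrative, not part of the cons3rt SDK:

class BoundedField:
    """Stand-in for a generated model with a length-validated string field."""

    def __init__(self):
        self._api_version = None

    @property
    def api_version(self):
        return self._api_version

    @api_version.setter
    def api_version(self, value):
        # Mirror the generated checks: None passes through, otherwise enforce 1..6 chars.
        if value is not None and not (1 <= len(value) <= 6):
            raise ValueError('Invalid value for `api_version`, length must be in [1, 6]')
        self._api_version = value

model = BoundedField()
model.api_version = '6.7'            # accepted (3 chars)
# model.api_version = '6.7.0-beta'   # would raise ValueError (11 chars > 6)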
5374a99ebae71e16277de35f589145cb317cbded3fa6616ff9cfbc6b1c138d4a
@property def vsphere_host(self): 'Gets the vsphere_host of this VCloudRestCloud. # noqa: E501\n\n\n :return: The vsphere_host of this VCloudRestCloud. # noqa: E501\n :rtype: str\n ' return self._vsphere_host
Gets the vsphere_host of this VCloudRestCloud. # noqa: E501 :return: The vsphere_host of this VCloudRestCloud. # noqa: E501 :rtype: str
cons3rt/models/v_cloud_rest_cloud.py
vsphere_host
cons3rt/cons3rt-python-sdk
0
python
@property def vsphere_host(self): 'Gets the vsphere_host of this VCloudRestCloud. # noqa: E501\n\n\n :return: The vsphere_host of this VCloudRestCloud. # noqa: E501\n :rtype: str\n ' return self._vsphere_host
@property def vsphere_host(self): 'Gets the vsphere_host of this VCloudRestCloud. # noqa: E501\n\n\n :return: The vsphere_host of this VCloudRestCloud. # noqa: E501\n :rtype: str\n ' return self._vsphere_host<|docstring|>Gets the vsphere_host of this VCloudRestCloud. # noqa: E501 :return: The vsphere_host of this VCloudRestCloud. # noqa: E501 :rtype: str<|endoftext|>
c717f55584408d4b3221d9468e32d65e1bc258a7b537b92bbc07f6a88284c8d1
@vsphere_host.setter def vsphere_host(self, vsphere_host): 'Sets the vsphere_host of this VCloudRestCloud.\n\n\n :param vsphere_host: The vsphere_host of this VCloudRestCloud. # noqa: E501\n :type: str\n ' self._vsphere_host = vsphere_host
Sets the vsphere_host of this VCloudRestCloud. :param vsphere_host: The vsphere_host of this VCloudRestCloud. # noqa: E501 :type: str
cons3rt/models/v_cloud_rest_cloud.py
vsphere_host
cons3rt/cons3rt-python-sdk
0
python
@vsphere_host.setter def vsphere_host(self, vsphere_host): 'Sets the vsphere_host of this VCloudRestCloud.\n\n\n :param vsphere_host: The vsphere_host of this VCloudRestCloud. # noqa: E501\n :type: str\n ' self._vsphere_host = vsphere_host
@vsphere_host.setter def vsphere_host(self, vsphere_host): 'Sets the vsphere_host of this VCloudRestCloud.\n\n\n :param vsphere_host: The vsphere_host of this VCloudRestCloud. # noqa: E501\n :type: str\n ' self._vsphere_host = vsphere_host<|docstring|>Sets the vsphere_host of this VCloudRestCloud. :param vsphere_host: The vsphere_host of this VCloudRestCloud. # noqa: E501 :type: str<|endoftext|>
88e74ba67ad2e85c349a810dc265074e76124002e35d6764fbcd2d2513b6c1eb
@property def vsphere_port(self): 'Gets the vsphere_port of this VCloudRestCloud. # noqa: E501\n\n\n :return: The vsphere_port of this VCloudRestCloud. # noqa: E501\n :rtype: int\n ' return self._vsphere_port
Gets the vsphere_port of this VCloudRestCloud. # noqa: E501 :return: The vsphere_port of this VCloudRestCloud. # noqa: E501 :rtype: int
cons3rt/models/v_cloud_rest_cloud.py
vsphere_port
cons3rt/cons3rt-python-sdk
0
python
@property def vsphere_port(self): 'Gets the vsphere_port of this VCloudRestCloud. # noqa: E501\n\n\n :return: The vsphere_port of this VCloudRestCloud. # noqa: E501\n :rtype: int\n ' return self._vsphere_port
@property def vsphere_port(self): 'Gets the vsphere_port of this VCloudRestCloud. # noqa: E501\n\n\n :return: The vsphere_port of this VCloudRestCloud. # noqa: E501\n :rtype: int\n ' return self._vsphere_port<|docstring|>Gets the vsphere_port of this VCloudRestCloud. # noqa: E501 :return: The vsphere_port of this VCloudRestCloud. # noqa: E501 :rtype: int<|endoftext|>
279bf8720a0051233cd8302969a89790c8e6dc80f142cbbe3df01ea0e9e380be
@vsphere_port.setter def vsphere_port(self, vsphere_port): 'Sets the vsphere_port of this VCloudRestCloud.\n\n\n :param vsphere_port: The vsphere_port of this VCloudRestCloud. # noqa: E501\n :type: int\n ' self._vsphere_port = vsphere_port
Sets the vsphere_port of this VCloudRestCloud. :param vsphere_port: The vsphere_port of this VCloudRestCloud. # noqa: E501 :type: int
cons3rt/models/v_cloud_rest_cloud.py
vsphere_port
cons3rt/cons3rt-python-sdk
0
python
@vsphere_port.setter def vsphere_port(self, vsphere_port): 'Sets the vsphere_port of this VCloudRestCloud.\n\n\n :param vsphere_port: The vsphere_port of this VCloudRestCloud. # noqa: E501\n :type: int\n ' self._vsphere_port = vsphere_port
@vsphere_port.setter def vsphere_port(self, vsphere_port): 'Sets the vsphere_port of this VCloudRestCloud.\n\n\n :param vsphere_port: The vsphere_port of this VCloudRestCloud. # noqa: E501\n :type: int\n ' self._vsphere_port = vsphere_port<|docstring|>Sets the vsphere_port of this VCloudRestCloud. :param vsphere_port: The vsphere_port of this VCloudRestCloud. # noqa: E501 :type: int<|endoftext|>
5a4e41bb6a0def746593298cb605df98f1366e957c4ca89b12010ea7db707963
def to_dict(self): 'Returns the model properties as a dict' result = {} for (attr, _) in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
Returns the model properties as a dict
cons3rt/models/v_cloud_rest_cloud.py
to_dict
cons3rt/cons3rt-python-sdk
0
python
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result<|docstring|>Returns the model properties as a dict<|endoftext|>
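The to_dict body above walks openapi_types with six.iteritems (Python 2/3 compatibility) and recurses into nested models, lists, and dicts. A modern-Python sketch of the same traversal; the Point model and its fields are invented for illustration:

class Point:
    openapi_types = {'x': 'int', 'y': 'int'}

    def __init__(self, x, y):
        self.x, self.y = x, y

    def to_dict(self):
        # Same shape as the generated method, without the six dependency.
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [v.to_dict() if hasattr(v, 'to_dict') else v for v in value]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, 'to_dict') else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

print(Point(1, 2).to_dict())  # {'x': 1, 'y': 2}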
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99
def to_str(self): 'Returns the string representation of the model' return pprint.pformat(self.to_dict())
Returns the string representation of the model
cons3rt/models/v_cloud_rest_cloud.py
to_str
cons3rt/cons3rt-python-sdk
0
python
def to_str(self): return pprint.pformat(self.to_dict())
def to_str(self): return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|>
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703
def __repr__(self): 'For `print` and `pprint`' return self.to_str()
For `print` and `pprint`
cons3rt/models/v_cloud_rest_cloud.py
__repr__
cons3rt/cons3rt-python-sdk
0
python
def __repr__(self): return self.to_str()
def __repr__(self): return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|>
75c9485b0d88c948840ea411e5f3404ed1b67d735bd968bd5cb549c6382ee480
def __eq__(self, other): 'Returns true if both objects are equal' if (not isinstance(other, VCloudRestCloud)): return False return (self.to_dict() == other.to_dict())
Returns true if both objects are equal
cons3rt/models/v_cloud_rest_cloud.py
__eq__
cons3rt/cons3rt-python-sdk
0
python
def __eq__(self, other): if (not isinstance(other, VCloudRestCloud)): return False return (self.to_dict() == other.to_dict())
def __eq__(self, other): if (not isinstance(other, VCloudRestCloud)): return False return (self.to_dict() == other.to_dict())<|docstring|>Returns true if both objects are equal<|endoftext|>
315657c2974adf5a6512059304866be1f46fb6bef28122f7f6b9d91d4edb464d
def __ne__(self, other): 'Returns true if both objects are not equal' if (not isinstance(other, VCloudRestCloud)): return True return (self.to_dict() != other.to_dict())
Returns true if both objects are not equal
cons3rt/models/v_cloud_rest_cloud.py
__ne__
cons3rt/cons3rt-python-sdk
0
python
def __ne__(self, other): if (not isinstance(other, VCloudRestCloud)): return True return (self.to_dict() != other.to_dict())
def __ne__(self, other): if (not isinstance(other, VCloudRestCloud)): return True return (self.to_dict() != other.to_dict())<|docstring|>Returns true if both objects are not equal<|endoftext|>
0f21fba14581c5a1234a4c1755980ff400da8b234df474dd1a48091aa5b8e4bd
@event('manager.daemon.started') @event('manager.config_updated') def setup_scheduler(manager): 'Starts, stops or restarts the scheduler when config changes.' if (not manager.is_daemon): return scheduler = Scheduler(manager) if scheduler.is_alive(): scheduler.stop() if manager.config.get('schedules', True): scheduler.start()
Starts, stops or restarts the scheduler when config changes.
flexget/plugins/daemon/scheduler.py
setup_scheduler
fcharlier/Flexget
0
python
@event('manager.daemon.started') @event('manager.config_updated') def setup_scheduler(manager): if (not manager.is_daemon): return scheduler = Scheduler(manager) if scheduler.is_alive(): scheduler.stop() if manager.config.get('schedules', True): scheduler.start()
@event('manager.daemon.started') @event('manager.config_updated') def setup_scheduler(manager): if (not manager.is_daemon): return scheduler = Scheduler(manager) if scheduler.is_alive(): scheduler.stop() if manager.config.get('schedules', True): scheduler.start()<|docstring|>Starts, stops or restarts the scheduler when config changes.<|endoftext|>
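setup_scheduler is wired to two manager events through FlexGet's @event decorator. How that decorator works is not shown in this record; below is a minimal registry-based sketch of the general mechanism (an assumption for illustration, not FlexGet's actual implementation):

from collections import defaultdict

_handlers = defaultdict(list)

def event(name):
    # Register the decorated function as a handler for `name`.
    def decorator(func):
        _handlers[name].append(func)
        return func
    return decorator

def fire_event(name, *args, **kwargs):
    for handler in _handlers[name]:
        handler(*args, **kwargs)

@event('manager.config_updated')
def on_config_updated(manager):
    print('config updated:', manager)

fire_event('manager.config_updated', 'manager-instance')  # -> config updated: manager-instance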
4cee429c4d167d0bf6e4538b6b76825ed334b801dfcde71be2f8e3968b4f4625
def load_schedules(self): 'Clears current schedules and loads them from the config.' with self.triggers_lock: self.triggers = [] if ('schedules' not in self.manager.config): log.info('No schedules defined in config. Defaulting to run all tasks on a 1 hour interval.') for item in self.manager.config.get('schedules', [{'tasks': ['*'], 'interval': {'hours': 1}}]): tasks = item['tasks'] if (not isinstance(tasks, list)): tasks = [tasks] self.triggers.append(Trigger(item['interval'], tasks, options={'cron': True}))
Clears current schedules and loads them from the config.
flexget/plugins/daemon/scheduler.py
load_schedules
fcharlier/Flexget
0
python
def load_schedules(self): with self.triggers_lock: self.triggers = [] if ('schedules' not in self.manager.config): log.info('No schedules defined in config. Defaulting to run all tasks on a 1 hour interval.') for item in self.manager.config.get('schedules', [{'tasks': ['*'], 'interval': {'hours': 1}}]): tasks = item['tasks'] if (not isinstance(tasks, list)): tasks = [tasks] self.triggers.append(Trigger(item['interval'], tasks, options={'cron': True}))
def load_schedules(self): with self.triggers_lock: self.triggers = [] if ('schedules' not in self.manager.config): log.info('No schedules defined in config. Defaulting to run all tasks on a 1 hour interval.') for item in self.manager.config.get('schedules', [{'tasks': ['*'], 'interval': {'hours': 1}}]): tasks = item['tasks'] if (not isinstance(tasks, list)): tasks = [tasks] self.triggers.append(Trigger(item['interval'], tasks, options={'cron': True}))<|docstring|>Clears current schedules and loads them from the config.<|endoftext|>
7fa49bb445ff3f6c722707b1f2ea918f23dc19e10c737e5b75d631de59bc9dff
def __init__(self, interval, tasks, options=None): '\n :param dict interval: An interval dictionary from the config.\n :param list tasks: List of task names specified to run. Wildcards are allowed.\n :param dict options: Dictionary of options that should be applied to this run.\n ' self.tasks = tasks self.options = options self.unit = None self.amount = None self.on_day = None self.at_time = None self.last_run = None self.run_at = None self.interval = interval self._get_db_last_run() self.schedule_next_run()
:param dict interval: An interval dictionary from the config. :param list tasks: List of task names specified to run. Wildcards are allowed. :param dict options: Dictionary of options that should be applied to this run.
flexget/plugins/daemon/scheduler.py
__init__
fcharlier/Flexget
0
python
def __init__(self, interval, tasks, options=None): '\n :param dict interval: An interval dictionary from the config.\n :param list tasks: List of task names specified to run. Wildcards are allowed.\n :param dict options: Dictionary of options that should be applied to this run.\n ' self.tasks = tasks self.options = options self.unit = None self.amount = None self.on_day = None self.at_time = None self.last_run = None self.run_at = None self.interval = interval self._get_db_last_run() self.schedule_next_run()
def __init__(self, interval, tasks, options=None): '\n :param dict interval: An interval dictionary from the config.\n :param list tasks: List of task names specified to run. Wildcards are allowed.\n :param dict options: Dictionary of options that should be applied to this run.\n ' self.tasks = tasks self.options = options self.unit = None self.amount = None self.on_day = None self.at_time = None self.last_run = None self.run_at = None self.interval = interval self._get_db_last_run() self.schedule_next_run()<|docstring|>:param dict interval: An interval dictionary from the config. :param list tasks: List of task names specified to run. Wildcards are allowed. :param dict options: Dictionary of options that should be applied to this run.<|endoftext|>
522c1dbffbeaf8318db5a928f82e2ab38b8576549c220fb735a107639f53e904
def trigger(self): 'Call when trigger is activated. Records current run time and schedules next run.' self.last_run = datetime.now() self._set_db_last_run() self.schedule_next_run()
Call when trigger is activated. Records current run time and schedules next run.
flexget/plugins/daemon/scheduler.py
trigger
fcharlier/Flexget
0
python
def trigger(self): self.last_run = datetime.now() self._set_db_last_run() self.schedule_next_run()
def trigger(self): self.last_run = datetime.now() self._set_db_last_run() self.schedule_next_run()<|docstring|>Call when trigger is activated. Records current run time and schedules next run.<|endoftext|>
902f6f33e058b1eabb8d03e4e85c760eb878fe9f39abcc5beb181fefecb4e638
def __hash__(self): 'A unique id which describes this trigger.' return hash((tuple(sorted(self.interval.iteritems())) + tuple(sorted(self.tasks))))
A unique id which describes this trigger.
flexget/plugins/daemon/scheduler.py
__hash__
fcharlier/Flexget
0
python
def __hash__(self): return hash((tuple(sorted(self.interval.iteritems())) + tuple(sorted(self.tasks))))
def __hash__(self): return hash((tuple(sorted(self.interval.iteritems())) + tuple(sorted(self.tasks))))<|docstring|>A unique id which describes this trigger.<|endoftext|>
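Note that the __hash__ above calls dict.iteritems, which exists only in Python 2. Under Python 3 the same identity would be computed with .items(); a standalone sketch, with example interval/tasks values:

def trigger_hash(interval, tasks):
    # Python 3 equivalent of the __hash__ body: order-independent over both inputs.
    return hash(tuple(sorted(interval.items())) + tuple(sorted(tasks)))

print(trigger_hash({'hours': 1}, ['*']))  # stable for equal (interval, tasks) pairs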
48196dd13abf47b136970e2fe1616407526b7e94d16482bc6b72e4d036051811
def alphanum_key(s): ' Turn a string into a list of string and number chunks.\n "z23a" -> ["z", 23, "a"]\n ' return [tryint(c) for c in re.split('([0-9]+)', s)]
Turn a string into a list of string and number chunks. "z23a" -> ["z", 23, "a"]
scripts/benchmark_kcl/merge_hemis.py
alphanum_key
amiralansary/BrainSurfaceTK
0
python
def alphanum_key(s): ' Turn a string into a list of string and number chunks.\n "z23a" -> ["z", 23, "a"]\n ' return [tryint(c) for c in re.split('([0-9]+)', s)]
def alphanum_key(s): ' Turn a string into a list of string and number chunks.\n "z23a" -> ["z", 23, "a"]\n ' return [tryint(c) for c in re.split('([0-9]+)', s)]<|docstring|>Turn a string into a list of string and number chunks. "z23a" -> ["z", 23, "a"]<|endoftext|>
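alphanum_key relies on a tryint helper that this record does not include. Assuming the usual definition, the key enables natural sorting of numbered filenames:

import re

def tryint(s):
    # Assumed helper: numeric chunks become ints, everything else stays a string.
    try:
        return int(s)
    except ValueError:
        return s

def alphanum_key(s):
    return [tryint(c) for c in re.split('([0-9]+)', s)]

files = ['scan10.vtk', 'scan2.vtk', 'scan1.vtk']
print(sorted(files, key=alphanum_key))  # ['scan1.vtk', 'scan2.vtk', 'scan10.vtk']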
4abfa0763f4c698d3a23904e5b2affffd1dcdb4eef513292e21aba5463df9756
def make_dirs(path): 'make a new directory if path does not exist' if os.path.exists(path): return os.makedirs(path)
make a new directory if path does not exist
scripts/benchmark_kcl/merge_hemis.py
make_dirs
amiralansary/BrainSurfaceTK
0
python
def make_dirs(path): if os.path.exists(path): return os.makedirs(path)
def make_dirs(path): if os.path.exists(path): return os.makedirs(path)<|docstring|>make a new directory if path does not exist<|endoftext|>
a3dc7b5997abec29f84a1585fdb63d65c742d94ac8754710c1ea438da818043f
def decimate_surface(target_file, reduce_by, save_dir, surf, tags='', ext='.vtk'): '\n :param ext:\n :param tag:\n :param surf:\n :param reduce_by:\n :param save_path:\n :param target_file:\n :type surfaces: object\n ' filename = target_file.split('/')[(- 1)][:(- 4)] print((((20 * '-') + filename) + (20 * '-'))) for (i, tag) in enumerate(tags): save_reduce_path = os.path.join(save_dir, ('reducedto_' + tag)) output_dir = os.path.join(save_reduce_path, surf, ext[1:]) make_dirs(output_dir) output_file = os.path.join(output_dir, (((filename + '_') + tag) + ext)) cmd = (((((('mirtk decimate-surface ' + target_file) + ' ') + output_file) + ' -reduceby ') + str(reduce_by[i])) + ' -preservetopology 1 -splitangle 75') print((((10 * '-') + ' decimate-surface ') + (10 * '-'))) print(cmd) os.system(cmd) print('--')
:param ext: :param tag: :param surf: :param reduce_by: :param save_path: :param target_file: :type surfaces: object
scripts/benchmark_kcl/merge_hemis.py
decimate_surface
amiralansary/BrainSurfaceTK
0
python
def decimate_surface(target_file, reduce_by, save_dir, surf, tags='', ext='.vtk'): '\n :param ext:\n :param tag:\n :param surf:\n :param reduce_by:\n :param save_path:\n :param target_file:\n :type surfaces: object\n ' filename = target_file.split('/')[(- 1)][:(- 4)] print((((20 * '-') + filename) + (20 * '-'))) for (i, tag) in enumerate(tags): save_reduce_path = os.path.join(save_dir, ('reducedto_' + tag)) output_dir = os.path.join(save_reduce_path, surf, ext[1:]) make_dirs(output_dir) output_file = os.path.join(output_dir, (((filename + '_') + tag) + ext)) cmd = (((((('mirtk decimate-surface ' + target_file) + ' ') + output_file) + ' -reduceby ') + str(reduce_by[i])) + ' -preservetopology 1 -splitangle 75') print((((10 * '-') + ' decimate-surface ') + (10 * '-'))) print(cmd) os.system(cmd) print('--')
def decimate_surface(target_file, reduce_by, save_dir, surf, tags='', ext='.vtk'): '\n :param ext:\n :param tag:\n :param surf:\n :param reduce_by:\n :param save_path:\n :param target_file:\n :type surfaces: object\n ' filename = target_file.split('/')[(- 1)][:(- 4)] print((((20 * '-') + filename) + (20 * '-'))) for (i, tag) in enumerate(tags): save_reduce_path = os.path.join(save_dir, ('reducedto_' + tag)) output_dir = os.path.join(save_reduce_path, surf, ext[1:]) make_dirs(output_dir) output_file = os.path.join(output_dir, (((filename + '_') + tag) + ext)) cmd = (((((('mirtk decimate-surface ' + target_file) + ' ') + output_file) + ' -reduceby ') + str(reduce_by[i])) + ' -preservetopology 1 -splitangle 75') print((((10 * '-') + ' decimate-surface ') + (10 * '-'))) print(cmd) os.system(cmd) print('--')<|docstring|>:param ext: :param tag: :param surf: :param reduce_by: :param save_path: :param target_file: :type surfaces: object<|endoftext|>
0562cebec081580930e2240b6f687f718025d02fc919626b6dba4a84c2a55ee0
def make_image_key(video_id, timestamp): 'Returns a unique identifier for a video id & timestamp.' return ('%s,%04d' % (video_id, int(timestamp)))
Returns a unique identifier for a video id & timestamp.
evaluation/get_ava_performance_custom.py
make_image_key
oulutan/ActorConditionedAttentionMaps
23
python
def make_image_key(video_id, timestamp): return ('%s,%04d' % (video_id, int(timestamp)))
def make_image_key(video_id, timestamp): return ('%s,%04d' % (video_id, int(timestamp)))<|docstring|>Returns a unique identifier for a video id & timestamp.<|endoftext|>
937cac49154fd19773f83952e2fd70a6d4b649ec921e649da7b1d0aae33fb163
def read_csv(csv_file, class_whitelist=None): 'Loads boxes and class labels from a CSV file in the AVA format.\n\n CSV file format described at https://research.google.com/ava/download.html.\n\n Args:\n csv_file: A file object.\n class_whitelist: If provided, boxes corresponding to (integer) class labels\n not in this set are skipped.\n\n Returns:\n boxes: A dictionary mapping each unique image key (string) to a list of\n boxes, given as coordinates [y1, x1, y2, x2].\n labels: A dictionary mapping each unique image key (string) to a list of\n integer class lables, matching the corresponding box in `boxes`.\n scores: A dictionary mapping each unique image key (string) to a list of\n score values lables, matching the corresponding label in `labels`. If\n scores are not provided in the csv, then they will default to 1.0.\n ' start = time.time() boxes = defaultdict(list) labels = defaultdict(list) scores = defaultdict(list) reader = csv.reader(csv_file) for row in reader: assert (len(row) in [7, 8]), ('Wrong number of columns: ' + row) image_key = make_image_key(row[0], row[1]) (x1, y1, x2, y2) = [float(n) for n in row[2:6]] action_id = int(row[6]) if (class_whitelist and (action_id not in class_whitelist)): continue score = 1.0 if (len(row) == 8): score = float(row[7]) boxes[image_key].append([y1, x1, y2, x2]) labels[image_key].append(action_id) scores[image_key].append(score) print_time(('read file ' + csv_file.name), start) return (boxes, labels, scores)
Loads boxes and class labels from a CSV file in the AVA format. CSV file format described at https://research.google.com/ava/download.html. Args: csv_file: A file object. class_whitelist: If provided, boxes corresponding to (integer) class labels not in this set are skipped. Returns: boxes: A dictionary mapping each unique image key (string) to a list of boxes, given as coordinates [y1, x1, y2, x2]. labels: A dictionary mapping each unique image key (string) to a list of integer class lables, matching the corresponding box in `boxes`. scores: A dictionary mapping each unique image key (string) to a list of score values lables, matching the corresponding label in `labels`. If scores are not provided in the csv, then they will default to 1.0.
evaluation/get_ava_performance_custom.py
read_csv
oulutan/ActorConditionedAttentionMaps
23
python
def read_csv(csv_file, class_whitelist=None): 'Loads boxes and class labels from a CSV file in the AVA format.\n\n CSV file format described at https://research.google.com/ava/download.html.\n\n Args:\n csv_file: A file object.\n class_whitelist: If provided, boxes corresponding to (integer) class labels\n not in this set are skipped.\n\n Returns:\n boxes: A dictionary mapping each unique image key (string) to a list of\n boxes, given as coordinates [y1, x1, y2, x2].\n labels: A dictionary mapping each unique image key (string) to a list of\n integer class lables, matching the corresponding box in `boxes`.\n scores: A dictionary mapping each unique image key (string) to a list of\n score values lables, matching the corresponding label in `labels`. If\n scores are not provided in the csv, then they will default to 1.0.\n ' start = time.time() boxes = defaultdict(list) labels = defaultdict(list) scores = defaultdict(list) reader = csv.reader(csv_file) for row in reader: assert (len(row) in [7, 8]), ('Wrong number of columns: ' + row) image_key = make_image_key(row[0], row[1]) (x1, y1, x2, y2) = [float(n) for n in row[2:6]] action_id = int(row[6]) if (class_whitelist and (action_id not in class_whitelist)): continue score = 1.0 if (len(row) == 8): score = float(row[7]) boxes[image_key].append([y1, x1, y2, x2]) labels[image_key].append(action_id) scores[image_key].append(score) print_time(('read file ' + csv_file.name), start) return (boxes, labels, scores)
def read_csv(csv_file, class_whitelist=None): 'Loads boxes and class labels from a CSV file in the AVA format.\n\n CSV file format described at https://research.google.com/ava/download.html.\n\n Args:\n csv_file: A file object.\n class_whitelist: If provided, boxes corresponding to (integer) class labels\n not in this set are skipped.\n\n Returns:\n boxes: A dictionary mapping each unique image key (string) to a list of\n boxes, given as coordinates [y1, x1, y2, x2].\n labels: A dictionary mapping each unique image key (string) to a list of\n integer class lables, matching the corresponding box in `boxes`.\n scores: A dictionary mapping each unique image key (string) to a list of\n score values lables, matching the corresponding label in `labels`. If\n scores are not provided in the csv, then they will default to 1.0.\n ' start = time.time() boxes = defaultdict(list) labels = defaultdict(list) scores = defaultdict(list) reader = csv.reader(csv_file) for row in reader: assert (len(row) in [7, 8]), ('Wrong number of columns: ' + row) image_key = make_image_key(row[0], row[1]) (x1, y1, x2, y2) = [float(n) for n in row[2:6]] action_id = int(row[6]) if (class_whitelist and (action_id not in class_whitelist)): continue score = 1.0 if (len(row) == 8): score = float(row[7]) boxes[image_key].append([y1, x1, y2, x2]) labels[image_key].append(action_id) scores[image_key].append(score) print_time(('read file ' + csv_file.name), start) return (boxes, labels, scores)<|docstring|>Loads boxes and class labels from a CSV file in the AVA format. CSV file format described at https://research.google.com/ava/download.html. Args: csv_file: A file object. class_whitelist: If provided, boxes corresponding to (integer) class labels not in this set are skipped. Returns: boxes: A dictionary mapping each unique image key (string) to a list of boxes, given as coordinates [y1, x1, y2, x2]. labels: A dictionary mapping each unique image key (string) to a list of integer class lables, matching the corresponding box in `boxes`. scores: A dictionary mapping each unique image key (string) to a list of score values lables, matching the corresponding label in `labels`. If scores are not provided in the csv, then they will default to 1.0.<|endoftext|>
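The CSV layout read_csv expects is video_id, timestamp, x1, y1, x2, y2, action_id and an optional score. A trimmed, self-contained sketch of the same parsing on an in-memory file; the sample rows are illustrative, and the timing/whitelist logic of the original is omitted:

import csv
import io
from collections import defaultdict

sample = io.StringIO(
    '-5KQ66BBWC4,0902,0.077,0.151,0.283,0.811,80,0.92\n'
    '-5KQ66BBWC4,0902,0.332,0.194,0.484,0.856,11\n'   # no score column -> defaults to 1.0
)

boxes, labels, scores = defaultdict(list), defaultdict(list), defaultdict(list)
for row in csv.reader(sample):
    key = '%s,%04d' % (row[0], int(row[1]))           # same format as make_image_key
    x1, y1, x2, y2 = (float(n) for n in row[2:6])
    boxes[key].append([y1, x1, y2, x2])               # note the [y1, x1, y2, x2] ordering
    labels[key].append(int(row[6]))
    scores[key].append(float(row[7]) if len(row) == 8 else 1.0)

print(dict(labels))  # {'-5KQ66BBWC4,0902': [80, 11]}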
2a4a2597a691ed16e7290fa3b47e8444646dcd9aac8b39fbf54810c662967923
def read_exclusions(exclusions_file): 'Reads a CSV file of excluded timestamps.\n\n Args:\n exclusions_file: A file object containing a csv of video-id,timestamp.\n\n Returns:\n A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904",\n or an empty set if exclusions file is None.\n ' excluded = set() if exclusions_file: reader = csv.reader(exclusions_file) for row in reader: assert (len(row) == 2), ('Expected only 2 columns, got: ' + row) excluded.add(make_image_key(row[0], row[1])) return excluded
Reads a CSV file of excluded timestamps. Args: exclusions_file: A file object containing a csv of video-id,timestamp. Returns: A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904", or an empty set if exclusions file is None.
evaluation/get_ava_performance_custom.py
read_exclusions
oulutan/ActorConditionedAttentionMaps
23
python
def read_exclusions(exclusions_file): 'Reads a CSV file of excluded timestamps.\n\n Args:\n exclusions_file: A file object containing a csv of video-id,timestamp.\n\n Returns:\n A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904",\n or an empty set if exclusions file is None.\n ' excluded = set() if exclusions_file: reader = csv.reader(exclusions_file) for row in reader: assert (len(row) == 2), ('Expected only 2 columns, got: ' + row) excluded.add(make_image_key(row[0], row[1])) return excluded
def read_exclusions(exclusions_file): 'Reads a CSV file of excluded timestamps.\n\n Args:\n exclusions_file: A file object containing a csv of video-id,timestamp.\n\n Returns:\n A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904",\n or an empty set if exclusions file is None.\n ' excluded = set() if exclusions_file: reader = csv.reader(exclusions_file) for row in reader: assert (len(row) == 2), ('Expected only 2 columns, got: ' + row) excluded.add(make_image_key(row[0], row[1])) return excluded<|docstring|>Reads a CSV file of excluded timestamps. Args: exclusions_file: A file object containing a csv of video-id,timestamp. Returns: A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904", or an empty set if exclusions file is None.<|endoftext|>
ca8ea7540e95784ab4a37a5acee59b39444f633385ee8a82bd35663bfcddee72
def read_labelmap(labelmap_file): 'Reads a labelmap without the dependency on protocol buffers.\n\n Args:\n labelmap_file: A file object containing a label map protocol buffer.\n\n Returns:\n labelmap: The label map in the form used by the object_detection_evaluation\n module - a list of {"id": integer, "name": classname } dicts.\n class_ids: A set containing all of the valid class id integers.\n ' labelmap = [] class_ids = set() name = '' class_id = '' for line in labelmap_file: if line.startswith(' name:'): name = line.split('"')[1] elif (line.startswith(' id:') or line.startswith(' label_id:')): class_id = int(line.strip().split(' ')[(- 1)]) labelmap.append({'id': class_id, 'name': name}) class_ids.add(class_id) return (labelmap, class_ids)
Reads a labelmap without the dependency on protocol buffers. Args: labelmap_file: A file object containing a label map protocol buffer. Returns: labelmap: The label map in the form used by the object_detection_evaluation module - a list of {"id": integer, "name": classname } dicts. class_ids: A set containing all of the valid class id integers.
evaluation/get_ava_performance_custom.py
read_labelmap
oulutan/ActorConditionedAttentionMaps
23
python
def read_labelmap(labelmap_file): 'Reads a labelmap without the dependency on protocol buffers.\n\n Args:\n labelmap_file: A file object containing a label map protocol buffer.\n\n Returns:\n labelmap: The label map in the form used by the object_detection_evaluation\n module - a list of {"id": integer, "name": classname } dicts.\n class_ids: A set containing all of the valid class id integers.\n ' labelmap = [] class_ids = set() name = '' class_id = '' for line in labelmap_file: if line.startswith(' name:'): name = line.split('"')[1] elif (line.startswith(' id:') or line.startswith(' label_id:')): class_id = int(line.strip().split(' ')[(- 1)]) labelmap.append({'id': class_id, 'name': name}) class_ids.add(class_id) return (labelmap, class_ids)
def read_labelmap(labelmap_file): 'Reads a labelmap without the dependency on protocol buffers.\n\n Args:\n labelmap_file: A file object containing a label map protocol buffer.\n\n Returns:\n labelmap: The label map in the form used by the object_detection_evaluation\n module - a list of {"id": integer, "name": classname } dicts.\n class_ids: A set containing all of the valid class id integers.\n ' labelmap = [] class_ids = set() name = '' class_id = '' for line in labelmap_file: if line.startswith(' name:'): name = line.split('"')[1] elif (line.startswith(' id:') or line.startswith(' label_id:')): class_id = int(line.strip().split(' ')[(- 1)]) labelmap.append({'id': class_id, 'name': name}) class_ids.add(class_id) return (labelmap, class_ids)<|docstring|>Reads a labelmap without the dependency on protocol buffers. Args: labelmap_file: A file object containing a label map protocol buffer. Returns: labelmap: The label map in the form used by the object_detection_evaluation module - a list of {"id": integer, "name": classname } dicts. class_ids: A set containing all of the valid class id integers.<|endoftext|>
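read_labelmap does a plain line scan of the pbtxt instead of depending on protocol buffers. Feeding the same logic a small in-memory label map shows the expected input shape; the two entries below are illustrative:

import io

pbtxt = io.StringIO(
    'item {\n'
    ' name: "bend/bow (at the waist)"\n'
    ' id: 1\n'
    '}\n'
    'item {\n'
    ' name: "crouch/kneel"\n'
    ' id: 3\n'
    '}\n'
)

labelmap, class_ids = [], set()
name = ''
for line in pbtxt:
    if line.startswith(' name:'):
        name = line.split('"')[1]
    elif line.startswith(' id:') or line.startswith(' label_id:'):
        class_id = int(line.strip().split(' ')[-1])
        labelmap.append({'id': class_id, 'name': name})
        class_ids.add(class_id)

print(labelmap)   # [{'id': 1, 'name': 'bend/bow (at the waist)'}, {'id': 3, 'name': 'crouch/kneel'}]
print(class_ids)  # {1, 3}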
1705f6755381a1ae538aa1c7bf86a5c8120a3de672eda916d57e18d30072033f
def run_evaluation(labelmap, groundtruth, detections, exclusions): 'Runs evaluations given input files.\n\n Args:\n labelmap: file object containing map of labels to consider, in pbtxt format\n groundtruth: file object\n detections: file object\n exclusions: file object or None.\n ' (categories, class_whitelist) = read_labelmap(labelmap) logging.info('CATEGORIES (%d):\n%s', len(categories), pprint.pformat(categories, indent=2)) excluded_keys = read_exclusions(exclusions) pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(categories) (boxes, labels, _) = read_csv(groundtruth, class_whitelist) start = time.time() for image_key in boxes: if (image_key in excluded_keys): logging.info('Found excluded timestamp in ground truth: %s. It will be ignored.', image_key) continue pascal_evaluator.add_single_ground_truth_image_info(image_key, {standard_fields.InputDataFields.groundtruth_boxes: np.array(boxes[image_key], dtype=float), standard_fields.InputDataFields.groundtruth_classes: np.array(labels[image_key], dtype=int), standard_fields.InputDataFields.groundtruth_difficult: np.zeros(len(boxes[image_key]), dtype=bool)}) print_time('convert groundtruth', start) (boxes, labels, scores) = read_csv(detections, class_whitelist) start = time.time() for image_key in boxes: if (image_key in excluded_keys): logging.info('Found excluded timestamp in detections: %s. It will be ignored.', image_key) continue pascal_evaluator.add_single_detected_image_info(image_key, {standard_fields.DetectionResultFields.detection_boxes: np.array(boxes[image_key], dtype=float), standard_fields.DetectionResultFields.detection_classes: np.array(labels[image_key], dtype=int), standard_fields.DetectionResultFields.detection_scores: np.array(scores[image_key], dtype=float)}) print_time('convert detections', start) start = time.time() metrics = pascal_evaluator.evaluate() print_time('run_evaluator', start) return metrics
Runs evaluations given input files. Args: labelmap: file object containing map of labels to consider, in pbtxt format groundtruth: file object detections: file object exclusions: file object or None.
evaluation/get_ava_performance_custom.py
run_evaluation
oulutan/ActorConditionedAttentionMaps
23
python
def run_evaluation(labelmap, groundtruth, detections, exclusions): 'Runs evaluations given input files.\n\n Args:\n labelmap: file object containing map of labels to consider, in pbtxt format\n groundtruth: file object\n detections: file object\n exclusions: file object or None.\n ' (categories, class_whitelist) = read_labelmap(labelmap) logging.info('CATEGORIES (%d):\n%s', len(categories), pprint.pformat(categories, indent=2)) excluded_keys = read_exclusions(exclusions) pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(categories) (boxes, labels, _) = read_csv(groundtruth, class_whitelist) start = time.time() for image_key in boxes: if (image_key in excluded_keys): logging.info('Found excluded timestamp in ground truth: %s. It will be ignored.', image_key) continue pascal_evaluator.add_single_ground_truth_image_info(image_key, {standard_fields.InputDataFields.groundtruth_boxes: np.array(boxes[image_key], dtype=float), standard_fields.InputDataFields.groundtruth_classes: np.array(labels[image_key], dtype=int), standard_fields.InputDataFields.groundtruth_difficult: np.zeros(len(boxes[image_key]), dtype=bool)}) print_time('convert groundtruth', start) (boxes, labels, scores) = read_csv(detections, class_whitelist) start = time.time() for image_key in boxes: if (image_key in excluded_keys): logging.info('Found excluded timestamp in detections: %s. It will be ignored.', image_key) continue pascal_evaluator.add_single_detected_image_info(image_key, {standard_fields.DetectionResultFields.detection_boxes: np.array(boxes[image_key], dtype=float), standard_fields.DetectionResultFields.detection_classes: np.array(labels[image_key], dtype=int), standard_fields.DetectionResultFields.detection_scores: np.array(scores[image_key], dtype=float)}) print_time('convert detections', start) start = time.time() metrics = pascal_evaluator.evaluate() print_time('run_evaluator', start) return metrics
def run_evaluation(labelmap, groundtruth, detections, exclusions): 'Runs evaluations given input files.\n\n Args:\n labelmap: file object containing map of labels to consider, in pbtxt format\n groundtruth: file object\n detections: file object\n exclusions: file object or None.\n ' (categories, class_whitelist) = read_labelmap(labelmap) logging.info('CATEGORIES (%d):\n%s', len(categories), pprint.pformat(categories, indent=2)) excluded_keys = read_exclusions(exclusions) pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(categories) (boxes, labels, _) = read_csv(groundtruth, class_whitelist) start = time.time() for image_key in boxes: if (image_key in excluded_keys): logging.info('Found excluded timestamp in ground truth: %s. It will be ignored.', image_key) continue pascal_evaluator.add_single_ground_truth_image_info(image_key, {standard_fields.InputDataFields.groundtruth_boxes: np.array(boxes[image_key], dtype=float), standard_fields.InputDataFields.groundtruth_classes: np.array(labels[image_key], dtype=int), standard_fields.InputDataFields.groundtruth_difficult: np.zeros(len(boxes[image_key]), dtype=bool)}) print_time('convert groundtruth', start) (boxes, labels, scores) = read_csv(detections, class_whitelist) start = time.time() for image_key in boxes: if (image_key in excluded_keys): logging.info('Found excluded timestamp in detections: %s. It will be ignored.', image_key) continue pascal_evaluator.add_single_detected_image_info(image_key, {standard_fields.DetectionResultFields.detection_boxes: np.array(boxes[image_key], dtype=float), standard_fields.DetectionResultFields.detection_classes: np.array(labels[image_key], dtype=int), standard_fields.DetectionResultFields.detection_scores: np.array(scores[image_key], dtype=float)}) print_time('convert detections', start) start = time.time() metrics = pascal_evaluator.evaluate() print_time('run_evaluator', start) return metrics<|docstring|>Runs evaluations given input files. Args: labelmap: file object containing map of labels to consider, in pbtxt format groundtruth: file object detections: file object exclusions: file object or None.<|endoftext|>
16fc7af2f8adecfa44787cac6fb65da2f310559563ca8724514189cc8487fc18
def parse_arguments(): 'Parses command-line flags.\n\n Returns:\n args: a named tuple containing three file objects args.labelmap,\n args.groundtruth, and args.detections.\n ' parser = argparse.ArgumentParser() parser.add_argument('-l', '--labelmap', help='Filename of label map', type=argparse.FileType('r'), default='ava/ava_action_list_v2.1_for_activitynet_2018.pbtxt.txt') parser.add_argument('-g', '--groundtruth', help='CSV file containing ground truth.', type=argparse.FileType('r'), required=True) parser.add_argument('-d', '--detections', help='CSV file containing inferred action detections.', type=argparse.FileType('r'), required=True) parser.add_argument('-e', '--exclusions', help='Optional CSV file containing videoid,timestamp pairs to exclude from evaluation.', type=argparse.FileType('r'), required=False) return parser.parse_args()
Parses command-line flags. Returns: args: a named tuple containing three file objects args.labelmap, args.groundtruth, and args.detections.
evaluation/get_ava_performance_custom.py
parse_arguments
oulutan/ActorConditionedAttentionMaps
23
python
def parse_arguments(): 'Parses command-line flags.\n\n Returns:\n args: a named tuple containing three file objects args.labelmap,\n args.groundtruth, and args.detections.\n ' parser = argparse.ArgumentParser() parser.add_argument('-l', '--labelmap', help='Filename of label map', type=argparse.FileType('r'), default='ava/ava_action_list_v2.1_for_activitynet_2018.pbtxt.txt') parser.add_argument('-g', '--groundtruth', help='CSV file containing ground truth.', type=argparse.FileType('r'), required=True) parser.add_argument('-d', '--detections', help='CSV file containing inferred action detections.', type=argparse.FileType('r'), required=True) parser.add_argument('-e', '--exclusions', help='Optional CSV file containing videoid,timestamp pairs to exclude from evaluation.', type=argparse.FileType('r'), required=False) return parser.parse_args()
def parse_arguments(): 'Parses command-line flags.\n\n Returns:\n args: a named tuple containing three file objects args.labelmap,\n args.groundtruth, and args.detections.\n ' parser = argparse.ArgumentParser() parser.add_argument('-l', '--labelmap', help='Filename of label map', type=argparse.FileType('r'), default='ava/ava_action_list_v2.1_for_activitynet_2018.pbtxt.txt') parser.add_argument('-g', '--groundtruth', help='CSV file containing ground truth.', type=argparse.FileType('r'), required=True) parser.add_argument('-d', '--detections', help='CSV file containing inferred action detections.', type=argparse.FileType('r'), required=True) parser.add_argument('-e', '--exclusions', help='Optional CSV file containing videoid,timestamp pairs to exclude from evaluation.', type=argparse.FileType('r'), required=False) return parser.parse_args()<|docstring|>Parses command-line flags. Returns: args: a named tuple containing three file objects args.labelmap, args.groundtruth, and args.detections.<|endoftext|>
b0d8987cd35a81ca2b21b485171306ac846ad799761fa416850dbd2655a48b71
def make_instance(self, include_optional): 'Test VehicleResource\n include_option is a boolean, when False only required\n params are included, when True both required and\n optional params are included ' if include_optional: return VehicleResource(type='0', relationships=openapi_client.models.vehicle_resource_relationships.VehicleResource_relationships(trip=openapi_client.models.prediction_resource_relationships_trip.PredictionResource_relationships_trip(links=openapi_client.models.prediction_resource_relationships_trip_links.PredictionResource_relationships_trip_links(self='0', related='0'), data=openapi_client.models.prediction_resource_relationships_trip_data.PredictionResource_relationships_trip_data(type='0', id='0')), stop=openapi_client.models.prediction_resource_relationships_stop.PredictionResource_relationships_stop(), route=openapi_client.models.prediction_resource_relationships_route.PredictionResource_relationships_route()), links=None, id='0', attributes=openapi_client.models.vehicle_resource_attributes.VehicleResource_attributes(updated_at='2017-08-14T16:04:44-04:00', speed=16.0, longitude=42.32941818237305, latitude=(- 71.27239990234375), label='1817', direction_id=56, current_stop_sequence=8, current_status='IN_TRANSIT_TO', bearing=174)) else: return VehicleResource()
Test VehicleResource include_option is a boolean, when False only required params are included, when True both required and optional params are included
test/test_vehicle_resource.py
make_instance
hypostulate/mbta-api-client
0
python
def make_instance(self, include_optional): 'Test VehicleResource\n include_option is a boolean, when False only required\n params are included, when True both required and\n optional params are included ' if include_optional: return VehicleResource(type='0', relationships=openapi_client.models.vehicle_resource_relationships.VehicleResource_relationships(trip=openapi_client.models.prediction_resource_relationships_trip.PredictionResource_relationships_trip(links=openapi_client.models.prediction_resource_relationships_trip_links.PredictionResource_relationships_trip_links(self='0', related='0'), data=openapi_client.models.prediction_resource_relationships_trip_data.PredictionResource_relationships_trip_data(type='0', id='0')), stop=openapi_client.models.prediction_resource_relationships_stop.PredictionResource_relationships_stop(), route=openapi_client.models.prediction_resource_relationships_route.PredictionResource_relationships_route()), links=None, id='0', attributes=openapi_client.models.vehicle_resource_attributes.VehicleResource_attributes(updated_at='2017-08-14T16:04:44-04:00', speed=16.0, longitude=42.32941818237305, latitude=(- 71.27239990234375), label='1817', direction_id=56, current_stop_sequence=8, current_status='IN_TRANSIT_TO', bearing=174)) else: return VehicleResource()
def make_instance(self, include_optional): 'Test VehicleResource\n include_option is a boolean, when False only required\n params are included, when True both required and\n optional params are included ' if include_optional: return VehicleResource(type='0', relationships=openapi_client.models.vehicle_resource_relationships.VehicleResource_relationships(trip=openapi_client.models.prediction_resource_relationships_trip.PredictionResource_relationships_trip(links=openapi_client.models.prediction_resource_relationships_trip_links.PredictionResource_relationships_trip_links(self='0', related='0'), data=openapi_client.models.prediction_resource_relationships_trip_data.PredictionResource_relationships_trip_data(type='0', id='0')), stop=openapi_client.models.prediction_resource_relationships_stop.PredictionResource_relationships_stop(), route=openapi_client.models.prediction_resource_relationships_route.PredictionResource_relationships_route()), links=None, id='0', attributes=openapi_client.models.vehicle_resource_attributes.VehicleResource_attributes(updated_at='2017-08-14T16:04:44-04:00', speed=16.0, longitude=42.32941818237305, latitude=(- 71.27239990234375), label='1817', direction_id=56, current_stop_sequence=8, current_status='IN_TRANSIT_TO', bearing=174)) else: return VehicleResource()<|docstring|>Test VehicleResource include_option is a boolean, when False only required params are included, when True both required and optional params are included<|endoftext|>
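A hedged sketch of constructing the model directly, mirroring the two branches above; the import path is an assumption based on the openapi_client package layout:

from openapi_client.models.vehicle_resource import VehicleResource  # assumed module path
vehicle = VehicleResource()                       # required-only construction (include_optional=False)
vehicle_full = VehicleResource(id='0', type='0')  # generated models accept properties as keyword arguments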
702295050ddbf1c7391aaadee35d10e1c8faee3efec6f35aebf7ffbf11f27ee5
def testVehicleResource(self): 'Test VehicleResource' inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True)
Test VehicleResource
test/test_vehicle_resource.py
testVehicleResource
hypostulate/mbta-api-client
0
python
def testVehicleResource(self): inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True)
def testVehicleResource(self): inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True)<|docstring|>Test VehicleResource<|endoftext|>
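Generated tests like this are normally driven by the standard unittest runner; a minimal entry point (assumed, not taken from the record) would be:

import unittest

if __name__ == '__main__':
    unittest.main()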
946bdc3077a04568d1206f7bdb8e303992e74bc3e7d39cf760f887927ef5fe4a
def kf_model(input_val, y_val, fn, fn_kwargs={}, k_folds=5, random_state=15, verbose=False): '\n K-fold task, mean and std of results are calculated over K folds\n \n Params: \n input_val: (np.array) 2-D array holding instances (features) of validation set.\n y_val: (np.array) 1-D array holding y-values for validation set.\n fn: (class) a method used for calibration\n l2: (float) L2 regulariation value.\n k_folds: (int) how many crossvalidation folds are used.\n comp_l2: (bool) use reversed L2 matrix for regulariation (default = False)\n \n returns: \n mean_error, mean_ece, mean_mce, mean_loss, mean_brier, std_loss, std_brier\n ' kf = KFold(n_splits=k_folds, shuffle=True, random_state=random_state) kf_results = [] models = [] for (train_index, test_index) in kf.split(input_val): (X_train_c, X_val_c) = (input_val[train_index], input_val[test_index]) (y_train_c, y_val_c) = (y_val[train_index], y_val[test_index]) t1 = time.time() model = fn(**fn_kwargs) model.fit(X_train_c, y_train_c) print('Model trained:', (time.time() - t1)) probs_holdout = model.predict_proba(X_val_c) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate_rip(probs_holdout, y_val_c, verbose=False) kf_results.append([error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) models.append(model) return (models, np.mean(kf_results, axis=0))
K-fold task, mean and std of results are calculated over K folds Params: input_val: (np.array) 2-D array holding instances (features) of validation set. y_val: (np.array) 1-D array holding y-values for validation set. fn: (class) a method used for calibration l2: (float) L2 regulariation value. k_folds: (int) how many crossvalidation folds are used. comp_l2: (bool) use reversed L2 matrix for regulariation (default = False) returns: mean_error, mean_ece, mean_mce, mean_loss, mean_brier, std_loss, std_brier
Confidence_Calibration/calibration/calibration_functions.py
kf_model
heatherwan/Automatic-Validation-of-Simulation-Results
0
python
def kf_model(input_val, y_val, fn, fn_kwargs={}, k_folds=5, random_state=15, verbose=False): '\n K-fold task, mean and std of results are calculated over K folds\n \n Params: \n input_val: (np.array) 2-D array holding instances (features) of validation set.\n y_val: (np.array) 1-D array holding y-values for validation set.\n fn: (class) a method used for calibration\n l2: (float) L2 regulariation value.\n k_folds: (int) how many crossvalidation folds are used.\n comp_l2: (bool) use reversed L2 matrix for regulariation (default = False)\n \n returns: \n mean_error, mean_ece, mean_mce, mean_loss, mean_brier, std_loss, std_brier\n ' kf = KFold(n_splits=k_folds, shuffle=True, random_state=random_state) kf_results = [] models = [] for (train_index, test_index) in kf.split(input_val): (X_train_c, X_val_c) = (input_val[train_index], input_val[test_index]) (y_train_c, y_val_c) = (y_val[train_index], y_val[test_index]) t1 = time.time() model = fn(**fn_kwargs) model.fit(X_train_c, y_train_c) print('Model trained:', (time.time() - t1)) probs_holdout = model.predict_proba(X_val_c) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate_rip(probs_holdout, y_val_c, verbose=False) kf_results.append([error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) models.append(model) return (models, np.mean(kf_results, axis=0))
def kf_model(input_val, y_val, fn, fn_kwargs={}, k_folds=5, random_state=15, verbose=False): '\n K-fold task, mean and std of results are calculated over K folds\n \n Params: \n input_val: (np.array) 2-D array holding instances (features) of validation set.\n y_val: (np.array) 1-D array holding y-values for validation set.\n fn: (class) a method used for calibration\n l2: (float) L2 regulariation value.\n k_folds: (int) how many crossvalidation folds are used.\n comp_l2: (bool) use reversed L2 matrix for regulariation (default = False)\n \n returns: \n mean_error, mean_ece, mean_mce, mean_loss, mean_brier, std_loss, std_brier\n ' kf = KFold(n_splits=k_folds, shuffle=True, random_state=random_state) kf_results = [] models = [] for (train_index, test_index) in kf.split(input_val): (X_train_c, X_val_c) = (input_val[train_index], input_val[test_index]) (y_train_c, y_val_c) = (y_val[train_index], y_val[test_index]) t1 = time.time() model = fn(**fn_kwargs) model.fit(X_train_c, y_train_c) print('Model trained:', (time.time() - t1)) probs_holdout = model.predict_proba(X_val_c) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate_rip(probs_holdout, y_val_c, verbose=False) kf_results.append([error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) models.append(model) return (models, np.mean(kf_results, axis=0))<|docstring|>K-fold task, mean and std of results are calculated over K folds Params: input_val: (np.array) 2-D array holding instances (features) of validation set. y_val: (np.array) 1-D array holding y-values for validation set. fn: (class) a method used for calibration l2: (float) L2 regulariation value. k_folds: (int) how many crossvalidation folds are used. comp_l2: (bool) use reversed L2 matrix for regulariation (default = False) returns: mean_error, mean_ece, mean_mce, mean_loss, mean_brier, std_loss, std_brier<|endoftext|>
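An illustrative call with placeholder data; the Dirichlet_NN keyword arguments are borrowed from tune_dir_nn_heather below, and the array shapes are assumptions:

import numpy as np
X_val = np.random.rand(500, 4)        # placeholder class probabilities for 4 classes
y_val = np.random.randint(0, 4, 500)  # placeholder labels
models, mean_scores = kf_model(X_val, y_val, Dirichlet_NN, fn_kwargs={'l2': 0.001, 'mu': 0.001, 'patience': 15}, k_folds=5)
# mean_scores orders the metrics as [error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]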
2572e78addd375929753c5e2966e751571e89b2daeebcaa39fdd37dbbf36a93a
def one_model(input_val, y_val, fn, fn_kwargs={}, k_folds=1, random_state=15, verbose=False): '\n 1-fold task, mean and std of results are calculated over 1 folds\n \n Params: \n input_val: (np.array) 2-D array holding instances (features) of validation set.\n y_val: (np.array) 1-D array holding y-values for validation set.\n fn: (class) a method used for calibration\n l2: (float) L2 regulariation value.\n k_folds: (int) how many crossvalidation folds are used.\n comp_l2: (bool) use reversed L2 matrix for regulariation (default = False)\n \n returns: \n mean_error, mean_ece, mean_mce, mean_loss, mean_brier, std_loss, std_brier\n ' kf_results = [] models = [] t1 = time.time() model = fn(**fn_kwargs) model.fit(input_val, y_val) print('Model trained:', (time.time() - t1)) probs_holdout = model.predict_proba(input_val) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate_rip(probs_holdout, y_val, verbose=False) kf_results.append([error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) models.append(model) return (models, (np.mean(kf_results, axis=0), np.std(np.array(kf_results)[(:, (- 2):)], axis=0)))
1-fold task, mean and std of results are calculated over 1 folds Params: input_val: (np.array) 2-D array holding instances (features) of validation set. y_val: (np.array) 1-D array holding y-values for validation set. fn: (class) a method used for calibration l2: (float) L2 regulariation value. k_folds: (int) how many crossvalidation folds are used. comp_l2: (bool) use reversed L2 matrix for regulariation (default = False) returns: mean_error, mean_ece, mean_mce, mean_loss, mean_brier, std_loss, std_brier
Confidence_Calibration/calibration/calibration_functions.py
one_model
heatherwan/Automatic-Validation-of-Simulation-Results
0
python
def one_model(input_val, y_val, fn, fn_kwargs={}, k_folds=1, random_state=15, verbose=False): '\n 1-fold task, mean and std of results are calculated over 1 folds\n \n Params: \n input_val: (np.array) 2-D array holding instances (features) of validation set.\n y_val: (np.array) 1-D array holding y-values for validation set.\n fn: (class) a method used for calibration\n l2: (float) L2 regulariation value.\n k_folds: (int) how many crossvalidation folds are used.\n comp_l2: (bool) use reversed L2 matrix for regulariation (default = False)\n \n returns: \n mean_error, mean_ece, mean_mce, mean_loss, mean_brier, std_loss, std_brier\n ' kf_results = [] models = [] t1 = time.time() model = fn(**fn_kwargs) model.fit(input_val, y_val) print('Model trained:', (time.time() - t1)) probs_holdout = model.predict_proba(input_val) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate_rip(probs_holdout, y_val, verbose=False) kf_results.append([error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) models.append(model) return (models, (np.mean(kf_results, axis=0), np.std(np.array(kf_results)[(:, (- 2):)], axis=0)))
def one_model(input_val, y_val, fn, fn_kwargs={}, k_folds=1, random_state=15, verbose=False): '\n 1-fold task, mean and std of results are calculated over 1 folds\n \n Params: \n input_val: (np.array) 2-D array holding instances (features) of validation set.\n y_val: (np.array) 1-D array holding y-values for validation set.\n fn: (class) a method used for calibration\n l2: (float) L2 regulariation value.\n k_folds: (int) how many crossvalidation folds are used.\n comp_l2: (bool) use reversed L2 matrix for regulariation (default = False)\n \n returns: \n mean_error, mean_ece, mean_mce, mean_loss, mean_brier, std_loss, std_brier\n ' kf_results = [] models = [] t1 = time.time() model = fn(**fn_kwargs) model.fit(input_val, y_val) print('Model trained:', (time.time() - t1)) probs_holdout = model.predict_proba(input_val) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate_rip(probs_holdout, y_val, verbose=False) kf_results.append([error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) models.append(model) return (models, (np.mean(kf_results, axis=0), np.std(np.array(kf_results)[(:, (- 2):)], axis=0)))<|docstring|>1-fold task, mean and std of results are calculated over 1 folds Params: input_val: (np.array) 2-D array holding instances (features) of validation set. y_val: (np.array) 1-D array holding y-values for validation set. fn: (class) a method used for calibration l2: (float) L2 regulariation value. k_folds: (int) how many crossvalidation folds are used. comp_l2: (bool) use reversed L2 matrix for regulariation (default = False) returns: mean_error, mean_ece, mean_mce, mean_loss, mean_brier, std_loss, std_brier<|endoftext|>
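Unlike kf_model, this variant fits a single model on the full set and returns (models, (means, stds)); a hedged call sketch reusing the placeholder arrays above:

models, (mean_scores, tail_std) = one_model(X_val, y_val, Dirichlet_NN, fn_kwargs={'l2': 0.001, 'mu': 0.001})
# tail_std covers only the last two metrics (loss and Brier), per the [:, -2:] slice in the body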
ce197edcaa4ec167a7d9b18ad3cab89db9f1393a0a65c267483918b30bf3651d
def tune_dir_nn_heather(name, method, files, lambdas, mus, k_folds=5, random_state=15, verbose=True, double_learning=False, model_dir='models_dump', loss_fn='sparse_categorical_crossentropy', comp_l2=True, use_logits=False, use_scipy=False): '\n\n Params:\n fn (class): class of the calibration method used. It must contain methods "fit" and "predict",\n where first fits the models and second outputs calibrated probabilities.\n path (string): path to the folder with logits files\n files (list of strings): pickled logits files ((logits_val, y_val), (logits_test, y_test))\n comp_l2 (bool): use reversed L2 matrix for regulariation (default = False)\n\n Returns:\n df (pandas.DataFrame): dataframe with calibrated and uncalibrated results for all the input files.\n\n ' df_columns = ['Name', 'L2', 'mu', 'Error', 'ECE', 'ECE2', 'ECE_CW', 'ECE_CW2', 'ECE_FULL', 'ECE_FULL2', 'MCE', 'MCE2', 'Loss', 'Brier'] results = [] results2 = [] if (not os.path.exists(model_dir)): os.makedirs(model_dir) t1 = time.time() val_df = pd.read_csv(files[0], sep='\t', index_col=False) test_df = pd.read_csv(files[1], sep='\t', index_col=False) logits_val = val_df.iloc[(:, 3:)].to_numpy() y_val = val_df.iloc[(:, 1:2)].to_numpy().ravel() logits_test = test_df.iloc[(:, 3:)].to_numpy() y_test = test_df.iloc[(:, 1:2)].to_numpy().ravel() if use_logits: input_val = logits_val input_test = logits_test else: input_val = softmax(logits_val) input_test = softmax(logits_test) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate_rip(softmax(logits_val), y_val, verbose=False) print(('Uncal Val: Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f; brier %f' % (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier))) results.append([(name + 'val_uncal'), error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate_rip(softmax(logits_test), y_test, verbose=False) print(('Uncal Test: Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f; brier %f' % (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier))) results.append([(name + 'test_uncal'), error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) for l2 in lambdas: for mu in mus: if (mu is None): mu = l2 if use_scipy: temp_res = kf_model(input_val, y_val, LogisticCalibration, {'C': np.true_divide(1, l2)}, k_folds=k_folds, random_state=random_state, verbose=verbose) elif (k_folds > 1): temp_res = kf_model(input_val, y_val, Dirichlet_NN, {'l2': l2, 'mu': mu, 'patience': 15, 'loss': loss_fn, 'double_fit': double_learning, 'comp': comp_l2, 'use_logits': use_logits}, k_folds=k_folds, random_state=random_state, verbose=verbose) else: temp_res = one_model(input_val, y_val, Dirichlet_NN, {'l2': l2, 'mu': mu, 'patience': 15, 'loss': loss_fn, 'double_fit': double_learning, 'comp': comp_l2, 'use_logits': use_logits}, k_folds=k_folds, random_state=random_state, verbose=verbose) (models, (avg_error, avg_ece, avg_ece2, avg_ece_cw, avg_ece_cw2, avg_ece_full, avg_ece_full2, avg_mce, avg_mce2, avg_loss, avg_brier)) = temp_res results.append([(name + 'val_cal'), l2, mu, avg_error, avg_ece, avg_ece2, avg_ece_cw, avg_ece_cw2, avg_ece_full, avg_ece_full2, avg_mce, avg_mce2, avg_loss, avg_brier]) fname = f'model_{method}_{name}_l2={l2}_mu={mu}.p' model_weights = [] for mod in models: if (not use_scipy): model_weights.append(mod.model.get_weights()) else: model_weights.append([mod.coef_, mod.intercept_]) with open(join(model_dir, fname), 'wb') as f: pickle.dump((model_weights, temp_res[1], (name, l2, mu)), f) print(f'L2 = {l2}, Mu= {mu}, Validation Error {avg_error}; ece {avg_ece}; ece2 {avg_ece2}; ece_cw {avg_ece_cw}; ece_cw2 {avg_ece_cw2}; ece_full {avg_ece_full}; ece_full2 {avg_ece_full2}; mce {avg_mce}; mce2 {avg_mce2}; loss {avg_loss}; brier {avg_brier}') with open(f'result/{name}_{method}_val_{l2}_{mu}.txt', 'wb') as f: np.savetxt(f, input_val) np.savetxt(f, get_cal_prob(models, input_val)) with open(f'result/{name}_{method}_test_{l2}_{mu}.txt', 'wb') as f2: np.savetxt(f2, input_test) np.savetxt(f2, get_cal_prob(models, input_test)) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = get_test_scores(models, input_test, y_test) print(f'L2 = {l2}, Mu= {mu}, Test Error {error}; ece {ece}; ece2 {ece2}; ece_cw {ece_cw}; ece_cw2 {ece_cw2}; ece_full {ece_full}; ece_full2 {ece_full2}; mce {mce}; mce2 {mce2}; loss {loss}; brier {brier}') results.append([(name + '_cal_test'), l2, mu, error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) print('Ensembled results:') (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = get_test_scores2(models, input_test, y_test) print(f'L2 = {l2}, Mu= {mu}, Test Error {error}; ece {ece}; ece2 {ece2}; ece_cw {ece_cw}; ece_cw2 {ece_cw2}; ece_full {ece_full}; ece_full2 {ece_full2}; mce {mce}; mce2 {mce2}; loss {loss}; brier {brier}') results2.append([name, l2, mu, error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) K.clear_session() for mod in models: del mod del models del temp_res K.clear_session() gc.collect() t2 = time.time() print('Time taken:', (t2 - t1), '\n') df = pd.DataFrame(results, columns=df_columns) df2 = pd.DataFrame(results2, columns=df_columns) return (df, df2)
Params: fn (class): class of the calibration method used. It must contain methods "fit" and "predict", where first fits the models and second outputs calibrated probabilities. path (string): path to the folder with logits files files (list of strings): pickled logits files ((logits_val, y_val), (logits_test, y_test)) comp_l2 (bool): use reversed L2 matrix for regulariation (default = False) Returns: df (pandas.DataFrame): dataframe with calibrated and uncalibrated results for all the input files.
Confidence_Calibration/calibration/calibration_functions.py
tune_dir_nn_heather
heatherwan/Automatic-Validation-of-Simulation-Results
0
python
def tune_dir_nn_heather(name, method, files, lambdas, mus, k_folds=5, random_state=15, verbose=True, double_learning=False, model_dir='models_dump', loss_fn='sparse_categorical_crossentropy', comp_l2=True, use_logits=False, use_scipy=False): '\n\n Params:\n fn (class): class of the calibration method used. It must contain methods "fit" and "predict",\n where first fits the models and second outputs calibrated probabilities.\n path (string): path to the folder with logits files\n files (list of strings): pickled logits files ((logits_val, y_val), (logits_test, y_test))\n comp_l2 (bool): use reversed L2 matrix for regulariation (default = False)\n\n Returns:\n df (pandas.DataFrame): dataframe with calibrated and uncalibrated results for all the input files.\n\n ' df_columns = ['Name', 'L2', 'mu', 'Error', 'ECE', 'ECE2', 'ECE_CW', 'ECE_CW2', 'ECE_FULL', 'ECE_FULL2', 'MCE', 'MCE2', 'Loss', 'Brier'] results = [] results2 = [] if (not os.path.exists(model_dir)): os.makedirs(model_dir) t1 = time.time() val_df = pd.read_csv(files[0], sep='\t', index_col=False) test_df = pd.read_csv(files[1], sep='\t', index_col=False) logits_val = val_df.iloc[(:, 3:)].to_numpy() y_val = val_df.iloc[(:, 1:2)].to_numpy().ravel() logits_test = test_df.iloc[(:, 3:)].to_numpy() y_test = test_df.iloc[(:, 1:2)].to_numpy().ravel() if use_logits: input_val = logits_val input_test = logits_test else: input_val = softmax(logits_val) input_test = softmax(logits_test) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate_rip(softmax(logits_val), y_val, verbose=False) print(('Uncal Val: Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f; brier %f' % (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier))) results.append([(name + 'val_uncal'), error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate_rip(softmax(logits_test), y_test, verbose=False) print(('Uncal Test: Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f; brier %f' % (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier))) results.append([(name + 'test_uncal'), error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) for l2 in lambdas: for mu in mus: if (mu is None): mu = l2 if use_scipy: temp_res = kf_model(input_val, y_val, LogisticCalibration, {'C': np.true_divide(1, l2)}, k_folds=k_folds, random_state=random_state, verbose=verbose) elif (k_folds > 1): temp_res = kf_model(input_val, y_val, Dirichlet_NN, {'l2': l2, 'mu': mu, 'patience': 15, 'loss': loss_fn, 'double_fit': double_learning, 'comp': comp_l2, 'use_logits': use_logits}, k_folds=k_folds, random_state=random_state, verbose=verbose) else: temp_res = one_model(input_val, y_val, Dirichlet_NN, {'l2': l2, 'mu': mu, 'patience': 15, 'loss': loss_fn, 'double_fit': double_learning, 'comp': comp_l2, 'use_logits': use_logits}, k_folds=k_folds, random_state=random_state, verbose=verbose) (models, (avg_error, avg_ece, avg_ece2, avg_ece_cw, avg_ece_cw2, avg_ece_full, avg_ece_full2, avg_mce, avg_mce2, avg_loss, avg_brier)) = temp_res results.append([(name + 'val_cal'), l2, mu, avg_error, avg_ece, avg_ece2, avg_ece_cw, avg_ece_cw2, avg_ece_full, avg_ece_full2, avg_mce, avg_mce2, avg_loss, avg_brier]) fname = f'model_{method}_{name}_l2={l2}_mu={mu}.p' model_weights = [] for mod in models: if (not use_scipy): model_weights.append(mod.model.get_weights()) else: model_weights.append([mod.coef_, mod.intercept_]) with open(join(model_dir, fname), 'wb') as f: pickle.dump((model_weights, temp_res[1], (name, l2, mu)), f) print(f'L2 = {l2}, Mu= {mu}, Validation Error {avg_error}; ece {avg_ece}; ece2 {avg_ece2}; ece_cw {avg_ece_cw}; ece_cw2 {avg_ece_cw2}; ece_full {avg_ece_full}; ece_full2 {avg_ece_full2}; mce {avg_mce}; mce2 {avg_mce2}; loss {avg_loss}; brier {avg_brier}') with open(f'result/{name}_{method}_val_{l2}_{mu}.txt', 'wb') as f: np.savetxt(f, input_val) np.savetxt(f, get_cal_prob(models, input_val)) with open(f'result/{name}_{method}_test_{l2}_{mu}.txt', 'wb') as f2: np.savetxt(f2, input_test) np.savetxt(f2, get_cal_prob(models, input_test)) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = get_test_scores(models, input_test, y_test) print(f'L2 = {l2}, Mu= {mu}, Test Error {error}; ece {ece}; ece2 {ece2}; ece_cw {ece_cw}; ece_cw2 {ece_cw2}; ece_full {ece_full}; ece_full2 {ece_full2}; mce {mce}; mce2 {mce2}; loss {loss}; brier {brier}') results.append([(name + '_cal_test'), l2, mu, error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) print('Ensembled results:') (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = get_test_scores2(models, input_test, y_test) print(f'L2 = {l2}, Mu= {mu}, Test Error {error}; ece {ece}; ece2 {ece2}; ece_cw {ece_cw}; ece_cw2 {ece_cw2}; ece_full {ece_full}; ece_full2 {ece_full2}; mce {mce}; mce2 {mce2}; loss {loss}; brier {brier}') results2.append([name, l2, mu, error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) K.clear_session() for mod in models: del mod del models del temp_res K.clear_session() gc.collect() t2 = time.time() print('Time taken:', (t2 - t1), '\n') df = pd.DataFrame(results, columns=df_columns) df2 = pd.DataFrame(results2, columns=df_columns) return (df, df2)
def tune_dir_nn_heather(name, method, files, lambdas, mus, k_folds=5, random_state=15, verbose=True, double_learning=False, model_dir='models_dump', loss_fn='sparse_categorical_crossentropy', comp_l2=True, use_logits=False, use_scipy=False): '\n\n Params:\n fn (class): class of the calibration method used. It must contain methods "fit" and "predict",\n where first fits the models and second outputs calibrated probabilities.\n path (string): path to the folder with logits files\n files (list of strings): pickled logits files ((logits_val, y_val), (logits_test, y_test))\n comp_l2 (bool): use reversed L2 matrix for regulariation (default = False)\n\n Returns:\n df (pandas.DataFrame): dataframe with calibrated and uncalibrated results for all the input files.\n\n ' df_columns = ['Name', 'L2', 'mu', 'Error', 'ECE', 'ECE2', 'ECE_CW', 'ECE_CW2', 'ECE_FULL', 'ECE_FULL2', 'MCE', 'MCE2', 'Loss', 'Brier'] results = [] results2 = [] if (not os.path.exists(model_dir)): os.makedirs(model_dir) t1 = time.time() val_df = pd.read_csv(files[0], sep='\t', index_col=False) test_df = pd.read_csv(files[1], sep='\t', index_col=False) logits_val = val_df.iloc[(:, 3:)].to_numpy() y_val = val_df.iloc[(:, 1:2)].to_numpy().ravel() logits_test = test_df.iloc[(:, 3:)].to_numpy() y_test = test_df.iloc[(:, 1:2)].to_numpy().ravel() if use_logits: input_val = logits_val input_test = logits_test else: input_val = softmax(logits_val) input_test = softmax(logits_test) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate_rip(softmax(logits_val), y_val, verbose=False) print(('Uncal Val: Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f; brier %f' % (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier))) results.append([(name + 'val_uncal'), error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate_rip(softmax(logits_test), y_test, verbose=False) print(('Uncal Test: Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f; brier %f' % (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier))) results.append([(name + 'test_uncal'), error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) for l2 in lambdas: for mu in mus: if (mu is None): mu = l2 if use_scipy: temp_res = kf_model(input_val, y_val, LogisticCalibration, {'C': np.true_divide(1, l2)}, k_folds=k_folds, random_state=random_state, verbose=verbose) elif (k_folds > 1): temp_res = kf_model(input_val, y_val, Dirichlet_NN, {'l2': l2, 'mu': mu, 'patience': 15, 'loss': loss_fn, 'double_fit': double_learning, 'comp': comp_l2, 'use_logits': use_logits}, k_folds=k_folds, random_state=random_state, verbose=verbose) else: temp_res = one_model(input_val, y_val, Dirichlet_NN, {'l2': l2, 'mu': mu, 'patience': 15, 'loss': loss_fn, 'double_fit': double_learning, 'comp': comp_l2, 'use_logits': use_logits}, k_folds=k_folds, random_state=random_state, verbose=verbose) (models, (avg_error, avg_ece, avg_ece2, avg_ece_cw, avg_ece_cw2, avg_ece_full, avg_ece_full2, avg_mce, avg_mce2, avg_loss, avg_brier)) = temp_res results.append([(name + 'val_cal'), l2, mu, avg_error, avg_ece, avg_ece2, avg_ece_cw, avg_ece_cw2, avg_ece_full, avg_ece_full2, avg_mce, avg_mce2, avg_loss, avg_brier]) fname = f'model_{method}_{name}_l2={l2}_mu={mu}.p' model_weights = [] for mod in models: if (not use_scipy): model_weights.append(mod.model.get_weights()) else: model_weights.append([mod.coef_, mod.intercept_]) with open(join(model_dir, fname), 'wb') as f: pickle.dump((model_weights, temp_res[1], (name, l2, mu)), f) print(f'L2 = {l2}, Mu= {mu}, Validation Error {avg_error}; ece {avg_ece}; ece2 {avg_ece2}; ece_cw {avg_ece_cw}; ece_cw2 {avg_ece_cw2}; ece_full {avg_ece_full}; ece_full2 {avg_ece_full2}; mce {avg_mce}; mce2 {avg_mce2}; loss {avg_loss}; brier {avg_brier}') with open(f'result/{name}_{method}_val_{l2}_{mu}.txt', 'wb') as f: np.savetxt(f, input_val) np.savetxt(f, get_cal_prob(models, input_val)) with open(f'result/{name}_{method}_test_{l2}_{mu}.txt', 'wb') as f2: np.savetxt(f2, input_test) np.savetxt(f2, get_cal_prob(models, input_test)) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = get_test_scores(models, input_test, y_test) print(f'L2 = {l2}, Mu= {mu}, Test Error {error}; ece {ece}; ece2 {ece2}; ece_cw {ece_cw}; ece_cw2 {ece_cw2}; ece_full {ece_full}; ece_full2 {ece_full2}; mce {mce}; mce2 {mce2}; loss {loss}; brier {brier}') results.append([(name + '_cal_test'), l2, mu, error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) print('Ensembled results:') (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = get_test_scores2(models, input_test, y_test) print(f'L2 = {l2}, Mu= {mu}, Test Error {error}; ece {ece}; ece2 {ece2}; ece_cw {ece_cw}; ece_cw2 {ece_cw2}; ece_full {ece_full}; ece_full2 {ece_full2}; mce {mce}; mce2 {mce2}; loss {loss}; brier {brier}') results2.append([name, l2, mu, error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier]) K.clear_session() for mod in models: del mod del models del temp_res K.clear_session() gc.collect() t2 = time.time() print('Time taken:', (t2 - t1), '\n') df = pd.DataFrame(results, columns=df_columns) df2 = pd.DataFrame(results2, columns=df_columns) return (df, df2)<|docstring|>Params:
fn (class): class of the calibration method used. It must contain methods "fit" and "predict",
where first fits the models and second outputs calibrated probabilities.
path (string): path to the folder with logits files
files (list of strings): pickled logits files ((logits_val, y_val), (logits_test, y_test))
comp_l2 (bool): use reversed L2 matrix for regulariation (default = False)

Returns:
df (pandas.DataFrame): dataframe with calibrated and uncalibrated results for all the input files.<|endoftext|>
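A hedged invocation sketch; the TSV file names are placeholders, and the expected layout (tab-separated, label in the second column, logits from the fourth column on) follows the pd.read_csv and iloc calls in the body:

df, df_ensemble = tune_dir_nn_heather(name='run1', method='dir_nn', files=['val_logits.tsv', 'test_logits.tsv'], lambdas=[0.001, 0.01], mus=[None], k_folds=5, use_logits=False)
# mus=[None] makes mu default to the current l2, as handled at the top of the grid loop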
9b84a0d3a9c15c24d153c6c1127bca00f8a4a16ed2b12a1fb5c5a3ef8329f219
def cal_TS_results(name, method, files, m_kwargs={}, approach='all'): '\n Calibrate models scores, using output from logits files and given function (fn).\n There are implemented to different approaches "all" and "1-vs-K" for calibration,\n the approach of calibration should match with function used for calibration.\n\n Params:\n fn (class): class of the calibration method used. It must contain methods "fit" and "predict",\n where first fits the models and second outputs calibrated probabilities.\n path (string): path to the folder with logits files\n files (list of strings): pickled logits files ((logits_val, y_val), (logits_test, y_test))\n m_kwargs (dictionary): keyword arguments for the calibration class initialization\n approach (string): "all" for multiclass calibration and "1-vs-K" for 1-vs-K approach.\n input (string): "probabilities" or "logits", specific to calibration method\n\n Returns:\n df (pandas.DataFrame): dataframe with calibrated and uncalibrated results for all the input files.\n\n ' df = pd.DataFrame(columns=['Name', 'Error', 'ECE', 'ECE2', 'ECE_CW', 'ECE_CW2', 'ECE_FULL', 'ECE_FULL2', 'MCE', 'MCE2', 'Loss', 'Brier']) t1 = time.time() val_df = pd.read_csv(files[0], sep='\t', index_col=False) test_df = pd.read_csv(files[1], sep='\t', index_col=False) logits_val = val_df.iloc[(:, 3:)].to_numpy() y_val = val_df.iloc[(:, 1:2)].to_numpy() logits_test = test_df.iloc[(:, 3:)].to_numpy() y_test = test_df.iloc[(:, 1:2)].to_numpy() input_val = logits_val input_test = logits_test if (approach == 'all'): y_val_flat = y_val.flatten() model = TemperatureScaling(**m_kwargs) opt = model.fit(input_val, y_val_flat) print(f'the optimal temperature is {opt.x[0]}') file1 = open(f'model_weights/model_TS_{name}.txt', 'w') file1.write(str(opt.x[0])) probs_val = model.predict(input_val) probs_test = model.predict(input_test) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate(softmax(logits_val), y_val, verbose=False) (error1, ece1, ece1_2, ece_cw1_1, ece_cw1_2, ece_full1_1, ece_full1_2, mce1_1, mce1_2, loss1, brier1) = evaluate(softmax(logits_test), y_test, verbose=False) (error2, ece2_1, ece2_2, ece_cw2_1, ece_cw2_2, ece_full2_1, ece_full2_2, mce2_1, mce2_2, loss2, brier2) = evaluate(probs_test, y_test, verbose=False) (error3, ece3_1, ece3_2, ece_cw3_1, ece_cw3_2, ece_full3_1, ece_full3_2, mce3_1, mce3_2, loss3, brier3) = evaluate(probs_val, y_val, verbose=False) print(('Uncal Valid Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier))) print(('Uncal Test Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error1, ece1, ece1_2, ece_cw1_1, ece_cw1_2, ece_full1_1, ece_full1_2, mce1_1, mce1_2, loss1, brier1))) print(('Test Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error2, ece2_1, ece2_2, ece_cw2_1, ece_cw2_2, ece_full2_1, ece_full2_2, mce2_1, mce2_2, loss2, brier2))) print(('Validation Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error3, ece3_1, ece3_2, ece_cw3_1, ece_cw3_2, ece_full3_1, ece_full3_2, mce3_1, mce3_2, loss3, brier3))) else: K = input_test.shape[1] probs_val = np.zeros_like(input_val) probs_test = np.zeros_like(input_test) for k in range(K): y_cal = np.array((y_val == k), dtype='int')[(:, 0)] model = TemperatureScaling(**m_kwargs) model.fit(input_val[(:, k)], y_cal) probs_val[(:, k)] = model.predict(input_val[(:, k)]) probs_test[(:, k)] = model.predict(input_test[(:, k)]) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate(softmax(logits_val), y_val, verbose=False) (error1, ece1, ece1_2, ece_cw1_1, ece_cw1_2, ece_full1_1, ece_full1_2, mce1_1, mce1_2, loss1, brier1) = evaluate(softmax(logits_test), y_test, verbose=False) (error2, ece2_1, ece2_2, ece_cw2_1, ece_cw2_2, ece_full2_1, ece_full2_2, mce2_1, mce2_2, loss2, brier2) = evaluate(probs_test, y_test, verbose=False) (error3, ece3_1, ece3_2, ece_cw3_1, ece_cw3_2, ece_full3_1, ece_full3_2, mce3_1, mce3_2, loss3, brier3) = evaluate(probs_val, y_val, verbose=False) print(('Uncal Valid Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier))) print(('Uncal Test Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error1, ece1, ece1_2, ece_cw1_1, ece_cw1_2, ece_full1_1, ece_full1_2, mce1_1, mce1_2, loss1, brier1))) print(('Test Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error2, ece2_1, ece2_2, ece_cw2_1, ece_cw2_2, ece_full2_1, ece_full2_2, mce2_1, mce2_2, loss2, brier2))) print(('Validation Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error3, ece3_1, ece3_2, ece_cw3_1, ece_cw3_2, ece_full3_1, ece_full3_2, mce3_1, mce3_2, loss3, brier3))) with open(f'result/{name}_{method}_val.txt', 'wb') as f: np.savetxt(f, softmax(logits_val)) np.savetxt(f, probs_val) with open(f'result/{name}_{method}_test.txt', 'wb') as f2: np.savetxt(f2, softmax(logits_test)) np.savetxt(f2, probs_test) df.loc[0] = [(name + '_val_uncalib'), error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier] df.loc[1] = [(name + '_test_uncalib'), error1, ece1, ece1_2, ece_cw1_1, ece_cw1_2, ece_full1_1, ece_full1_2, mce1_1, mce1_2, loss1, brier1] df.loc[2] = [(name + '_test_calib'), error2, ece2_1, ece2_2, ece_cw2_1, ece_cw2_2, ece_full2_1, ece_full2_2, mce2_1, mce2_2, loss2, brier2] df.loc[3] = [(name + '_val_calib'), error3, ece3_1, ece3_2, ece_cw3_1, ece_cw3_2, ece_full3_1, ece_full3_2, mce3_1, mce3_2, loss3, brier3] t2 = time.time() print('Time taken:', (t2 - t1), '\n') return df
Calibrate models scores, using output from logits files and given function (fn). There are implemented to different approaches "all" and "1-vs-K" for calibration, the approach of calibration should match with function used for calibration. Params: fn (class): class of the calibration method used. It must contain methods "fit" and "predict", where first fits the models and second outputs calibrated probabilities. path (string): path to the folder with logits files files (list of strings): pickled logits files ((logits_val, y_val), (logits_test, y_test)) m_kwargs (dictionary): keyword arguments for the calibration class initialization approach (string): "all" for multiclass calibration and "1-vs-K" for 1-vs-K approach. input (string): "probabilities" or "logits", specific to calibration method Returns: df (pandas.DataFrame): dataframe with calibrated and uncalibrated results for all the input files.
Confidence_Calibration/calibration/calibration_functions.py
cal_TS_results
heatherwan/Automatic-Validation-of-Simulation-Results
0
python
def cal_TS_results(name, method, files, m_kwargs={}, approach='all'): '\n Calibrate models scores, using output from logits files and given function (fn).\n There are implemented to different approaches "all" and "1-vs-K" for calibration,\n the approach of calibration should match with function used for calibration.\n\n Params:\n fn (class): class of the calibration method used. It must contain methods "fit" and "predict",\n where first fits the models and second outputs calibrated probabilities.\n path (string): path to the folder with logits files\n files (list of strings): pickled logits files ((logits_val, y_val), (logits_test, y_test))\n m_kwargs (dictionary): keyword arguments for the calibration class initialization\n approach (string): "all" for multiclass calibration and "1-vs-K" for 1-vs-K approach.\n input (string): "probabilities" or "logits", specific to calibration method\n\n Returns:\n df (pandas.DataFrame): dataframe with calibrated and uncalibrated results for all the input files.\n\n ' df = pd.DataFrame(columns=['Name', 'Error', 'ECE', 'ECE2', 'ECE_CW', 'ECE_CW2', 'ECE_FULL', 'ECE_FULL2', 'MCE', 'MCE2', 'Loss', 'Brier']) t1 = time.time() val_df = pd.read_csv(files[0], sep='\t', index_col=False) test_df = pd.read_csv(files[1], sep='\t', index_col=False) logits_val = val_df.iloc[(:, 3:)].to_numpy() y_val = val_df.iloc[(:, 1:2)].to_numpy() logits_test = test_df.iloc[(:, 3:)].to_numpy() y_test = test_df.iloc[(:, 1:2)].to_numpy() input_val = logits_val input_test = logits_test if (approach == 'all'): y_val_flat = y_val.flatten() model = TemperatureScaling(**m_kwargs) opt = model.fit(input_val, y_val_flat) print(f'the optimal temperature is {opt.x[0]}') file1 = open(f'model_weights/model_TS_{name}.txt', 'w') file1.write(str(opt.x[0])) probs_val = model.predict(input_val) probs_test = model.predict(input_test) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate(softmax(logits_val), y_val, verbose=False) (error1, ece1, ece1_2, ece_cw1_1, ece_cw1_2, ece_full1_1, ece_full1_2, mce1_1, mce1_2, loss1, brier1) = evaluate(softmax(logits_test), y_test, verbose=False) (error2, ece2_1, ece2_2, ece_cw2_1, ece_cw2_2, ece_full2_1, ece_full2_2, mce2_1, mce2_2, loss2, brier2) = evaluate(probs_test, y_test, verbose=False) (error3, ece3_1, ece3_2, ece_cw3_1, ece_cw3_2, ece_full3_1, ece_full3_2, mce3_1, mce3_2, loss3, brier3) = evaluate(probs_val, y_val, verbose=False) print(('Uncal Valid Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier))) print(('Uncal Test Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error1, ece1, ece1_2, ece_cw1_1, ece_cw1_2, ece_full1_1, ece_full1_2, mce1_1, mce1_2, loss1, brier1))) print(('Test Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error2, ece2_1, ece2_2, ece_cw2_1, ece_cw2_2, ece_full2_1, ece_full2_2, mce2_1, mce2_2, loss2, brier2))) print(('Validation Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error3, ece3_1, ece3_2, ece_cw3_1, ece_cw3_2, ece_full3_1, ece_full3_2, mce3_1, mce3_2, loss3, brier3))) else: K = input_test.shape[1] probs_val = np.zeros_like(input_val) probs_test = np.zeros_like(input_test) for k in range(K): y_cal = np.array((y_val == k), dtype='int')[(:, 0)] model = TemperatureScaling(**m_kwargs) model.fit(input_val[(:, k)], y_cal) probs_val[(:, k)] = model.predict(input_val[(:, k)]) probs_test[(:, k)] = model.predict(input_test[(:, k)]) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate(softmax(logits_val), y_val, verbose=False) (error1, ece1, ece1_2, ece_cw1_1, ece_cw1_2, ece_full1_1, ece_full1_2, mce1_1, mce1_2, loss1, brier1) = evaluate(softmax(logits_test), y_test, verbose=False) (error2, ece2_1, ece2_2, ece_cw2_1, ece_cw2_2, ece_full2_1, ece_full2_2, mce2_1, mce2_2, loss2, brier2) = evaluate(probs_test, y_test, verbose=False) (error3, ece3_1, ece3_2, ece_cw3_1, ece_cw3_2, ece_full3_1, ece_full3_2, mce3_1, mce3_2, loss3, brier3) = evaluate(probs_val, y_val, verbose=False) print(('Uncal Valid Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier))) print(('Uncal Test Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error1, ece1, ece1_2, ece_cw1_1, ece_cw1_2, ece_full1_1, ece_full1_2, mce1_1, mce1_2, loss1, brier1))) print(('Test Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error2, ece2_1, ece2_2, ece_cw2_1, ece_cw2_2, ece_full2_1, ece_full2_2, mce2_1, mce2_2, loss2, brier2))) print(('Validation Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error3, ece3_1, ece3_2, ece_cw3_1, ece_cw3_2, ece_full3_1, ece_full3_2, mce3_1, mce3_2, loss3, brier3))) with open(f'result/{name}_{method}_val.txt', 'wb') as f: np.savetxt(f, softmax(logits_val)) np.savetxt(f, probs_val) with open(f'result/{name}_{method}_test.txt', 'wb') as f2: np.savetxt(f2, softmax(logits_test)) np.savetxt(f2, probs_test) df.loc[0] = [(name + '_val_uncalib'), error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier] df.loc[1] = [(name + '_test_uncalib'), error1, ece1, ece1_2, ece_cw1_1, ece_cw1_2, ece_full1_1, ece_full1_2, mce1_1, mce1_2, loss1, brier1] df.loc[2] = [(name + '_test_calib'), error2, ece2_1, ece2_2, ece_cw2_1, ece_cw2_2, ece_full2_1, ece_full2_2, mce2_1, mce2_2, loss2, brier2] df.loc[3] = [(name + '_val_calib'), error3, ece3_1, ece3_2, ece_cw3_1, ece_cw3_2, ece_full3_1, ece_full3_2, mce3_1, mce3_2, loss3, brier3] t2 = time.time() print('Time taken:', (t2 - t1), '\n') return df
def cal_TS_results(name, method, files, m_kwargs={}, approach='all'): '\n Calibrate models scores, using output from logits files and given function (fn).\n There are implemented to different approaches "all" and "1-vs-K" for calibration,\n the approach of calibration should match with function used for calibration.\n\n Params:\n fn (class): class of the calibration method used. It must contain methods "fit" and "predict",\n where first fits the models and second outputs calibrated probabilities.\n path (string): path to the folder with logits files\n files (list of strings): pickled logits files ((logits_val, y_val), (logits_test, y_test))\n m_kwargs (dictionary): keyword arguments for the calibration class initialization\n approach (string): "all" for multiclass calibration and "1-vs-K" for 1-vs-K approach.\n input (string): "probabilities" or "logits", specific to calibration method\n\n Returns:\n df (pandas.DataFrame): dataframe with calibrated and uncalibrated results for all the input files.\n\n ' df = pd.DataFrame(columns=['Name', 'Error', 'ECE', 'ECE2', 'ECE_CW', 'ECE_CW2', 'ECE_FULL', 'ECE_FULL2', 'MCE', 'MCE2', 'Loss', 'Brier']) t1 = time.time() val_df = pd.read_csv(files[0], sep='\t', index_col=False) test_df = pd.read_csv(files[1], sep='\t', index_col=False) logits_val = val_df.iloc[(:, 3:)].to_numpy() y_val = val_df.iloc[(:, 1:2)].to_numpy() logits_test = test_df.iloc[(:, 3:)].to_numpy() y_test = test_df.iloc[(:, 1:2)].to_numpy() input_val = logits_val input_test = logits_test if (approach == 'all'): y_val_flat = y_val.flatten() model = TemperatureScaling(**m_kwargs) opt = model.fit(input_val, y_val_flat) print(f'the optimal temperature is {opt.x[0]}') file1 = open(f'model_weights/model_TS_{name}.txt', 'w') file1.write(str(opt.x[0])) probs_val = model.predict(input_val) probs_test = model.predict(input_test) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate(softmax(logits_val), y_val, verbose=False) (error1, ece1, ece1_2, ece_cw1_1, ece_cw1_2, ece_full1_1, ece_full1_2, mce1_1, mce1_2, loss1, brier1) = evaluate(softmax(logits_test), y_test, verbose=False) (error2, ece2_1, ece2_2, ece_cw2_1, ece_cw2_2, ece_full2_1, ece_full2_2, mce2_1, mce2_2, loss2, brier2) = evaluate(probs_test, y_test, verbose=False) (error3, ece3_1, ece3_2, ece_cw3_1, ece_cw3_2, ece_full3_1, ece_full3_2, mce3_1, mce3_2, loss3, brier3) = evaluate(probs_val, y_val, verbose=False) print(('Uncal Valid Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier))) print(('Uncal Test Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error1, ece1, ece1_2, ece_cw1_1, ece_cw1_2, ece_full1_1, ece_full1_2, mce1_1, mce1_2, loss1, brier1))) print(('Test Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error2, ece2_1, ece2_2, ece_cw2_1, ece_cw2_2, ece_full2_1, ece_full2_2, mce2_1, mce2_2, loss2, brier2))) print(('Validation Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error3, ece3_1, ece3_2, ece_cw3_1, ece_cw3_2, ece_full3_1, ece_full3_2, mce3_1, mce3_2, loss3, brier3))) else: K = input_test.shape[1] probs_val = np.zeros_like(input_val) probs_test = np.zeros_like(input_test) for k in range(K): y_cal = np.array((y_val == k), dtype='int')[(:, 0)] model = TemperatureScaling(**m_kwargs) model.fit(input_val[(:, k)], y_cal) probs_val[(:, k)] = model.predict(input_val[(:, k)]) probs_test[(:, k)] = model.predict(input_test[(:, k)]) (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier) = evaluate(softmax(logits_val), y_val, verbose=False) (error1, ece1, ece1_2, ece_cw1_1, ece_cw1_2, ece_full1_1, ece_full1_2, mce1_1, mce1_2, loss1, brier1) = evaluate(softmax(logits_test), y_test, verbose=False) (error2, ece2_1, ece2_2, ece_cw2_1, ece_cw2_2, ece_full2_1, ece_full2_2, mce2_1, mce2_2, loss2, brier2) = evaluate(probs_test, y_test, verbose=False) (error3, ece3_1, ece3_2, ece_cw3_1, ece_cw3_2, ece_full3_1, ece_full3_2, mce3_1, mce3_2, loss3, brier3) = evaluate(probs_val, y_val, verbose=False) print(('Uncal Valid Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier))) print(('Uncal Test Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error1, ece1, ece1_2, ece_cw1_1, ece_cw1_2, ece_full1_1, ece_full1_2, mce1_1, mce1_2, loss1, brier1))) print(('Test Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error2, ece2_1, ece2_2, ece_cw2_1, ece_cw2_2, ece_full2_1, ece_full2_2, mce2_1, mce2_2, loss2, brier2))) print(('Validation Error %f; ece %f; ece2 %f; ece_cw %f; ece_cw2 %f; ece_full %f; ece_full2 %f; mce %f; mce2 %f; loss %f, brier %f' % (error3, ece3_1, ece3_2, ece_cw3_1, ece_cw3_2, ece_full3_1, ece_full3_2, mce3_1, mce3_2, loss3, brier3))) with open(f'result/{name}_{method}_val.txt', 'wb') as f: np.savetxt(f, softmax(logits_val)) np.savetxt(f, probs_val) with open(f'result/{name}_{method}_test.txt', 'wb') as f2: np.savetxt(f2, softmax(logits_test)) np.savetxt(f2, probs_test) df.loc[0] = [(name + '_val_uncalib'), error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier] df.loc[1] = [(name + '_test_uncalib'), error1, ece1, ece1_2, ece_cw1_1, ece_cw1_2, ece_full1_1, ece_full1_2, mce1_1, mce1_2, loss1, brier1] df.loc[2] = [(name + '_test_calib'), error2, ece2_1, ece2_2, ece_cw2_1, ece_cw2_2, ece_full2_1, ece_full2_2, mce2_1, mce2_2, loss2, brier2] df.loc[3] = [(name + '_val_calib'), error3, ece3_1, ece3_2, ece_cw3_1, ece_cw3_2, ece_full3_1, ece_full3_2, mce3_1, mce3_2, loss3, brier3] t2 = time.time() print('Time taken:', (t2 - t1), '\n') return df<|docstring|>Calibrate models scores, using output from logits files and given function (fn).
There are implemented to different approaches "all" and "1-vs-K" for calibration,
the approach of calibration should match with function used for calibration.

Params:
fn (class): class of the calibration method used. It must contain methods "fit" and "predict",
where first fits the models and second outputs calibrated probabilities.
path (string): path to the folder with logits files
files (list of strings): pickled logits files ((logits_val, y_val), (logits_test, y_test))
m_kwargs (dictionary): keyword arguments for the calibration class initialization
approach (string): "all" for multiclass calibration and "1-vs-K" for 1-vs-K approach.
input (string): "probabilities" or "logits", specific to calibration method

Returns:
df (pandas.DataFrame): dataframe with calibrated and uncalibrated results for all the input files.<|endoftext|>
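An illustrative call for the multiclass branch; the file names are placeholders with the same TSV layout as above:

df = cal_TS_results(name='run1', method='TS', files=['val_logits.tsv', 'test_logits.tsv'], approach='all')
# approach='all' fits one temperature over all logits; any other value triggers the per-class 1-vs-K branch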
5dc17b5423528daf5de8853dd4f4aa055ebeaebafbbe854c67956b5cdddbb284
def _ninja_impl(ctx): "The implementation of the `ninja` rule\n\n Args:\n ctx (ctx): The rule's context object\n\n Returns:\n list: A list of providers. See `cc_external_rule_impl`\n " ninja_data = get_ninja_data(ctx) tools_deps = (ctx.attr.tools_deps + ninja_data.deps) attrs = create_attrs(ctx.attr, configure_name='Ninja', create_configure_script=_create_ninja_script, tools_deps=tools_deps, ninja_path=ninja_data.path) return cc_external_rule_impl(ctx, attrs)
The implementation of the `ninja` rule Args: ctx (ctx): The rule's context object Returns: list: A list of providers. See `cc_external_rule_impl`
foreign_cc/ninja.bzl
_ninja_impl
rubensf/rules_foreign_cc
521
python
def _ninja_impl(ctx): "The implementation of the `ninja` rule\n\n Args:\n ctx (ctx): The rule's context object\n\n Returns:\n list: A list of providers. See `cc_external_rule_impl`\n " ninja_data = get_ninja_data(ctx) tools_deps = (ctx.attr.tools_deps + ninja_data.deps) attrs = create_attrs(ctx.attr, configure_name='Ninja', create_configure_script=_create_ninja_script, tools_deps=tools_deps, ninja_path=ninja_data.path) return cc_external_rule_impl(ctx, attrs)
def _ninja_impl(ctx): "The implementation of the `ninja` rule\n\n Args:\n ctx (ctx): The rule's context object\n\n Returns:\n list: A list of providers. See `cc_external_rule_impl`\n " ninja_data = get_ninja_data(ctx) tools_deps = (ctx.attr.tools_deps + ninja_data.deps) attrs = create_attrs(ctx.attr, configure_name='Ninja', create_configure_script=_create_ninja_script, tools_deps=tools_deps, ninja_path=ninja_data.path) return cc_external_rule_impl(ctx, attrs)<|docstring|>The implementation of the `ninja` rule Args: ctx (ctx): The rule's context object Returns: list: A list of providers. See `cc_external_rule_impl`<|endoftext|>
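A sketch of a BUILD-file target driven by the rule this implementation backs; the load label and the external source target are assumptions, not taken from the record:

load("@rules_foreign_cc//foreign_cc:defs.bzl", "ninja")  # assumed load path

ninja(
    name = "foo_build",
    lib_source = "@foo//:all_srcs",  # hypothetical external source tree
    args = ["-j4"],
    targets = ["install"],
)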
44aa8ff68f600488f643ee3b0f1ea26cc1bf5e8885cfbe0800521a03db5888d4
def _create_ninja_script(configureParameters): 'Creates the bash commands for invoking commands to build ninja projects\n\n Args:\n configureParameters (struct): See `ConfigureParameters`\n\n Returns:\n str: A string representing a section of a bash script\n ' ctx = configureParameters.ctx attrs = configureParameters.attrs script = [] root = detect_root(ctx.attr.lib_source) script.append('##symlink_contents_to_dir## $$EXT_BUILD_ROOT$$/{} $$BUILD_TMPDIR$$'.format(root)) data = (ctx.attr.data + ctx.attr.build_data) args = ' '.join([ctx.expand_location(arg, data) for arg in ctx.attr.args]) directory = '$$EXT_BUILD_ROOT$$/{}'.format(root) if ctx.attr.directory: directory = ctx.expand_location(ctx.attr.directory, data) prefix = ('{} '.format(expand_locations(ctx, attrs.tool_prefix, data)) if attrs.tool_prefix else '') for target in (ctx.attr.targets or ['']): script.append('{prefix}{ninja} -C {dir} {args} {target}'.format(prefix=prefix, ninja=attrs.ninja_path, dir=directory, args=args, target=target)) return script
Creates the bash commands for invoking commands to build ninja projects Args: configureParameters (struct): See `ConfigureParameters` Returns: str: A string representing a section of a bash script
foreign_cc/ninja.bzl
_create_ninja_script
rubensf/rules_foreign_cc
521
python
def _create_ninja_script(configureParameters): 'Creates the bash commands for invoking commands to build ninja projects\n\n Args:\n configureParameters (struct): See `ConfigureParameters`\n\n Returns:\n str: A string representing a section of a bash script\n ' ctx = configureParameters.ctx attrs = configureParameters.attrs script = [] root = detect_root(ctx.attr.lib_source) script.append('##symlink_contents_to_dir## $$EXT_BUILD_ROOT$$/{} $$BUILD_TMPDIR$$'.format(root)) data = (ctx.attr.data + ctx.attr.build_data) args = ' '.join([ctx.expand_location(arg, data) for arg in ctx.attr.args]) directory = '$$EXT_BUILD_ROOT$$/{}'.format(root) if ctx.attr.directory: directory = ctx.expand_location(ctx.attr.directory, data) prefix = ('{} '.format(expand_locations(ctx, attrs.tool_prefix, data)) if attrs.tool_prefix else '') for target in (ctx.attr.targets or ['']): script.append('{prefix}{ninja} -C {dir} {args} {target}'.format(prefix=prefix, ninja=attrs.ninja_path, dir=directory, args=args, target=target)) return script
def _create_ninja_script(configureParameters): 'Creates the bash commands for invoking commands to build ninja projects\n\n Args:\n configureParameters (struct): See `ConfigureParameters`\n\n Returns:\n str: A string representing a section of a bash script\n ' ctx = configureParameters.ctx attrs = configureParameters.attrs script = [] root = detect_root(ctx.attr.lib_source) script.append('##symlink_contents_to_dir## $$EXT_BUILD_ROOT$$/{} $$BUILD_TMPDIR$$'.format(root)) data = (ctx.attr.data + ctx.attr.build_data) args = ' '.join([ctx.expand_location(arg, data) for arg in ctx.attr.args]) directory = '$$EXT_BUILD_ROOT$$/{}'.format(root) if ctx.attr.directory: directory = ctx.expand_location(ctx.attr.directory, data) prefix = ('{} '.format(expand_locations(ctx, attrs.tool_prefix, data)) if attrs.tool_prefix else '') for target in (ctx.attr.targets or ['']): script.append('{prefix}{ninja} -C {dir} {args} {target}'.format(prefix=prefix, ninja=attrs.ninja_path, dir=directory, args=args, target=target)) return script<|docstring|>Creates the bash commands for invoking commands to build ninja projects

Args:
    configureParameters (struct): See `ConfigureParameters`

Returns:
    str: A string representing a section of a bash script<|endoftext|>
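For illustration, with root external/foo, args ['-j4'], targets ['all', 'install'], no tool_prefix, and ninja standing in for attrs.ninja_path, the helper above emits script lines shaped like:

##symlink_contents_to_dir## $$EXT_BUILD_ROOT$$/external/foo $$BUILD_TMPDIR$$
ninja -C $$EXT_BUILD_ROOT$$/external/foo -j4 all
ninja -C $$EXT_BUILD_ROOT$$/external/foo -j4 install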
4dc21acc576b5e04e3fcc9e96e1c94b2f4372622985d8e647c0a345b20301b71
def _attrs(): 'Modifies the common set of attributes used by rules_foreign_cc and sets Ninja specific attrs\n\n Returns:\n dict: Attributes of the `ninja` rule\n ' attrs = dict(CC_EXTERNAL_RULE_ATTRIBUTES) attrs.update({'args': attr.string_list(doc='A list of arguments to pass to the call to `ninja`'), 'directory': attr.string(doc=('A directory to pass as the `-C` argument. The rule will always use the root ' + 'directory of the `lib_sources` attribute if this attribute is not set'))}) return attrs
Modifies the common set of attributes used by rules_foreign_cc and sets Ninja specific attrs Returns: dict: Attributes of the `ninja` rule
foreign_cc/ninja.bzl
_attrs
rubensf/rules_foreign_cc
521
python
def _attrs(): 'Modifies the common set of attributes used by rules_foreign_cc and sets Ninja specific attrs\n\n Returns:\n dict: Attributes of the `ninja` rule\n ' attrs = dict(CC_EXTERNAL_RULE_ATTRIBUTES) attrs.update({'args': attr.string_list(doc='A list of arguments to pass to the call to `ninja`'), 'directory': attr.string(doc=('A directory to pass as the `-C` argument. The rule will always use the root ' + 'directory of the `lib_sources` attribute if this attribute is not set'))}) return attrs
def _attrs(): 'Modifies the common set of attributes used by rules_foreign_cc and sets Ninja specific attrs\n\n Returns:\n dict: Attributes of the `ninja` rule\n ' attrs = dict(CC_EXTERNAL_RULE_ATTRIBUTES) attrs.update({'args': attr.string_list(doc='A list of arguments to pass to the call to `ninja`'), 'directory': attr.string(doc=('A directory to pass as the `-C` argument. The rule will always use the root ' + 'directory of the `lib_sources` attribute if this attribute is not set'))}) return attrs<|docstring|>Modifies the common set of attributes used by rules_foreign_cc and sets Ninja specific attrs Returns: dict: Attributes of the `ninja` rule<|endoftext|>
b2aa3ce0151c9b0c4072c2d7a5c2161c03536fa765444d55d40bb0f72dd6ce21
@classmethod def init_from_url(cls, url, rows=None): '\n Initializes from url\n\n :param url: screener url\n :type url: string\n :param rows: total number of rows to get\n :type rows: int\n ' split_query = urlparse_qs(urlparse(url).query) tickers = (split_query['t'][0].split(',') if ('t' in split_query) else None) filters = (split_query['f'][0].split(',') if ('f' in split_query) else None) custom = (split_query['c'][0].split(',') if ('c' in split_query) else None) order = (split_query['o'][0] if ('o' in split_query) else '') signal = (split_query['s'][0] if ('s' in split_query) else '') table = 'Overview' if ('v' in split_query): table_numbers_types = {v: k for (k, v) in TABLE_TYPES.items()} table_number_string = split_query['v'][0][0:3] try: table = table_numbers_types[table_number_string] except KeyError: raise InvalidTableType(split_query['v'][0]) return cls(tickers, filters, rows, order, signal, table, custom)
Initializes from url :param url: screener url :type url: string :param rows: total number of rows to get :type rows: int
finviz/screener.py
init_from_url
diveyez/finviz
746
python
@classmethod def init_from_url(cls, url, rows=None): '\n Initializes from url\n\n :param url: screener url\n :type url: string\n :param rows: total number of rows to get\n :type rows: int\n ' split_query = urlparse_qs(urlparse(url).query) tickers = (split_query['t'][0].split(',') if ('t' in split_query) else None) filters = (split_query['f'][0].split(',') if ('f' in split_query) else None) custom = (split_query['c'][0].split(',') if ('c' in split_query) else None) order = (split_query['o'][0] if ('o' in split_query) else '') signal = (split_query['s'][0] if ('s' in split_query) else '') table = 'Overview' if ('v' in split_query): table_numbers_types = {v: k for (k, v) in TABLE_TYPES.items()} table_number_string = split_query['v'][0][0:3] try: table = table_numbers_types[table_number_string] except KeyError: raise InvalidTableType(split_query['v'][0]) return cls(tickers, filters, rows, order, signal, table, custom)
@classmethod def init_from_url(cls, url, rows=None): '\n Initializes from url\n\n :param url: screener url\n :type url: string\n :param rows: total number of rows to get\n :type rows: int\n ' split_query = urlparse_qs(urlparse(url).query) tickers = (split_query['t'][0].split(',') if ('t' in split_query) else None) filters = (split_query['f'][0].split(',') if ('f' in split_query) else None) custom = (split_query['c'][0].split(',') if ('c' in split_query) else None) order = (split_query['o'][0] if ('o' in split_query) else '') signal = (split_query['s'][0] if ('s' in split_query) else '') table = 'Overview' if ('v' in split_query): table_numbers_types = {v: k for (k, v) in TABLE_TYPES.items()} table_number_string = split_query['v'][0][0:3] try: table = table_numbers_types[table_number_string] except KeyError: raise InvalidTableType(split_query['v'][0]) return cls(tickers, filters, rows, order, signal, table, custom)<|docstring|>Initializes from url :param url: screener url :type url: string :param rows: total number of rows to get :type rows: int<|endoftext|>
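A minimal usage sketch of init_from_url; the import path is assumed from the file's location (finviz/screener.py) and the URL is made up. The 'v=111' view code is mapped back to the 'Overview' table via the inverted TABLE_TYPES dict:

from finviz.screener import Screener  # assumed import path
stock_list = Screener.init_from_url('https://finviz.com/screener.ashx?v=111&f=exch_nasd,idx_sp500&o=-price', rows=50)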
5b84d45833f61eb22a63315a611d4598f63adfe82d7e72bf387792ce25562ff8
def __init__(self, tickers=None, filters=None, rows=None, order='', signal='', table=None, custom=None, user_agent=generate_user_agent(), request_method='sequential'): "\n Initializes all variables to its values\n\n :param tickers: collection of ticker strings eg.: ['AAPL', 'AMD', 'WMT']\n :type tickers: list\n :param filters: collection of filters strings eg.: ['exch_nasd', 'idx_sp500', 'fa_div_none']\n :type filters: list\n :param rows: total number of rows to get\n :type rows: int\n :param order: table order eg.: '-price' (to sort table by descending price)\n :type order: str\n :param signal: show by signal eg.: 'n_majornews' (for stocks with major news)\n :type signal: str\n :param table: table type eg.: 'Performance'\n :type table: str\n :param custom: collection of custom columns eg.: ['1', '21', '23', '45']\n :type custom: list\n :var self.data: list of dictionaries containing row data\n :type self.data: list\n " if (tickers is None): self._tickers = [] else: self._tickers = tickers if (filters is None): self._filters = [] else: self._filters = filters if (table is None): self._table = '111' else: self._table = self.__check_table(table) if (custom is None): self._custom = [] else: self._table = '152' self._custom = custom if ('0' not in self._custom): self._custom = (['0'] + self._custom) self._rows = rows self._order = order self._signal = signal self._user_agent = user_agent self._request_method = request_method self.analysis = [] self.data = self.__search_screener()
Initializes all variables to its values :param tickers: collection of ticker strings eg.: ['AAPL', 'AMD', 'WMT'] :type tickers: list :param filters: collection of filters strings eg.: ['exch_nasd', 'idx_sp500', 'fa_div_none'] :type filters: list :param rows: total number of rows to get :type rows: int :param order: table order eg.: '-price' (to sort table by descending price) :type order: str :param signal: show by signal eg.: 'n_majornews' (for stocks with major news) :type signal: str :param table: table type eg.: 'Performance' :type table: str :param custom: collection of custom columns eg.: ['1', '21', '23', '45'] :type custom: list :var self.data: list of dictionaries containing row data :type self.data: list
finviz/screener.py
__init__
diveyez/finviz
746
python
def __init__(self, tickers=None, filters=None, rows=None, order='', signal='', table=None, custom=None, user_agent=generate_user_agent(), request_method='sequential'): "\n Initializes all variables to its values\n\n :param tickers: collection of ticker strings eg.: ['AAPL', 'AMD', 'WMT']\n :type tickers: list\n :param filters: collection of filters strings eg.: ['exch_nasd', 'idx_sp500', 'fa_div_none']\n :type filters: list\n :param rows: total number of rows to get\n :type rows: int\n :param order: table order eg.: '-price' (to sort table by descending price)\n :type order: str\n :param signal: show by signal eg.: 'n_majornews' (for stocks with major news)\n :type signal: str\n :param table: table type eg.: 'Performance'\n :type table: str\n :param custom: collection of custom columns eg.: ['1', '21', '23', '45']\n :type custom: list\n :var self.data: list of dictionaries containing row data\n :type self.data: list\n " if (tickers is None): self._tickers = [] else: self._tickers = tickers if (filters is None): self._filters = [] else: self._filters = filters if (table is None): self._table = '111' else: self._table = self.__check_table(table) if (custom is None): self._custom = [] else: self._table = '152' self._custom = custom if ('0' not in self._custom): self._custom = (['0'] + self._custom) self._rows = rows self._order = order self._signal = signal self._user_agent = user_agent self._request_method = request_method self.analysis = [] self.data = self.__search_screener()
def __init__(self, tickers=None, filters=None, rows=None, order='', signal='', table=None, custom=None, user_agent=generate_user_agent(), request_method='sequential'): "\n Initializes all variables to its values\n\n :param tickers: collection of ticker strings eg.: ['AAPL', 'AMD', 'WMT']\n :type tickers: list\n :param filters: collection of filters strings eg.: ['exch_nasd', 'idx_sp500', 'fa_div_none']\n :type filters: list\n :param rows: total number of rows to get\n :type rows: int\n :param order: table order eg.: '-price' (to sort table by descending price)\n :type order: str\n :param signal: show by signal eg.: 'n_majornews' (for stocks with major news)\n :type signal: str\n :param table: table type eg.: 'Performance'\n :type table: str\n :param custom: collection of custom columns eg.: ['1', '21', '23', '45']\n :type custom: list\n :var self.data: list of dictionaries containing row data\n :type self.data: list\n " if (tickers is None): self._tickers = [] else: self._tickers = tickers if (filters is None): self._filters = [] else: self._filters = filters if (table is None): self._table = '111' else: self._table = self.__check_table(table) if (custom is None): self._custom = [] else: self._table = '152' self._custom = custom if ('0' not in self._custom): self._custom = (['0'] + self._custom) self._rows = rows self._order = order self._signal = signal self._user_agent = user_agent self._request_method = request_method self.analysis = [] self.data = self.__search_screener()<|docstring|>Initializes all variables to its values :param tickers: collection of ticker strings eg.: ['AAPL', 'AMD', 'WMT'] :type tickers: list :param filters: collection of filters strings eg.: ['exch_nasd', 'idx_sp500', 'fa_div_none'] :type filters: list :param rows: total number of rows to get :type rows: int :param order: table order eg.: '-price' (to sort table by descending price) :type order: str :param signal: show by signal eg.: 'n_majornews' (for stocks with major news) :type signal: str :param table: table type eg.: 'Performance' :type table: str :param custom: collection of custom columns eg.: ['1', '21', '23', '45'] :type custom: list :var self.data: list of dictionaries containing row data :type self.data: list<|endoftext|>
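A constructor usage sketch assembled from the parameter examples quoted in the docstring above (the filter and order codes are the documented ones, not an exhaustive list):

stock_list = Screener(filters=['exch_nasd', 'idx_sp500'], table='Performance', order='-price', rows=100)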
f7adc5310cb9ab69449a2a48e77916c4bfb7b15d896e15fe1006673ce493e76d
def __call__(self, tickers=None, filters=None, rows=None, order='', signal='', table=None, custom=None): "\n Adds more filters to the screener. Example usage:\n\n stock_list = Screener(filters=['cap_large']) # All the stocks with large market cap\n # After analyzing you decide you want to see which of the stocks have high dividend yield\n # and show their performance:\n stock_list(filters=['fa_div_high'], table='Performance')\n # Shows performance of stocks with large market cap and high dividend yield\n " if tickers: [self._tickers.append(item) for item in tickers] if filters: [self._filters.append(item) for item in filters] if table: self._table = self.__check_table(table) if order: self._order = order if signal: self._signal = signal if rows: self._rows = rows if custom: self._custom = custom self.analysis = [] self.data = self.__search_screener()
Adds more filters to the screener. Example usage: stock_list = Screener(filters=['cap_large']) # All the stocks with large market cap # After analyzing you decide you want to see which of the stocks have high dividend yield # and show their performance: stock_list(filters=['fa_div_high'], table='Performance') # Shows performance of stocks with large market cap and high dividend yield
finviz/screener.py
__call__
diveyez/finviz
746
python
def __call__(self, tickers=None, filters=None, rows=None, order='', signal='', table=None, custom=None): "\n Adds more filters to the screener. Example usage:\n\n stock_list = Screener(filters=['cap_large']) # All the stocks with large market cap\n # After analyzing you decide you want to see which of the stocks have high dividend yield\n # and show their performance:\n stock_list(filters=['fa_div_high'], table='Performance')\n # Shows performance of stocks with large market cap and high dividend yield\n " if tickers: [self._tickers.append(item) for item in tickers] if filters: [self._filters.append(item) for item in filters] if table: self._table = self.__check_table(table) if order: self._order = order if signal: self._signal = signal if rows: self._rows = rows if custom: self._custom = custom self.analysis = [] self.data = self.__search_screener()
def __call__(self, tickers=None, filters=None, rows=None, order='', signal='', table=None, custom=None): "\n Adds more filters to the screener. Example usage:\n\n stock_list = Screener(filters=['cap_large']) # All the stocks with large market cap\n # After analyzing you decide you want to see which of the stocks have high dividend yield\n # and show their performance:\n stock_list(filters=['fa_div_high'], table='Performance')\n # Shows performance of stocks with large market cap and high dividend yield\n " if tickers: [self._tickers.append(item) for item in tickers] if filters: [self._filters.append(item) for item in filters] if table: self._table = self.__check_table(table) if order: self._order = order if signal: self._signal = signal if rows: self._rows = rows if custom: self._custom = custom self.analysis = [] self.data = self.__search_screener()<|docstring|>Adds more filters to the screener. Example usage: stock_list = Screener(filters=['cap_large']) # All the stocks with large market cap # After analyzing you decide you want to see which of the stocks have high dividend yield # and show their performance: stock_list(filters=['fa_div_high'], table='Performance') # Shows performance of stocks with large market cap and high dividend yield<|endoftext|>
e169c361e6d9a32f951b3b24847dca3922b4f3fee32628e789683b7d3d1afbb3
def __str__(self): ' Returns a readable representation of a table. ' table_list = [self.headers] for row in self.data: table_list.append([(row[col] or '') for col in self.headers]) return create_table_string(table_list)
Returns a readable representation of a table.
finviz/screener.py
__str__
diveyez/finviz
746
python
def __str__(self): ' ' table_list = [self.headers] for row in self.data: table_list.append([(row[col] or '') for col in self.headers]) return create_table_string(table_list)
def __str__(self): ' ' table_list = [self.headers] for row in self.data: table_list.append([(row[col] or '') for col in self.headers]) return create_table_string(table_list)<|docstring|>Returns a readable representation of a table.<|endoftext|>
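Because __str__ delegates to create_table_string, printing an instance renders the headers plus one row per result; missing cells fall back to empty strings:

print(stock_list)  # readable table of the current screener results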
1b549b81355aedd7ab64eadec7d18459989c83ada61b46519d3603f562ead92b
def __repr__(self): " Returns a string representation of the parameter's values. " values = f'''tickers: {tuple(self._tickers)} filters: {tuple(self._filters)} rows: {self._rows} order: {self._order} signal: {self._signal} table: {self._table} custom: {self._custom}''' return values
Returns a string representation of the parameter's values.
finviz/screener.py
__repr__
diveyez/finviz
746
python
def __repr__(self): " " values = f'tickers: {tuple(self._tickers)} filters: {tuple(self._filters)} rows: {self._rows} order: {self._order} signal: {self._signal} table: {self._table} custom: {self._custom}' return values
def __repr__(self): " " values = f'tickers: {tuple(self._tickers)} filters: {tuple(self._filters)} rows: {self._rows} order: {self._order} signal: {self._signal} table: {self._table} custom: {self._custom}' return values<|docstring|>Returns a string representation of the parameter's values.<|endoftext|>
b356b3ca8cfcdf73836ce6156286d69abe4d9818694c773aa304de53c7a97b2a
def __len__(self): ' Returns an int with the number of total rows. ' return int(self._rows)
Returns an int with the number of total rows.
finviz/screener.py
__len__
diveyez/finviz
746
python
def __len__(self): ' ' return int(self._rows)
def __len__(self): ' ' return int(self._rows)<|docstring|>Returns an int with the number of total rows.<|endoftext|>
bbfd85e067b999be0260eaa894f6d521dcef75cf9a277ce4e6066605d6b53389
def __getitem__(self, position): ' Returns a dictionary containing specific row data. ' return self.data[position]
Returns a dictionary containing specific row data.
finviz/screener.py
__getitem__
diveyez/finviz
746
python
def __getitem__(self, position): ' ' return self.data[position]
def __getitem__(self, position): ' ' return self.data[position]<|docstring|>Returns a dictionary containing specific row data.<|endoftext|>
e432bdfc6cde9e495292e6e550d0f58220a30cf43075fbca8ea5af2baa0e1635
@staticmethod def __check_table(input_table): ' Checks if the user input for table type is correct. Otherwise, raises an InvalidTableType error. ' try: table = TABLE_TYPES[input_table] return table except KeyError: raise InvalidTableType(input_table)
Checks if the user input for table type is correct. Otherwise, raises an InvalidTableType error.
finviz/screener.py
__check_table
diveyez/finviz
746
python
@staticmethod def __check_table(input_table): ' ' try: table = TABLE_TYPES[input_table] return table except KeyError: raise InvalidTableType(input_table)
@staticmethod def __check_table(input_table): ' ' try: table = TABLE_TYPES[input_table] return table except KeyError: raise InvalidTableType(input_table)<|docstring|>Checks if the user input for table type is correct. Otherwise, raises an InvalidTableType error.<|endoftext|>
adaa95e6c6b790ac79e919010948dbf9efd66fdcd34f87f77f565bc3cf993113
@staticmethod def load_filter_dict(reload=True): "\n Get dict of available filters. File containing json specification of filters will be built if it doesn't exist\n or if reload is False\n " json_directory = pathlib.Path(__file__).parent json_file = pathlib.Path.joinpath(json_directory, 'filters.json') if (reload and json_file.is_file()): with open(json_file, 'r') as fp: return json.load(fp) hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'} url = 'https://finviz.com/screener.ashx?ft=4' req = urllib.request.Request(url, headers=hdr) with urllib.request.urlopen(req) as response: html = response.read().decode('utf-8') bs = BeautifulSoup(html, 'html.parser') filters_table = None for td in bs.find_all('td'): if (td.get_text().strip() == 'Exchange'): filters_table = td.find_parent('table') if (filters_table is None): raise Exception('Could not locate filter parameters') for div in filters_table.find_all('div'): div.decompose() filter_dict = {} td_list = filters_table.find_all('td') for i in range(0, (len(td_list) - 2), 2): current_dict = {} if (td_list[i].get_text().strip() == ''): continue filter_text = td_list[i].get_text().strip() selections = td_list[(i + 1)].find('select') filter_name = selections.get('data-filter').strip() options = selections.find_all('option', {'value': True}) for opt in options: value = opt.get('value').strip() text = opt.get_text() if ((value is None) or ('Elite' in text)): continue current_dict[text] = f'{filter_name}_{value}' filter_dict[filter_text] = current_dict try: with open(json_file, 'w') as fp: json.dump(filter_dict, fp) except Exception as e: print(e) print('Unable to write to file {}'.format(json_file)) return filter_dict
Get dict of available filters. File containing json specification of filters will be built if it doesn't exist or if reload is False
finviz/screener.py
load_filter_dict
diveyez/finviz
746
python
@staticmethod def load_filter_dict(reload=True): "\n Get dict of available filters. File containing json specification of filters will be built if it doesn't exist\n or if reload is False\n " json_directory = pathlib.Path(__file__).parent json_file = pathlib.Path.joinpath(json_directory, 'filters.json') if (reload and json_file.is_file()): with open(json_file, 'r') as fp: return json.load(fp) hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'} url = 'https://finviz.com/screener.ashx?ft=4' req = urllib.request.Request(url, headers=hdr) with urllib.request.urlopen(req) as response: html = response.read().decode('utf-8') bs = BeautifulSoup(html, 'html.parser') filters_table = None for td in bs.find_all('td'): if (td.get_text().strip() == 'Exchange'): filters_table = td.find_parent('table') if (filters_table is None): raise Exception('Could not locate filter parameters') for div in filters_table.find_all('div'): div.decompose() filter_dict = {} td_list = filters_table.find_all('td') for i in range(0, (len(td_list) - 2), 2): current_dict = {} if (td_list[i].get_text().strip() == ''): continue filter_text = td_list[i].get_text().strip() selections = td_list[(i + 1)].find('select') filter_name = selections.get('data-filter').strip() options = selections.find_all('option', {'value': True}) for opt in options: value = opt.get('value').strip() text = opt.get_text() if ((value is None) or ('Elite' in text)): continue current_dict[text] = f'{filter_name}_{value}' filter_dict[filter_text] = current_dict try: with open(json_file, 'w') as fp: json.dump(filter_dict, fp) except Exception as e: print(e) print('Unable to write to file {}'.format(json_file)) return filter_dict
@staticmethod def load_filter_dict(reload=True): "\n Get dict of available filters. File containing json specification of filters will be built if it doesn't exist\n or if reload is False\n " json_directory = pathlib.Path(__file__).parent json_file = pathlib.Path.joinpath(json_directory, 'filters.json') if (reload and json_file.is_file()): with open(json_file, 'r') as fp: return json.load(fp) hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'} url = 'https://finviz.com/screener.ashx?ft=4' req = urllib.request.Request(url, headers=hdr) with urllib.request.urlopen(req) as response: html = response.read().decode('utf-8') bs = BeautifulSoup(html, 'html.parser') filters_table = None for td in bs.find_all('td'): if (td.get_text().strip() == 'Exchange'): filters_table = td.find_parent('table') if (filters_table is None): raise Exception('Could not locate filter parameters') for div in filters_table.find_all('div'): div.decompose() filter_dict = {} td_list = filters_table.find_all('td') for i in range(0, (len(td_list) - 2), 2): current_dict = {} if (td_list[i].get_text().strip() == ''): continue filter_text = td_list[i].get_text().strip() selections = td_list[(i + 1)].find('select') filter_name = selections.get('data-filter').strip() options = selections.find_all('option', {'value': True}) for opt in options: value = opt.get('value').strip() text = opt.get_text() if ((value is None) or ('Elite' in text)): continue current_dict[text] = f'{filter_name}_{value}' filter_dict[filter_text] = current_dict try: with open(json_file, 'w') as fp: json.dump(filter_dict, fp) except Exception as e: print(e) print('Unable to write to file {}'.format(json_file)) return filter_dict<|docstring|>Get dict of available filters. File containing json specification of filters will be built if it doesn't exist or if reload is False<|endoftext|>
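A usage sketch for load_filter_dict; the exact keys depend on whatever the screener page serves at scrape time, so the lookup comment below is illustrative only:

filter_options = Screener.load_filter_dict()
# e.g. filter_options['Exchange'] might map option text to codes like 'exch_nasd'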
f25178b70943c84b71728446eeb200aab6c8fb1721914f17704e3ad12715071a
def to_sqlite(self, filename): 'Exports the generated table into a SQLite database.\n\n :param filename: SQLite database file path\n :type filename: str\n ' export_to_db(self.headers, self.data, filename)
Exports the generated table into a SQLite database. :param filename: SQLite database file path :type filename: str
finviz/screener.py
to_sqlite
diveyez/finviz
746
python
def to_sqlite(self, filename): 'Exports the generated table into a SQLite database.\n\n :param filename: SQLite database file path\n :type filename: str\n ' export_to_db(self.headers, self.data, filename)
def to_sqlite(self, filename): 'Exports the generated table into a SQLite database.\n\n :param filename: SQLite database file path\n :type filename: str\n ' export_to_db(self.headers, self.data, filename)<|docstring|>Exports the generated table into a SQLite database. :param filename: SQLite database file path :type filename: str<|endoftext|>
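Usage is a single call; the database path below is hypothetical:

stock_list.to_sqlite('screener_results.sqlite')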
ad7971e4b84fe25a994d43bc9e880192bf61bf0ba3bc57f2d9dc3e94830a4b35
def to_csv(self, filename: str): 'Exports the generated table into a CSV file.\n Returns a CSV string if filename is None.\n\n :param filename: CSV file path\n :type filename: str\n ' if (filename and filename.endswith('.csv')): filename = filename[:(- 4)] if (len(self.analysis) > 0): export_to_csv(['ticker', 'date', 'category', 'analyst', 'rating', 'price_from', 'price_to'], self.analysis, f'{filename}-analysts.csv') return export_to_csv(self.headers, self.data, f'{filename}.csv')
Exports the generated table into a CSV file. Returns a CSV string if filename is None. :param filename: CSV file path :type filename: str
finviz/screener.py
to_csv
diveyez/finviz
746
python
def to_csv(self, filename: str): 'Exports the generated table into a CSV file.\n Returns a CSV string if filename is None.\n\n :param filename: CSV file path\n :type filename: str\n ' if (filename and filename.endswith('.csv')): filename = filename[:(- 4)] if (len(self.analysis) > 0): export_to_csv(['ticker', 'date', 'category', 'analyst', 'rating', 'price_from', 'price_to'], self.analysis, f'{filename}-analysts.csv') return export_to_csv(self.headers, self.data, f'{filename}.csv')
def to_csv(self, filename: str): 'Exports the generated table into a CSV file.\n Returns a CSV string if filename is None.\n\n :param filename: CSV file path\n :type filename: str\n ' if (filename and filename.endswith('.csv')): filename = filename[:(- 4)] if (len(self.analysis) > 0): export_to_csv(['ticker', 'date', 'category', 'analyst', 'rating', 'price_from', 'price_to'], self.analysis, f'{filename}-analysts.csv') return export_to_csv(self.headers, self.data, f'{filename}.csv')<|docstring|>Exports the generated table into a CSV file. Returns a CSV string if filename is None. :param filename: CSV file path :type filename: str<|endoftext|>
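A usage sketch; note the method strips a trailing '.csv' before re-appending it, so both spellings below write the same file:

stock_list.to_csv('sp500.csv')
stock_list.to_csv('sp500')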
d3053a941625a38d82ea97c66419422d491bfa8394d644402bfcb3837d99f421
def get_charts(self, period='d', size='l', chart_type='c', ta='1'): "\n Downloads the charts of all tickers shown by the table.\n\n :param period: table period eg. : 'd', 'w' or 'm' for daily, weekly and monthly periods\n :type period: str\n :param size: table size eg.: 'l' for large or 's' for small - choose large for better quality but higher size\n :type size: str\n :param chart_type: chart type: 'c' for candles or 'l' for lines\n :type chart_type: str\n :param ta: technical analysis eg.: '1' to show ta '0' to hide ta\n :type ta: str\n " encoded_payload = urlencode({'ty': chart_type, 'ta': ta, 'p': period, 's': size}) sequential_data_scrape(scrape.download_chart_image, [f"https://finviz.com/chart.ashx?{encoded_payload}&t={row.get('Ticker')}" for row in self.data], self._user_agent)
Downloads the charts of all tickers shown by the table. :param period: table period eg. : 'd', 'w' or 'm' for daily, weekly and monthly periods :type period: str :param size: table size eg.: 'l' for large or 's' for small - choose large for better quality but higher size :type size: str :param chart_type: chart type: 'c' for candles or 'l' for lines :type chart_type: str :param ta: technical analysis eg.: '1' to show ta '0' to hide ta :type ta: str
finviz/screener.py
get_charts
diveyez/finviz
746
python
def get_charts(self, period='d', size='l', chart_type='c', ta='1'): "\n Downloads the charts of all tickers shown by the table.\n\n :param period: table period eg. : 'd', 'w' or 'm' for daily, weekly and monthly periods\n :type period: str\n :param size: table size eg.: 'l' for large or 's' for small - choose large for better quality but higher size\n :type size: str\n :param chart_type: chart type: 'c' for candles or 'l' for lines\n :type chart_type: str\n :param ta: technical analysis eg.: '1' to show ta '0' to hide ta\n :type ta: str\n " encoded_payload = urlencode({'ty': chart_type, 'ta': ta, 'p': period, 's': size}) sequential_data_scrape(scrape.download_chart_image, [f"https://finviz.com/chart.ashx?{encoded_payload}&t={row.get('Ticker')}" for row in self.data], self._user_agent)
def get_charts(self, period='d', size='l', chart_type='c', ta='1'): "\n Downloads the charts of all tickers shown by the table.\n\n :param period: table period eg. : 'd', 'w' or 'm' for daily, weekly and monthly periods\n :type period: str\n :param size: table size eg.: 'l' for large or 's' for small - choose large for better quality but higher size\n :type size: str\n :param chart_type: chart type: 'c' for candles or 'l' for lines\n :type chart_type: str\n :param ta: technical analysis eg.: '1' to show ta '0' to hide ta\n :type ta: str\n " encoded_payload = urlencode({'ty': chart_type, 'ta': ta, 'p': period, 's': size}) sequential_data_scrape(scrape.download_chart_image, [f"https://finviz.com/chart.ashx?{encoded_payload}&t={row.get('Ticker')}" for row in self.data], self._user_agent)<|docstring|>Downloads the charts of all tickers shown by the table. :param period: table period eg. : 'd', 'w' or 'm' for daily, weekly and monthly periods :type period: str :param size: table size eg.: 'l' for large or 's' for small - choose large for better quality but higher size :type size: str :param chart_type: chart type: 'c' for candles or 'l' for lines :type chart_type: str :param ta: technical analysis eg.: '1' to show ta '0' to hide ta :type ta: str<|endoftext|>
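A usage sketch with the documented parameter values (weekly candlestick charts at large size, technical-analysis overlays hidden):

stock_list.get_charts(period='w', size='l', chart_type='c', ta='0')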
e9a28ad663f6d4c271a23388e15b16060b88b1e0dcfc8e32fe5693a41ff9df7b
def get_ticker_details(self): '\n Downloads the details of all tickers shown by the table.\n ' ticker_data = sequential_data_scrape(scrape.download_ticker_details, [f"https://finviz.com/quote.ashx?&t={row.get('Ticker')}" for row in self.data], self._user_agent) for entry in ticker_data: for (key, value) in entry.items(): for ticker_generic in self.data: if (ticker_generic.get('Ticker') == key): if ('Sales' not in self.headers): self.headers.extend(list(value[0].keys())) ticker_generic.update(value[0]) self.analysis.extend(value[1]) return self.data
Downloads the details of all tickers shown by the table.
finviz/screener.py
get_ticker_details
diveyez/finviz
746
python
def get_ticker_details(self): '\n \n ' ticker_data = sequential_data_scrape(scrape.download_ticker_details, [f"https://finviz.com/quote.ashx?&t={row.get('Ticker')}" for row in self.data], self._user_agent) for entry in ticker_data: for (key, value) in entry.items(): for ticker_generic in self.data: if (ticker_generic.get('Ticker') == key): if ('Sales' not in self.headers): self.headers.extend(list(value[0].keys())) ticker_generic.update(value[0]) self.analysis.extend(value[1]) return self.data
def get_ticker_details(self): '\n \n ' ticker_data = sequential_data_scrape(scrape.download_ticker_details, [f"https://finviz.com/quote.ashx?&t={row.get('Ticker')}" for row in self.data], self._user_agent) for entry in ticker_data: for (key, value) in entry.items(): for ticker_generic in self.data: if (ticker_generic.get('Ticker') == key): if ('Sales' not in self.headers): self.headers.extend(list(value[0].keys())) ticker_generic.update(value[0]) self.analysis.extend(value[1]) return self.data<|docstring|>Downloads the details of all tickers shown by the table.<|endoftext|>
3d6eedcec70d919fdf1ae9f06105191dd656fab0a466d8cea342936e0d5944fe
def __check_rows(self): '\n Checks if the user input for row number is correct.\n Otherwise, modifies the number or raises NoResults error.\n ' self._total_rows = scrape.get_total_rows(self._page_content) if (self._total_rows == 0): raise NoResults(self._url.split('?')[1]) elif ((self._rows is None) or (self._rows > self._total_rows)): return self._total_rows else: return self._rows
Checks if the user input for row number is correct. Otherwise, modifies the number or raises NoResults error.
finviz/screener.py
__check_rows
diveyez/finviz
746
python
def __check_rows(self): '\n Checks if the user input for row number is correct.\n Otherwise, modifies the number or raises NoResults error.\n ' self._total_rows = scrape.get_total_rows(self._page_content) if (self._total_rows == 0): raise NoResults(self._url.split('?')[1]) elif ((self._rows is None) or (self._rows > self._total_rows)): return self._total_rows else: return self._rows
def __check_rows(self): '\n Checks if the user input for row number is correct.\n Otherwise, modifies the number or raises NoResults error.\n ' self._total_rows = scrape.get_total_rows(self._page_content) if (self._total_rows == 0): raise NoResults(self._url.split('?')[1]) elif ((self._rows is None) or (self._rows > self._total_rows)): return self._total_rows else: return self._rows<|docstring|>Checks if the user input for row number is correct. Otherwise, modifies the number or raises NoResults error.<|endoftext|>
3334462c93ed830d7d5022c3ca33d8737e626c1db84a88ddc4d81b7d8ce9262e
def __get_table_headers(self): ' Private function used to return table headers. ' return self._page_content.cssselect('tr[valign="middle"]')[0].xpath('td//text()')
Private function used to return table headers.
finviz/screener.py
__get_table_headers
diveyez/finviz
746
python
def __get_table_headers(self): ' ' return self._page_content.cssselect('tr[valign="middle"]')[0].xpath('td//text()')
def __get_table_headers(self): ' ' return self._page_content.cssselect('tr[valign="middle"]')[0].xpath('td//text()')<|docstring|>Private function used to return table headers.<|endoftext|>
94f477bd517d00c37125db69743e0d85900dc6a570773dce9cd89c552c832f96
def __search_screener(self): ' Private function used to return data from the FinViz screener. ' (self._page_content, self._url) = http_request_get('https://finviz.com/screener.ashx', payload={'v': self._table, 't': ','.join(self._tickers), 'f': ','.join(self._filters), 'o': self._order, 's': self._signal, 'c': ','.join(self._custom)}, user_agent=self._user_agent) self._rows = self.__check_rows() self.headers = self.__get_table_headers() if (self._request_method == 'async'): async_connector = Connector(scrape.get_table, scrape.get_page_urls(self._page_content, self._rows, self._url), self._user_agent, self.headers, self._rows, css_select=True) pages_data = async_connector.run_connector() else: pages_data = sequential_data_scrape(scrape.get_table, scrape.get_page_urls(self._page_content, self._rows, self._url), self._user_agent, self.headers, self._rows) data = [] for page in pages_data: for row in page: data.append(row) return data
Private function used to return data from the FinViz screener.
finviz/screener.py
__search_screener
diveyez/finviz
746
python
def __search_screener(self): ' ' (self._page_content, self._url) = http_request_get('https://finviz.com/screener.ashx', payload={'v': self._table, 't': ','.join(self._tickers), 'f': ','.join(self._filters), 'o': self._order, 's': self._signal, 'c': ','.join(self._custom)}, user_agent=self._user_agent) self._rows = self.__check_rows() self.headers = self.__get_table_headers() if (self._request_method == 'async'): async_connector = Connector(scrape.get_table, scrape.get_page_urls(self._page_content, self._rows, self._url), self._user_agent, self.headers, self._rows, css_select=True) pages_data = async_connector.run_connector() else: pages_data = sequential_data_scrape(scrape.get_table, scrape.get_page_urls(self._page_content, self._rows, self._url), self._user_agent, self.headers, self._rows) data = [] for page in pages_data: for row in page: data.append(row) return data
def __search_screener(self): ' ' (self._page_content, self._url) = http_request_get('https://finviz.com/screener.ashx', payload={'v': self._table, 't': ','.join(self._tickers), 'f': ','.join(self._filters), 'o': self._order, 's': self._signal, 'c': ','.join(self._custom)}, user_agent=self._user_agent) self._rows = self.__check_rows() self.headers = self.__get_table_headers() if (self._request_method == 'async'): async_connector = Connector(scrape.get_table, scrape.get_page_urls(self._page_content, self._rows, self._url), self._user_agent, self.headers, self._rows, css_select=True) pages_data = async_connector.run_connector() else: pages_data = sequential_data_scrape(scrape.get_table, scrape.get_page_urls(self._page_content, self._rows, self._url), self._user_agent, self.headers, self._rows) data = [] for page in pages_data: for row in page: data.append(row) return data<|docstring|>Private function used to return data from the FinViz screener.<|endoftext|>
6ad6bd97d725ca8feab0ff43a5918dc3e13eb25915de1164d0353764d43101ee
def is_adb_available(): 'Checks if adb is available as a command line tool.\n\n Returns:\n True if adb binary is available in console, False otherwise.\n ' (ret, out, err) = utils.run_command('which adb', shell=True) clean_out = out.decode('utf-8').strip() if clean_out: return True return False
Checks if adb is available as a command line tool. Returns: True if adb binary is available in console, False otherwise.
mobly/controllers/android_device_lib/adb.py
is_adb_available
xianyuanjia/mobly
532
python
def is_adb_available(): 'Checks if adb is available as a command line tool.\n\n Returns:\n True if adb binary is available in console, False otherwise.\n ' (ret, out, err) = utils.run_command('which adb', shell=True) clean_out = out.decode('utf-8').strip() if clean_out: return True return False
def is_adb_available(): 'Checks if adb is available as a command line tool.\n\n Returns:\n True if adb binary is available in console, False otherwise.\n ' (ret, out, err) = utils.run_command('which adb', shell=True) clean_out = out.decode('utf-8').strip() if clean_out: return True return False<|docstring|>Checks if adb is available as a command line tool. Returns: True if adb binary is available in console, False otherwise.<|endoftext|>
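Usage sketch; note the check shells out to 'which', so it implicitly assumes a Unix-like host:

if not is_adb_available():
    raise RuntimeError('adb binary not found on PATH')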
92524a3bb0eefa2364b0b2edd650189e120229bf7d6e889820691db1ab1dd1fe
def list_occupied_adb_ports(): 'Lists all the host ports occupied by adb forward.\n\n This is useful because adb will silently override the binding if an attempt\n to bind to a port already used by adb was made, instead of throwing binding\n error. So one should always check what ports adb is using before trying to\n bind to a port with adb.\n\n Returns:\n A list of integers representing occupied host ports.\n ' out = AdbProxy().forward('--list') clean_lines = str(out, 'utf-8').strip().split('\n') used_ports = [] for line in clean_lines: tokens = line.split(' tcp:') if (len(tokens) != 3): continue used_ports.append(int(tokens[1])) return used_ports
Lists all the host ports occupied by adb forward. This is useful because adb will silently override the binding if an attempt to bind to a port already used by adb was made, instead of throwing binding error. So one should always check what ports adb is using before trying to bind to a port with adb. Returns: A list of integers representing occupied host ports.
mobly/controllers/android_device_lib/adb.py
list_occupied_adb_ports
xianyuanjia/mobly
532
python
def list_occupied_adb_ports(): 'Lists all the host ports occupied by adb forward.\n\n This is useful because adb will silently override the binding if an attempt\n to bind to a port already used by adb was made, instead of throwing binding\n error. So one should always check what ports adb is using before trying to\n bind to a port with adb.\n\n Returns:\n A list of integers representing occupied host ports.\n ' out = AdbProxy().forward('--list') clean_lines = str(out, 'utf-8').strip().split('\n') used_ports = [] for line in clean_lines: tokens = line.split(' tcp:') if (len(tokens) != 3): continue used_ports.append(int(tokens[1])) return used_ports
def list_occupied_adb_ports(): 'Lists all the host ports occupied by adb forward.\n\n This is useful because adb will silently override the binding if an attempt\n to bind to a port already used by adb was made, instead of throwing binding\n error. So one should always check what ports adb is using before trying to\n bind to a port with adb.\n\n Returns:\n A list of integers representing occupied host ports.\n ' out = AdbProxy().forward('--list') clean_lines = str(out, 'utf-8').strip().split('\n') used_ports = [] for line in clean_lines: tokens = line.split(' tcp:') if (len(tokens) != 3): continue used_ports.append(int(tokens[1])) return used_ports<|docstring|>Lists all the host ports occupied by adb forward. This is useful because adb will silently override the binding if an attempt to bind to a port already used by adb was made, instead of throwing binding error. So one should always check what ports adb is using before trying to bind to a port with adb. Returns: A list of integers representing occupied host ports.<|endoftext|>
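Each line of `adb forward --list` has the shape '<serial> tcp:<host_port> tcp:<device_port>', which is why splitting on ' tcp:' must yield exactly three tokens. A worked example with a hypothetical serial:

line = 'emulator-5554 tcp:8080 tcp:6790'
tokens = line.split(' tcp:')  # ['emulator-5554', '8080', '6790']
host_port = int(tokens[1])    # 8080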
869e40735517e335b8ea68b5225a06dd9f69ffd7cffe93a2ff09e6b28c115f06
def _exec_cmd(self, args, shell, timeout, stderr): 'Executes adb commands.\n\n Args:\n args: string or list of strings, program arguments.\n See subprocess.Popen() documentation.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Popen() docs.\n timeout: float, the number of seconds to wait before timing out.\n If not specified, no timeout takes effect.\n stderr: a Byte stream, like io.BytesIO, stderr of the command will\n be written to this object if provided.\n\n Returns:\n The output of the adb command run if exit code is 0.\n\n Raises:\n ValueError: timeout value is invalid.\n AdbError: The adb command exit code is not 0.\n AdbTimeoutError: The adb command timed out.\n ' if (timeout and (timeout <= 0)): raise ValueError(('Timeout is not a positive value: %s' % timeout)) try: (ret, out, err) = utils.run_command(args, shell=shell, timeout=timeout) except psutil.TimeoutExpired: raise AdbTimeoutError(cmd=args, timeout=timeout, serial=self.serial) if stderr: stderr.write(err) logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s', utils.cli_cmd_to_string(args), out, err, ret) if (ret == 0): return out else: raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret, serial=self.serial)
Executes adb commands. Args: args: string or list of strings, program arguments. See subprocess.Popen() documentation. shell: bool, True to run this command through the system shell, False to invoke it directly. See subprocess.Popen() docs. timeout: float, the number of seconds to wait before timing out. If not specified, no timeout takes effect. stderr: a Byte stream, like io.BytesIO, stderr of the command will be written to this object if provided. Returns: The output of the adb command run if exit code is 0. Raises: ValueError: timeout value is invalid. AdbError: The adb command exit code is not 0. AdbTimeoutError: The adb command timed out.
mobly/controllers/android_device_lib/adb.py
_exec_cmd
xianyuanjia/mobly
532
python
def _exec_cmd(self, args, shell, timeout, stderr): 'Executes adb commands.\n\n Args:\n args: string or list of strings, program arguments.\n See subprocess.Popen() documentation.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Popen() docs.\n timeout: float, the number of seconds to wait before timing out.\n If not specified, no timeout takes effect.\n stderr: a Byte stream, like io.BytesIO, stderr of the command will\n be written to this object if provided.\n\n Returns:\n The output of the adb command run if exit code is 0.\n\n Raises:\n ValueError: timeout value is invalid.\n AdbError: The adb command exit code is not 0.\n AdbTimeoutError: The adb command timed out.\n ' if (timeout and (timeout <= 0)): raise ValueError(('Timeout is not a positive value: %s' % timeout)) try: (ret, out, err) = utils.run_command(args, shell=shell, timeout=timeout) except psutil.TimeoutExpired: raise AdbTimeoutError(cmd=args, timeout=timeout, serial=self.serial) if stderr: stderr.write(err) logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s', utils.cli_cmd_to_string(args), out, err, ret) if (ret == 0): return out else: raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret, serial=self.serial)
def _exec_cmd(self, args, shell, timeout, stderr): 'Executes adb commands.\n\n Args:\n args: string or list of strings, program arguments.\n See subprocess.Popen() documentation.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Popen() docs.\n timeout: float, the number of seconds to wait before timing out.\n If not specified, no timeout takes effect.\n stderr: a Byte stream, like io.BytesIO, stderr of the command will\n be written to this object if provided.\n\n Returns:\n The output of the adb command run if exit code is 0.\n\n Raises:\n ValueError: timeout value is invalid.\n AdbError: The adb command exit code is not 0.\n AdbTimeoutError: The adb command timed out.\n ' if (timeout and (timeout <= 0)): raise ValueError(('Timeout is not a positive value: %s' % timeout)) try: (ret, out, err) = utils.run_command(args, shell=shell, timeout=timeout) except psutil.TimeoutExpired: raise AdbTimeoutError(cmd=args, timeout=timeout, serial=self.serial) if stderr: stderr.write(err) logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s', utils.cli_cmd_to_string(args), out, err, ret) if (ret == 0): return out else: raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret, serial=self.serial)<|docstring|>Executes adb commands. Args: args: string or list of strings, program arguments. See subprocess.Popen() documentation. shell: bool, True to run this command through the system shell, False to invoke it directly. See subprocess.Popen() docs. timeout: float, the number of seconds to wait before timing out. If not specified, no timeout takes effect. stderr: a Byte stream, like io.BytesIO, stderr of the command will be written to this object if provided. Returns: The output of the adb command run if exit code is 0. Raises: ValueError: timeout value is invalid. AdbError: The adb command exit code is not 0. AdbTimeoutError: The adb command timed out.<|endoftext|>
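Callers normally reach _exec_cmd through the attribute-generated adb commands on AdbProxy rather than directly; a sketch, assuming those generated commands forward the timeout and stderr keywords and that the constructor accepts a serial (suggested by self.serial above, but not shown here):

import io
adb = AdbProxy('emulator-5554')  # serial is hypothetical
err = io.BytesIO()
out = adb.shell(['getprop', 'ro.build.version.sdk'], timeout=10, stderr=err)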
04af845a7df574abf67dd2e47e5987d3becfd1ac513de951b9fff3b8d1d4959a
def _execute_and_process_stdout(self, args, shell, handler): 'Executes adb commands and processes the stdout with a handler.\n\n Args:\n args: string or list of strings, program arguments.\n See subprocess.Popen() documentation.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Popen() docs.\n handler: func, a function to handle adb stdout line by line.\n\n Returns:\n The stderr of the adb command run if exit code is 0.\n\n Raises:\n AdbError: The adb command exit code is not 0.\n ' proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, bufsize=1) out = '[elided, processed via handler]' try: while True: line = proc.stdout.readline() if line: handler(line) else: break finally: (unexpected_out, err) = proc.communicate() if unexpected_out: out = ('[unexpected stdout] %s' % unexpected_out) for line in unexpected_out.splitlines(): handler(line) ret = proc.returncode logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s', utils.cli_cmd_to_string(args), out, err, ret) if (ret == 0): return err else: raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret)
Executes adb commands and processes the stdout with a handler. Args: args: string or list of strings, program arguments. See subprocess.Popen() documentation. shell: bool, True to run this command through the system shell, False to invoke it directly. See subprocess.Popen() docs. handler: func, a function to handle adb stdout line by line. Returns: The stderr of the adb command run if exit code is 0. Raises: AdbError: The adb command exit code is not 0.
mobly/controllers/android_device_lib/adb.py
_execute_and_process_stdout
xianyuanjia/mobly
532
python
def _execute_and_process_stdout(self, args, shell, handler): 'Executes adb commands and processes the stdout with a handler.\n\n Args:\n args: string or list of strings, program arguments.\n See subprocess.Popen() documentation.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Popen() docs.\n handler: func, a function to handle adb stdout line by line.\n\n Returns:\n The stderr of the adb command run if exit code is 0.\n\n Raises:\n AdbError: The adb command exit code is not 0.\n ' proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, bufsize=1) out = '[elided, processed via handler]' try: while True: line = proc.stdout.readline() if line: handler(line) else: break finally: (unexpected_out, err) = proc.communicate() if unexpected_out: out = ('[unexpected stdout] %s' % unexpected_out) for line in unexpected_out.splitlines(): handler(line) ret = proc.returncode logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s', utils.cli_cmd_to_string(args), out, err, ret) if (ret == 0): return err else: raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret)
def _execute_and_process_stdout(self, args, shell, handler): 'Executes adb commands and processes the stdout with a handler.\n\n Args:\n args: string or list of strings, program arguments.\n See subprocess.Popen() documentation.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Popen() docs.\n handler: func, a function to handle adb stdout line by line.\n\n Returns:\n The stderr of the adb command run if exit code is 0.\n\n Raises:\n AdbError: The adb command exit code is not 0.\n ' proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, bufsize=1) out = '[elided, processed via handler]' try: while True: line = proc.stdout.readline() if line: handler(line) else: break finally: (unexpected_out, err) = proc.communicate() if unexpected_out: out = ('[unexpected stdout] %s' % unexpected_out) for line in unexpected_out.splitlines(): handler(line) ret = proc.returncode logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s', utils.cli_cmd_to_string(args), out, err, ret) if (ret == 0): return err else: raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret)<|docstring|>Executes adb commands and processes the stdout with a handler. Args: args: string or list of strings, program arguments. See subprocess.Popen() documentation. shell: bool, True to run this command through the system shell, False to invoke it directly. See subprocess.Popen() docs. handler: func, a function to handle adb stdout line by line. Returns: The stderr of the adb command run if exit code is 0. Raises: AdbError: The adb command exit code is not 0.<|endoftext|>
88c4cbd2a97fdcb60a7265d190848dc7d612e71d26567c7e9fea22418db0c58d
def _construct_adb_cmd(self, raw_name, args, shell): 'Constructs an adb command with arguments for a subprocess call.\n\n Args:\n raw_name: string, the raw unsanitized name of the adb command to\n format.\n args: string or list of strings, arguments to the adb command.\n See subprocess.Proc() documentation.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Proc() docs.\n\n Returns:\n The adb command in a format appropriate for subprocess. If shell is\n True, then this is a string; otherwise, this is a list of\n strings.\n ' args = (args or '') name = raw_name.replace('_', '-') if shell: args = utils.cli_cmd_to_string(args) if self.serial: adb_cmd = ('"%s" -s "%s" %s %s' % (ADB, self.serial, name, args)) else: adb_cmd = ('"%s" %s %s' % (ADB, name, args)) else: adb_cmd = [ADB] if self.serial: adb_cmd.extend(['-s', self.serial]) adb_cmd.append(name) if args: if isinstance(args, str): adb_cmd.append(args) else: adb_cmd.extend(args) return adb_cmd
Constructs an adb command with arguments for a subprocess call. Args: raw_name: string, the raw unsanitized name of the adb command to format. args: string or list of strings, arguments to the adb command. See subprocess.Proc() documentation. shell: bool, True to run this command through the system shell, False to invoke it directly. See subprocess.Proc() docs. Returns: The adb command in a format appropriate for subprocess. If shell is True, then this is a string; otherwise, this is a list of strings.
mobly/controllers/android_device_lib/adb.py
_construct_adb_cmd
xianyuanjia/mobly
532
python
def _construct_adb_cmd(self, raw_name, args, shell): 'Constructs an adb command with arguments for a subprocess call.\n\n Args:\n raw_name: string, the raw unsanitized name of the adb command to\n format.\n args: string or list of strings, arguments to the adb command.\n See subprocess.Proc() documentation.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Proc() docs.\n\n Returns:\n The adb command in a format appropriate for subprocess. If shell is\n True, then this is a string; otherwise, this is a list of\n strings.\n ' args = (args or '') name = raw_name.replace('_', '-') if shell: args = utils.cli_cmd_to_string(args) if self.serial: adb_cmd = ('"%s" -s "%s" %s %s' % (ADB, self.serial, name, args)) else: adb_cmd = ('"%s" %s %s' % (ADB, name, args)) else: adb_cmd = [ADB] if self.serial: adb_cmd.extend(['-s', self.serial]) adb_cmd.append(name) if args: if isinstance(args, str): adb_cmd.append(args) else: adb_cmd.extend(args) return adb_cmd
def _construct_adb_cmd(self, raw_name, args, shell): 'Constructs an adb command with arguments for a subprocess call.\n\n Args:\n raw_name: string, the raw unsanitized name of the adb command to\n format.\n args: string or list of strings, arguments to the adb command.\n See subprocess.Proc() documentation.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Proc() docs.\n\n Returns:\n The adb command in a format appropriate for subprocess. If shell is\n True, then this is a string; otherwise, this is a list of\n strings.\n ' args = (args or '') name = raw_name.replace('_', '-') if shell: args = utils.cli_cmd_to_string(args) if self.serial: adb_cmd = ('"%s" -s "%s" %s %s' % (ADB, self.serial, name, args)) else: adb_cmd = ('"%s" %s %s' % (ADB, name, args)) else: adb_cmd = [ADB] if self.serial: adb_cmd.extend(['-s', self.serial]) adb_cmd.append(name) if args: if isinstance(args, str): adb_cmd.append(args) else: adb_cmd.extend(args) return adb_cmd<|docstring|>Constructs an adb command with arguments for a subprocess call. Args: raw_name: string, the raw unsanitized name of the adb command to format. args: string or list of strings, arguments to the adb command. See subprocess.Proc() documentation. shell: bool, True to run this command through the system shell, False to invoke it directly. See subprocess.Proc() docs. Returns: The adb command in a format appropriate for subprocess. If shell is True, then this is a string; otherwise, this is a list of strings.<|endoftext|>
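Tracing _construct_adb_cmd by hand for a hypothetical serial shows the two output shapes (underscores in the raw name become hyphens, and ADB is the adb binary path; the method is private, so this is for illustration only):

adb = AdbProxy('emulator-5554')
adb._construct_adb_cmd('wait_for_device', None, shell=False)
# -> [ADB, '-s', 'emulator-5554', 'wait-for-device']
adb._construct_adb_cmd('wait_for_device', None, shell=True)
# -> '"adb" -s "emulator-5554" wait-for-device '  (when ADB == 'adb'; note the trailing space from the empty args)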
40b23fe4755fd43bbb518c9ca98d30446050ffb368e5e99e903183cdfb7e927d
def _parse_getprop_output(self, output): 'Parses the raw output of `adb shell getprop` into a dictionary.\n\n Args:\n output: byte str, the raw output of the `adb shell getprop` call.\n\n Returns:\n dict, name-value pairs of the properties.\n ' output = output.decode('utf-8', errors='ignore').replace('\r\n', '\n') results = {} for line in output.split(']\n'): if (not line): continue try: (name, value) = line.split(': ', 1) except ValueError: logging.debug('Failed to parse adb getprop line %s', line) continue name = name.strip()[1:(- 1)] if (value and (value[0] == '[')): value = value[1:] results[name] = value return results
Parses the raw output of `adb shell getprop` into a dictionary. Args: output: byte str, the raw output of the `adb shell getprop` call. Returns: dict, name-value pairs of the properties.
mobly/controllers/android_device_lib/adb.py
_parse_getprop_output
xianyuanjia/mobly
532
python
def _parse_getprop_output(self, output): 'Parses the raw output of `adb shell getprop` into a dictionary.\n\n Args:\n output: byte str, the raw output of the `adb shell getprop` call.\n\n Returns:\n dict, name-value pairs of the properties.\n ' output = output.decode('utf-8', errors='ignore').replace('\r\n', '\n') results = {} for line in output.split(']\n'): if (not line): continue try: (name, value) = line.split(': ', 1) except ValueError: logging.debug('Failed to parse adb getprop line %s', line) continue name = name.strip()[1:(- 1)] if (value and (value[0] == '[')): value = value[1:] results[name] = value return results
def _parse_getprop_output(self, output): 'Parses the raw output of `adb shell getprop` into a dictionary.\n\n Args:\n output: byte str, the raw output of the `adb shell getprop` call.\n\n Returns:\n dict, name-value pairs of the properties.\n ' output = output.decode('utf-8', errors='ignore').replace('\r\n', '\n') results = {} for line in output.split(']\n'): if (not line): continue try: (name, value) = line.split(': ', 1) except ValueError: logging.debug('Failed to parse adb getprop line %s', line) continue name = name.strip()[1:(- 1)] if (value and (value[0] == '[')): value = value[1:] results[name] = value return results<|docstring|>Parses the raw output of `adb shell getprop` into a dictionary. Args: output: byte str, the raw output of the `adb shell getprop` call. Returns: dict, name-value pairs of the properties.<|endoftext|>
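A worked example of the parser on a two-property sample: each entry is split on ']\n', then on ': ', and the surrounding brackets are stripped from both name and value:

sample = b'[ro.build.version.sdk]: [30]\n[ro.product.model]: [Pixel 4]\n'
AdbProxy()._parse_getprop_output(sample)
# -> {'ro.build.version.sdk': '30', 'ro.product.model': 'Pixel 4'}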
f992d6397b4c3798bad3ad8a13fe2d0000c25414ffbded3ac9a76d156a179d75
@property def current_user_id(self): 'The integer ID of the current Android user.\n\n Some adb commands require specifying a user ID to work properly. Use\n this to get the current user ID.\n\n Note a "user" is not the same as an "account" in Android. See AOSP\'s\n documentation for details.\n https://source.android.com/devices/tech/admin/multi-user\n ' sdk_int = int(self.getprop('ro.build.version.sdk')) if (sdk_int >= 24): return int(self.shell(['am', 'get-current-user'])) if (sdk_int >= 21): user_info_str = self.shell(['dumpsys', 'user']).decode('utf-8') return int(re.findall('\\{(\\d+):', user_info_str)[0]) return 0
The integer ID of the current Android user. Some adb commands require specifying a user ID to work properly. Use this to get the current user ID. Note a "user" is not the same as an "account" in Android. See AOSP's documentation for details. https://source.android.com/devices/tech/admin/multi-user
mobly/controllers/android_device_lib/adb.py
current_user_id
xianyuanjia/mobly
532
python
@property def current_user_id(self): 'The integer ID of the current Android user.\n\n Some adb commands require specifying a user ID to work properly. Use\n this to get the current user ID.\n\n Note a "user" is not the same as an "account" in Android. See AOSP\'s\n documentation for details.\n https://source.android.com/devices/tech/admin/multi-user\n ' sdk_int = int(self.getprop('ro.build.version.sdk')) if (sdk_int >= 24): return int(self.shell(['am', 'get-current-user'])) if (sdk_int >= 21): user_info_str = self.shell(['dumpsys', 'user']).decode('utf-8') return int(re.findall('\\{(\\d+):', user_info_str)[0]) return 0
@property def current_user_id(self): 'The integer ID of the current Android user.\n\n Some adb commands require specifying a user ID to work properly. Use\n this to get the current user ID.\n\n Note a "user" is not the same as an "account" in Android. See AOSP\'s\n documentation for details.\n https://source.android.com/devices/tech/admin/multi-user\n ' sdk_int = int(self.getprop('ro.build.version.sdk')) if (sdk_int >= 24): return int(self.shell(['am', 'get-current-user'])) if (sdk_int >= 21): user_info_str = self.shell(['dumpsys', 'user']).decode('utf-8') return int(re.findall('\\{(\\d+):', user_info_str)[0]) return 0<|docstring|>The integer ID of the current Android user. Some adb commands require specifying a user ID to work properly. Use this to get the current user ID. Note a "user" is not the same as an "account" in Android. See AOSP's documentation for details. https://source.android.com/devices/tech/admin/multi-user<|endoftext|>
6f8d135056cc4afc42f79b74892a85fe47f8d3b14f0287f532fd4614d789de4e
def connect(self, address): 'Executes the `adb connect` command with proper status checking.\n\n Args:\n address: string, the address of the Android instance to connect to.\n\n Returns:\n The stdout content.\n\n Raises:\n AdbError: if the connection failed.\n ' stdout = self._exec_adb_cmd('connect', address, shell=False, timeout=None, stderr=None) if (PATTERN_ADB_CONNECT_SUCCESS.match(stdout.decode('utf-8')) is None): raise AdbError(cmd=f'connect {address}', stdout=stdout, stderr='', ret_code=0) return stdout
Executes the `adb connect` command with proper status checking. Args: address: string, the address of the Android instance to connect to. Returns: The stdout content. Raises: AdbError: if the connection failed.
mobly/controllers/android_device_lib/adb.py
connect
xianyuanjia/mobly
532
python
def connect(self, address): 'Executes the `adb connect` command with proper status checking.\n\n Args:\n address: string, the address of the Android instance to connect to.\n\n Returns:\n The stdout content.\n\n Raises:\n AdbError: if the connection failed.\n ' stdout = self._exec_adb_cmd('connect', address, shell=False, timeout=None, stderr=None) if (PATTERN_ADB_CONNECT_SUCCESS.match(stdout.decode('utf-8')) is None): raise AdbError(cmd=f'connect {address}', stdout=stdout, stderr='', ret_code=0) return stdout
def connect(self, address): 'Executes the `adb connect` command with proper status checking.\n\n Args:\n address: string, the address of the Android instance to connect to.\n\n Returns:\n The stdout content.\n\n Raises:\n AdbError: if the connection failed.\n ' stdout = self._exec_adb_cmd('connect', address, shell=False, timeout=None, stderr=None) if (PATTERN_ADB_CONNECT_SUCCESS.match(stdout.decode('utf-8')) is None): raise AdbError(cmd=f'connect {address}', stdout=stdout, stderr='', ret_code=0) return stdout<|docstring|>Executes the `adb connect` command with proper status checking. Args: address: string, the address of the Android instance to connect to. Returns: The stdout content. Raises: AdbError: if the connection failed.<|endoftext|>
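A usage sketch, assuming mobly's AdbProxy wrapper from this same module; the address is a placeholder for a local emulator:

from mobly.controllers.android_device_lib import adb

proxy = adb.AdbProxy()  # the serial may be omitted before the device is attached
try:
    proxy.connect('127.0.0.1:5555')  # raises AdbError unless stdout matches the success pattern
except adb.AdbError as e:
    print('connect failed:', e)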
d49c85bcd98964f716a1791abb3bf6dd3c06ea765c4ff9f4e0ee56e6e9551ec9
def getprop(self, prop_name): "Get a property of the device.\n\n This is a convenience wrapper for `adb shell getprop xxx`.\n\n Args:\n prop_name: A string that is the name of the property to get.\n\n Returns:\n A string that is the value of the property, or None if the property\n doesn't exist.\n " return self.shell(['getprop', prop_name], timeout=DEFAULT_GETPROP_TIMEOUT_SEC).decode('utf-8').strip()
Get a property of the device. This is a convenience wrapper for `adb shell getprop xxx`. Args: prop_name: A string that is the name of the property to get. Returns: A string that is the value of the property, or None if the property doesn't exist.
mobly/controllers/android_device_lib/adb.py
getprop
xianyuanjia/mobly
532
python
def getprop(self, prop_name): "Get a property of the device.\n\n This is a convenience wrapper for `adb shell getprop xxx`.\n\n Args:\n prop_name: A string that is the name of the property to get.\n\n Returns:\n A string that is the value of the property, or None if the property\n doesn't exist.\n " return self.shell(['getprop', prop_name], timeout=DEFAULT_GETPROP_TIMEOUT_SEC).decode('utf-8').strip()
def getprop(self, prop_name): "Get a property of the device.\n\n This is a convenience wrapper for `adb shell getprop xxx`.\n\n Args:\n prop_name: A string that is the name of the property to get.\n\n Returns:\n A string that is the value of the property, or None if the property\n doesn't exist.\n " return self.shell(['getprop', prop_name], timeout=DEFAULT_GETPROP_TIMEOUT_SEC).decode('utf-8').strip()<|docstring|>Get a property of the device. This is a convenience wrapper for `adb shell getprop xxx`. Args: prop_name: A string that is the name of the property to get. Returns: A string that is the value of the property, or None if the property doesn't exist.<|endoftext|>
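For example, with an AdbProxy bound to a placeholder serial, the SDK level can be read and compared numerically; note the value comes back as a stripped string:

proxy = adb.AdbProxy('PLACEHOLDER_SERIAL')
sdk = proxy.getprop('ro.build.version.sdk')  # e.g. '30'
if sdk and int(sdk) >= 24:
    print('am get-current-user is available on this build')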
85d2dc7cff2f9a1a63b756b3a35453bc101ea0f948f19795425c8af9be496b24
def getprops(self, prop_names): 'Get multiple properties of the device.\n\n This is a convenience wrapper for `adb shell getprop`. Use this to\n reduce the number of adb calls when getting multiple properties.\n\n Args:\n prop_names: list of strings, the names of the properties to get.\n\n Returns:\n A dict containing name-value pairs of the properties requested, if\n they exist.\n ' attempts = DEFAULT_GETPROPS_ATTEMPTS results = {} for attempt in range(attempts): raw_output = self.shell(['getprop'], timeout=DEFAULT_GETPROP_TIMEOUT_SEC) properties = self._parse_getprop_output(raw_output) if properties: for name in prop_names: if (name in properties): results[name] = properties[name] break if (attempt < (attempts - 1)): time.sleep(DEFAULT_GETPROPS_RETRY_SLEEP_SEC) return results
Get multiple properties of the device. This is a convenience wrapper for `adb shell getprop`. Use this to reduce the number of adb calls when getting multiple properties. Args: prop_names: list of strings, the names of the properties to get. Returns: A dict containing name-value pairs of the properties requested, if they exist.
mobly/controllers/android_device_lib/adb.py
getprops
xianyuanjia/mobly
532
python
def getprops(self, prop_names): 'Get multiple properties of the device.\n\n This is a convenience wrapper for `adb shell getprop`. Use this to\n reduce the number of adb calls when getting multiple properties.\n\n Args:\n prop_names: list of strings, the names of the properties to get.\n\n Returns:\n A dict containing name-value pairs of the properties requested, if\n they exist.\n ' attempts = DEFAULT_GETPROPS_ATTEMPTS results = {} for attempt in range(attempts): raw_output = self.shell(['getprop'], timeout=DEFAULT_GETPROP_TIMEOUT_SEC) properties = self._parse_getprop_output(raw_output) if properties: for name in prop_names: if (name in properties): results[name] = properties[name] break if (attempt < (attempts - 1)): time.sleep(DEFAULT_GETPROPS_RETRY_SLEEP_SEC) return results
def getprops(self, prop_names): 'Get multiple properties of the device.\n\n This is a convenience wrapper for `adb shell getprop`. Use this to\n reduce the number of adb calls when getting multiple properties.\n\n Args:\n prop_names: list of strings, the names of the properties to get.\n\n Returns:\n A dict containing name-value pairs of the properties requested, if\n they exist.\n ' attempts = DEFAULT_GETPROPS_ATTEMPTS results = {} for attempt in range(attempts): raw_output = self.shell(['getprop'], timeout=DEFAULT_GETPROP_TIMEOUT_SEC) properties = self._parse_getprop_output(raw_output) if properties: for name in prop_names: if (name in properties): results[name] = properties[name] break if (attempt < (attempts - 1)): time.sleep(DEFAULT_GETPROPS_RETRY_SLEEP_SEC) return results<|docstring|>Get multiple properties of the device. This is a convenience wrapper for `adb shell getprop`. Use this to reduce the number of adb calls when getting multiple properties. Args: prop_names: list of strings, the names of the properties to get. Returns: A dict containing name-value pairs of the properties requested, if they exist.<|endoftext|>
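A sketch of batching property reads through getprops instead of several getprop round trips, reusing the proxy from the sketch above; the property names are common Android build props, and missing names are simply absent from the result:

props = proxy.getprops(
    ['ro.build.version.sdk', 'ro.product.model', 'ro.not.a.real.prop'])
print(props.get('ro.product.model'))  # None if the device did not report it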
328a41850106a39d5b869addd9e39499aaf9a3d596bdff7908637fdf50bd8a82
def has_shell_command(self, command): 'Checks to see if a given command exists on the device.\n\n Args:\n command: A string that is the name of the command to check.\n\n Returns:\n A boolean that is True if the command exists and False otherwise.\n ' try: output = self.shell(['command', '-v', command]).decode('utf-8').strip() return (command in output) except AdbError: return False
Checks to see if a given command exists on the device. Args: command: A string that is the name of the command to check. Returns: A boolean that is True if the command exists and False otherwise.
mobly/controllers/android_device_lib/adb.py
has_shell_command
xianyuanjia/mobly
532
python
def has_shell_command(self, command): 'Checks to see if a given check command exists on the device.\n\n Args:\n command: A string that is the name of the command to check.\n\n Returns:\n A boolean that is True if the command exists and False otherwise.\n ' try: output = self.shell(['command', '-v', command]).decode('utf-8').strip() return (command in output) except AdbError: return False
def has_shell_command(self, command): 'Checks to see if a given command exists on the device.\n\n Args:\n command: A string that is the name of the command to check.\n\n Returns:\n A boolean that is True if the command exists and False otherwise.\n ' try: output = self.shell(['command', '-v', command]).decode('utf-8').strip() return (command in output) except AdbError: return False<|docstring|>Checks to see if a given command exists on the device. Args: command: A string that is the name of the command to check. Returns: A boolean that is True if the command exists and False otherwise.<|endoftext|>
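For instance, feature-detecting a shell utility before relying on it, with `ip` as an illustrative command name:

if proxy.has_shell_command('ip'):
    print(proxy.shell(['ip', 'addr']).decode('utf-8'))
else:
    print('falling back: ip is not available in this shell')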
e7a0a1ea65ed5b1536b82306cda4c4f87b77de07d07758ccf6dd8f7e15c9ea62
def instrument(self, package, options=None, runner=None, handler=None): "Runs an instrumentation command on the device.\n\n This is a convenience wrapper to avoid parameter formatting.\n\n Example:\n\n .. code-block:: python\n\n device.instrument(\n 'com.my.package.test',\n options = {\n 'class': 'com.my.package.test.TestSuite',\n },\n )\n\n Args:\n package: string, the package of the instrumentation tests.\n options: dict, the instrumentation options including the test\n class.\n runner: string, the test runner name, which defaults to\n DEFAULT_INSTRUMENTATION_RUNNER.\n handler: optional func, when specified the function is used to parse\n the instrumentation stdout line by line as the output is\n generated; otherwise, the stdout is simply returned once the\n instrumentation is finished.\n\n Returns:\n The stdout of instrumentation command or the stderr if the handler\n is set.\n " if (runner is None): runner = DEFAULT_INSTRUMENTATION_RUNNER if (options is None): options = {} options_list = [] for (option_key, option_value) in options.items(): options_list.append(('-e %s %s' % (option_key, option_value))) options_string = ' '.join(options_list) instrumentation_command = ('am instrument -r -w %s %s/%s' % (options_string, package, runner)) logging.info('AndroidDevice|%s: Executing adb shell %s', self.serial, instrumentation_command) if (handler is None): return self._exec_adb_cmd('shell', instrumentation_command, shell=False, timeout=None, stderr=None) else: return self._execute_adb_and_process_stdout('shell', instrumentation_command, shell=False, handler=handler)
Runs an instrumentation command on the device. This is a convenience wrapper to avoid parameter formatting. Example: .. code-block:: python device.instrument( 'com.my.package.test', options = { 'class': 'com.my.package.test.TestSuite', }, ) Args: package: string, the package of the instrumentation tests. options: dict, the instrumentation options including the test class. runner: string, the test runner name, which defaults to DEFAULT_INSTRUMENTATION_RUNNER. handler: optional func, when specified the function is used to parse the instrumentation stdout line by line as the output is generated; otherwise, the stdout is simply returned once the instrumentation is finished. Returns: The stdout of instrumentation command or the stderr if the handler is set.
mobly/controllers/android_device_lib/adb.py
instrument
xianyuanjia/mobly
532
python
def instrument(self, package, options=None, runner=None, handler=None): "Runs an instrumentation command on the device.\n\n This is a convenience wrapper to avoid parameter formatting.\n\n Example:\n\n .. code-block:: python\n\n device.instrument(\n 'com.my.package.test',\n options = {\n 'class': 'com.my.package.test.TestSuite',\n },\n )\n\n Args:\n package: string, the package of the instrumentation tests.\n options: dict, the instrumentation options including the test\n class.\n runner: string, the test runner name, which defaults to\n DEFAULT_INSTRUMENTATION_RUNNER.\n handler: optional func, when specified the function is used to parse\n the instrumentation stdout line by line as the output is\n generated; otherwise, the stdout is simply returned once the\n instrumentation is finished.\n\n Returns:\n The stdout of instrumentation command or the stderr if the handler\n is set.\n " if (runner is None): runner = DEFAULT_INSTRUMENTATION_RUNNER if (options is None): options = {} options_list = [] for (option_key, option_value) in options.items(): options_list.append(('-e %s %s' % (option_key, option_value))) options_string = ' '.join(options_list) instrumentation_command = ('am instrument -r -w %s %s/%s' % (options_string, package, runner)) logging.info('AndroidDevice|%s: Executing adb shell %s', self.serial, instrumentation_command) if (handler is None): return self._exec_adb_cmd('shell', instrumentation_command, shell=False, timeout=None, stderr=None) else: return self._execute_adb_and_process_stdout('shell', instrumentation_command, shell=False, handler=handler)
def instrument(self, package, options=None, runner=None, handler=None): "Runs an instrumentation command on the device.\n\n This is a convenience wrapper to avoid parameter formatting.\n\n Example:\n\n .. code-block:: python\n\n device.instrument(\n 'com.my.package.test',\n options = {\n 'class': 'com.my.package.test.TestSuite',\n },\n )\n\n Args:\n package: string, the package of the instrumentation tests.\n options: dict, the instrumentation options including the test\n class.\n runner: string, the test runner name, which defaults to\n DEFAULT_INSTRUMENTATION_RUNNER.\n handler: optional func, when specified the function is used to parse\n the instrumentation stdout line by line as the output is\n generated; otherwise, the stdout is simply returned once the\n instrumentation is finished.\n\n Returns:\n The stdout of instrumentation command or the stderr if the handler\n is set.\n " if (runner is None): runner = DEFAULT_INSTRUMENTATION_RUNNER if (options is None): options = {} options_list = [] for (option_key, option_value) in options.items(): options_list.append(('-e %s %s' % (option_key, option_value))) options_string = ' '.join(options_list) instrumentation_command = ('am instrument -r -w %s %s/%s' % (options_string, package, runner)) logging.info('AndroidDevice|%s: Executing adb shell %s', self.serial, instrumentation_command) if (handler is None): return self._exec_adb_cmd('shell', instrumentation_command, shell=False, timeout=None, stderr=None) else: return self._execute_adb_and_process_stdout('shell', instrumentation_command, shell=False, handler=handler)<|docstring|>Runs an instrumentation command on the device. This is a convenience wrapper to avoid parameter formatting. Example: .. code-block:: python device.instrument( 'com.my.package.test', options = { 'class': 'com.my.package.test.TestSuite', }, ) Args: package: string, the package of the instrumentation tests. options: dict, the instrumentation options including the test class. runner: string, the test runner name, which defaults to DEFAULT_INSTRUMENTATION_RUNNER. handler: optional func, when specified the function is used to parse the instrumentation stdout line by line as the output is generated; otherwise, the stdout is simply returned once the instrumentation is finished. Returns: The stdout of instrumentation command or the stderr if the handler is set.<|endoftext|>
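A sketch of streaming instrumentation output line by line, reusing the placeholder package and test-suite names from the docstring's own example:

def on_line(line):
    # Invoked for each stdout line while the instrumentation is running.
    print('[instrumentation]', line)

proxy.instrument(
    'com.my.package.test',
    options={'class': 'com.my.package.test.TestSuite'},
    handler=on_line)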
2559a563eaa65586dc8e6e6447e275a7be4265c6c4f1e5171713553857feb96e
def root(self): 'Enables ADB root mode on the device.\n\n This method will retry executing the command `adb root` when an\n AdbError occurs, since sometimes the error `adb: unable to connect\n for root: closed` is raised when executing `adb root` immediately after\n the device is booted to the OS.\n\n Returns:\n A string that is the stdout of the root command.\n\n Raises:\n AdbError: If the command exit code is not 0.\n ' for attempt in range(ADB_ROOT_RETRY_ATTMEPTS): try: return self._exec_adb_cmd('root', args=None, shell=False, timeout=None, stderr=None) except AdbError as e: if ((attempt + 1) < ADB_ROOT_RETRY_ATTMEPTS): logging.debug(('Retry the command "%s" since Error "%s" occurred.' % (utils.cli_cmd_to_string(e.cmd), e.stderr.decode('utf-8').strip()))) time.sleep(ADB_ROOT_RETRY_ATTEMPT_INTERVAL_SEC) else: raise e
Enables ADB root mode on the device. This method will retry executing the command `adb root` when an AdbError occurs, since sometimes the error `adb: unable to connect for root: closed` is raised when executing `adb root` immediately after the device is booted to the OS. Returns: A string that is the stdout of the root command. Raises: AdbError: If the command exit code is not 0.
mobly/controllers/android_device_lib/adb.py
root
xianyuanjia/mobly
532
python
def root(self): 'Enables ADB root mode on the device.\n\n This method will retry to execute the command `adb root` when an\n AdbError occurs, since sometimes the error `adb: unable to connect\n for root: closed` is raised when executing `adb root` immediately after\n the device is booted to OS.\n\n Returns:\n A string that is the stdout of root command.\n\n Raises:\n AdbError: If the command exit code is not 0.\n ' for attempt in range(ADB_ROOT_RETRY_ATTMEPTS): try: return self._exec_adb_cmd('root', args=None, shell=False, timeout=None, stderr=None) except AdbError as e: if ((attempt + 1) < ADB_ROOT_RETRY_ATTMEPTS): logging.debug(('Retry the command "%s" since Error "%s" occurred.' % (utils.cli_cmd_to_string(e.cmd), e.stderr.decode('utf-8').strip()))) time.sleep(ADB_ROOT_RETRY_ATTEMPT_INTERVAL_SEC) else: raise e
def root(self): 'Enables ADB root mode on the device.\n\n This method will retry executing the command `adb root` when an\n AdbError occurs, since sometimes the error `adb: unable to connect\n for root: closed` is raised when executing `adb root` immediately after\n the device is booted to the OS.\n\n Returns:\n A string that is the stdout of the root command.\n\n Raises:\n AdbError: If the command exit code is not 0.\n ' for attempt in range(ADB_ROOT_RETRY_ATTMEPTS): try: return self._exec_adb_cmd('root', args=None, shell=False, timeout=None, stderr=None) except AdbError as e: if ((attempt + 1) < ADB_ROOT_RETRY_ATTMEPTS): logging.debug(('Retry the command "%s" since Error "%s" occurred.' % (utils.cli_cmd_to_string(e.cmd), e.stderr.decode('utf-8').strip()))) time.sleep(ADB_ROOT_RETRY_ATTEMPT_INTERVAL_SEC) else: raise e<|docstring|>Enables ADB root mode on the device. This method will retry executing the command `adb root` when an AdbError occurs, since sometimes the error `adb: unable to connect for root: closed` is raised when executing `adb root` immediately after the device is booted to the OS. Returns: A string that is the stdout of the root command. Raises: AdbError: If the command exit code is not 0.<|endoftext|>
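Typical call site, assuming a debuggable build; the retry loop above absorbs the transient `adb: unable to connect for root: closed` failure right after boot:

proxy.root()             # internally retried on AdbError
proxy.wait_for_device()  # adbd restarts in root mode; wait until it is back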
4ba2e42e0674d00a95db5da6bb1cffd957ce8b2e0cc88cc3c828e7a9a0d82b14
def adb_call(args=None, shell=False, timeout=None, stderr=None): 'Wrapper for an ADB command.\n\n Args:\n args: string or list of strings, arguments to the adb command.\n See subprocess.Proc() documentation.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Proc() docs.\n timeout: float, the number of seconds to wait before timing out.\n If not specified, no timeout takes effect.\n stderr: a Byte stream, like io.BytesIO, stderr of the command\n will be written to this object if provided.\n\n Returns:\n The output of the adb command run if exit code is 0.\n ' return self._exec_adb_cmd(name, args, shell=shell, timeout=timeout, stderr=stderr)
Wrapper for an ADB command. Args: args: string or list of strings, arguments to the adb command. See subprocess.Proc() documentation. shell: bool, True to run this command through the system shell, False to invoke it directly. See subprocess.Proc() docs. timeout: float, the number of seconds to wait before timing out. If not specified, no timeout takes effect. stderr: a Byte stream, like io.BytesIO, stderr of the command will be written to this object if provided. Returns: The output of the adb command run if exit code is 0.
mobly/controllers/android_device_lib/adb.py
adb_call
xianyuanjia/mobly
532
python
def adb_call(args=None, shell=False, timeout=None, stderr=None): 'Wrapper for an ADB command.\n\n Args:\n args: string or list of strings, arguments to the adb command.\n See subprocess.Proc() documentation.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Proc() docs.\n timeout: float, the number of seconds to wait before timing out.\n If not specified, no timeout takes effect.\n stderr: a Byte stream, like io.BytesIO, stderr of the command\n will be written to this object if provided.\n\n Returns:\n The output of the adb command run if exit code is 0.\n ' return self._exec_adb_cmd(name, args, shell=shell, timeout=timeout, stderr=stderr)
def adb_call(args=None, shell=False, timeout=None, stderr=None): 'Wrapper for an ADB command.\n\n Args:\n args: string or list of strings, arguments to the adb command.\n See subprocess.Proc() documentation.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Proc() docs.\n timeout: float, the number of seconds to wait before timing out.\n If not specified, no timeout takes effect.\n stderr: a Byte stream, like io.BytesIO, stderr of the command\n will be written to this object if provided.\n\n Returns:\n The output of the adb command run if exit code is 0.\n ' return self._exec_adb_cmd(name, args, shell=shell, timeout=timeout, stderr=stderr)<|docstring|>Wrapper for an ADB command. Args: args: string or list of strings, arguments to the adb command. See subprocess.Proc() documentation. shell: bool, True to run this command through the system shell, False to invoke it directly. See subprocess.Proc() docs. timeout: float, the number of seconds to wait before timing out. If not specified, no timeout takes effect. stderr: a Byte stream, like io.BytesIO, stderr of the command will be written to this object if provided. Returns: The output of the adb command run if exit code is 0.<|endoftext|>
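adb_call is the closure handed back for attribute access, so any adb subcommand becomes a method on the proxy; a sketch, noting that underscores in the attribute name map to dashes in the subcommand (see _construct_adb_cmd above):

import io

err = io.BytesIO()
proxy.devices()                    # runs `adb devices`
proxy.wait_for_device(timeout=30)  # runs `adb wait-for-device`
proxy.logcat(['-d'], stderr=err)   # stderr bytes are captured into err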
d75d9ce9350d7bad0462aadbad08d509fd88f6157e38ef000b8fb17bffddf8cb
def chapter_current() -> int: 'Return current chapter number'
Return current chapter number
code/chapters.py
chapter_current
hanuele/knausj_talon
298
python
def chapter_current() -> int:
def chapter_current() -> int: <|docstring|>Return current chapter number<|endoftext|>
0ec4a225429196ca70f25ef5c14d425f6a3d6e53538bb260d80df8dd863dcdad
def chapter_next(): 'Go to next chapter' actions.user.chapter_jump((actions.user.chapter_current() + 1))
Go to next chapter
code/chapters.py
chapter_next
hanuele/knausj_talon
298
python
def chapter_next(): actions.user.chapter_jump((actions.user.chapter_current() + 1))
def chapter_next(): actions.user.chapter_jump((actions.user.chapter_current() + 1))<|docstring|>Go to next chapter<|endoftext|>
7c32194a1747165edcfda3614dfd940a7f3eaa9ef2af389f17c37c335a4db1ba
def chapter_previous(): 'Go to previous chapter' actions.user.chapter_jump((actions.user.chapter_current() - 1))
Go to previous chapter
code/chapters.py
chapter_previous
hanuele/knausj_talon
298
python
def chapter_previous(): actions.user.chapter_jump((actions.user.chapter_current() - 1))
def chapter_previous(): actions.user.chapter_jump((actions.user.chapter_current() - 1))<|docstring|>Go to previous chapter<|endoftext|>
1259410c3505f95c238c8805a87e4844e5f2842fcd5f4e925554bdc8e5c4ff0a
def chapter_jump(number: int): 'Go to chapter number'
Go to chapter number
code/chapters.py
chapter_jump
hanuele/knausj_talon
298
python
def chapter_jump(number: int):
def chapter_jump(number: int): <|docstring|>Go to chapter number<|endoftext|>
184438187d213184696d03f7b4543071fd6c9e10c294ef78b1f42ca4b05082a6
def chapter_final(): 'Go to final chapter'
Go to final chapter
code/chapters.py
chapter_final
hanuele/knausj_talon
298
python
def chapter_final():
def chapter_final(): <|docstring|>Go to final chapter<|endoftext|>
78d6c0f34249eb3c54697a1170548afababcd7e85cf6489622797749e72a10b6
async def async_setup_platform(hass: HomeAssistant, _: ConfigType, add_entities: AddEntitiesCallback, discovery_info: (DiscoveryInfoType | None)=None) -> None: 'Add lights from the main Qwikswitch component.' if (discovery_info is None): return qsusb = hass.data[QWIKSWITCH] devs = [QSLight(qsid, qsusb) for qsid in discovery_info[QWIKSWITCH]] add_entities(devs)
Add lights from the main Qwikswitch component.
homeassistant/components/qwikswitch/light.py
async_setup_platform
a-p-z/core
30,023
python
async def async_setup_platform(hass: HomeAssistant, _: ConfigType, add_entities: AddEntitiesCallback, discovery_info: (DiscoveryInfoType | None)=None) -> None: if (discovery_info is None): return qsusb = hass.data[QWIKSWITCH] devs = [QSLight(qsid, qsusb) for qsid in discovery_info[QWIKSWITCH]] add_entities(devs)
async def async_setup_platform(hass: HomeAssistant, _: ConfigType, add_entities: AddEntitiesCallback, discovery_info: (DiscoveryInfoType | None)=None) -> None: if (discovery_info is None): return qsusb = hass.data[QWIKSWITCH] devs = [QSLight(qsid, qsusb) for qsid in discovery_info[QWIKSWITCH]] add_entities(devs)<|docstring|>Add lights from the main Qwikswitch component.<|endoftext|>
fae9ec509428c3c0ef0f4e92f4068ce79d8c1ab287dbe4013d56b36084829877
@property def brightness(self): 'Return the brightness of this light (0-255).' return (self.device.value if self.device.is_dimmer else None)
Return the brightness of this light (0-255).
homeassistant/components/qwikswitch/light.py
brightness
a-p-z/core
30,023
python
@property def brightness(self): return (self.device.value if self.device.is_dimmer else None)
@property def brightness(self): return (self.device.value if self.device.is_dimmer else None)<|docstring|>Return the brightness of this light (0-255).<|endoftext|>
11f9dd03330054ce354ce5ffdcd2e555216b0de41fb0cbb894a4354c998389e7
@property def color_mode(self) -> ColorMode: 'Return the color mode of the light.' return (ColorMode.BRIGHTNESS if self.device.is_dimmer else ColorMode.ONOFF)
Return the color mode of the light.
homeassistant/components/qwikswitch/light.py
color_mode
a-p-z/core
30,023
python
@property def color_mode(self) -> ColorMode: return (ColorMode.BRIGHTNESS if self.device.is_dimmer else ColorMode.ONOFF)
@property def color_mode(self) -> ColorMode: return (ColorMode.BRIGHTNESS if self.device.is_dimmer else ColorMode.ONOFF)<|docstring|>Return the color mode of the light.<|endoftext|>
e1334a939068e3e3dda5ddf15c7f852364b3335c4fef788a84aaf85326f6ff85
@property def supported_color_modes(self) -> set[ColorMode]: 'Flag supported color modes.' return {self.color_mode}
Flag supported color modes.
homeassistant/components/qwikswitch/light.py
supported_color_modes
a-p-z/core
30,023
python
@property def supported_color_modes(self) -> set[ColorMode]: return {self.color_mode}
@property def supported_color_modes(self) -> set[ColorMode]: return {self.color_mode}<|docstring|>Flag supported color modes.<|endoftext|>
d49db9a90b848a70bb6e46939e350e3003452f08c817915b3f8ff0e9c2aa8388
@distributed_trace def list_by_product(self, resource_group_name: str, service_name: str, product_id: str, filter: Optional[str]=None, top: Optional[int]=None, skip: Optional[int]=None, **kwargs: Any) -> Iterable['_models.GroupCollection']: 'Lists the collection of developer groups associated with the specified product.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_name: The name of the API Management service.\n :type service_name: str\n :param product_id: Product identifier. Must be unique in the current API Management service\n instance.\n :type product_id: str\n :param filter: | Field | Usage | Supported operators | Supported\n functions |</br>|-------------|-------------|-------------|-------------|</br>| name |\n filter | ge, le, eq, ne, gt, lt | |</br>| displayName | filter | eq, ne | |</br>|\n description | filter | eq, ne | |</br>.\n :type filter: str\n :param top: Number of records to return.\n :type top: int\n :param skip: Number of records to skip.\n :type skip: int\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either GroupCollection or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~api_management_client.models.GroupCollection]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if (not next_link): request = build_list_by_product_request(resource_group_name=resource_group_name, service_name=service_name, product_id=product_id, subscription_id=self._config.subscription_id, filter=filter, top=top, skip=skip, template_url=self.list_by_product.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_by_product_request(resource_group_name=resource_group_name, service_name=service_name, product_id=product_id, subscription_id=self._config.subscription_id, filter=filter, top=top, skip=skip, template_url=next_link) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = 'GET' return request def extract_data(pipeline_response): deserialized = self._deserialize('GroupCollection', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), iter(list_of_elem)) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged(get_next, extract_data)
Lists the collection of developer groups associated with the specified product. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param service_name: The name of the API Management service. :type service_name: str :param product_id: Product identifier. Must be unique in the current API Management service instance. :type product_id: str :param filter: | Field | Usage | Supported operators | Supported functions |</br>|-------------|-------------|-------------|-------------|</br>| name | filter | ge, le, eq, ne, gt, lt | |</br>| displayName | filter | eq, ne | |</br>| description | filter | eq, ne | |</br>. :type filter: str :param top: Number of records to return. :type top: int :param skip: Number of records to skip. :type skip: int :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either GroupCollection or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~api_management_client.models.GroupCollection] :raises: ~azure.core.exceptions.HttpResponseError
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_product_group_operations.py
list_by_product
AFengKK/azure-sdk-for-python
1
python
@distributed_trace def list_by_product(self, resource_group_name: str, service_name: str, product_id: str, filter: Optional[str]=None, top: Optional[int]=None, skip: Optional[int]=None, **kwargs: Any) -> Iterable['_models.GroupCollection']: 'Lists the collection of developer groups associated with the specified product.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_name: The name of the API Management service.\n :type service_name: str\n :param product_id: Product identifier. Must be unique in the current API Management service\n instance.\n :type product_id: str\n :param filter: | Field | Usage | Supported operators | Supported\n functions |</br>|-------------|-------------|-------------|-------------|</br>| name |\n filter | ge, le, eq, ne, gt, lt | |</br>| displayName | filter | eq, ne | |</br>|\n description | filter | eq, ne | |</br>.\n :type filter: str\n :param top: Number of records to return.\n :type top: int\n :param skip: Number of records to skip.\n :type skip: int\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either GroupCollection or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~api_management_client.models.GroupCollection]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if (not next_link): request = build_list_by_product_request(resource_group_name=resource_group_name, service_name=service_name, product_id=product_id, subscription_id=self._config.subscription_id, filter=filter, top=top, skip=skip, template_url=self.list_by_product.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_by_product_request(resource_group_name=resource_group_name, service_name=service_name, product_id=product_id, subscription_id=self._config.subscription_id, filter=filter, top=top, skip=skip, template_url=next_link) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = 'GET' return request def extract_data(pipeline_response): deserialized = self._deserialize('GroupCollection', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), iter(list_of_elem)) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged(get_next, extract_data)
@distributed_trace def list_by_product(self, resource_group_name: str, service_name: str, product_id: str, filter: Optional[str]=None, top: Optional[int]=None, skip: Optional[int]=None, **kwargs: Any) -> Iterable['_models.GroupCollection']: 'Lists the collection of developer groups associated with the specified product.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_name: The name of the API Management service.\n :type service_name: str\n :param product_id: Product identifier. Must be unique in the current API Management service\n instance.\n :type product_id: str\n :param filter: | Field | Usage | Supported operators | Supported\n functions |</br>|-------------|-------------|-------------|-------------|</br>| name |\n filter | ge, le, eq, ne, gt, lt | |</br>| displayName | filter | eq, ne | |</br>|\n description | filter | eq, ne | |</br>.\n :type filter: str\n :param top: Number of records to return.\n :type top: int\n :param skip: Number of records to skip.\n :type skip: int\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either GroupCollection or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~api_management_client.models.GroupCollection]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if (not next_link): request = build_list_by_product_request(resource_group_name=resource_group_name, service_name=service_name, product_id=product_id, subscription_id=self._config.subscription_id, filter=filter, top=top, skip=skip, template_url=self.list_by_product.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_by_product_request(resource_group_name=resource_group_name, service_name=service_name, product_id=product_id, subscription_id=self._config.subscription_id, filter=filter, top=top, skip=skip, template_url=next_link) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = 'GET' return request def extract_data(pipeline_response): deserialized = self._deserialize('GroupCollection', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), iter(list_of_elem)) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged(get_next, extract_data)<|docstring|>Lists the collection of developer groups associated with the specified product. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param service_name: The name of the API Management service. :type service_name: str :param product_id: Product identifier. Must be unique in the current API Management service instance. :type product_id: str :param filter: | Field | Usage | Supported operators | Supported functions |</br>|-------------|-------------|-------------|-------------|</br>| name | filter | ge, le, eq, ne, gt, lt | |</br>| displayName | filter | eq, ne | |</br>| description | filter | eq, ne | |</br>. :type filter: str :param top: Number of records to return. :type top: int :param skip: Number of records to skip. :type skip: int :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either GroupCollection or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~api_management_client.models.GroupCollection] :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
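A paging sketch for the operation above, assuming the azure-identity and azure-mgmt-apimanagement packages, that the operations group is exposed as product_group on ApiManagementClient, and placeholder resource names; ItemPaged follows next_link lazily:

from azure.identity import DefaultAzureCredential
from azure.mgmt.apimanagement import ApiManagementClient

client = ApiManagementClient(DefaultAzureCredential(), '<subscription-id>')
for group in client.product_group.list_by_product(
        resource_group_name='my-rg',
        service_name='my-apim',
        product_id='starter',
        top=25):
    print(group.name)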
6e96bf2157dcf1fc8003af87482c3828baa8878a6f98def6ff6cfdffebe0fca6
@distributed_trace def check_entity_exists(self, resource_group_name: str, service_name: str, product_id: str, group_id: str, **kwargs: Any) -> bool: 'Checks that Group entity specified by identifier is associated with the Product entity.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_name: The name of the API Management service.\n :type service_name: str\n :param product_id: Product identifier. Must be unique in the current API Management service\n instance.\n :type product_id: str\n :param group_id: Group identifier. Must be unique in the current API Management service\n instance.\n :type group_id: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: bool, or the result of cls(response)\n :rtype: bool\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_check_entity_exists_request(resource_group_name=resource_group_name, service_name=service_name, product_id=product_id, group_id=group_id, subscription_id=self._config.subscription_id, template_url=self.check_entity_exists.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [204]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) return (200 <= response.status_code <= 299)
Checks that Group entity specified by identifier is associated with the Product entity. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param service_name: The name of the API Management service. :type service_name: str :param product_id: Product identifier. Must be unique in the current API Management service instance. :type product_id: str :param group_id: Group identifier. Must be unique in the current API Management service instance. :type group_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: bool, or the result of cls(response) :rtype: bool :raises: ~azure.core.exceptions.HttpResponseError
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_product_group_operations.py
check_entity_exists
AFengKK/azure-sdk-for-python
1
python
@distributed_trace def check_entity_exists(self, resource_group_name: str, service_name: str, product_id: str, group_id: str, **kwargs: Any) -> bool: 'Checks that Group entity specified by identifier is associated with the Product entity.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_name: The name of the API Management service.\n :type service_name: str\n :param product_id: Product identifier. Must be unique in the current API Management service\n instance.\n :type product_id: str\n :param group_id: Group identifier. Must be unique in the current API Management service\n instance.\n :type group_id: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: bool, or the result of cls(response)\n :rtype: bool\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_check_entity_exists_request(resource_group_name=resource_group_name, service_name=service_name, product_id=product_id, group_id=group_id, subscription_id=self._config.subscription_id, template_url=self.check_entity_exists.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [204]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) return (200 <= response.status_code <= 299)
@distributed_trace def check_entity_exists(self, resource_group_name: str, service_name: str, product_id: str, group_id: str, **kwargs: Any) -> bool: 'Checks that Group entity specified by identifier is associated with the Product entity.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_name: The name of the API Management service.\n :type service_name: str\n :param product_id: Product identifier. Must be unique in the current API Management service\n instance.\n :type product_id: str\n :param group_id: Group identifier. Must be unique in the current API Management service\n instance.\n :type group_id: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: bool, or the result of cls(response)\n :rtype: bool\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_check_entity_exists_request(resource_group_name=resource_group_name, service_name=service_name, product_id=product_id, group_id=group_id, subscription_id=self._config.subscription_id, template_url=self.check_entity_exists.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [204]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) return (200 <= response.status_code <= 299)<|docstring|>Checks that Group entity specified by identifier is associated with the Product entity. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param service_name: The name of the API Management service. :type service_name: str :param product_id: Product identifier. Must be unique in the current API Management service instance. :type product_id: str :param group_id: Group identifier. Must be unique in the current API Management service instance. :type group_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: bool, or the result of cls(response) :rtype: bool :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
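Despite the bool return type, the error_map above routes a 404 to ResourceNotFoundError, so a missing association surfaces as an exception rather than False; a sketch, reusing the placeholder client from above:

from azure.core.exceptions import ResourceNotFoundError

try:
    client.product_group.check_entity_exists('my-rg', 'my-apim', 'starter', 'developers')
    linked = True
except ResourceNotFoundError:
    linked = False
print('linked to developers group:', linked)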
28f6b262d02bc173c6a6ef4636fb2f796332d4866c296f8dc453bc1e6644dd23
@distributed_trace def create_or_update(self, resource_group_name: str, service_name: str, product_id: str, group_id: str, **kwargs: Any) -> '_models.GroupContract': 'Adds the association between the specified developer group and the specified product.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_name: The name of the API Management service.\n :type service_name: str\n :param product_id: Product identifier. Must be unique in the current API Management service\n instance.\n :type product_id: str\n :param group_id: Group identifier. Must be unique in the current API Management service\n instance.\n :type group_id: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: GroupContract, or the result of cls(response)\n :rtype: ~api_management_client.models.GroupContract\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_create_or_update_request(resource_group_name=resource_group_name, service_name=service_name, product_id=product_id, group_id=group_id, subscription_id=self._config.subscription_id, template_url=self.create_or_update.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200, 201]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if (response.status_code == 200): deserialized = self._deserialize('GroupContract', pipeline_response) if (response.status_code == 201): deserialized = self._deserialize('GroupContract', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
Adds the association between the specified developer group and the specified product. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param service_name: The name of the API Management service. :type service_name: str :param product_id: Product identifier. Must be unique in the current API Management service instance. :type product_id: str :param group_id: Group identifier. Must be unique in the current API Management service instance. :type group_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: GroupContract, or the result of cls(response) :rtype: ~api_management_client.models.GroupContract :raises: ~azure.core.exceptions.HttpResponseError
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_product_group_operations.py
create_or_update
AFengKK/azure-sdk-for-python
1
python
@distributed_trace def create_or_update(self, resource_group_name: str, service_name: str, product_id: str, group_id: str, **kwargs: Any) -> '_models.GroupContract': 'Adds the association between the specified developer group with the specified product.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_name: The name of the API Management service.\n :type service_name: str\n :param product_id: Product identifier. Must be unique in the current API Management service\n instance.\n :type product_id: str\n :param group_id: Group identifier. Must be unique in the current API Management service\n instance.\n :type group_id: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: GroupContract, or the result of cls(response)\n :rtype: ~api_management_client.models.GroupContract\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_create_or_update_request(resource_group_name=resource_group_name, service_name=service_name, product_id=product_id, group_id=group_id, subscription_id=self._config.subscription_id, template_url=self.create_or_update.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200, 201]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if (response.status_code == 200): deserialized = self._deserialize('GroupContract', pipeline_response) if (response.status_code == 201): deserialized = self._deserialize('GroupContract', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
@distributed_trace def create_or_update(self, resource_group_name: str, service_name: str, product_id: str, group_id: str, **kwargs: Any) -> '_models.GroupContract': 'Adds the association between the specified developer group and the specified product.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_name: The name of the API Management service.\n :type service_name: str\n :param product_id: Product identifier. Must be unique in the current API Management service\n instance.\n :type product_id: str\n :param group_id: Group identifier. Must be unique in the current API Management service\n instance.\n :type group_id: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: GroupContract, or the result of cls(response)\n :rtype: ~api_management_client.models.GroupContract\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_create_or_update_request(resource_group_name=resource_group_name, service_name=service_name, product_id=product_id, group_id=group_id, subscription_id=self._config.subscription_id, template_url=self.create_or_update.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200, 201]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if (response.status_code == 200): deserialized = self._deserialize('GroupContract', pipeline_response) if (response.status_code == 201): deserialized = self._deserialize('GroupContract', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized<|docstring|>Adds the association between the specified developer group and the specified product. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param service_name: The name of the API Management service. :type service_name: str :param product_id: Product identifier. Must be unique in the current API Management service instance. :type product_id: str :param group_id: Group identifier. Must be unique in the current API Management service instance. :type group_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: GroupContract, or the result of cls(response) :rtype: ~api_management_client.models.GroupContract :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
d133dd6a3806fc637fd062fb4bd50118f4de2f14ee2ef486697d8f60041c7e0b
@distributed_trace def delete(self, resource_group_name: str, service_name: str, product_id: str, group_id: str, **kwargs: Any) -> None: 'Deletes the association between the specified group and product.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_name: The name of the API Management service.\n :type service_name: str\n :param product_id: Product identifier. Must be unique in the current API Management service\n instance.\n :type product_id: str\n :param group_id: Group identifier. Must be unique in the current API Management service\n instance.\n :type group_id: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_delete_request(resource_group_name=resource_group_name, service_name=service_name, product_id=product_id, group_id=group_id, subscription_id=self._config.subscription_id, template_url=self.delete.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200, 204]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
Deletes the association between the specified group and product. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param service_name: The name of the API Management service. :type service_name: str :param product_id: Product identifier. Must be unique in the current API Management service instance. :type product_id: str :param group_id: Group identifier. Must be unique in the current API Management service instance. :type group_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_product_group_operations.py
delete
AFengKK/azure-sdk-for-python
1
python
@distributed_trace def delete(self, resource_group_name: str, service_name: str, product_id: str, group_id: str, **kwargs: Any) -> None: 'Deletes the association between the specified group and product.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_name: The name of the API Management service.\n :type service_name: str\n :param product_id: Product identifier. Must be unique in the current API Management service\n instance.\n :type product_id: str\n :param group_id: Group identifier. Must be unique in the current API Management service\n instance.\n :type group_id: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_delete_request(resource_group_name=resource_group_name, service_name=service_name, product_id=product_id, group_id=group_id, subscription_id=self._config.subscription_id, template_url=self.delete.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200, 204]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
@distributed_trace def delete(self, resource_group_name: str, service_name: str, product_id: str, group_id: str, **kwargs: Any) -> None: 'Deletes the association between the specified group and product.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_name: The name of the API Management service.\n :type service_name: str\n :param product_id: Product identifier. Must be unique in the current API Management service\n instance.\n :type product_id: str\n :param group_id: Group identifier. Must be unique in the current API Management service\n instance.\n :type group_id: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_delete_request(resource_group_name=resource_group_name, service_name=service_name, product_id=product_id, group_id=group_id, subscription_id=self._config.subscription_id, template_url=self.delete.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200, 204]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})<|docstring|>Deletes the association between the specified group and product. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param service_name: The name of the API Management service. :type service_name: str :param product_id: Product identifier. Must be unique in the current API Management service instance. :type product_id: str :param group_id: Group identifier. Must be unique in the current API Management service instance. :type group_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
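A minimal usage sketch for the two generated operations above, assuming the standard azure-mgmt-apimanagement client surface; the product_group operations-group attribute, credential wiring, and all resource names here are illustrative assumptions, not taken from this file.

# Hypothetical wiring; operations-group attribute and resource names are assumptions.
from azure.identity import DefaultAzureCredential
from azure.mgmt.apimanagement import ApiManagementClient

client = ApiManagementClient(DefaultAzureCredential(), subscription_id="<subscription-id>")

# PUT: associate the group with the product (service expects 200 or 201).
group = client.product_group.create_or_update(
    resource_group_name="my-rg",
    service_name="my-apim",
    product_id="starter",
    group_id="developers",
)

# DELETE: remove the association again (service expects 200 or 204).
client.product_group.delete(
    resource_group_name="my-rg",
    service_name="my-apim",
    product_id="starter",
    group_id="developers",
)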
c314707ebb716bc7664f32ae7caa44630970cb3178b1bf8d485b59fec5b22039
def average_distributed_scalar(scalar, args): ' Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. ' if (args.local_rank == (- 1)): return scalar scalar_t = (torch.tensor(scalar, dtype=torch.float, device=args.device) / torch.distributed.get_world_size()) torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM) return scalar_t.item()
Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation.
utils/auxiliary.py
average_distributed_scalar
lxchtan/Dialogue-Generation
2
python
def average_distributed_scalar(scalar, args): ' ' if (args.local_rank == (- 1)): return scalar scalar_t = (torch.tensor(scalar, dtype=torch.float, device=args.device) / torch.distributed.get_world_size()) torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM) return scalar_t.item()
def average_distributed_scalar(scalar, args): ' ' if (args.local_rank == (- 1)): return scalar scalar_t = (torch.tensor(scalar, dtype=torch.float, device=args.device) / torch.distributed.get_world_size()) torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM) return scalar_t.item()<|docstring|>Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation.<|endoftext|>
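A small sketch of how this helper is typically called during evaluation; the args namespace carries exactly the two fields the function reads (local_rank, device), and the loss value is illustrative.

import argparse
import torch

# Single-process case: local_rank == -1, so the scalar passes through unchanged.
args = argparse.Namespace(local_rank=-1, device=torch.device("cpu"))
print(average_distributed_scalar(0.42, args))  # -> 0.42

# In a real distributed run (local_rank >= 0, torch.distributed initialized),
# each rank contributes scalar / world_size and all_reduce(SUM) yields the mean.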
bc25e14b9f700c7e5723c16de3410b4f7a5ca5dd3dacac2c56affd81d8700483
def top_filtering(logits, top_k=0, top_p=0.0, threshold=(- float('Inf')), filter_value=(- float('Inf'))): ' Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering\n Args:\n logits: logits distribution shape (..., vocabulary size)\n top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.\n top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset\n whose total probability mass is greater than or equal to the threshold top_p.\n In practice, we select the highest probability tokens whose cumulative probability mass exceeds\n the threshold top_p.\n threshold: a minimal threshold to keep logits\n ' top_k = min(top_k, logits.size((- 1))) if (top_k > 0): indices_to_remove = (logits < torch.topk(logits, top_k)[0][(..., (- 1), None)]) logits[indices_to_remove] = filter_value if (top_p > 0.0): (sorted_logits, sorted_indices) = torch.sort(logits, descending=True) cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=(- 1)), dim=(- 1)) sorted_indices_to_remove = (cumulative_probabilities > top_p) sorted_indices_to_remove[(..., 1:)] = sorted_indices_to_remove[(..., :(- 1))].clone() sorted_indices_to_remove[(..., 0)] = 0 indices_to_remove = sorted_indices[sorted_indices_to_remove] logits[indices_to_remove] = filter_value indices_to_remove = (logits < threshold) logits[indices_to_remove] = filter_value return logits
Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering Args: logits: logits distribution shape (..., vocabulary size) top_k: <=0: no filtering, >0: keep only top k tokens with highest probability. top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset whose total probability mass is greater than or equal to the threshold top_p. In practice, we select the highest probability tokens whose cumulative probability mass exceeds the threshold top_p. threshold: a minimal threshold to keep logits
utils/auxiliary.py
top_filtering
lxchtan/Dialogue-Generation
2
python
def top_filtering(logits, top_k=0, top_p=0.0, threshold=(- float('Inf')), filter_value=(- float('Inf'))): ' Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering\n Args:\n logits: logits distribution shape (..., vocabulary size)\n top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.\n top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset\n whose total probability mass is greater than or equal to the threshold top_p.\n In practice, we select the highest probability tokens whose cumulative probability mass exceeds\n the threshold top_p.\n threshold: a minimal threshold to keep logits\n ' top_k = min(top_k, logits.size((- 1))) if (top_k > 0): indices_to_remove = (logits < torch.topk(logits, top_k)[0][(..., (- 1), None)]) logits[indices_to_remove] = filter_value if (top_p > 0.0): (sorted_logits, sorted_indices) = torch.sort(logits, descending=True) cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=(- 1)), dim=(- 1)) sorted_indices_to_remove = (cumulative_probabilities > top_p) sorted_indices_to_remove[(..., 1:)] = sorted_indices_to_remove[(..., :(- 1))].clone() sorted_indices_to_remove[(..., 0)] = 0 indices_to_remove = sorted_indices[sorted_indices_to_remove] logits[indices_to_remove] = filter_value indices_to_remove = (logits < threshold) logits[indices_to_remove] = filter_value return logits
def top_filtering(logits, top_k=0, top_p=0.0, threshold=(- float('Inf')), filter_value=(- float('Inf'))): ' Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering\n Args:\n logits: logits distribution shape (..., vocabulary size)\n top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.\n top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset\n whose total probability mass is greater than or equal to the threshold top_p.\n In practice, we select the highest probability tokens whose cumulative probability mass exceeds\n the threshold top_p.\n threshold: a minimal threshold to keep logits\n ' top_k = min(top_k, logits.size((- 1))) if (top_k > 0): indices_to_remove = (logits < torch.topk(logits, top_k)[0][(..., (- 1), None)]) logits[indices_to_remove] = filter_value if (top_p > 0.0): (sorted_logits, sorted_indices) = torch.sort(logits, descending=True) cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=(- 1)), dim=(- 1)) sorted_indices_to_remove = (cumulative_probabilities > top_p) sorted_indices_to_remove[(..., 1:)] = sorted_indices_to_remove[(..., :(- 1))].clone() sorted_indices_to_remove[(..., 0)] = 0 indices_to_remove = sorted_indices[sorted_indices_to_remove] logits[indices_to_remove] = filter_value indices_to_remove = (logits < threshold) logits[indices_to_remove] = filter_value return logits<|docstring|>Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering Args: logits: logits distribution shape (..., vocabulary size) top_k: <=0: no filtering, >0: keep only top k tokens with highest probability. top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset whose total probability mass is greater than or equal to the threshold top_p. In practice, we select the highest probability tokens whose cumulative probability mass exceeds the threshold top_p. threshold: a minimal threshold to keep logits<|endoftext|>
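A sketch of sampling one next token through the filter, assuming a 1-D logits vector as in interactive generation; the toy logits are illustrative.

import torch
import torch.nn.functional as F

logits = torch.tensor([2.0, 1.0, 0.5, -1.0, -3.0])

# The function edits logits in place, so filter a copy.
filtered = top_filtering(logits.clone(), top_p=0.9)

# Tokens outside the top-p nucleus are now -inf and get zero probability.
probs = F.softmax(filtered, dim=-1)
next_token = torch.multinomial(probs, num_samples=1)
print(next_token.item())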
704a0ea42d94364e22110ab522330e0e9e73528dc501f9d7a6ef6f9153dde8f8
def grading_context_for_course(course): '\n Same as grading_context, but takes a course object and fetches its block structure from the cache.\n ' course_structure = get_course_in_cache(course.id) return grading_context(course, course_structure)
Same as grading_context, but takes a course object and fetches its block structure from the cache.
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/grades/context.py
grading_context_for_course
osoco/better-ways-of-thinking-about-software
3
python
def grading_context_for_course(course): '\n \n ' course_structure = get_course_in_cache(course.id) return grading_context(course, course_structure)
def grading_context_for_course(course): '\n \n ' course_structure = get_course_in_cache(course.id) return grading_context(course, course_structure)<|docstring|>Same as grading_context, but takes a course object and fetches its block structure from the cache.<|endoftext|>
c0c6bb3c33e7e1638611fd266b15921dbe658c5767f6ae673eda1eb39289eba7
def graded_subsections_for_course(course_structure): '\n Given a course block structure, yields the subsections of the course that are graded\n and visible to non-staff users.\n Args:\n course_structure: A course structure object.\n ' for chapter_key in course_structure.get_children(course_structure.root_block_usage_key): for subsection_key in course_structure.get_children(chapter_key): subsection = course_structure[subsection_key] if ((not _visible_to_staff_only(subsection)) and subsection.graded): (yield subsection)
Given a course block structure, yields the subsections of the course that are graded and visible to non-staff users. Args: course_structure: A course structure object.
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/grades/context.py
graded_subsections_for_course
osoco/better-ways-of-thinking-about-software
3
python
def graded_subsections_for_course(course_structure): '\n Given a course block structure, yields the subsections of the course that are graded\n and visible to non-staff users.\n Args:\n course_structure: A course structure object.\n ' for chapter_key in course_structure.get_children(course_structure.root_block_usage_key): for subsection_key in course_structure.get_children(chapter_key): subsection = course_structure[subsection_key] if ((not _visible_to_staff_only(subsection)) and subsection.graded): (yield subsection)
def graded_subsections_for_course(course_structure): '\n Given a course block structure, yields the subsections of the course that are graded\n and visible to non-staff users.\n Args:\n course_structure: A course structure object.\n ' for chapter_key in course_structure.get_children(course_structure.root_block_usage_key): for subsection_key in course_structure.get_children(chapter_key): subsection = course_structure[subsection_key] if ((not _visible_to_staff_only(subsection)) and subsection.graded): (yield subsection)<|docstring|>Given a course block structure, yields the subsections of the course that are graded and visible to non-staff users. Args: course_structure: A course structure object.<|endoftext|>
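A sketch of consuming the generator, assuming a course object with a valid .id and the block-structure cache used elsewhere in this module; the printed attributes follow the code above.

# `course` is assumed to be a course descriptor (illustrative).
course_structure = get_course_in_cache(course.id)
for subsection in graded_subsections_for_course(course_structure):
    # Each yielded block is graded and not staff-only.
    print(subsection.location, getattr(subsection, 'format', ''))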
b16f5e29cc2e918fa9c7955c34fcdb282b26a4407bc58b84e9804e66c4002f88
def grading_context(course, course_structure): '\n This returns a dictionary with keys necessary for quickly grading\n a student.\n\n The grading context has three keys:\n all_graded_subsections_by_type - This contains all subsections that are\n graded, keyed by subsection format (assignment type).\n\n The values are arrays of dictionaries containing\n "subsection_block" : The subsection block\n "scored_descendants" : An array of blocks that could\n possibly be scored in the subsection, for any student\n\n count_all_graded_blocks - The total number of potentially scored\n descendant blocks across all graded subsections.\n\n subsection_type_graders - The course graders keyed by subsection type,\n as returned by CourseGrade.get_subsection_type_graders.\n\n ' count_all_graded_blocks = 0 all_graded_subsections_by_type = OrderedDict() for subsection in graded_subsections_for_course(course_structure): scored_descendants_of_subsection = [] for descendant_key in course_structure.post_order_traversal(filter_func=possibly_scored, start_node=subsection.location): scored_descendants_of_subsection.append(course_structure[descendant_key]) subsection_info = {'subsection_block': subsection, 'scored_descendants': [child for child in scored_descendants_of_subsection if getattr(child, 'has_score', None)]} subsection_format = getattr(subsection, 'format', '') if (subsection_format not in all_graded_subsections_by_type): all_graded_subsections_by_type[subsection_format] = [] all_graded_subsections_by_type[subsection_format].append(subsection_info) count_all_graded_blocks += len(scored_descendants_of_subsection) return {'all_graded_subsections_by_type': all_graded_subsections_by_type, 'count_all_graded_blocks': count_all_graded_blocks, 'subsection_type_graders': CourseGrade.get_subsection_type_graders(course)}
This returns a dictionary with keys necessary for quickly grading a student. The grading context has three keys: all_graded_subsections_by_type - This contains all subsections that are graded, keyed by subsection format (assignment type). The values are arrays of dictionaries containing "subsection_block" : The subsection block "scored_descendants" : An array of blocks that could possibly be scored in the subsection, for any student count_all_graded_blocks - The total number of potentially scored descendant blocks across all graded subsections. subsection_type_graders - The course graders keyed by subsection type, as returned by CourseGrade.get_subsection_type_graders.
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/grades/context.py
grading_context
osoco/better-ways-of-thinking-about-software
3
python
def grading_context(course, course_structure): '\n This returns a dictionary with keys necessary for quickly grading\n a student.\n\n The grading context has three keys:\n all_graded_subsections_by_type - This contains all subsections that are\n graded, keyed by subsection format (assignment type).\n\n The values are arrays of dictionaries containing\n "subsection_block" : The subsection block\n "scored_descendants" : An array of blocks that could\n possibly be scored in the subsection, for any student\n\n count_all_graded_blocks - The total number of potentially scored\n descendant blocks across all graded subsections.\n\n subsection_type_graders - The course graders keyed by subsection type,\n as returned by CourseGrade.get_subsection_type_graders.\n\n ' count_all_graded_blocks = 0 all_graded_subsections_by_type = OrderedDict() for subsection in graded_subsections_for_course(course_structure): scored_descendants_of_subsection = [] for descendant_key in course_structure.post_order_traversal(filter_func=possibly_scored, start_node=subsection.location): scored_descendants_of_subsection.append(course_structure[descendant_key]) subsection_info = {'subsection_block': subsection, 'scored_descendants': [child for child in scored_descendants_of_subsection if getattr(child, 'has_score', None)]} subsection_format = getattr(subsection, 'format', '') if (subsection_format not in all_graded_subsections_by_type): all_graded_subsections_by_type[subsection_format] = [] all_graded_subsections_by_type[subsection_format].append(subsection_info) count_all_graded_blocks += len(scored_descendants_of_subsection) return {'all_graded_subsections_by_type': all_graded_subsections_by_type, 'count_all_graded_blocks': count_all_graded_blocks, 'subsection_type_graders': CourseGrade.get_subsection_type_graders(course)}
def grading_context(course, course_structure): '\n This returns a dictionary with keys necessary for quickly grading\n a student.\n\n The grading context has three keys:\n all_graded_subsections_by_type - This contains all subsections that are\n graded, keyed by subsection format (assignment type).\n\n The values are arrays of dictionaries containing\n "subsection_block" : The subsection block\n "scored_descendants" : An array of blocks that could\n possibly be scored in the subsection, for any student\n\n count_all_graded_blocks - The total number of potentially scored\n descendant blocks across all graded subsections.\n\n subsection_type_graders - The course graders keyed by subsection type,\n as returned by CourseGrade.get_subsection_type_graders.\n\n ' count_all_graded_blocks = 0 all_graded_subsections_by_type = OrderedDict() for subsection in graded_subsections_for_course(course_structure): scored_descendants_of_subsection = [] for descendant_key in course_structure.post_order_traversal(filter_func=possibly_scored, start_node=subsection.location): scored_descendants_of_subsection.append(course_structure[descendant_key]) subsection_info = {'subsection_block': subsection, 'scored_descendants': [child for child in scored_descendants_of_subsection if getattr(child, 'has_score', None)]} subsection_format = getattr(subsection, 'format', '') if (subsection_format not in all_graded_subsections_by_type): all_graded_subsections_by_type[subsection_format] = [] all_graded_subsections_by_type[subsection_format].append(subsection_info) count_all_graded_blocks += len(scored_descendants_of_subsection) return {'all_graded_subsections_by_type': all_graded_subsections_by_type, 'count_all_graded_blocks': count_all_graded_blocks, 'subsection_type_graders': CourseGrade.get_subsection_type_graders(course)}<|docstring|>This returns a dictionary with keys necessary for quickly grading a student. The grading context has three keys: all_graded_subsections_by_type - This contains all subsections that are graded, keyed by subsection format (assignment type). The values are arrays of dictionaries containing "subsection_block" : The subsection block "scored_descendants" : An array of blocks that could possibly be scored in the subsection, for any student count_all_graded_blocks - The total number of potentially scored descendant blocks across all graded subsections. subsection_type_graders - The course graders keyed by subsection type, as returned by CourseGrade.get_subsection_type_graders.<|endoftext|>
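A sketch of walking the returned grading context; the key names come straight from the return statement above, and the course object is assumed to be loaded elsewhere.

context = grading_context_for_course(course)
for assignment_type, subsections in context['all_graded_subsections_by_type'].items():
    for info in subsections:
        block = info['subsection_block']
        scored = info['scored_descendants']
        print(assignment_type, block.location, len(scored))
print('potentially scored blocks:', context['count_all_graded_blocks'])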
ced47cb260584a565ede41cecee2915c0999d6755d57c66128fcc8b8c409eb83
def _visible_to_staff_only(subsection): '\n Returns True if the given subsection is visible to staff only else False\n ' try: return subsection.transformer_data['visibility'].fields['merged_visible_to_staff_only'] except KeyError: return False
Returns True if the given subsection is visible to staff only else False
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/grades/context.py
_visible_to_staff_only
osoco/better-ways-of-thinking-about-software
3
python
def _visible_to_staff_only(subsection): '\n \n ' try: return subsection.transformer_data['visibility'].fields['merged_visible_to_staff_only'] except KeyError: return False
def _visible_to_staff_only(subsection): '\n \n ' try: return subsection.transformer_data['visibility'].fields['merged_visible_to_staff_only'] except KeyError: return False<|docstring|>Returns True if the given subsection is visible to staff only else False<|endoftext|>
06dbfec78c18f7ff0d20ac8f0d78145168de497fce14d87600999a76dc786f96
def display_errors_summary(build_errors: Dict[(str, List[DocBuildError])]) -> None: 'Displays summary of errors' print(('#' * 20), 'Docs build errors summary', ('#' * 20)) for (package_name, errors) in build_errors.items(): if package_name: print(('=' * 20), package_name, ('=' * 20)) else: print(('=' * 20), 'General', ('=' * 20)) for (warning_no, error) in enumerate(sorted(errors), 1): print(('-' * 20), f'Error {warning_no:3}', ('-' * 20)) print(error.message) print() if (error.file_path and (error.file_path != '<unknown>') and error.line_no): print(f'File path: {os.path.relpath(error.file_path, start=DOCS_DIR)} ({error.line_no})') print() print(prepare_code_snippet(error.file_path, error.line_no)) elif error.file_path: print(f'File path: {error.file_path}') print(('#' * 50))
Displays summary of errors
docs/exts/docs_build/errors.py
display_errors_summary
smowden/airflow
79
python
def display_errors_summary(build_errors: Dict[(str, List[DocBuildError])]) -> None: print(('#' * 20), 'Docs build errors summary', ('#' * 20)) for (package_name, errors) in build_errors.items(): if package_name: print(('=' * 20), package_name, ('=' * 20)) else: print(('=' * 20), 'General', ('=' * 20)) for (warning_no, error) in enumerate(sorted(errors), 1): print(('-' * 20), f'Error {warning_no:3}', ('-' * 20)) print(error.message) print() if (error.file_path and (error.file_path != '<unknown>') and error.line_no): print(f'File path: {os.path.relpath(error.file_path, start=DOCS_DIR)} ({error.line_no})') print() print(prepare_code_snippet(error.file_path, error.line_no)) elif error.file_path: print(f'File path: {error.file_path}') print(('#' * 50))
def display_errors_summary(build_errors: Dict[(str, List[DocBuildError])]) -> None: print(('#' * 20), 'Docs build errors summary', ('#' * 20)) for (package_name, errors) in build_errors.items(): if package_name: print(('=' * 20), package_name, ('=' * 20)) else: print(('=' * 20), 'General', ('=' * 20)) for (warning_no, error) in enumerate(sorted(errors), 1): print(('-' * 20), f'Error {warning_no:3}', ('-' * 20)) print(error.message) print() if (error.file_path and (error.file_path != '<unknown>') and error.line_no): print(f'File path: {os.path.relpath(error.file_path, start=DOCS_DIR)} ({error.line_no})') print() print(prepare_code_snippet(error.file_path, error.line_no)) elif error.file_path: print(f'File path: {error.file_path}') print(('#' * 50))<|docstring|>Displays summary of errors<|endoftext|>
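A sketch of driving the printer with a hand-built error map; DocBuildError is assumed to be this module's NamedTuple with file_path/line_no/message fields, and both entries are illustrative (line_no is left as None so no source file needs to exist).

build_errors = {
    'apache-airflow-providers-http': [
        DocBuildError(file_path='/opt/airflow/docs/index.rst', line_no=None,
                      message='unknown directive: "airflow-providers"'),
    ],
    # An empty package name lands under the "General" heading.
    '': [DocBuildError(file_path=None, line_no=None, message='sphinx-build exited with 2')],
}
display_errors_summary(build_errors)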
e47c88721b7175b04f954c6c20708675a7a2fb6ab9132b9e46851376cb768190
def parse_sphinx_warnings(warning_text: str, docs_dir: str) -> List[DocBuildError]: '\n Parses warnings from Sphinx.\n\n :param warning_text: warning to parse\n :return: list of DocBuildErrors.\n ' sphinx_build_errors = [] for sphinx_warning in warning_text.split('\n'): if (not sphinx_warning): continue warning_parts = sphinx_warning.split(':', 2) if (len(warning_parts) == 3): try: sphinx_build_errors.append(DocBuildError(file_path=os.path.join(docs_dir, warning_parts[0]), line_no=int(warning_parts[1]), message=warning_parts[2])) except Exception: sphinx_build_errors.append(DocBuildError(file_path=None, line_no=None, message=sphinx_warning)) else: sphinx_build_errors.append(DocBuildError(file_path=None, line_no=None, message=sphinx_warning)) return sphinx_build_errors
Parses warnings from Sphinx. :param warning_text: warning to parse :return: list of DocBuildErrors.
docs/exts/docs_build/errors.py
parse_sphinx_warnings
smowden/airflow
79
python
def parse_sphinx_warnings(warning_text: str, docs_dir: str) -> List[DocBuildError]: '\n Parses warnings from Sphinx.\n\n :param warning_text: warning to parse\n :return: list of DocBuildErrors.\n ' sphinx_build_errors = [] for sphinx_warning in warning_text.split('\n'): if (not sphinx_warning): continue warning_parts = sphinx_warning.split(':', 2) if (len(warning_parts) == 3): try: sphinx_build_errors.append(DocBuildError(file_path=os.path.join(docs_dir, warning_parts[0]), line_no=int(warning_parts[1]), message=warning_parts[2])) except Exception: sphinx_build_errors.append(DocBuildError(file_path=None, line_no=None, message=sphinx_warning)) else: sphinx_build_errors.append(DocBuildError(file_path=None, line_no=None, message=sphinx_warning)) return sphinx_build_errors
def parse_sphinx_warnings(warning_text: str, docs_dir: str) -> List[DocBuildError]: '\n Parses warnings from Sphinx.\n\n :param warning_text: warning to parse\n :return: list of DocBuildErrors.\n ' sphinx_build_errors = [] for sphinx_warning in warning_text.split('\n'): if (not sphinx_warning): continue warning_parts = sphinx_warning.split(':', 2) if (len(warning_parts) == 3): try: sphinx_build_errors.append(DocBuildError(file_path=os.path.join(docs_dir, warning_parts[0]), line_no=int(warning_parts[1]), message=warning_parts[2])) except Exception: sphinx_build_errors.append(DocBuildError(file_path=None, line_no=None, message=sphinx_warning)) else: sphinx_build_errors.append(DocBuildError(file_path=None, line_no=None, message=sphinx_warning)) return sphinx_build_errors<|docstring|>Parses warnings from Sphinx. :param warning_text: warning to parse :return: list of DocBuildErrors.<|endoftext|>
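A sketch parsing two representative warning lines, one well-formed and one malformed; the docs_dir value is an assumption.

warnings_text = (
    'index.rst:12:undefined label: foo\n'
    'malformed warning without a location'
)
for e in parse_sphinx_warnings(warnings_text, docs_dir='/opt/airflow/docs'):
    print(e.file_path, e.line_no, repr(e.message))
# -> /opt/airflow/docs/index.rst 12 'undefined label: foo'
# -> None None 'malformed warning without a location'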
852f5bdc74cf84a2a8f082761553ad036994f0a10d0221d8bdac8491532ea3e2
@pytest.yield_fixture(autouse=True, scope='session') def prevent_dialog_box(): 'Do not open dreaded dialog box on segfault on Windows' import ctypes SEM_NOGPFAULTERRORBOX = 2 old_err_mode = ctypes.windll.kernel32.GetErrorMode() new_err_mode = (old_err_mode | SEM_NOGPFAULTERRORBOX) ctypes.windll.kernel32.SetErrorMode(new_err_mode) (yield) ctypes.windll.kernel32.SetErrorMode(old_err_mode)
Do not open dreaded dialog box on segfault on Windows
pypy/module/cpyext/test/conftest.py
prevent_dialog_box
prg-titech/pypy
333
python
@pytest.yield_fixture(autouse=True, scope='session') def prevent_dialog_box(): import ctypes SEM_NOGPFAULTERRORBOX = 2 old_err_mode = ctypes.windll.kernel32.GetErrorMode() new_err_mode = (old_err_mode | SEM_NOGPFAULTERRORBOX) ctypes.windll.kernel32.SetErrorMode(new_err_mode) (yield) ctypes.windll.kernel32.SetErrorMode(old_err_mode)
@pytest.yield_fixture(autouse=True, scope='session') def prevent_dialog_box(): import ctypes SEM_NOGPFAULTERRORBOX = 2 old_err_mode = ctypes.windll.kernel32.GetErrorMode() new_err_mode = (old_err_mode | SEM_NOGPFAULTERRORBOX) ctypes.windll.kernel32.SetErrorMode(new_err_mode) (yield) ctypes.windll.kernel32.SetErrorMode(old_err_mode)<|docstring|>Do not open dreaded dialog box on segfault on Windows<|endoftext|>
7276e10f3b60b9e910326e1b6bacb006de4b03c085483dcc0895e1cba712083a
def do_um_synchro_task(u_id: str, um_socks) -> UmTask: '\n Start executing the Umeng synchronization task\n :param u_id:\n :param um_socks:\n :return:\n ' task: UmTask = UmTask(u_id=u_id, um_socks=um_socks) (key_master, key_slaves) = _get_um_key_config(u_id=u_id) if (not key_master): return None task.down_events(um_keys=([key_master] + list(key_slaves))) for um_key in key_slaves: task.synchro_um_data(um_key=um_key, um_key_master=key_master) return task
Start executing the Umeng synchronization task
:param u_id:
:param um_socks:
:return:
api/um/um_tasks.py
do_um_synchro_task
Samge0/UmengEventManage
0
python
def do_um_synchro_task(u_id: str, um_socks) -> UmTask: '\n Start executing the Umeng synchronization task\n :param u_id:\n :param um_socks:\n :return:\n ' task: UmTask = UmTask(u_id=u_id, um_socks=um_socks) (key_master, key_slaves) = _get_um_key_config(u_id=u_id) if (not key_master): return None task.down_events(um_keys=([key_master] + list(key_slaves))) for um_key in key_slaves: task.synchro_um_data(um_key=um_key, um_key_master=key_master) return task
def do_um_synchro_task(u_id: str, um_socks) -> UmTask: '\n Start executing the Umeng synchronization task\n :param u_id:\n :param um_socks:\n :return:\n ' task: UmTask = UmTask(u_id=u_id, um_socks=um_socks) (key_master, key_slaves) = _get_um_key_config(u_id=u_id) if (not key_master): return None task.down_events(um_keys=([key_master] + list(key_slaves))) for um_key in key_slaves: task.synchro_um_data(um_key=um_key, um_key_master=key_master) return task<|docstring|>Start executing the Umeng synchronization task
:param u_id:
:param um_socks:
:return:<|endoftext|>
b6243aad2a6de3221087249a1222accbdb45a014b66c1c4d1fb564aa29eda72e
def do_add_or_update_task(u_id: str, um_socks) -> UmTask: '\n Execute the task of adding/updating Umeng custom events\n :param u_id:\n :param um_socks:\n :return:\n ' task: UmTask = UmTask(u_id=u_id, um_socks=um_socks) (key_master, key_slaves) = _get_um_key_config(u_id=u_id) if (not key_master): return None task.down_events(um_keys=([key_master] + list(key_slaves))) task.add_or_update_event_by_file(um_key=key_master) task.update_local_db_events(um_key=key_master) return task
Execute the task of adding/updating Umeng custom events
:param u_id:
:param um_socks:
:return:
api/um/um_tasks.py
do_add_or_update_task
Samge0/UmengEventManage
0
python
def do_add_or_update_task(u_id: str, um_socks) -> UmTask: '\n Execute the task of adding/updating Umeng custom events\n :param u_id:\n :param um_socks:\n :return:\n ' task: UmTask = UmTask(u_id=u_id, um_socks=um_socks) (key_master, key_slaves) = _get_um_key_config(u_id=u_id) if (not key_master): return None task.down_events(um_keys=([key_master] + list(key_slaves))) task.add_or_update_event_by_file(um_key=key_master) task.update_local_db_events(um_key=key_master) return task
def do_add_or_update_task(u_id: str, um_socks) -> UmTask: '\n Execute the task of adding/updating Umeng custom events\n :param u_id:\n :param um_socks:\n :return:\n ' task: UmTask = UmTask(u_id=u_id, um_socks=um_socks) (key_master, key_slaves) = _get_um_key_config(u_id=u_id) if (not key_master): return None task.down_events(um_keys=([key_master] + list(key_slaves))) task.add_or_update_event_by_file(um_key=key_master) task.update_local_db_events(um_key=key_master) return task<|docstring|>Execute the task of adding/updating Umeng custom events
:param u_id:
:param um_socks:
:return:<|endoftext|>
a82a343b64909696ccdace76ab086bb8265aea3630b900ed6eee5ee880385a2e
def load_analysis_event_file(u_id: str, um_key: str, refresh: bool): '\n Get the list of all Umeng custom events (active & paused)\n :param u_id:\n :param um_key:\n :param refresh: whether the data needs to be re-fetched from the network\n :return:\n ' task: UmTask = UmTask(u_id=u_id, um_socks=None) need_refresh: bool = (refresh or (task.is_exists_pause(um_key=um_key) is False) or (task.is_exists_normal_analysis(um_key=um_key) is False)) if need_refresh: task.update_local_db_events(um_key=um_key)
Get the list of all Umeng custom events (active & paused)
:param u_id:
:param um_key:
:param refresh: whether the data needs to be re-fetched from the network
:return:
api/um/um_tasks.py
load_analysis_event_file
Samge0/UmengEventManage
0
python
def load_analysis_event_file(u_id: str, um_key: str, refresh: bool): '\n Get the list of all Umeng custom events (active & paused)\n :param u_id:\n :param um_key:\n :param refresh: whether the data needs to be re-fetched from the network\n :return:\n ' task: UmTask = UmTask(u_id=u_id, um_socks=None) need_refresh: bool = (refresh or (task.is_exists_pause(um_key=um_key) is False) or (task.is_exists_normal_analysis(um_key=um_key) is False)) if need_refresh: task.update_local_db_events(um_key=um_key)
def load_analysis_event_file(u_id: str, um_key: str, refresh: bool): '\n Get the list of all Umeng custom events (active & paused)\n :param u_id:\n :param um_key:\n :param refresh: whether the data needs to be re-fetched from the network\n :return:\n ' task: UmTask = UmTask(u_id=u_id, um_socks=None) need_refresh: bool = (refresh or (task.is_exists_pause(um_key=um_key) is False) or (task.is_exists_normal_analysis(um_key=um_key) is False)) if need_refresh: task.update_local_db_events(um_key=um_key)<|docstring|>Get the list of all Umeng custom events (active & paused)
:param u_id:
:param um_key:
:param refresh: whether the data needs to be re-fetched from the network
:return:<|endoftext|>
6d4a584ab9c38cd003f077ce839b5bd01e6d9f51b58cb6bb76a60f450e95c4a5
def _get_um_key_config(u_id: str) -> (str, list): '\n Get the Umeng key configuration info\n :param u_id:\n :return:\n ' values = (UserConfig.objects.filter(u_id=u_id).values() or []) if (len(values) == 0): return ('', []) uc_key_master: str = values[0].get('uc_key_master') uc_key_slaves: str = values[0].get('uc_key_slaves') if uc_key_slaves: return (uc_key_master, uc_key_slaves.split('|')) else: return (uc_key_master, [])
Get the Umeng key configuration info
:param u_id:
:return:
api/um/um_tasks.py
_get_um_key_config
Samge0/UmengEventManage
0
python
def _get_um_key_config(u_id: str) -> (str, list): '\n Get the Umeng key configuration info\n :param u_id:\n :return:\n ' values = (UserConfig.objects.filter(u_id=u_id).values() or []) if (len(values) == 0): return ('', []) uc_key_master: str = values[0].get('uc_key_master') uc_key_slaves: str = values[0].get('uc_key_slaves') if uc_key_slaves: return (uc_key_master, uc_key_slaves.split('|')) else: return (uc_key_master, [])
def _get_um_key_config(u_id: str) -> (str, list): '\n Get the Umeng key configuration info\n :param u_id:\n :return:\n ' values = (UserConfig.objects.filter(u_id=u_id).values() or []) if (len(values) == 0): return ('', []) uc_key_master: str = values[0].get('uc_key_master') uc_key_slaves: str = values[0].get('uc_key_slaves') if uc_key_slaves: return (uc_key_master, uc_key_slaves.split('|')) else: return (uc_key_master, [])<|docstring|>Get the Umeng key configuration info
:param u_id:
:return:<|endoftext|>
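A sketch of the helper's contract, assuming a stored UserConfig row with uc_key_master='MASTER_KEY' and uc_key_slaves='KEY_A|KEY_B'; with no matching row the helper returns ('', []).

key_master, key_slaves = _get_um_key_config(u_id='u-001')  # u_id is illustrative
if not key_master:
    print('no Umeng app key configured for this user')
else:
    # e.g. ('MASTER_KEY', ['KEY_A', 'KEY_B']) given the assumed row above
    print(key_master, key_slaves)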
cecda7bf3bd23cd24e2901e4ac31ec6c5e94892e830b9822f9c3ed7d55b1ddb9
def __init__(self, models, tgt_dict, beam_size=1, max_len_a=0, max_len_b=200, min_len=1, normalize_scores=True, len_penalty=1.0, unk_penalty=0.0, desired_length=(- 1), retain_dropout=False, temperature=1.0, match_source_len=False, no_repeat_ngram_size=0, search_strategy=None, eos=None): 'Generates translations of a given source sentence.\n\n Args:\n models (List[~fairseq.models.FairseqModel]): ensemble of models,\n currently support fairseq.models.TransformerModel for scripting\n beam_size (int, optional): beam width (default: 1)\n max_len_a/b (int, optional): generate sequences of maximum length\n ax + b, where x is the source length\n min_len (int, optional): the minimum length of the generated output\n (not including end-of-sentence)\n normalize_scores (bool, optional): normalize scores by the length\n of the output (default: True)\n len_penalty (float, optional): length penalty, where <1.0 favors\n shorter, >1.0 favors longer sentences (default: 1.0)\n unk_penalty (float, optional): unknown word penalty, where <0\n produces more unks, >0 produces fewer (default: 0.0)\n retain_dropout (bool, optional): use dropout when generating\n (default: False)\n temperature (float, optional): temperature, where values\n >1.0 produce more uniform samples and values <1.0 produce\n sharper samples (default: 1.0)\n match_source_len (bool, optional): outputs should match the source\n length (default: False)\n ' super().__init__() if isinstance(models, EnsembleModel): self.model = models else: self.model = EnsembleModel(models) self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = (tgt_dict.eos() if (eos is None) else eos) self.vocab_size = len(tgt_dict) self.beam_size = beam_size self.beam_size = min(beam_size, (self.vocab_size - 1)) self.max_len_a = max_len_a self.max_len_b = max_len_b self.min_len = min_len self.normalize_scores = normalize_scores self.len_penalty = len_penalty self.unk_penalty = unk_penalty self.desired_length = desired_length self.retain_dropout = retain_dropout self.temperature = temperature self.match_source_len = match_source_len self.no_repeat_ngram_size = no_repeat_ngram_size assert (temperature > 0), '--temperature must be greater than 0' self.search = (search.BeamSearch(tgt_dict) if (search_strategy is None) else search_strategy) if (not self.retain_dropout): self.model.eval()
Generates translations of a given source sentence. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models, currently support fairseq.models.TransformerModel for scripting beam_size (int, optional): beam width (default: 1) max_len_a/b (int, optional): generate sequences of maximum length ax + b, where x is the source length min_len (int, optional): the minimum length of the generated output (not including end-of-sentence) normalize_scores (bool, optional): normalize scores by the length of the output (default: True) len_penalty (float, optional): length penalty, where <1.0 favors shorter, >1.0 favors longer sentences (default: 1.0) unk_penalty (float, optional): unknown word penalty, where <0 produces more unks, >0 produces fewer (default: 0.0) retain_dropout (bool, optional): use dropout when generating (default: False) temperature (float, optional): temperature, where values >1.0 produce more uniform samples and values <1.0 produce sharper samples (default: 1.0) match_source_len (bool, optional): outputs should match the source length (default: False)
fairseq/sequence_generator.py
__init__
takase/alone_seq2seq
25
python
def __init__(self, models, tgt_dict, beam_size=1, max_len_a=0, max_len_b=200, min_len=1, normalize_scores=True, len_penalty=1.0, unk_penalty=0.0, desired_length=(- 1), retain_dropout=False, temperature=1.0, match_source_len=False, no_repeat_ngram_size=0, search_strategy=None, eos=None): 'Generates translations of a given source sentence.\n\n Args:\n models (List[~fairseq.models.FairseqModel]): ensemble of models,\n currently support fairseq.models.TransformerModel for scripting\n beam_size (int, optional): beam width (default: 1)\n max_len_a/b (int, optional): generate sequences of maximum length\n ax + b, where x is the source length\n min_len (int, optional): the minimum length of the generated output\n (not including end-of-sentence)\n normalize_scores (bool, optional): normalize scores by the length\n of the output (default: True)\n len_penalty (float, optional): length penalty, where <1.0 favors\n shorter, >1.0 favors longer sentences (default: 1.0)\n unk_penalty (float, optional): unknown word penalty, where <0\n produces more unks, >0 produces fewer (default: 0.0)\n retain_dropout (bool, optional): use dropout when generating\n (default: False)\n temperature (float, optional): temperature, where values\n >1.0 produce more uniform samples and values <1.0 produce\n sharper samples (default: 1.0)\n match_source_len (bool, optional): outputs should match the source\n length (default: False)\n ' super().__init__() if isinstance(models, EnsembleModel): self.model = models else: self.model = EnsembleModel(models) self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = (tgt_dict.eos() if (eos is None) else eos) self.vocab_size = len(tgt_dict) self.beam_size = beam_size self.beam_size = min(beam_size, (self.vocab_size - 1)) self.max_len_a = max_len_a self.max_len_b = max_len_b self.min_len = min_len self.normalize_scores = normalize_scores self.len_penalty = len_penalty self.unk_penalty = unk_penalty self.desired_length = desired_length self.retain_dropout = retain_dropout self.temperature = temperature self.match_source_len = match_source_len self.no_repeat_ngram_size = no_repeat_ngram_size assert (temperature > 0), '--temperature must be greater than 0' self.search = (search.BeamSearch(tgt_dict) if (search_strategy is None) else search_strategy) if (not self.retain_dropout): self.model.eval()
def __init__(self, models, tgt_dict, beam_size=1, max_len_a=0, max_len_b=200, min_len=1, normalize_scores=True, len_penalty=1.0, unk_penalty=0.0, desired_length=(- 1), retain_dropout=False, temperature=1.0, match_source_len=False, no_repeat_ngram_size=0, search_strategy=None, eos=None): 'Generates translations of a given source sentence.\n\n Args:\n models (List[~fairseq.models.FairseqModel]): ensemble of models,\n currently support fairseq.models.TransformerModel for scripting\n beam_size (int, optional): beam width (default: 1)\n max_len_a/b (int, optional): generate sequences of maximum length\n ax + b, where x is the source length\n min_len (int, optional): the minimum length of the generated output\n (not including end-of-sentence)\n normalize_scores (bool, optional): normalize scores by the length\n of the output (default: True)\n len_penalty (float, optional): length penalty, where <1.0 favors\n shorter, >1.0 favors longer sentences (default: 1.0)\n unk_penalty (float, optional): unknown word penalty, where <0\n produces more unks, >0 produces fewer (default: 0.0)\n retain_dropout (bool, optional): use dropout when generating\n (default: False)\n temperature (float, optional): temperature, where values\n >1.0 produce more uniform samples and values <1.0 produce\n sharper samples (default: 1.0)\n match_source_len (bool, optional): outputs should match the source\n length (default: False)\n ' super().__init__() if isinstance(models, EnsembleModel): self.model = models else: self.model = EnsembleModel(models) self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = (tgt_dict.eos() if (eos is None) else eos) self.vocab_size = len(tgt_dict) self.beam_size = beam_size self.beam_size = min(beam_size, (self.vocab_size - 1)) self.max_len_a = max_len_a self.max_len_b = max_len_b self.min_len = min_len self.normalize_scores = normalize_scores self.len_penalty = len_penalty self.unk_penalty = unk_penalty self.desired_length = desired_length self.retain_dropout = retain_dropout self.temperature = temperature self.match_source_len = match_source_len self.no_repeat_ngram_size = no_repeat_ngram_size assert (temperature > 0), '--temperature must be greater than 0' self.search = (search.BeamSearch(tgt_dict) if (search_strategy is None) else search_strategy) if (not self.retain_dropout): self.model.eval()<|docstring|>Generates translations of a given source sentence. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models, currently support fairseq.models.TransformerModel for scripting beam_size (int, optional): beam width (default: 1) max_len_a/b (int, optional): generate sequences of maximum length ax + b, where x is the source length min_len (int, optional): the minimum length of the generated output (not including end-of-sentence) normalize_scores (bool, optional): normalize scores by the length of the output (default: True) len_penalty (float, optional): length penalty, where <1.0 favors shorter, >1.0 favors longer sentences (default: 1.0) unk_penalty (float, optional): unknown word penalty, where <0 produces more unks, >0 produces fewer (default: 0.0) retain_dropout (bool, optional): use dropout when generating (default: False) temperature (float, optional): temperature, where values >1.0 produce more uniform samples and values <1.0 produce sharper samples (default: 1.0) match_source_len (bool, optional): outputs should match the source length (default: False)<|endoftext|>
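A construction sketch; model, tgt_dict, src_tokens, and src_lengths are assumed to come from a trained fairseq task and checkpoint, and the hypothesis layout (a list of hypothesis dicts per sentence) follows the usual fairseq convention.

generator = SequenceGenerator(
    [model],          # a list of models is wrapped into an EnsembleModel internally
    tgt_dict,
    beam_size=5,
    max_len_b=200,
    len_penalty=1.0,
)
sample = {'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths}}
hypos = generator(sample)       # invokes forward(); one hypothesis list per sentence
best = hypos[0][0]['tokens']    # top-scoring hypothesis for the first sentence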
74c150b01ba597ebb4fcad69386626606f8ce7f4ae20bcacc8759b8f0d6cd521
@torch.no_grad() def forward(self, sample: Dict[(str, Dict[(str, Tensor)])], prefix_tokens: Optional[Tensor]=None, bos_token: Optional[int]=None): 'Generate a batch of translations.\n\n Args:\n sample (dict): batch\n prefix_tokens (torch.LongTensor, optional): force decoder to begin\n with these tokens\n bos_token (int, optional): beginning of sentence token\n (default: self.eos)\n ' self.model.reset_incremental_state() return self._generate(sample, prefix_tokens, bos_token)
Generate a batch of translations. Args: sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens bos_token (int, optional): beginning of sentence token (default: self.eos)
fairseq/sequence_generator.py
forward
takase/alone_seq2seq
25
python
@torch.no_grad() def forward(self, sample: Dict[(str, Dict[(str, Tensor)])], prefix_tokens: Optional[Tensor]=None, bos_token: Optional[int]=None): 'Generate a batch of translations.\n\n Args:\n sample (dict): batch\n prefix_tokens (torch.LongTensor, optional): force decoder to begin\n with these tokens\n bos_token (int, optional): beginning of sentence token\n (default: self.eos)\n ' self.model.reset_incremental_state() return self._generate(sample, prefix_tokens, bos_token)
@torch.no_grad() def forward(self, sample: Dict[(str, Dict[(str, Tensor)])], prefix_tokens: Optional[Tensor]=None, bos_token: Optional[int]=None): 'Generate a batch of translations.\n\n Args:\n sample (dict): batch\n prefix_tokens (torch.LongTensor, optional): force decoder to begin\n with these tokens\n bos_token (int, optional): beginning of sentence token\n (default: self.eos)\n ' self.model.reset_incremental_state() return self._generate(sample, prefix_tokens, bos_token)<|docstring|>Generate a batch of translations. Args: sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens bos_token (int, optional): beginning of sentence token (default: self.eos)<|endoftext|>
d7b4b7081ddd571a763f203d593131bdfdaeefd5c6af014d003361ff0bb5dede
def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None): 'Iterate over a batched dataset and yield individual translations.\n Args:\n cuda (bool, optional): use GPU for generation\n timer (StopwatchMeter, optional): time generations\n ' for sample in data_itr: s = (utils.move_to_cuda(sample) if cuda else sample) if ('net_input' not in s): continue input = s['net_input'] encoder_input = {k: v for (k, v) in input.items() if (k != 'prev_output_tokens')} if (timer is not None): timer.start() with torch.no_grad(): hypos = self.generate(encoder_input) if (timer is not None): timer.stop(sum((len(h[0]['tokens']) for h in hypos))) for (i, id) in enumerate(s['id'].data): src = utils.strip_pad(input['src_tokens'].data[(i, :)], self.pad) ref = (utils.strip_pad(s['target'].data[(i, :)], self.pad) if (s['target'] is not None) else None) (yield (id, src, ref, hypos[i]))
Iterate over a batched dataset and yield individual translations. Args: cuda (bool, optional): use GPU for generation timer (StopwatchMeter, optional): time generations
fairseq/sequence_generator.py
generate_batched_itr
takase/alone_seq2seq
25
python
def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None): 'Iterate over a batched dataset and yield individual translations.\n Args:\n cuda (bool, optional): use GPU for generation\n timer (StopwatchMeter, optional): time generations\n ' for sample in data_itr: s = (utils.move_to_cuda(sample) if cuda else sample) if ('net_input' not in s): continue input = s['net_input'] encoder_input = {k: v for (k, v) in input.items() if (k != 'prev_output_tokens')} if (timer is not None): timer.start() with torch.no_grad(): hypos = self.generate(encoder_input) if (timer is not None): timer.stop(sum((len(h[0]['tokens']) for h in hypos))) for (i, id) in enumerate(s['id'].data): src = utils.strip_pad(input['src_tokens'].data[(i, :)], self.pad) ref = (utils.strip_pad(s['target'].data[(i, :)], self.pad) if (s['target'] is not None) else None) (yield (id, src, ref, hypos[i]))
def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None): 'Iterate over a batched dataset and yield individual translations.\n Args:\n cuda (bool, optional): use GPU for generation\n timer (StopwatchMeter, optional): time generations\n ' for sample in data_itr: s = (utils.move_to_cuda(sample) if cuda else sample) if ('net_input' not in s): continue input = s['net_input'] encoder_input = {k: v for (k, v) in input.items() if (k != 'prev_output_tokens')} if (timer is not None): timer.start() with torch.no_grad(): hypos = self.generate(encoder_input) if (timer is not None): timer.stop(sum((len(h[0]['tokens']) for h in hypos))) for (i, id) in enumerate(s['id'].data): src = utils.strip_pad(input['src_tokens'].data[(i, :)], self.pad) ref = (utils.strip_pad(s['target'].data[(i, :)], self.pad) if (s['target'] is not None) else None) (yield (id, src, ref, hypos[i]))<|docstring|>Iterate over a batched dataset and yield individual translations. Args: cuda (bool, optional): use GPU for generation timer (StopwatchMeter, optional): time generations<|endoftext|>
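A sketch of iterating a batched epoch with the method above; itr and tgt_dict are assumed to come from a loaded fairseq task and dataset, and best-first hypothesis ordering is assumed.

for sample_id, src_tokens, ref_tokens, hypos in generator.generate_batched_itr(itr, cuda=True):
    best = hypos[0]  # hypotheses for this sentence, best first (assumed ordering)
    print(int(sample_id), tgt_dict.string(best['tokens']))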