body (string, 26 to 98.2k chars) | body_hash (int64) | docstring (string, 1 to 16.8k chars) | path (string, 5 to 230 chars) | name (string, 1 to 96 chars) | repository_name (string, 7 to 89 chars) | lang (string, 1 class: python) | body_without_docstring (string, 20 to 98.2k chars)
---|---|---|---|---|---|---|---|
@property
def device_state_attributes(self) -> (Mapping[(str, Any)] | None):
'Return entity specific state attributes.\n\n This method is deprecated, platform classes should implement\n extra_state_attributes instead.\n '
return None | -1,323,270,001,650,424,000 | Return entity specific state attributes.
This method is deprecated, platform classes should implement
extra_state_attributes instead. | homeassistant/helpers/entity.py | device_state_attributes | algra4/core | python | @property
def device_state_attributes(self) -> (Mapping[(str, Any)] | None):
'Return entity specific state attributes.\n\n This method is deprecated, platform classes should implement\n extra_state_attributes instead.\n '
return None |
@property
def extra_state_attributes(self) -> (Mapping[(str, Any)] | None):
'Return entity specific state attributes.\n\n Implemented by platform classes. Convention for attribute names\n is lowercase snake_case.\n '
if hasattr(self, '_attr_extra_state_attributes'):
return self._attr_extra_state_attributes
return None | 7,074,571,657,564,478,000 | Return entity specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case. | homeassistant/helpers/entity.py | extra_state_attributes | algra4/core | python | @property
def extra_state_attributes(self) -> (Mapping[(str, Any)] | None):
'Return entity specific state attributes.\n\n Implemented by platform classes. Convention for attribute names\n is lowercase snake_case.\n '
if hasattr(self, '_attr_extra_state_attributes'):
return self._attr_extra_state_attributes
return None |
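The first two rows show Home Assistant's `_attr_*` shortcut convention: a platform class can either override a property or simply assign the matching `_attr_` instance attribute. A minimal standalone sketch of that lookup, with no Home Assistant imports (`SketchEntity` and `MySensor` are illustrative names, not real classes):

```python
from __future__ import annotations

from collections.abc import Mapping
from typing import Any


class SketchEntity:
    """Illustrative stand-in for homeassistant.helpers.entity.Entity."""

    @property
    def extra_state_attributes(self) -> Mapping[str, Any] | None:
        # Fall back to the _attr_ shortcut when the subclass does not
        # override this property.
        if hasattr(self, "_attr_extra_state_attributes"):
            return self._attr_extra_state_attributes
        return None


class MySensor(SketchEntity):
    def __init__(self) -> None:
        # Assigning the shortcut attribute is enough; no override needed.
        self._attr_extra_state_attributes = {"battery_level": 85}


print(MySensor().extra_state_attributes)  # {'battery_level': 85}
```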
@property
def device_info(self) -> (DeviceInfo | None):
'Return device specific attributes.\n\n Implemented by platform classes.\n '
return self._attr_device_info | -131,480,113,962,214,270 | Return device specific attributes.
Implemented by platform classes. | homeassistant/helpers/entity.py | device_info | algra4/core | python | @property
def device_info(self) -> (DeviceInfo | None):
'Return device specific attributes.\n\n Implemented by platform classes.\n '
return self._attr_device_info |
@property
def device_class(self) -> (str | None):
'Return the class of this device, from component DEVICE_CLASSES.'
if hasattr(self, '_attr_device_class'):
return self._attr_device_class
if hasattr(self, 'entity_description'):
return self.entity_description.device_class
return None | -5,219,341,481,153,056,000 | Return the class of this device, from component DEVICE_CLASSES. | homeassistant/helpers/entity.py | device_class | algra4/core | python | @property
def device_class(self) -> (str | None):
if hasattr(self, '_attr_device_class'):
return self._attr_device_class
if hasattr(self, 'entity_description'):
return self.entity_description.device_class
return None |
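`device_class` adds a second fallback level: the `_attr_` shortcut wins, then the `entity_description` object, then `None`. The same three-step resolution recurs in `unit_of_measurement`, `icon`, `force_update`, and the registry-default properties below. A hedged sketch of the pattern, with a toy description class standing in for Home Assistant's `EntityDescription`:

```python
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class SketchEntityDescription:
    """Simplified stand-in for Home Assistant's EntityDescription."""

    key: str
    device_class: str | None = None


class SketchEntity:
    @property
    def device_class(self) -> str | None:
        # Resolution order: instance shortcut, then description, then None.
        if hasattr(self, "_attr_device_class"):
            return self._attr_device_class
        if hasattr(self, "entity_description"):
            return self.entity_description.device_class
        return None


e = SketchEntity()
e.entity_description = SketchEntityDescription(key="t", device_class="temperature")
print(e.device_class)             # temperature (from the description)
e._attr_device_class = "humidity"
print(e.device_class)             # humidity (the shortcut wins)
```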
@property
def unit_of_measurement(self) -> (str | None):
'Return the unit of measurement of this entity, if any.'
if hasattr(self, '_attr_unit_of_measurement'):
return self._attr_unit_of_measurement
if hasattr(self, 'entity_description'):
return self.entity_description.unit_of_measurement
return None | -3,540,950,534,694,055,000 | Return the unit of measurement of this entity, if any. | homeassistant/helpers/entity.py | unit_of_measurement | algra4/core | python | @property
def unit_of_measurement(self) -> (str | None):
if hasattr(self, '_attr_unit_of_measurement'):
return self._attr_unit_of_measurement
if hasattr(self, 'entity_description'):
return self.entity_description.unit_of_measurement
return None |
@property
def icon(self) -> (str | None):
'Return the icon to use in the frontend, if any.'
if hasattr(self, '_attr_icon'):
return self._attr_icon
if hasattr(self, 'entity_description'):
return self.entity_description.icon
return None | -3,294,291,017,478,937,600 | Return the icon to use in the frontend, if any. | homeassistant/helpers/entity.py | icon | algra4/core | python | @property
def icon(self) -> (str | None):
if hasattr(self, '_attr_icon'):
return self._attr_icon
if hasattr(self, 'entity_description'):
return self.entity_description.icon
return None |
@property
def entity_picture(self) -> (str | None):
'Return the entity picture to use in the frontend, if any.'
return self._attr_entity_picture | -7,043,696,854,560,105,000 | Return the entity picture to use in the frontend, if any. | homeassistant/helpers/entity.py | entity_picture | algra4/core | python | @property
def entity_picture(self) -> (str | None):
return self._attr_entity_picture |
@property
def available(self) -> bool:
'Return True if entity is available.'
return self._attr_available | -1,685,129,627,032,272,600 | Return True if entity is available. | homeassistant/helpers/entity.py | available | algra4/core | python | @property
def available(self) -> bool:
return self._attr_available |
@property
def assumed_state(self) -> bool:
'Return True if unable to access real state of the entity.'
return self._attr_assumed_state | 7,459,250,895,532,809,000 | Return True if unable to access real state of the entity. | homeassistant/helpers/entity.py | assumed_state | algra4/core | python | @property
def assumed_state(self) -> bool:
return self._attr_assumed_state |
@property
def force_update(self) -> bool:
'Return True if state updates should be forced.\n\n If True, a state change will be triggered anytime the state property is\n updated, not just when the value changes.\n '
if hasattr(self, '_attr_force_update'):
return self._attr_force_update
if hasattr(self, 'entity_description'):
return self.entity_description.force_update
return False | 5,255,442,537,079,365,000 | Return True if state updates should be forced.
If True, a state change will be triggered anytime the state property is
updated, not just when the value changes. | homeassistant/helpers/entity.py | force_update | algra4/core | python | @property
def force_update(self) -> bool:
'Return True if state updates should be forced.\n\n If True, a state change will be triggered anytime the state property is\n updated, not just when the value changes.\n '
if hasattr(self, '_attr_force_update'):
return self._attr_force_update
if hasattr(self, 'entity_description'):
return self.entity_description.force_update
return False |
@property
def supported_features(self) -> (int | None):
'Flag supported features.'
return self._attr_supported_features | 5,283,299,116,938,334,000 | Flag supported features. | homeassistant/helpers/entity.py | supported_features | algra4/core | python | @property
def supported_features(self) -> (int | None):
return self._attr_supported_features |
@property
def context_recent_time(self) -> timedelta:
'Time that a context is considered recent.'
return self._attr_context_recent_time | -5,792,920,726,120,755,000 | Time that a context is considered recent. | homeassistant/helpers/entity.py | context_recent_time | algra4/core | python | @property
def context_recent_time(self) -> timedelta:
return self._attr_context_recent_time |
@property
def entity_registry_enabled_default(self) -> bool:
'Return if the entity should be enabled when first added to the entity registry.'
if hasattr(self, '_attr_entity_registry_enabled_default'):
return self._attr_entity_registry_enabled_default
if hasattr(self, 'entity_description'):
return self.entity_description.entity_registry_enabled_default
return True | 7,500,122,496,969,868,000 | Return if the entity should be enabled when first added to the entity registry. | homeassistant/helpers/entity.py | entity_registry_enabled_default | algra4/core | python | @property
def entity_registry_enabled_default(self) -> bool:
if hasattr(self, '_attr_entity_registry_enabled_default'):
return self._attr_entity_registry_enabled_default
if hasattr(self, 'entity_description'):
return self.entity_description.entity_registry_enabled_default
return True |
@property
def entity_registry_visible_default(self) -> bool:
'Return if the entity should be visible when first added to the entity registry.'
if hasattr(self, '_attr_entity_registry_visible_default'):
return self._attr_entity_registry_visible_default
if hasattr(self, 'entity_description'):
return self.entity_description.entity_registry_visible_default
return True | 557,998,721,559,602,300 | Return if the entity should be visible when first added to the entity registry. | homeassistant/helpers/entity.py | entity_registry_visible_default | algra4/core | python | @property
def entity_registry_visible_default(self) -> bool:
if hasattr(self, '_attr_entity_registry_visible_default'):
return self._attr_entity_registry_visible_default
if hasattr(self, 'entity_description'):
return self.entity_description.entity_registry_visible_default
return True |
@property
def attribution(self) -> (str | None):
'Return the attribution.'
return self._attr_attribution | 6,895,732,983,763,131,000 | Return the attribution. | homeassistant/helpers/entity.py | attribution | algra4/core | python | @property
def attribution(self) -> (str | None):
return self._attr_attribution |
@property
def entity_category(self) -> (EntityCategory | None):
'Return the category of the entity, if any.'
if hasattr(self, '_attr_entity_category'):
return self._attr_entity_category
if hasattr(self, 'entity_description'):
return self.entity_description.entity_category
return None | -5,674,864,290,690,756,000 | Return the category of the entity, if any. | homeassistant/helpers/entity.py | entity_category | algra4/core | python | @property
def entity_category(self) -> (EntityCategory | None):
if hasattr(self, '_attr_entity_category'):
return self._attr_entity_category
if hasattr(self, 'entity_description'):
return self.entity_description.entity_category
return None |
@property
def enabled(self) -> bool:
'Return if the entity is enabled in the entity registry.\n\n If an entity is not part of the registry, it cannot be disabled\n and will therefore always be enabled.\n '
return ((self.registry_entry is None) or (not self.registry_entry.disabled)) | -4,212,866,319,016,292,000 | Return if the entity is enabled in the entity registry.
If an entity is not part of the registry, it cannot be disabled
and will therefore always be enabled. | homeassistant/helpers/entity.py | enabled | algra4/core | python | @property
def enabled(self) -> bool:
'Return if the entity is enabled in the entity registry.\n\n If an entity is not part of the registry, it cannot be disabled\n and will therefore always be enabled.\n '
return ((self.registry_entry is None) or (not self.registry_entry.disabled)) |
@callback
def async_set_context(self, context: Context) -> None:
'Set the context the entity currently operates under.'
self._context = context
self._context_set = dt_util.utcnow() | 7,498,294,261,140,403,000 | Set the context the entity currently operates under. | homeassistant/helpers/entity.py | async_set_context | algra4/core | python | @callback
def async_set_context(self, context: Context) -> None:
self._context = context
self._context_set = dt_util.utcnow() |
async def async_update_ha_state(self, force_refresh: bool=False) -> None:
'Update Home Assistant with current state of entity.\n\n If force_refresh == True will update entity before setting state.\n\n This method must be run in the event loop.\n '
if (self.hass is None):
raise RuntimeError(f'Attribute hass is None for {self}')
if (self.entity_id is None):
raise NoEntitySpecifiedError(f'No entity id specified for entity {self.name}')
if force_refresh:
try:
(await self.async_device_update())
except Exception:
_LOGGER.exception('Update for %s fails', self.entity_id)
return
self._async_write_ha_state() | -2,629,200,371,007,674,000 | Update Home Assistant with current state of entity.
If force_refresh == True will update entity before setting state.
This method must be run in the event loop. | homeassistant/helpers/entity.py | async_update_ha_state | algra4/core | python | async def async_update_ha_state(self, force_refresh: bool=False) -> None:
'Update Home Assistant with current state of entity.\n\n If force_refresh == True will update entity before setting state.\n\n This method must be run in the event loop.\n '
if (self.hass is None):
raise RuntimeError(f'Attribute hass is None for {self}')
if (self.entity_id is None):
raise NoEntitySpecifiedError(f'No entity id specified for entity {self.name}')
if force_refresh:
try:
(await self.async_device_update())
except Exception:
_LOGGER.exception('Update for %s fails', self.entity_id)
return
self._async_write_ha_state() |
@callback
def async_write_ha_state(self) -> None:
'Write the state to the state machine.'
if (self.hass is None):
raise RuntimeError(f'Attribute hass is None for {self}')
if (self.entity_id is None):
raise NoEntitySpecifiedError(f'No entity id specified for entity {self.name}')
self._async_write_ha_state() | -7,814,871,863,252,089,000 | Write the state to the state machine. | homeassistant/helpers/entity.py | async_write_ha_state | algra4/core | python | @callback
def async_write_ha_state(self) -> None:
if (self.hass is None):
raise RuntimeError(f'Attribute hass is None for {self}')
if (self.entity_id is None):
raise NoEntitySpecifiedError(f'No entity id specified for entity {self.name}')
self._async_write_ha_state() |
def _stringify_state(self, available: bool) -> str:
'Convert state to string.'
if (not available):
return STATE_UNAVAILABLE
if ((state := self.state) is None):
return STATE_UNKNOWN
if isinstance(state, float):
return f'{state:.{FLOAT_PRECISION}}'
return str(state) | -8,878,403,925,894,521,000 | Convert state to string. | homeassistant/helpers/entity.py | _stringify_state | algra4/core | python | def _stringify_state(self, available: bool) -> str:
if (not available):
return STATE_UNAVAILABLE
if ((state := self.state) is None):
return STATE_UNKNOWN
if isinstance(state, float):
return f'{state:.{FLOAT_PRECISION}}'
return str(state) |
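`_stringify_state` folds availability, unknown values, and float formatting into one string. Note that `f'{state:.{FLOAT_PRECISION}}'` uses the general format spec, so it keeps significant digits rather than decimal places. A standalone sketch; the constants are assumptions that mirror `homeassistant.const`, and the real `FLOAT_PRECISION` is computed elsewhere in the module:

```python
STATE_UNAVAILABLE = "unavailable"  # assumed to mirror homeassistant.const
STATE_UNKNOWN = "unknown"
FLOAT_PRECISION = 6                # demo value only


def stringify_state(state, available: bool) -> str:
    if not available:
        return STATE_UNAVAILABLE
    if state is None:
        return STATE_UNKNOWN
    if isinstance(state, float):
        # '{:.6}' means 6 significant digits, not 6 digits after the point.
        return f"{state:.{FLOAT_PRECISION}}"
    return str(state)


print(stringify_state(3.14159265, True))  # 3.14159
print(stringify_state(None, True))        # unknown
print(stringify_state(21.5, False))       # unavailable
```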
@callback
def _async_write_ha_state(self) -> None:
'Write the state to the state machine.'
if (self._platform_state == EntityPlatformState.REMOVED):
return
if (self.registry_entry and self.registry_entry.disabled_by):
if (not self._disabled_reported):
self._disabled_reported = True
assert (self.platform is not None)
_LOGGER.warning('Entity %s is incorrectly being triggered for updates while it is disabled. This is a bug in the %s integration', self.entity_id, self.platform.platform_name)
return
start = timer()
attr = self.capability_attributes
attr = (dict(attr) if attr else {})
available = self.available
state = self._stringify_state(available)
if available:
attr.update((self.state_attributes or {}))
attr.update((self.extra_state_attributes or {}))
if ((unit_of_measurement := self.unit_of_measurement) is not None):
attr[ATTR_UNIT_OF_MEASUREMENT] = unit_of_measurement
entry = self.registry_entry
if (assumed_state := self.assumed_state):
attr[ATTR_ASSUMED_STATE] = assumed_state
if ((attribution := self.attribution) is not None):
attr[ATTR_ATTRIBUTION] = attribution
if ((device_class := ((entry and entry.device_class) or self.device_class)) is not None):
attr[ATTR_DEVICE_CLASS] = str(device_class)
if ((entity_picture := self.entity_picture) is not None):
attr[ATTR_ENTITY_PICTURE] = entity_picture
if ((icon := ((entry and entry.icon) or self.icon)) is not None):
attr[ATTR_ICON] = icon
if ((name := ((entry and entry.name) or self.name)) is not None):
attr[ATTR_FRIENDLY_NAME] = name
if ((supported_features := self.supported_features) is not None):
attr[ATTR_SUPPORTED_FEATURES] = supported_features
end = timer()
if (((end - start) > 0.4) and (not self._slow_reported)):
self._slow_reported = True
report_issue = self._suggest_report_issue()
_LOGGER.warning('Updating state for %s (%s) took %.3f seconds. Please %s', self.entity_id, type(self), (end - start), report_issue)
if (DATA_CUSTOMIZE in self.hass.data):
attr.update(self.hass.data[DATA_CUSTOMIZE].get(self.entity_id))
def _convert_temperature(state: str, attr: dict) -> str:
from homeassistant.components.sensor import SensorEntity
unit_of_measure = attr.get(ATTR_UNIT_OF_MEASUREMENT)
units = self.hass.config.units
if ((unit_of_measure == units.temperature_unit) or (unit_of_measure not in (TEMP_CELSIUS, TEMP_FAHRENHEIT))):
return state
domain = split_entity_id(self.entity_id)[0]
if (domain != 'sensor'):
if (not self._temperature_reported):
self._temperature_reported = True
report_issue = self._suggest_report_issue()
_LOGGER.warning('Entity %s (%s) relies on automatic temperature conversion, this will be unsupported in Home Assistant Core 2022.7. Please %s', self.entity_id, type(self), report_issue)
elif (not isinstance(self, SensorEntity)):
if (not self._temperature_reported):
self._temperature_reported = True
report_issue = self._suggest_report_issue()
_LOGGER.warning('Temperature sensor %s (%s) does not inherit SensorEntity, this will be unsupported in Home Assistant Core 2022.7. Please %s', self.entity_id, type(self), report_issue)
else:
return state
try:
prec = (((len(state) - state.index('.')) - 1) if ('.' in state) else 0)
temp = units.temperature(float(state), unit_of_measure)
state = str((round(temp) if (prec == 0) else round(temp, prec)))
attr[ATTR_UNIT_OF_MEASUREMENT] = units.temperature_unit
except ValueError:
pass
return state
state = _convert_temperature(state, attr)
if ((self._context_set is not None) and ((dt_util.utcnow() - self._context_set) > self.context_recent_time)):
self._context = None
self._context_set = None
self.hass.states.async_set(self.entity_id, state, attr, self.force_update, self._context) | -6,635,918,349,019,637,000 | Write the state to the state machine. | homeassistant/helpers/entity.py | _async_write_ha_state | algra4/core | python | @callback
def _async_write_ha_state(self) -> None:
if (self._platform_state == EntityPlatformState.REMOVED):
return
if (self.registry_entry and self.registry_entry.disabled_by):
if (not self._disabled_reported):
self._disabled_reported = True
assert (self.platform is not None)
_LOGGER.warning('Entity %s is incorrectly being triggered for updates while it is disabled. This is a bug in the %s integration', self.entity_id, self.platform.platform_name)
return
start = timer()
attr = self.capability_attributes
attr = (dict(attr) if attr else {})
available = self.available
state = self._stringify_state(available)
if available:
attr.update((self.state_attributes or {}))
attr.update((self.extra_state_attributes or {}))
if ((unit_of_measurement := self.unit_of_measurement) is not None):
attr[ATTR_UNIT_OF_MEASUREMENT] = unit_of_measurement
entry = self.registry_entry
if (assumed_state := self.assumed_state):
attr[ATTR_ASSUMED_STATE] = assumed_state
if ((attribution := self.attribution) is not None):
attr[ATTR_ATTRIBUTION] = attribution
if ((device_class := ((entry and entry.device_class) or self.device_class)) is not None):
attr[ATTR_DEVICE_CLASS] = str(device_class)
if ((entity_picture := self.entity_picture) is not None):
attr[ATTR_ENTITY_PICTURE] = entity_picture
if ((icon := ((entry and entry.icon) or self.icon)) is not None):
attr[ATTR_ICON] = icon
if ((name := ((entry and entry.name) or self.name)) is not None):
attr[ATTR_FRIENDLY_NAME] = name
if ((supported_features := self.supported_features) is not None):
attr[ATTR_SUPPORTED_FEATURES] = supported_features
end = timer()
if (((end - start) > 0.4) and (not self._slow_reported)):
self._slow_reported = True
report_issue = self._suggest_report_issue()
_LOGGER.warning('Updating state for %s (%s) took %.3f seconds. Please %s', self.entity_id, type(self), (end - start), report_issue)
if (DATA_CUSTOMIZE in self.hass.data):
attr.update(self.hass.data[DATA_CUSTOMIZE].get(self.entity_id))
def _convert_temperature(state: str, attr: dict) -> str:
from homeassistant.components.sensor import SensorEntity
unit_of_measure = attr.get(ATTR_UNIT_OF_MEASUREMENT)
units = self.hass.config.units
if ((unit_of_measure == units.temperature_unit) or (unit_of_measure not in (TEMP_CELSIUS, TEMP_FAHRENHEIT))):
return state
domain = split_entity_id(self.entity_id)[0]
if (domain != 'sensor'):
if (not self._temperature_reported):
self._temperature_reported = True
report_issue = self._suggest_report_issue()
_LOGGER.warning('Entity %s (%s) relies on automatic temperature conversion, this will be unsupported in Home Assistant Core 2022.7. Please %s', self.entity_id, type(self), report_issue)
elif (not isinstance(self, SensorEntity)):
if (not self._temperature_reported):
self._temperature_reported = True
report_issue = self._suggest_report_issue()
_LOGGER.warning('Temperature sensor %s (%s) does not inherit SensorEntity, this will be unsupported in Home Assistant Core 2022.7. Please %s', self.entity_id, type(self), report_issue)
else:
return state
try:
prec = (((len(state) - state.index('.')) - 1) if ('.' in state) else 0)
temp = units.temperature(float(state), unit_of_measure)
state = str((round(temp) if (prec == 0) else round(temp, prec)))
attr[ATTR_UNIT_OF_MEASUREMENT] = units.temperature_unit
except ValueError:
pass
return state
state = _convert_temperature(state, attr)
if ((self._context_set is not None) and ((dt_util.utcnow() - self._context_set) > self.context_recent_time)):
self._context = None
self._context_set = None
self.hass.states.async_set(self.entity_id, state, attr, self.force_update, self._context) |
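Inside `_async_write_ha_state`, the nested `_convert_temperature` helper preserves the number of decimal places the integration reported: `prec` counts the digits after the point in the string state, and the converted value is rounded back to that precision. A worked sketch, with a plain Celsius-to-Fahrenheit formula standing in for `hass.config.units.temperature()`:

```python
def convert_temp_preserving_precision(state: str, to_fahrenheit: bool) -> str:
    # Decimal places in the incoming string state: '21.5' -> 1, '21' -> 0.
    prec = len(state) - state.index(".") - 1 if "." in state else 0
    temp = float(state)
    if to_fahrenheit:
        temp = temp * 9 / 5 + 32
    # Integers stay integers; '21.5' keeps exactly one decimal place.
    return str(round(temp) if prec == 0 else round(temp, prec))


print(convert_temp_preserving_precision("21.5", True))  # 70.7
print(convert_temp_preserving_precision("21", True))    # 70
```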
def schedule_update_ha_state(self, force_refresh: bool=False) -> None:
'Schedule an update ha state change task.\n\n Scheduling the update avoids executor deadlocks.\n\n Entity state and attributes are read when the update ha state change\n task is executed.\n If state is changed more than once before the ha state change task has\n been executed, the intermediate state transitions will be missed.\n '
self.hass.add_job(self.async_update_ha_state(force_refresh)) | 4,930,532,841,608,269,000 | Schedule an update ha state change task.
Scheduling the update avoids executor deadlocks.
Entity state and attributes are read when the update ha state change
task is executed.
If state is changed more than once before the ha state change task has
been executed, the intermediate state transitions will be missed. | homeassistant/helpers/entity.py | schedule_update_ha_state | algra4/core | python | def schedule_update_ha_state(self, force_refresh: bool=False) -> None:
'Schedule an update ha state change task.\n\n Scheduling the update avoids executor deadlocks.\n\n Entity state and attributes are read when the update ha state change\n task is executed.\n If state is changed more than once before the ha state change task has\n been executed, the intermediate state transitions will be missed.\n '
self.hass.add_job(self.async_update_ha_state(force_refresh)) |
@callback
def async_schedule_update_ha_state(self, force_refresh: bool=False) -> None:
'Schedule an update ha state change task.\n\n This method must be run in the event loop.\n Scheduling the update avoids executor deadlocks.\n\n Entity state and attributes are read when the update ha state change\n task is executed.\n If state is changed more than once before the ha state change task has\n been executed, the intermediate state transitions will be missed.\n '
if force_refresh:
self.hass.async_create_task(self.async_update_ha_state(force_refresh))
else:
self.async_write_ha_state() | 221,238,759,961,931,260 | Schedule an update ha state change task.
This method must be run in the event loop.
Scheduling the update avoids executor deadlocks.
Entity state and attributes are read when the update ha state change
task is executed.
If state is changed more than once before the ha state change task has
been executed, the intermediate state transitions will be missed. | homeassistant/helpers/entity.py | async_schedule_update_ha_state | algra4/core | python | @callback
def async_schedule_update_ha_state(self, force_refresh: bool=False) -> None:
'Schedule an update ha state change task.\n\n This method must be run in the event loop.\n Scheduling the update avoids executor deadlocks.\n\n Entity state and attributes are read when the update ha state change\n task is executed.\n If state is changed more than once before the ha state change task has\n been executed, the intermediate state transitions will be missed.\n '
if force_refresh:
self.hass.async_create_task(self.async_update_ha_state(force_refresh))
else:
self.async_write_ha_state() |
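The two scheduling rows differ only in calling context: the sync variant hands the coroutine to `hass.add_job`, which is safe from worker threads, while the async variant either creates a task on the running loop or, when no refresh is needed, writes state directly. A toy illustration of the thread-safe hand-off; using `run_coroutine_threadsafe` as the mechanism behind `add_job` is an assumption made for the demo:

```python
import asyncio
import threading


async def update_ha_state() -> None:
    print("state written on the event loop")


def worker(loop: asyncio.AbstractEventLoop) -> None:
    # From a sync worker thread, never await the coroutine directly;
    # hand it to the loop, as schedule_update_ha_state does via hass.add_job.
    asyncio.run_coroutine_threadsafe(update_ha_state(), loop).result()


async def main() -> None:
    loop = asyncio.get_running_loop()
    thread = threading.Thread(target=worker, args=(loop,))
    thread.start()
    await asyncio.sleep(0.1)  # let the worker schedule and finish its coroutine
    thread.join()


asyncio.run(main())
```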
async def async_device_update(self, warning: bool=True) -> None:
"Process 'update' or 'async_update' from entity.\n\n This method is a coroutine.\n "
if self._update_staged:
return
self._update_staged = True
if self.parallel_updates:
(await self.parallel_updates.acquire())
try:
task: asyncio.Future[None]
if hasattr(self, 'async_update'):
task = self.hass.async_create_task(self.async_update())
elif hasattr(self, 'update'):
task = self.hass.async_add_executor_job(self.update)
else:
return
if (not warning):
(await task)
return
(finished, _) = (await asyncio.wait([task], timeout=SLOW_UPDATE_WARNING))
for done in finished:
if (exc := done.exception()):
raise exc
return
_LOGGER.warning('Update of %s is taking over %s seconds', self.entity_id, SLOW_UPDATE_WARNING)
(await task)
finally:
self._update_staged = False
if self.parallel_updates:
self.parallel_updates.release() | -9,035,073,522,249,991,000 | Process 'update' or 'async_update' from entity.
This method is a coroutine. | homeassistant/helpers/entity.py | async_device_update | algra4/core | python | async def async_device_update(self, warning: bool=True) -> None:
"Process 'update' or 'async_update' from entity.\n\n This method is a coroutine.\n "
if self._update_staged:
return
self._update_staged = True
if self.parallel_updates:
(await self.parallel_updates.acquire())
try:
task: asyncio.Future[None]
if hasattr(self, 'async_update'):
task = self.hass.async_create_task(self.async_update())
elif hasattr(self, 'update'):
task = self.hass.async_add_executor_job(self.update)
else:
return
if (not warning):
(await task)
return
(finished, _) = (await asyncio.wait([task], timeout=SLOW_UPDATE_WARNING))
for done in finished:
if (exc := done.exception()):
raise exc
return
_LOGGER.warning('Update of %s is taking over %s seconds', self.entity_id, SLOW_UPDATE_WARNING)
(await task)
finally:
self._update_staged = False
if self.parallel_updates:
self.parallel_updates.release() |
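`async_device_update` wraps the integration's `update`/`async_update` in an `asyncio.wait` with a timeout so a slow integration produces a warning but still completes. A standalone sketch of that pattern; the timeout value is an assumption (the real `SLOW_UPDATE_WARNING` constant lives in `entity.py`):

```python
import asyncio

SLOW_UPDATE_WARNING = 2  # seconds; demo value


async def run_with_slow_warning(coro, name: str) -> None:
    task = asyncio.ensure_future(coro)
    finished, _ = await asyncio.wait([task], timeout=SLOW_UPDATE_WARNING)
    for done in finished:
        # Re-raise any exception from a task that finished in time.
        if exc := done.exception():
            raise exc
        return
    # Not finished yet: warn once, then keep waiting on the same task.
    print(f"Update of {name} is taking over {SLOW_UPDATE_WARNING} seconds")
    await task


async def slow_update() -> None:
    await asyncio.sleep(3)


asyncio.run(run_with_slow_warning(slow_update(), "sensor.demo"))
```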
@callback
def async_on_remove(self, func: CALLBACK_TYPE) -> None:
'Add a function to call when entity is removed or not added.'
if (self._on_remove is None):
self._on_remove = []
self._on_remove.append(func) | 3,669,661,432,994,837,000 | Add a function to call when entity is removed or not added. | homeassistant/helpers/entity.py | async_on_remove | algra4/core | python | @callback
def async_on_remove(self, func: CALLBACK_TYPE) -> None:
if (self._on_remove is None):
self._on_remove = []
self._on_remove.append(func) |
async def async_removed_from_registry(self) -> None:
'Run when entity has been removed from entity registry.\n\n To be extended by integrations.\n ' | -4,028,870,512,133,826,600 | Run when entity has been removed from entity registry.
To be extended by integrations. | homeassistant/helpers/entity.py | async_removed_from_registry | algra4/core | python | async def async_removed_from_registry(self) -> None:
'Run when entity has been removed from entity registry.\n\n To be extended by integrations.\n ' |
@callback
def add_to_platform_start(self, hass: HomeAssistant, platform: EntityPlatform, parallel_updates: (asyncio.Semaphore | None)) -> None:
'Start adding an entity to a platform.'
if (self._platform_state == EntityPlatformState.ADDED):
raise HomeAssistantError(f'Entity {self.entity_id} cannot be added a second time to an entity platform')
self.hass = hass
self.platform = platform
self.parallel_updates = parallel_updates
self._platform_state = EntityPlatformState.ADDED | -8,311,372,291,523,876,000 | Start adding an entity to a platform. | homeassistant/helpers/entity.py | add_to_platform_start | algra4/core | python | @callback
def add_to_platform_start(self, hass: HomeAssistant, platform: EntityPlatform, parallel_updates: (asyncio.Semaphore | None)) -> None:
if (self._platform_state == EntityPlatformState.ADDED):
raise HomeAssistantError(f'Entity {self.entity_id} cannot be added a second time to an entity platform')
self.hass = hass
self.platform = platform
self.parallel_updates = parallel_updates
self._platform_state = EntityPlatformState.ADDED |
def _call_on_remove_callbacks(self) -> None:
'Call callbacks registered by async_on_remove.'
if (self._on_remove is None):
return
while self._on_remove:
self._on_remove.pop()() | -4,055,272,837,616,563,700 | Call callbacks registered by async_on_remove. | homeassistant/helpers/entity.py | _call_on_remove_callbacks | algra4/core | python | def _call_on_remove_callbacks(self) -> None:
if (self._on_remove is None):
return
while self._on_remove:
self._on_remove.pop()() |
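The `async_on_remove` row above and `_call_on_remove_callbacks` here form a pair: listeners register cleanup functions lazily, and teardown pops them in LIFO order, which also lets a callback safely register further cleanup work while the loop drains. A standalone sketch:

```python
class SketchEntity:
    """Stand-in showing the on-remove bookkeeping."""

    def __init__(self) -> None:
        self._on_remove = None

    def async_on_remove(self, func) -> None:
        if self._on_remove is None:
            self._on_remove = []
        self._on_remove.append(func)

    def _call_on_remove_callbacks(self) -> None:
        if self._on_remove is None:
            return
        # Pop-and-call: runs in LIFO order and tolerates new registrations.
        while self._on_remove:
            self._on_remove.pop()()


e = SketchEntity()
e.async_on_remove(lambda: print("listener detached"))
e.async_on_remove(lambda: print("timer cancelled"))
e._call_on_remove_callbacks()  # prints "timer cancelled" then "listener detached"
```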
@callback
def add_to_platform_abort(self) -> None:
'Abort adding an entity to a platform.'
self._platform_state = EntityPlatformState.NOT_ADDED
self._call_on_remove_callbacks()
self.hass = None
self.platform = None
self.parallel_updates = None | -4,424,440,936,371,736,600 | Abort adding an entity to a platform. | homeassistant/helpers/entity.py | add_to_platform_abort | algra4/core | python | @callback
def add_to_platform_abort(self) -> None:
self._platform_state = EntityPlatformState.NOT_ADDED
self._call_on_remove_callbacks()
self.hass = None
self.platform = None
self.parallel_updates = None |
async def add_to_platform_finish(self) -> None:
'Finish adding an entity to a platform.'
(await self.async_internal_added_to_hass())
(await self.async_added_to_hass())
self.async_write_ha_state() | -2,320,288,375,294,386,000 | Finish adding an entity to a platform. | homeassistant/helpers/entity.py | add_to_platform_finish | algra4/core | python | async def add_to_platform_finish(self) -> None:
(await self.async_internal_added_to_hass())
(await self.async_added_to_hass())
self.async_write_ha_state() |
async def async_remove(self, *, force_remove: bool=False) -> None:
"Remove entity from Home Assistant.\n\n If the entity has a non disabled entry in the entity registry,\n the entity's state will be set to unavailable, in the same way\n as when the entity registry is loaded.\n\n If the entity doesn't have a non disabled entry in the entity registry,\n or if force_remove=True, its state will be removed.\n "
if (self.platform and (self._platform_state != EntityPlatformState.ADDED)):
raise HomeAssistantError(f'Entity {self.entity_id} async_remove called twice')
self._platform_state = EntityPlatformState.REMOVED
self._call_on_remove_callbacks()
(await self.async_internal_will_remove_from_hass())
(await self.async_will_remove_from_hass())
if ((not force_remove) and self.registry_entry and (not self.registry_entry.disabled)):
self.registry_entry.write_unavailable_state(self.hass)
else:
self.hass.states.async_remove(self.entity_id, context=self._context) | 8,390,034,581,321,268,000 | Remove entity from Home Assistant.
If the entity has a non disabled entry in the entity registry,
the entity's state will be set to unavailable, in the same way
as when the entity registry is loaded.
If the entity doesn't have a non disabled entry in the entity registry,
or if force_remove=True, its state will be removed. | homeassistant/helpers/entity.py | async_remove | algra4/core | python | async def async_remove(self, *, force_remove: bool=False) -> None:
"Remove entity from Home Assistant.\n\n If the entity has a non disabled entry in the entity registry,\n the entity's state will be set to unavailable, in the same way\n as when the entity registry is loaded.\n\n If the entity doesn't have a non disabled entry in the entity registry,\n or if force_remove=True, its state will be removed.\n "
if (self.platform and (self._platform_state != EntityPlatformState.ADDED)):
raise HomeAssistantError(f'Entity {self.entity_id} async_remove called twice')
self._platform_state = EntityPlatformState.REMOVED
self._call_on_remove_callbacks()
(await self.async_internal_will_remove_from_hass())
(await self.async_will_remove_from_hass())
if ((not force_remove) and self.registry_entry and (not self.registry_entry.disabled)):
self.registry_entry.write_unavailable_state(self.hass)
else:
self.hass.states.async_remove(self.entity_id, context=self._context) |
async def async_added_to_hass(self) -> None:
'Run when entity about to be added to hass.\n\n To be extended by integrations.\n ' | -5,585,018,881,033,491,000 | Run when entity about to be added to hass.
To be extended by integrations. | homeassistant/helpers/entity.py | async_added_to_hass | algra4/core | python | async def async_added_to_hass(self) -> None:
'Run when entity about to be added to hass.\n\n To be extended by integrations.\n ' |
async def async_will_remove_from_hass(self) -> None:
'Run when entity will be removed from hass.\n\n To be extended by integrations.\n ' | -8,817,618,303,532,068,000 | Run when entity will be removed from hass.
To be extended by integrations. | homeassistant/helpers/entity.py | async_will_remove_from_hass | algra4/core | python | async def async_will_remove_from_hass(self) -> None:
'Run when entity will be removed from hass.\n\n To be extended by integrations.\n ' |
@callback
def async_registry_entry_updated(self) -> None:
'Run when the entity registry entry has been updated.\n\n To be extended by integrations.\n ' | -3,633,366,077,162,349,600 | Run when the entity registry entry has been updated.
To be extended by integrations. | homeassistant/helpers/entity.py | async_registry_entry_updated | algra4/core | python | @callback
def async_registry_entry_updated(self) -> None:
'Run when the entity registry entry has been updated.\n\n To be extended by integrations.\n ' |
async def async_internal_added_to_hass(self) -> None:
'Run when entity about to be added to hass.\n\n Not to be extended by integrations.\n '
if self.platform:
info = {'domain': self.platform.platform_name, 'custom_component': ('custom_components' in type(self).__module__)}
if self.platform.config_entry:
info['source'] = SOURCE_CONFIG_ENTRY
info['config_entry'] = self.platform.config_entry.entry_id
else:
info['source'] = SOURCE_PLATFORM_CONFIG
self.hass.data.setdefault(DATA_ENTITY_SOURCE, {})[self.entity_id] = info
if (self.registry_entry is not None):
assert (not self.registry_entry.disabled_by), f"Entity {self.entity_id} is being added while it's disabled"
self.async_on_remove(async_track_entity_registry_updated_event(self.hass, self.entity_id, self._async_registry_updated)) | 1,359,335,844,177,028,900 | Run when entity about to be added to hass.
Not to be extended by integrations. | homeassistant/helpers/entity.py | async_internal_added_to_hass | algra4/core | python | async def async_internal_added_to_hass(self) -> None:
'Run when entity about to be added to hass.\n\n Not to be extended by integrations.\n '
if self.platform:
info = {'domain': self.platform.platform_name, 'custom_component': ('custom_components' in type(self).__module__)}
if self.platform.config_entry:
info['source'] = SOURCE_CONFIG_ENTRY
info['config_entry'] = self.platform.config_entry.entry_id
else:
info['source'] = SOURCE_PLATFORM_CONFIG
self.hass.data.setdefault(DATA_ENTITY_SOURCE, {})[self.entity_id] = info
if (self.registry_entry is not None):
assert (not self.registry_entry.disabled_by), f"Entity {self.entity_id} is being added while it's disabled"
self.async_on_remove(async_track_entity_registry_updated_event(self.hass, self.entity_id, self._async_registry_updated)) |
async def async_internal_will_remove_from_hass(self) -> None:
'Run when entity will be removed from hass.\n\n Not to be extended by integrations.\n '
if self.platform:
self.hass.data[DATA_ENTITY_SOURCE].pop(self.entity_id) | 1,536,591,258,824,101,000 | Run when entity will be removed from hass.
Not to be extended by integrations. | homeassistant/helpers/entity.py | async_internal_will_remove_from_hass | algra4/core | python | async def async_internal_will_remove_from_hass(self) -> None:
'Run when entity will be removed from hass.\n\n Not to be extended by integrations.\n '
if self.platform:
self.hass.data[DATA_ENTITY_SOURCE].pop(self.entity_id) |
async def _async_registry_updated(self, event: Event) -> None:
'Handle entity registry update.'
data = event.data
if (data['action'] == 'remove'):
(await self.async_removed_from_registry())
self.registry_entry = None
(await self.async_remove())
if (data['action'] != 'update'):
return
ent_reg = er.async_get(self.hass)
old = self.registry_entry
self.registry_entry = ent_reg.async_get(data['entity_id'])
assert (self.registry_entry is not None)
if self.registry_entry.disabled:
(await self.async_remove())
return
assert (old is not None)
if (self.registry_entry.entity_id == old.entity_id):
self.async_registry_entry_updated()
self.async_write_ha_state()
return
(await self.async_remove(force_remove=True))
assert (self.platform is not None)
self.entity_id = self.registry_entry.entity_id
(await self.platform.async_add_entities([self])) | -5,765,629,528,305,339,000 | Handle entity registry update. | homeassistant/helpers/entity.py | _async_registry_updated | algra4/core | python | async def _async_registry_updated(self, event: Event) -> None:
data = event.data
if (data['action'] == 'remove'):
(await self.async_removed_from_registry())
self.registry_entry = None
(await self.async_remove())
if (data['action'] != 'update'):
return
ent_reg = er.async_get(self.hass)
old = self.registry_entry
self.registry_entry = ent_reg.async_get(data['entity_id'])
assert (self.registry_entry is not None)
if self.registry_entry.disabled:
(await self.async_remove())
return
assert (old is not None)
if (self.registry_entry.entity_id == old.entity_id):
self.async_registry_entry_updated()
self.async_write_ha_state()
return
(await self.async_remove(force_remove=True))
assert (self.platform is not None)
self.entity_id = self.registry_entry.entity_id
(await self.platform.async_add_entities([self])) |
def __eq__(self, other: Any) -> bool:
'Return the comparison.'
if (not isinstance(other, self.__class__)):
return False
if ((self.unique_id is None) or (other.unique_id is None)):
return False
if ((self.platform is not None) or (other.platform is not None)):
if ((self.platform is None) or (other.platform is None)):
return False
if (self.platform.platform != other.platform.platform):
return False
return (self.unique_id == other.unique_id) | -4,800,598,507,128,546,000 | Return the comparison. | homeassistant/helpers/entity.py | __eq__ | algra4/core | python | def __eq__(self, other: Any) -> bool:
if (not isinstance(other, self.__class__)):
return False
if ((self.unique_id is None) or (other.unique_id is None)):
return False
if ((self.platform is not None) or (other.platform is not None)):
if ((self.platform is None) or (other.platform is None)):
return False
if (self.platform.platform != other.platform.platform):
return False
return (self.unique_id == other.unique_id) |
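`__eq__` makes two entities equal only when they are the same class, both carry a `unique_id`, their platforms agree, and the ids match; entities without a `unique_id` are never equal to anything. A sketch of those semantics, with `platform` reduced to a plain string (the real code compares `platform.platform`):

```python
from __future__ import annotations

from typing import Any


class SketchEntity:
    def __init__(self, unique_id: str | None, platform: str | None) -> None:
        self.unique_id = unique_id
        self.platform = platform

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, self.__class__):
            return False
        # No unique_id on either side means "never equal".
        if self.unique_id is None or other.unique_id is None:
            return False
        # If either side has a platform, both must, and they must match.
        if self.platform is not None or other.platform is not None:
            if self.platform is None or other.platform is None:
                return False
            if self.platform != other.platform:
                return False
        return self.unique_id == other.unique_id


print(SketchEntity("abc", "hue") == SketchEntity("abc", "hue"))    # True
print(SketchEntity(None, "hue") == SketchEntity(None, "hue"))      # False
print(SketchEntity("abc", "hue") == SketchEntity("abc", "zwave"))  # False
```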
def __repr__(self) -> str:
'Return the representation.'
return f'<Entity {self.name}: {self.state}>' | 5,428,986,823,689,221,000 | Return the representation. | homeassistant/helpers/entity.py | __repr__ | algra4/core | python | def __repr__(self) -> str:
return f'<Entity {self.name}: {self.state}>' |
async def async_request_call(self, coro: Awaitable) -> None:
'Process request batched.'
if self.parallel_updates:
(await self.parallel_updates.acquire())
try:
(await coro)
finally:
if self.parallel_updates:
self.parallel_updates.release() | 3,011,644,997,447,190,500 | Process request batched. | homeassistant/helpers/entity.py | async_request_call | algra4/core | python | async def async_request_call(self, coro: Awaitable) -> None:
if self.parallel_updates:
(await self.parallel_updates.acquire())
try:
(await coro)
finally:
if self.parallel_updates:
self.parallel_updates.release() |
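`async_request_call` throttles outbound requests with the same `parallel_updates` semaphore used for updates, releasing it in a `finally` block so a failing request cannot leak a permit. A self-contained sketch of the pattern:

```python
from __future__ import annotations

import asyncio


async def request_call(coro, parallel_updates: asyncio.Semaphore | None) -> None:
    if parallel_updates:
        await parallel_updates.acquire()
    try:
        await coro
    finally:
        # Always release, even if the wrapped coroutine raised.
        if parallel_updates:
            parallel_updates.release()


async def main() -> None:
    sem = asyncio.Semaphore(2)  # at most two requests in flight

    async def one_request(i: int) -> None:
        print(f"request {i} running")
        await asyncio.sleep(0.1)

    await asyncio.gather(*(request_call(one_request(i), sem) for i in range(5)))


asyncio.run(main())
```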
def _suggest_report_issue(self) -> str:
'Suggest to report an issue.'
report_issue = ''
if ('custom_components' in type(self).__module__):
report_issue = 'report it to the custom component author.'
else:
report_issue = 'create a bug report at https://github.com/home-assistant/core/issues?q=is%3Aopen+is%3Aissue'
if self.platform:
report_issue += f'+label%3A%22integration%3A+{self.platform.platform_name}%22'
return report_issue | 6,459,600,896,278,370,000 | Suggest to report an issue. | homeassistant/helpers/entity.py | _suggest_report_issue | algra4/core | python | def _suggest_report_issue(self) -> str:
report_issue = ''
if ('custom_components' in type(self).__module__):
report_issue = 'report it to the custom component author.'
else:
report_issue = 'create a bug report at https://github.com/home-assistant/core/issues?q=is%3Aopen+is%3Aissue'
if self.platform:
report_issue += f'+label%3A%22integration%3A+{self.platform.platform_name}%22'
return report_issue |
@property
@final
def state(self) -> (Literal[('on', 'off')] | None):
'Return the state.'
if ((is_on := self.is_on) is None):
return None
return (STATE_ON if is_on else STATE_OFF) | 1,586,572,218,295,235,300 | Return the state. | homeassistant/helpers/entity.py | state | algra4/core | python | @property
@final
def state(self) -> (Literal[('on', 'off')] | None):
if ((is_on := self.is_on) is None):
return None
return (STATE_ON if is_on else STATE_OFF) |
@property
def is_on(self) -> (bool | None):
'Return True if entity is on.'
return self._attr_is_on | -7,774,850,306,356,158,000 | Return True if entity is on. | homeassistant/helpers/entity.py | is_on | algra4/core | python | @property
def is_on(self) -> (bool | None):
return self._attr_is_on |
def turn_on(self, **kwargs: Any) -> None:
'Turn the entity on.'
raise NotImplementedError() | 2,322,256,188,001,028,600 | Turn the entity on. | homeassistant/helpers/entity.py | turn_on | algra4/core | python | def turn_on(self, **kwargs: Any) -> None:
raise NotImplementedError() |
async def async_turn_on(self, **kwargs: Any) -> None:
'Turn the entity on.'
(await self.hass.async_add_executor_job(ft.partial(self.turn_on, **kwargs))) | -8,243,134,644,201,260,000 | Turn the entity on. | homeassistant/helpers/entity.py | async_turn_on | algra4/core | python | async def async_turn_on(self, **kwargs: Any) -> None:
(await self.hass.async_add_executor_job(ft.partial(self.turn_on, **kwargs))) |
def turn_off(self, **kwargs: Any) -> None:
'Turn the entity off.'
raise NotImplementedError() | -3,799,708,282,002,590,700 | Turn the entity off. | homeassistant/helpers/entity.py | turn_off | algra4/core | python | def turn_off(self, **kwargs: Any) -> None:
raise NotImplementedError() |
async def async_turn_off(self, **kwargs: Any) -> None:
'Turn the entity off.'
(await self.hass.async_add_executor_job(ft.partial(self.turn_off, **kwargs))) | -6,121,593,494,274,528,000 | Turn the entity off. | homeassistant/helpers/entity.py | async_turn_off | algra4/core | python | async def async_turn_off(self, **kwargs: Any) -> None:
(await self.hass.async_add_executor_job(ft.partial(self.turn_off, **kwargs))) |
def toggle(self, **kwargs: Any) -> None:
'Toggle the entity.'
if self.is_on:
self.turn_off(**kwargs)
else:
self.turn_on(**kwargs) | -6,398,084,867,639,560,000 | Toggle the entity. | homeassistant/helpers/entity.py | toggle | algra4/core | python | def toggle(self, **kwargs: Any) -> None:
if self.is_on:
self.turn_off(**kwargs)
else:
self.turn_on(**kwargs) |
async def async_toggle(self, **kwargs: Any) -> None:
'Toggle the entity.'
if self.is_on:
(await self.async_turn_off(**kwargs))
else:
(await self.async_turn_on(**kwargs)) | 6,141,564,757,956,466,000 | Toggle the entity. | homeassistant/helpers/entity.py | async_toggle | algra4/core | python | async def async_toggle(self, **kwargs: Any) -> None:
if self.is_on:
(await self.async_turn_off(**kwargs))
else:
(await self.async_turn_on(**kwargs)) |
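The `ToggleEntity` rows above fit together: `state` is final and always derived from `is_on`, while `turn_on`/`turn_off` are the methods a platform actually implements (`toggle` and the async wrappers are built on top of them). A minimal standalone subclass sketch (`SketchToggleEntity` is an illustrative stand-in, not the real base class):

```python
from __future__ import annotations

STATE_ON, STATE_OFF = "on", "off"  # assumed to mirror homeassistant.const


class SketchToggleEntity:
    _attr_is_on: bool | None = None

    @property
    def state(self) -> str | None:
        # Always derived from is_on; None maps to an unknown state.
        if (is_on := self.is_on) is None:
            return None
        return STATE_ON if is_on else STATE_OFF

    @property
    def is_on(self) -> bool | None:
        return self._attr_is_on

    def turn_on(self, **kwargs) -> None:
        self._attr_is_on = True

    def turn_off(self, **kwargs) -> None:
        self._attr_is_on = False

    def toggle(self, **kwargs) -> None:
        if self.is_on:
            self.turn_off(**kwargs)
        else:
            self.turn_on(**kwargs)


switch = SketchToggleEntity()
print(switch.state)  # None (unknown)
switch.toggle()
print(switch.state)  # on
switch.toggle()
print(switch.state)  # off
```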
def _get_prefix_list_direction_out_prefix_name(self):
'\n Getter method for prefix_list_direction_out_prefix_name, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/prefix_list_direction_out_prefix_name (nei-prefix-list-filter)\n '
return self.__prefix_list_direction_out_prefix_name | -3,966,028,733,959,844,000 | Getter method for prefix_list_direction_out_prefix_name, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/prefix_list_direction_out_prefix_name (nei-prefix-list-filter) | pybind/slxos/v17s_1_02/routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/__init__.py | _get_prefix_list_direction_out_prefix_name | extremenetworks/pybind | python | def _get_prefix_list_direction_out_prefix_name(self):
return self.__prefix_list_direction_out_prefix_name |
def _set_prefix_list_direction_out_prefix_name(self, v, load=False):
'\n Setter method for prefix_list_direction_out_prefix_name, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/prefix_list_direction_out_prefix_name (nei-prefix-list-filter)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_prefix_list_direction_out_prefix_name is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_prefix_list_direction_out_prefix_name() directly.\n '
if hasattr(v, '_utype'):
v = v._utype(v)
try:
t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name='prefix-list-direction-out-prefix-name', rest_name='ip-access-number', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'ip-access-number', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='nei-prefix-list-filter', is_config=True)
except (TypeError, ValueError):
raise ValueError({'error-string': 'prefix_list_direction_out_prefix_name must be of a type compatible with nei-prefix-list-filter', 'defined-type': 'brocade-bgp:nei-prefix-list-filter', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={\'length\': [u\'1..63\']}), is_leaf=True, yang_name="prefix-list-direction-out-prefix-name", rest_name="ip-access-number", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'cli-drop-node-name\': None, u\'alt-name\': u\'ip-access-number\', u\'cli-incomplete-no\': None, u\'cli-incomplete-command\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-bgp\', defining_module=\'brocade-bgp\', yang_type=\'nei-prefix-list-filter\', is_config=True)'})
self.__prefix_list_direction_out_prefix_name = t
if hasattr(self, '_set'):
self._set() | 7,387,998,423,933,128,000 | Setter method for prefix_list_direction_out_prefix_name, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/prefix_list_direction_out_prefix_name (nei-prefix-list-filter)
If this variable is read-only (config: false) in the
source YANG file, then _set_prefix_list_direction_out_prefix_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prefix_list_direction_out_prefix_name() directly. | pybind/slxos/v17s_1_02/routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/__init__.py | _set_prefix_list_direction_out_prefix_name | extremenetworks/pybind | python | def _set_prefix_list_direction_out_prefix_name(self, v, load=False):
'\n Setter method for prefix_list_direction_out_prefix_name, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/prefix_list_direction_out_prefix_name (nei-prefix-list-filter)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_prefix_list_direction_out_prefix_name is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_prefix_list_direction_out_prefix_name() directly.\n '
if hasattr(v, '_utype'):
v = v._utype(v)
try:
t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name='prefix-list-direction-out-prefix-name', rest_name='ip-access-number', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'ip-access-number', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='nei-prefix-list-filter', is_config=True)
except (TypeError, ValueError):
raise ValueError({'error-string': 'prefix_list_direction_out_prefix_name must be of a type compatible with nei-prefix-list-filter', 'defined-type': 'brocade-bgp:nei-prefix-list-filter', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={\'length\': [u\'1..63\']}), is_leaf=True, yang_name="prefix-list-direction-out-prefix-name", rest_name="ip-access-number", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'cli-drop-node-name\': None, u\'alt-name\': u\'ip-access-number\', u\'cli-incomplete-no\': None, u\'cli-incomplete-command\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-bgp\', defining_module=\'brocade-bgp\', yang_type=\'nei-prefix-list-filter\', is_config=True)'})
self.__prefix_list_direction_out_prefix_name = t
if hasattr(self, '_set'):
self._set() |
def _get_prefix_list_direction_out(self):
'\n Getter method for prefix_list_direction_out, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/prefix_list_direction_out (empty)\n '
return self.__prefix_list_direction_out | -8,154,108,608,796,012,000 | Getter method for prefix_list_direction_out, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/prefix_list_direction_out (empty) | pybind/slxos/v17s_1_02/routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/__init__.py | _get_prefix_list_direction_out | extremenetworks/pybind | python | def _get_prefix_list_direction_out(self):
return self.__prefix_list_direction_out |
def _set_prefix_list_direction_out(self, v, load=False):
'\n Setter method for prefix_list_direction_out, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/prefix_list_direction_out (empty)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_prefix_list_direction_out is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_prefix_list_direction_out() directly.\n '
if hasattr(v, '_utype'):
v = v._utype(v)
try:
t = YANGDynClass(v, base=YANGBool, is_leaf=True, yang_name='prefix-list-direction-out', rest_name='out', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Filter outgoing routes', u'alt-name': u'out'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({'error-string': 'prefix_list_direction_out must be of a type compatible with empty', 'defined-type': 'empty', 'generated-type': 'YANGDynClass(base=YANGBool, is_leaf=True, yang_name="prefix-list-direction-out", rest_name="out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Filter outgoing routes\', u\'alt-name\': u\'out\'}}, namespace=\'urn:brocade.com:mgmt:brocade-bgp\', defining_module=\'brocade-bgp\', yang_type=\'empty\', is_config=True)'})
self.__prefix_list_direction_out = t
if hasattr(self, '_set'):
self._set() | 8,742,975,862,918,580,000 | Setter method for prefix_list_direction_out, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/prefix_list_direction_out (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_prefix_list_direction_out is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prefix_list_direction_out() directly. | pybind/slxos/v17s_1_02/routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/__init__.py | _set_prefix_list_direction_out | extremenetworks/pybind | python | def _set_prefix_list_direction_out(self, v, load=False):
'\n Setter method for prefix_list_direction_out, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/prefix_list_direction_out (empty)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_prefix_list_direction_out is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_prefix_list_direction_out() directly.\n '
if hasattr(v, '_utype'):
v = v._utype(v)
try:
t = YANGDynClass(v, base=YANGBool, is_leaf=True, yang_name='prefix-list-direction-out', rest_name='out', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Filter outgoing routes', u'alt-name': u'out'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({'error-string': 'prefix_list_direction_out must be of a type compatible with empty', 'defined-type': 'empty', 'generated-type': 'YANGDynClass(base=YANGBool, is_leaf=True, yang_name="prefix-list-direction-out", rest_name="out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Filter outgoing routes\', u\'alt-name\': u\'out\'}}, namespace=\'urn:brocade.com:mgmt:brocade-bgp\', defining_module=\'brocade-bgp\', yang_type=\'empty\', is_config=True)'})
self.__prefix_list_direction_out = t
if hasattr(self, '_set'):
self._set() |
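The generated pybind setters above all follow one shape: coerce the value through a `YANGDynClass` wrapper that enforces the YANG restriction (here a string length of 1..63), and convert any `TypeError`/`ValueError` into a uniform `ValueError`. A toy re-implementation of that validate-then-assign shape; `RestrictedStr` is a stand-in for pybind's `RestrictedClassType`, not the real class:

```python
class RestrictedStr(str):
    """Toy stand-in for pybind's RestrictedClassType length restriction."""

    def __new__(cls, value):
        value = str(value)
        if not 1 <= len(value) <= 63:
            raise ValueError("length must be in 1..63")
        return super().__new__(cls, value)


class DirectionOut:
    """Mimics the validate-then-assign shape of the generated setter."""

    def _set_prefix_list_direction_out_prefix_name(self, v) -> None:
        try:
            t = RestrictedStr(v)
        except (TypeError, ValueError):
            raise ValueError(
                "prefix_list_direction_out_prefix_name must be of a type "
                "compatible with nei-prefix-list-filter"
            )
        self._prefix_name = t


d = DirectionOut()
d._set_prefix_list_direction_out_prefix_name("PL-OUT-1")  # accepted
try:
    d._set_prefix_list_direction_out_prefix_name("")      # too short
except ValueError as err:
    print(err)
```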
def test_new_user(new_user):
'\n GIVEN a User model\n WHEN a new User is created\n THEN check the username and birthday fields are defined correctly\n '
assert (new_user.name == 'test')
assert (new_user.birthday == date(day=1, month=12, year=1989)) | -3,446,939,272,959,490,000 | GIVEN a User model
WHEN a new User is created
THEN check the username and birthday fields are defined correctly | tests/unit/test_app.py | test_new_user | atsikham/flask-test-app | python | def test_new_user(new_user):
'\n GIVEN a User model\n WHEN a new User is created\n THEN check the username and birthday fields are defined correctly\n '
assert (new_user.name == 'test')
assert (new_user.birthday == date(day=1, month=12, year=1989)) |
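`test_new_user` depends on a `new_user` fixture that is not part of this row. A plausible `conftest.py` backing it, inferred from the fields the test asserts; the `User` model shown here is an assumption, not the project's actual model:

```python
# conftest.py (hypothetical)
from datetime import date

import pytest


class User:
    """Assumed minimal model with the two fields the test checks."""

    def __init__(self, name: str, birthday: date) -> None:
        self.name = name
        self.birthday = birthday


@pytest.fixture
def new_user() -> User:
    return User(name="test", birthday=date(day=1, month=12, year=1989))
```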
def __init__(self, length, dec_rep):
'\n Constructor\n\n Parameters\n ----------\n length : int\n dec_rep : int\n\n Returns\n -------\n\n '
self.len = length
self.dec_rep = dec_rep
self.max_len = 16
assert (length <= self.max_len), 'bit vector is too long'
assert (length > 0), 'bit vector len must be >=1' | 4,263,336,906,014,317,600 | Constructor
Parameters
----------
length : int
dec_rep : int
Returns
------- | qubiter/BitVector.py | __init__ | yourball/qubiter | python | def __init__(self, length, dec_rep):
'\n Constructor\n\n Parameters\n ----------\n length : int\n dec_rep : int\n\n Returns\n -------\n\n '
self.len = length
self.dec_rep = dec_rep
self.max_len = 16
assert (length <= self.max_len), 'bit vector is too long'
assert (length > 0), 'bit vector len must be >=1' |
@staticmethod
def copy(bvec):
'\n Copy constructor, returns a new BitVector which is a copy of the\n BitVector bvec.\n\n Parameters\n ----------\n bvec : BitVector\n\n Returns\n -------\n BitVector\n\n '
return BitVector(bvec.len, bvec.dec_rep) | -3,857,372,566,116,804,600 | Copy constructor, returns a new BitVector which is a copy of the
BitVector bvec.
Parameters
----------
bvec : BitVector
Returns
-------
BitVector | qubiter/BitVector.py | copy | yourball/qubiter | python | @staticmethod
def copy(bvec):
'\n Copy constructor, returns a new BitVector which is a copy of the\n BitVector bvec.\n\n Parameters\n ----------\n bvec : BitVector\n\n Returns\n -------\n BitVector\n\n '
return BitVector(bvec.len, bvec.dec_rep) |
def bit_is_T(self, bpos):
'\n Returns True iff bit at position bpos is 1 (True)\n\n Parameters\n ----------\n bpos : int\n bit position\n\n Returns\n -------\n bool\n\n '
assert (bpos < self.len), 'bit position is too large'
mask = (1 << bpos)
return ((self.dec_rep & mask) == mask) | -2,255,089,424,818,929,700 | Returns True iff bit at position bpos is 1 (True)
Parameters
----------
bpos : int
bit position
Returns
-------
bool | qubiter/BitVector.py | bit_is_T | yourball/qubiter | python | def bit_is_T(self, bpos):
'\n Returns True iff bit at position bpos is 1 (True)\n\n Parameters\n ----------\n bpos : int\n bit position\n\n Returns\n -------\n bool\n\n '
assert (bpos < self.len), 'bit position is too large'
mask = (1 << bpos)
return ((self.dec_rep & mask) == mask) |
def set_bit_T(self, bpos):
'\n Sets to 1 (True) the bit of self at position bpos.\n\n Parameters\n ----------\n bpos : int\n bit position\n\n Returns\n -------\n None\n\n '
assert (bpos < self.len), 'bit position is too large'
self.dec_rep |= (1 << bpos) | 3,042,222,247,460,635,000 | Sets to 1 (True) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None | qubiter/BitVector.py | set_bit_T | yourball/qubiter | python | def set_bit_T(self, bpos):
'\n Sets to 1 (True) the bit of self at position bpos.\n\n Parameters\n ----------\n bpos : int\n bit position\n\n Returns\n -------\n None\n\n '
assert (bpos < self.len), 'bit position is too large'
self.dec_rep |= (1 << bpos) |
def set_bit_F(self, bpos):
'\n Sets to 0 (False) the bit of self at position bpos.\n\n Parameters\n ----------\n bpos : int\n bit position\n\n Returns\n -------\n None\n\n '
assert (bpos < self.len), 'bit position is too large'
self.dec_rep &= (~ (1 << bpos)) | -5,681,098,986,469,289,000 | Sets to 0 (False) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None | qubiter/BitVector.py | set_bit_F | yourball/qubiter | python | def set_bit_F(self, bpos):
'\n Sets to 0 (False) the bit of self at position bpos.\n\n Parameters\n ----------\n bpos : int\n bit position\n\n Returns\n -------\n None\n\n '
assert (bpos < self.len), 'bit position is too large'
self.dec_rep &= (~ (1 << bpos)) |
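# Usage sketch for the bit set/test methods above:
bv = BitVector(4, 0b0101)    # 4-bit vector with bits 0 and 2 set
assert bv.bit_is_T(2)        # bit 2 is 1
bv.set_bit_T(3)              # dec_rep: 0b0101 -> 0b1101
bv.set_bit_F(0)              # dec_rep: 0b1101 -> 0b1100
assert not bv.bit_is_T(0)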
def set_all_bits_T(self):
'\n Sets to 1 (True) the bits of self at positions\n 0 to len-1 inclusive.\n\n Returns\n -------\n None\n\n '
self.dec_rep = ((1 << (self.len + 1)) - 1) | 7,747,470,342,879,452,000 | Sets to 1 (True) the bits of self at positions
0 to len-1 inclusive.
Returns
-------
None | qubiter/BitVector.py | set_all_bits_T | yourball/qubiter | python | def set_all_bits_T(self):
'\n Sets to 1 (True) the bits of self at positions\n 0 to len-1 inclusive.\n\n Returns\n -------\n None\n\n '
self.dec_rep = ((1 << (self.len + 1)) - 1) |
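# Note an apparent off-by-one in the recorded body: for len == 3,
# ((1 << (3 + 1)) - 1) == 0b1111 == 15, which sets 4 bits, not 3.
# The usual all-ones mask for an n-bit vector is:
n = 3
all_ones = (1 << n) - 1      # 0b111 == 7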
def set_all_bits_F(self):
'\n Sets to 0 (False) the bits of self at positions\n 0 to len-1 inclusive.\n\n Returns\n -------\n None\n\n '
self.dec_rep = 0 | 1,587,382,351,114,493,700 | Sets to 0 (False) the bits of self at positions
0 to len-1 inclusive.
Returns
-------
None | qubiter/BitVector.py | set_all_bits_F | yourball/qubiter | python | def set_all_bits_F(self):
'\n Sets to 0 (False) the bits of self at positions\n 0 to len-1 inclusive.\n\n Returns\n -------\n None\n\n '
self.dec_rep = 0 |
def get_num_T_bits(self):
'\n Returns the number of 1 (True) bits at positions\n 0 to len-1 inclusive.\n\n Returns\n -------\n int\n\n '
count = 0
for bpos in range(self.len):
if self.bit_is_T(bpos):
count += 1
return count | -6,344,205,092,744,203,000 | Returns the number of 1 (True) bits at positions
0 to len-1 inclusive.
Returns
-------
int | qubiter/BitVector.py | get_num_T_bits | yourball/qubiter | python | def get_num_T_bits(self):
'\n Returns the number of 1 (True) bits at positions\n 0 to len-1 inclusive.\n\n Returns\n -------\n int\n\n '
count = 0
for bpos in range(self.len):
if self.bit_is_T(bpos):
count += 1
return count |
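# Equivalent popcount without the explicit loop (masking to the low `length` bits):
length, dec_rep = 4, 0b1011
num_t_bits = bin(dec_rep & ((1 << length) - 1)).count('1')   # 3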
def find_T_bit_to_right_of(self, bpos):
'\n Returns position of 1 (True) bit immediately to the right of\n position bpos. Returns -1 if there is no such bit.\n\n Parameters\n ----------\n bpos : int\n bit position\n\n Returns\n -------\n int\n\n '
if (bpos <= 0):
return (- 1)
right_T_bit = bpos
mask = (1 << right_T_bit)
found_it = False
while True:
right_T_bit -= 1
mask >>= 1
found_it = ((self.dec_rep & mask) == mask)
if ((right_T_bit == 0) or found_it):
break
if found_it:
return right_T_bit
else:
return (- 1) | 6,769,676,576,450,919,000 | Returns position of 1 (True) bit immediately to the right of
position bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int | qubiter/BitVector.py | find_T_bit_to_right_of | yourball/qubiter | python | def find_T_bit_to_right_of(self, bpos):
'\n Returns position of 1 (True) bit immediately to the right of\n position bpos. Returns -1 if there is no such bit.\n\n Parameters\n ----------\n bpos : int\n bit position\n\n Returns\n -------\n int\n\n '
if (bpos <= 0):
return (- 1)
right_T_bit = bpos
mask = (1 << right_T_bit)
found_it = False
while True:
right_T_bit -= 1
mask >>= 1
found_it = ((self.dec_rep & mask) == mask)
if ((right_T_bit == 0) or found_it):
break
if found_it:
return right_T_bit
else:
return (- 1) |
def find_T_bit_to_left_of(self, bpos):
'\n Returns position of 1 (True) bit immediately to the left of position\n bpos. Returns -1 if there is no such bit.\n\n Parameters\n ----------\n bpos : int\n bit position\n\n Returns\n -------\n int\n\n '
if (bpos >= (self.len - 1)):
return (- 1)
left_T_bit = bpos
mask = (1 << left_T_bit)
found_it = False
while True:
left_T_bit += 1
mask <<= 1
found_it = ((self.dec_rep & mask) == mask)
if ((left_T_bit == (self.len - 1)) or found_it):
break
if found_it:
return left_T_bit
else:
return (- 1) | 1,600,739,133,069,071,000 | Returns position of 1 (True) bit immediately to the left of position
bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int | qubiter/BitVector.py | find_T_bit_to_left_of | yourball/qubiter | python | def find_T_bit_to_left_of(self, bpos):
'\n Returns position of 1 (True) bit immediately to the left of position\n bpos. Returns -1 if there is no such bit.\n\n Parameters\n ----------\n bpos : int\n bit position\n\n Returns\n -------\n int\n\n '
if (bpos >= (self.len - 1)):
return (- 1)
left_T_bit = bpos
mask = (1 << left_T_bit)
found_it = False
while True:
left_T_bit += 1
mask <<= 1
found_it = ((self.dec_rep & mask) == mask)
if ((left_T_bit == (self.len - 1)) or found_it):
break
if found_it:
return left_T_bit
else:
return (- 1) |
def find_leftmost_T_bit(self):
'\n Out of all 1 (True) bits, returns position of the leftmost one.\n Returns -1 if there is no such bit.\n\n Returns\n -------\n int\n\n '
if self.bit_is_T((self.len - 1)):
return (self.len - 1)
else:
return self.find_T_bit_to_right_of((self.len - 1)) | -3,475,562,950,627,349,500 | Out of all 1 (True) bits, returns position of the leftmost one.
Returns -1 if there is no such bit.
Returns
-------
int | qubiter/BitVector.py | find_leftmost_T_bit | yourball/qubiter | python | def find_leftmost_T_bit(self):
'\n Out of all 1 (True) bits, returns position of the leftmost one.\n Returns -1 if there is no such bit.\n\n Returns\n -------\n int\n\n '
if self.bit_is_T((self.len - 1)):
return (self.len - 1)
else:
return self.find_T_bit_to_right_of((self.len - 1)) |
def find_rightmost_T_bit(self):
'\n Out of all 1 (True) bits, returns position of the rightmost one.\n Returns -1 if there is no such bit.\n\n Returns\n -------\n int\n\n '
if self.bit_is_T(0):
return 0
else:
return self.find_T_bit_to_left_of(0) | 2,662,890,795,470,964,700 | Out of all 1 (True) bits, returns position of the rightmost one.
Returns -1 if there is no such bit.
Returns
-------
int | qubiter/BitVector.py | find_rightmost_T_bit | yourball/qubiter | python | def find_rightmost_T_bit(self):
'\n Out of all 1 (True) bits, returns position of the rightmost one.\n Returns -1 if there is no such bit.\n\n Returns\n -------\n int\n\n '
if self.bit_is_T(0):
return 0
else:
return self.find_T_bit_to_left_of(0) |
def get_bit_string(self):
'\n Returns self represented as string of length self.len of ones and\n zeros. If bit_str is the output, [int(x) for x in bit_str] will turn\n result to list of ints.\n\n Returns\n -------\n str\n\n '
bit_str = ''
for beta in range((self.len - 1), (- 1), (- 1)):
if self.bit_is_T(beta):
bit_str += '1'
else:
bit_str += '0'
return bit_str | -3,725,536,626,985,473,500 | Returns self represented as string of length self.len of ones and
zeros. If bit_str is the output, [int(x) for x in bit_str] will turn
result to list of ints.
Returns
-------
str | qubiter/BitVector.py | get_bit_string | yourball/qubiter | python | def get_bit_string(self):
'\n Returns self represented as string of length self.len of ones and\n zeros. If bit_str is the output, [int(x) for x in bit_str] will turn\n result to list of ints.\n\n Returns\n -------\n str\n\n '
bit_str = ''
for beta in range((self.len - 1), (- 1), (- 1)):
if self.bit_is_T(beta):
bit_str += '1'
else:
bit_str += '0'
return bit_str |
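# Usage sketch: the string is most-significant bit first.
bv = BitVector(4, 6)
assert bv.get_bit_string() == '0110'
assert [int(x) for x in bv.get_bit_string()] == [0, 1, 1, 0]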
def __str__(self):
'\n Readable representation of self\n\n Returns\n -------\n str\n\n '
return ((self.get_bit_string() + '=') + str(self.dec_rep)) | -1,192,800,099,141,709,300 | Readable representation of self
Returns
-------
str | qubiter/BitVector.py | __str__ | yourball/qubiter | python | def __str__(self):
'\n Readable representation of self\n\n Returns\n -------\n str\n\n '
return ((self.get_bit_string() + '=') + str(self.dec_rep)) |
@staticmethod
def new_with_T_on_diff(bvec1, bvec2):
'\n Given two BitVectors bvec1 and bvec2, this returns a BitVector which\n is a bitwise xor (mod 2 sum) of the bits of bvec1 and bvec2.\n\n Parameters\n ----------\n bvec1 : BitVector\n bvec2 : BitVector\n\n Returns\n -------\n BitVector\n\n '
assert (bvec1.len == bvec2.len)
return BitVector(bvec1.len, (bvec1.dec_rep ^ bvec2.dec_rep)) | 7,352,476,823,073,408,000 | Given two BitVectors bvec1 and bvec2, this returns a BitVector which
is a bitwise xor (mod 2 sum) of the bits of bvec1 and bvec2.
Parameters
----------
bvec1 : BitVector
bvec2 : BitVector
Returns
-------
BitVector | qubiter/BitVector.py | new_with_T_on_diff | yourball/qubiter | python | @staticmethod
def new_with_T_on_diff(bvec1, bvec2):
'\n Given two BitVectors bvec1 and bvec2, this returns a BitVector which\n is a bitwise xor (mod 2 sum) of the bits of bvec1 and bvec2.\n\n Parameters\n ----------\n bvec1 : BitVector\n bvec2 : BitVector\n\n Returns\n -------\n BitVector\n\n '
assert (bvec1.len == bvec2.len)
return BitVector(bvec1.len, (bvec1.dec_rep ^ bvec2.dec_rep)) |
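# Usage sketch: the result flags the positions where the two vectors differ.
a = BitVector(3, 0b101)
b = BitVector(3, 0b011)
assert BitVector.new_with_T_on_diff(a, b).dec_rep == 0b110   # 5 ^ 3 == 6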
@staticmethod
def get_lazy_from_normal(bit_len, normal):
'\n Throughout Qubiter, we will often refer to "Gray Code" as "lazy\n ordering". In lazy ordering with bit_len many bits, one gives a\n sequence of bit vectors of length bit_len, so that two adjacent\n items of the sequence differ by just one bit. For example 000=0,\n 100=4, 110=6, 010=2, 011=3, 111=7, 101=5, 001=1. Each element of\n this sequence represented as an int will be called lazy, and each\n int in the sequence 0, 1, 2, 3, 4, 5, 6, 7 will be called normal.\n Normal ordering is usually called dictionary ordering. Normal and\n lazy sequences both start at 0.\n\n Suppose bit_len = 3. The lazy sequence 000, 100, 110, 010, 011, 111,\n 101, 001 is easily obtained from the "standard" lazy sequence 000,\n 001, 011, 010, 110, 111, 101, 100 by "reflecting" each sequence\n term. We will use the second sequence because it is more common in\n the literature.\n\n References\n ----------\n 1. Martin Gardner, "Knotted Doughnuts and Other\n Mathematical Entertainments", chapt. 2, "The Binary Gray Code"\n 2. "Numerical Recipes in C"\n 3. Many books on Discrete Mathematics for CompSci types\n 4. On the web, in Eric\'s Treasure Trove/Math/Gray Codes\n\n Parameters\n ----------\n bit_len : int\n normal : int\n Function returns the lazy int that corresponds to this normal int.\n\n Returns\n -------\n int\n\n '
lazy_bvec = BitVector(bit_len, normal)
lazy = lazy_bvec.dec_rep
if (bit_len > 1):
for m in range((bit_len - 2), (- 1), (- 1)):
lazy ^= (((normal >> (m + 1)) & 1) << m)
return lazy | 7,674,673,722,284,487,000 | Throughout Qubiter, we will often refer to "Gray Code" as "lazy
ordering". In lazy ordering with bit_len many bits, one gives a
sequence of bit vectors of length bit_len, so that two adjacent
items of the sequence differ by just one bit. For example 000=0,
100=4, 110=6, 010=2, 011=3, 111=7, 101=5, 001=1. Each element of
this sequence represented as an int will be called lazy, and each
int in the sequence 0, 1, 2, 3, 4, 5, 6, 7 will be called normal.
Normal ordering is usually called dictionary ordering. Normal and
lazy sequences both start at 0.
Suppose bit_len = 3. The lazy sequence 000, 100, 110, 010, 011, 111,
101, 001 is easily obtained from the "standard" lazy sequence 000,
001, 011, 010, 110, 111, 101, 100 by "reflecting" each sequence
term. We will use the second sequence because it is more common in
the literature.
References
----------
1. Martin Gardner, "Knotted Doughnuts and Other
Mathematical Entertainments", chapt. 2, "The Binary Gray Code"
2. "Numerical Recipies in C"
3. Many books on Discrete Mathematics for CompSci types
4. On the web, in Eric's Treasure Trove/Math/Gray Codes
Parameters
----------
bit_len : int
normal : int
Function returns the lazy int that corresponds to this normal int.
Returns
-------
int | qubiter/BitVector.py | get_lazy_from_normal | yourball/qubiter | python | @staticmethod
def get_lazy_from_normal(bit_len, normal):
'\n Throughout Qubiter, we will often refer to "Gray Code" as "lazy\n ordering". In lazy ordering with bit_len many bits, one gives a\n sequence of bit vectors of length bit_len, so that two adjacent\n items of the sequence differ by just one bit. For example 000=0,\n 100=4, 110=6, 010=2, 011=3, 111=7, 101=5, 001=1. Each element of\n this sequence represented as an int will be called lazy, and each\n int in the sequence 0, 1, 2, 3, 4, 5, 6, 7 will be called normal.\n Normal ordering is usually called dictionary ordering. Normal and\n lazy sequences both start at 0.\n\n Suppose bit_len = 3. The lazy sequence 000, 100, 110, 010, 011, 111,\n 101, 001 is easily obtained from the "standard" lazy sequence 000,\n 001, 011, 010, 110, 111, 101, 100 by "reflecting" each sequence\n term. We will use the second sequence because it is more common in\n the literature.\n\n References\n ----------\n 1. Martin Gardner, "Knotted Doughnuts and Other\n Mathematical Entertainments", chapt. 2, "The Binary Gray Code"\n 2. "Numerical Recipes in C"\n 3. Many books on Discrete Mathematics for CompSci types\n 4. On the web, in Eric\'s Treasure Trove/Math/Gray Codes\n\n Parameters\n ----------\n bit_len : int\n normal : int\n Function returns the lazy int that corresponds to this normal int.\n\n Returns\n -------\n int\n\n '
lazy_bvec = BitVector(bit_len, normal)
lazy = lazy_bvec.dec_rep
if (bit_len > 1):
for m in range((bit_len - 2), (- 1), (- 1)):
lazy ^= (((normal >> (m + 1)) & 1) << m)
return lazy |
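# The loop above implements the standard binary-to-Gray map lazy = normal ^ (normal >> 1);
# for bit_len == 3 it reproduces the sequence from the docstring:
assert [BitVector.get_lazy_from_normal(3, n) for n in range(8)] == [0, 1, 3, 2, 6, 7, 5, 4]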
@staticmethod
def lazy_advance(old_normal, old_lazy):
'\n This method takes int "old_lazy" (which corresponds to bit vector\n "old_normal"), and changes it to the next lazy int, "new_lazy" (\n which corresponds to "new_normal").\n\n example:\n\n lazy sequence: 000, 001, 011, 010, 110, 111, 101, 100\n\n old_lazy = 011\n old_normal = 2 = 010\n\n new_normal = 3 = 011\n mask = (new_normal & ~old_normal) = 011 & 101 = 001\n new_lazy = new_normal ^ mask = 011 ^ 001 = 010\n\n\n Parameters\n ----------\n old_normal : int\n old_lazy : int\n\n Returns\n -------\n int, int\n\n '
new_normal = (old_normal + 1)
new_lazy = (old_lazy ^ (new_normal & (~ (new_normal - 1))))
return (new_normal, new_lazy) | 2,850,775,961,367,675,000 | This method takes int "old_lazy" (which corresponds to bit vector
"old_normal"), and changes it to the next lazy int, "new_lazy" (
which corresponds to "new_normal").
example:
lazy sequence: 000, 001, 011, 010, 110, 111, 101, 100
old_lazy = 011
old_normal = 2 = 010
new_normal = 3 = 011
mask = (new_normal & ~old_normal) = 011 & 101 = 001
new_lazy = new_normal ^ mask = 011 ^ 001 = 010
Parameters
----------
old_normal : int
old_lazy : int
Returns
-------
int, int | qubiter/BitVector.py | lazy_advance | yourball/qubiter | python | @staticmethod
def lazy_advance(old_normal, old_lazy):
'\n This method takes int "old_lazy" (which corresponds to bit vector\n "old_normal"), and changes it to the next lazy int, "new_lazy" (\n which corresponds to "new_normal").\n\n example:\n\n lazy sequence: 000, 001, 011, 010, 110, 111, 101, 100\n\n old_lazy = 011\n old_normal = 2 = 010\n\n new_normal = 3 = 011\n mask = (new_normal & ~old_normal) = 011 & 101 = 001\n new_lazy = new_normal ^ mask = 011 ^ 001 = 010\n\n\n Parameters\n ----------\n old_normal : int\n old_lazy : int\n\n Returns\n -------\n int, int\n\n '
new_normal = (old_normal + 1)
new_lazy = (old_lazy ^ (new_normal & (~ (new_normal - 1))))
return (new_normal, new_lazy) |
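# Usage sketch: stepping through the full 3-bit Gray sequence with lazy_advance.
normal, lazy = 0, 0
seq = [lazy]
for _ in range(7):
    normal, lazy = BitVector.lazy_advance(normal, lazy)
    seq.append(lazy)
assert seq == [0, 1, 3, 2, 6, 7, 5, 4]   # 000, 001, 011, 010, 110, 111, 101, 100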
def __init__(self, sol_dim, max_iters, popsize, num_elites, cost_function, upper_bound=None, lower_bound=None, epsilon=0.001, alpha=0.25, viz_dir=None):
'Creates an instance of this class.\n Arguments:\n sol_dim (int): The dimensionality of the problem space\n max_iters (int): The maximum number of iterations to perform during optimization\n popsize (int): The number of candidate solutions to be sampled at every iteration\n num_elites (int): The number of top solutions that will be used to obtain the distribution\n at the next iteration.\n upper_bound (np.array): An array of upper bounds\n lower_bound (np.array): An array of lower bounds\n epsilon (float): A minimum variance. If the maximum variance drops below epsilon, optimization is\n stopped.\n alpha (float): Controls how much of the previous mean and variance is used for the next iteration.\n next_mean = alpha * old_mean + (1 - alpha) * elite_mean, and similarly for variance.\n '
super().__init__()
(self.sol_dim, self.max_iters, self.popsize, self.num_elites) = (sol_dim, max_iters, popsize, num_elites)
(self.ub, self.lb) = (upper_bound, lower_bound)
(self.epsilon, self.alpha) = (epsilon, alpha)
self.cost_function = cost_function
if (viz_dir is not None):
self.writer = tensorboardX.SummaryWriter(viz_dir)
else:
self.writer = tensorboardX.SummaryWriter()
if (num_elites > popsize):
raise ValueError('Number of elites must be at most the population size.') | -5,363,686,924,115,796,000 | Creates an instance of this class.
Arguments:
sol_dim (int): The dimensionality of the problem space
max_iters (int): The maximum number of iterations to perform during optimization
popsize (int): The number of candidate solutions to be sampled at every iteration
num_elites (int): The number of top solutions that will be used to obtain the distribution
at the next iteration.
upper_bound (np.array): An array of upper bounds
lower_bound (np.array): An array of lower bounds
epsilon (float): A minimum variance. If the maximum variance drops below epsilon, optimization is
stopped.
alpha (float): Controls how much of the previous mean and variance is used for the next iteration.
next_mean = alpha * old_mean + (1 - alpha) * elite_mean, and similarly for variance. | dr/experiment/ppo_pytorch.py | __init__ | quanvuong/domain_randomization | python | def __init__(self, sol_dim, max_iters, popsize, num_elites, cost_function, upper_bound=None, lower_bound=None, epsilon=0.001, alpha=0.25, viz_dir=None):
'Creates an instance of this class.\n Arguments:\n sol_dim (int): The dimensionality of the problem space\n max_iters (int): The maximum number of iterations to perform during optimization\n popsize (int): The number of candidate solutions to be sampled at every iteration\n num_elites (int): The number of top solutions that will be used to obtain the distribution\n at the next iteration.\n upper_bound (np.array): An array of upper bounds\n lower_bound (np.array): An array of lower bounds\n epsilon (float): A minimum variance. If the maximum variance drops below epsilon, optimization is\n stopped.\n alpha (float): Controls how much of the previous mean and variance is used for the next iteration.\n next_mean = alpha * old_mean + (1 - alpha) * elite_mean, and similarly for variance.\n '
super().__init__()
(self.sol_dim, self.max_iters, self.popsize, self.num_elites) = (sol_dim, max_iters, popsize, num_elites)
(self.ub, self.lb) = (upper_bound, lower_bound)
(self.epsilon, self.alpha) = (epsilon, alpha)
self.cost_function = cost_function
if (viz_dir is not None):
self.writer = tensorboardX.SummaryWriter(viz_dir)
else:
self.writer = tensorboardX.SummaryWriter()
if (num_elites > popsize):
raise ValueError('Number of elites must be at most the population size.') |
def obtain_solution(self, init_mean, init_var):
'Optimizes the cost function using the provided initial candidate distribution\n Arguments:\n init_mean (np.ndarray): The mean of the initial candidate distribution.\n init_var (np.ndarray): The variance of the initial candidate distribution.\n '
(mean, var, t) = (init_mean, init_var, 0)
X = stats.truncnorm((- 2), 2, loc=np.zeros_like(mean), scale=np.ones_like(var))
costs_hist = []
mean_hist = []
var_hist = []
while ((t < self.max_iters) and (np.max(var) > self.epsilon)):
(lb_dist, ub_dist) = ((mean - self.lb), (self.ub - mean))
constrained_var = np.minimum(np.minimum(np.square((lb_dist / 2)), np.square((ub_dist / 2))), var)
samples = ((X.rvs(size=[self.popsize, self.sol_dim]) * np.sqrt(constrained_var)) + mean)
samples = samples.astype(np.float32)
costs = self.cost_function(samples, t)
elites = samples[np.argsort(costs)][:self.num_elites]
new_mean = np.mean(elites, axis=0)
new_var = np.var(elites, axis=0)
mean = ((self.alpha * mean) + ((1 - self.alpha) * new_mean))
var = ((self.alpha * var) + ((1 - self.alpha) * new_var))
for (i, m) in enumerate(mean):
self.writer.add_scalar(f'mean/{i}', m, t)
for (i, m) in enumerate(var):
self.writer.add_scalar(f'var/{i}', m, t)
self.writer.add_scalar('costs', np.min(costs), t)
t += 1
costs_hist.append(costs)
mean_hist.append(mean)
var_hist.append(var)
self.writer.close()
return dict(mean_hist=mean_hist, costs_hist=costs_hist, var_hist=var_hist) | -1,709,952,683,294,620,700 | Optimizes the cost function using the provided initial candidate distribution
Arguments:
init_mean (np.ndarray): The mean of the initial candidate distribution.
init_var (np.ndarray): The variance of the initial candidate distribution. | dr/experiment/ppo_pytorch.py | obtain_solution | quanvuong/domain_randomization | python | def obtain_solution(self, init_mean, init_var):
'Optimizes the cost function using the provided initial candidate distribution\n Arguments:\n init_mean (np.ndarray): The mean of the initial candidate distribution.\n init_var (np.ndarray): The variance of the initial candidate distribution.\n '
(mean, var, t) = (init_mean, init_var, 0)
X = stats.truncnorm((- 2), 2, loc=np.zeros_like(mean), scale=np.ones_like(var))
costs_hist = []
mean_hist = []
var_hist = []
while ((t < self.max_iters) and (np.max(var) > self.epsilon)):
(lb_dist, ub_dist) = ((mean - self.lb), (self.ub - mean))
constrained_var = np.minimum(np.minimum(np.square((lb_dist / 2)), np.square((ub_dist / 2))), var)
samples = ((X.rvs(size=[self.popsize, self.sol_dim]) * np.sqrt(constrained_var)) + mean)
samples = samples.astype(np.float32)
costs = self.cost_function(samples, t)
elites = samples[np.argsort(costs)][:self.num_elites]
new_mean = np.mean(elites, axis=0)
new_var = np.var(elites, axis=0)
mean = ((self.alpha * mean) + ((1 - self.alpha) * new_mean))
var = ((self.alpha * var) + ((1 - self.alpha) * new_var))
for (i, m) in enumerate(mean):
self.writer.add_scalar(f'mean/{i}', m, t)
for (i, m) in enumerate(var):
self.writer.add_scalar(f'var/{i}', m, t)
self.writer.add_scalar('costs', np.min(costs), t)
t += 1
costs_hist.append(costs)
mean_hist.append(mean)
var_hist.append(var)
self.writer.close()
return dict(mean_hist=mean_hist, costs_hist=costs_hist, var_hist=var_hist) |
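# Usage sketch with a toy quadratic cost; the optimizer class name `CEM` is an
# assumption (the record does not show it), and the cost takes (samples, t)
# exactly as in the body above.
import numpy as np

def quadratic_cost(samples, t):
    return np.sum(np.square(samples - 2.0), axis=1)   # minimized at 2.0 per dimension

opt = CEM(sol_dim=4, max_iters=50, popsize=400, num_elites=40,
          cost_function=quadratic_cost,
          upper_bound=np.full(4, 5.0), lower_bound=np.full(4, -5.0))
hist = opt.obtain_solution(init_mean=np.zeros(4), init_var=np.ones(4) * 4.0)
final_mean = hist['mean_hist'][-1]                    # expected to approach 2.0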
async def async_lock(self, access_token, lock_id):
'Execute a remote lock operation.\n\n Returns a LockStatus state.\n '
return determine_lock_status((await self._async_lock(access_token, lock_id)).get('status')) | 3,962,434,966,695,672,300 | Execute a remote lock operation.
Returns a LockStatus state. | august/api_async.py | async_lock | THATDONFC/py-august | python | async def async_lock(self, access_token, lock_id):
'Execute a remote lock operation.\n\n Returns a LockStatus state.\n '
return determine_lock_status((await self._async_lock(access_token, lock_id)).get('status')) |
async def async_lock_return_activities(self, access_token, lock_id):
'Execute a remote lock operation.\n\n Returns an array of one or more august.activity.Activity objects\n\n If the lock supports door sense one of the activities\n will include the current door state.\n '
return _convert_lock_result_to_activities((await self._async_lock(access_token, lock_id))) | -20,103,754,425,368,150 | Execute a remote lock operation.
Returns an array of one or more august.activity.Activity objects
If the lock supports door sense one of the activities
will include the current door state. | august/api_async.py | async_lock_return_activities | THATDONFC/py-august | python | async def async_lock_return_activities(self, access_token, lock_id):
'Execute a remote lock operation.\n\n Returns an array of one or more august.activity.Activity objects\n\n If the lock supports door sense one of the activities\n will include the current door state.\n '
return _convert_lock_result_to_activities((await self._async_lock(access_token, lock_id))) |
async def async_unlock(self, access_token, lock_id):
'Execute a remote unlock operation.\n\n Returns a LockStatus state.\n '
return determine_lock_status((await self._async_unlock(access_token, lock_id)).get('status')) | 1,674,035,768,962,010,600 | Execute a remote unlock operation.
Returns a LockStatus state. | august/api_async.py | async_unlock | THATDONFC/py-august | python | async def async_unlock(self, access_token, lock_id):
'Execute a remote unlock operation.\n\n Returns a LockStatus state.\n '
return determine_lock_status((await self._async_unlock(access_token, lock_id)).get('status')) |
async def async_unlock_return_activities(self, access_token, lock_id):
'Execute a remote lock operation.\n\n Returns an array of one or more august.activity.Activity objects\n\n If the lock supports door sense one of the activities\n will include the current door state.\n '
return _convert_lock_result_to_activities((await self._async_unlock(access_token, lock_id))) | 2,709,392,198,846,596,600 | Execute a remote lock operation.
Returns an array of one or more august.activity.Activity objects
If the lock supports door sense one of the activities
will include the current door state. | august/api_async.py | async_unlock_return_activities | THATDONFC/py-august | python | async def async_unlock_return_activities(self, access_token, lock_id):
'Execute a remote lock operation.\n\n Returns an array of one or more august.activity.Activity objects\n\n If the lock supports door sense one of the activities\n will include the current door state.\n '
return _convert_lock_result_to_activities((await self._async_unlock(access_token, lock_id))) |
async def async_refresh_access_token(self, access_token):
'Obtain a new api token.'
return (await self._async_dict_to_api(self._build_refresh_access_token_request(access_token))).headers[HEADER_AUGUST_ACCESS_TOKEN] | 7,545,354,170,499,416,000 | Obtain a new api token. | august/api_async.py | async_refresh_access_token | THATDONFC/py-august | python | async def async_refresh_access_token(self, access_token):
return (await self._async_dict_to_api(self._build_refresh_access_token_request(access_token))).headers[HEADER_AUGUST_ACCESS_TOKEN] |
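# Usage sketch for the async lock/unlock helpers above; `api` is assumed to be an
# instance of the class these coroutines belong to, and the arguments are placeholders.
import asyncio

async def cycle_lock(api, access_token, lock_id):
    status = await api.async_unlock(access_token, lock_id)                   # LockStatus
    activities = await api.async_lock_return_activities(access_token, lock_id)
    return status, activities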
def __init__(self, sizes=list(), learning_rate=1.0, mini_batch_size=16, epochs=10):
'Initialize a Neural Network model.\n\n Parameters\n ----------\n sizes : list, optional\n A list of integers specifying number of neurons in each layer. Not\n required if a pretrained model is used.\n\n learning_rate : float, optional\n Learning rate for gradient descent optimization. Defaults to 1.0\n\n mini_batch_size : int, optional\n Size of each mini batch of training examples as used by Stochastic\n Gradient Descent. Denotes after how many examples the weights\n and biases would be updated. Default size is 16.\n\n '
self.sizes = sizes
self.num_layers = len(sizes)
self.weights = ([np.array([0])] + [np.random.randn(y, x) for (y, x) in zip(sizes[1:], sizes[:(- 1)])])
self.biases = [np.random.randn(y, 1) for y in sizes]
self._zs = [np.zeros(bias.shape) for bias in self.biases]
self._activations = [np.zeros(bias.shape) for bias in self.biases]
self.mini_batch_size = mini_batch_size
self.epochs = epochs
self.eta = learning_rate | 6,273,857,992,573,708,000 | Initialize a Neural Network model.
Parameters
----------
sizes : list, optional
A list of integers specifying number of neurns in each layer. Not
required if a pretrained model is used.
learning_rate : float, optional
Learning rate for gradient descent optimization. Defaults to 1.0
mini_batch_size : int, optional
Size of each mini batch of training examples as used by Stochastic
Gradient Descent. Denotes after how many examples the weights
and biases would be updated. Default size is 16. | src/nn_model.py | __init__ | fredwangwang/webcam-sudoku-solver | python | def __init__(self, sizes=list(), learning_rate=1.0, mini_batch_size=16, epochs=10):
'Initialize a Neural Network model.\n\n Parameters\n ----------\n sizes : list, optional\n A list of integers specifying number of neurons in each layer. Not\n required if a pretrained model is used.\n\n learning_rate : float, optional\n Learning rate for gradient descent optimization. Defaults to 1.0\n\n mini_batch_size : int, optional\n Size of each mini batch of training examples as used by Stochastic\n Gradient Descent. Denotes after how many examples the weights\n and biases would be updated. Default size is 16.\n\n '
self.sizes = sizes
self.num_layers = len(sizes)
self.weights = ([np.array([0])] + [np.random.randn(y, x) for (y, x) in zip(sizes[1:], sizes[:(- 1)])])
self.biases = [np.random.randn(y, 1) for y in sizes]
self._zs = [np.zeros(bias.shape) for bias in self.biases]
self._activations = [np.zeros(bias.shape) for bias in self.biases]
self.mini_batch_size = mini_batch_size
self.epochs = epochs
self.eta = learning_rate |
def fit(self, training_data, validation_data=None):
'Fit (train) the Neural Network on provided training data. Fitting is\n carried out using Stochastic Gradient Descent Algorithm.\n\n Parameters\n ----------\n training_data : list of tuple\n A list of tuples of numpy arrays, ordered as (image, label).\n\n validation_data : list of tuple, optional\n Same as `training_data`, if provided, the network will display\n validation accuracy after each epoch.\n\n '
for epoch in range(self.epochs):
random.shuffle(training_data)
mini_batches = [training_data[k:(k + self.mini_batch_size)] for k in range(0, len(training_data), self.mini_batch_size)]
for mini_batch in mini_batches:
nabla_b = [np.zeros(bias.shape) for bias in self.biases]
nabla_w = [np.zeros(weight.shape) for weight in self.weights]
for (x, y) in mini_batch:
self._forward_prop(x)
(delta_nabla_b, delta_nabla_w) = self._back_prop(x, y)
nabla_b = [(nb + dnb) for (nb, dnb) in zip(nabla_b, delta_nabla_b)]
nabla_w = [(nw + dnw) for (nw, dnw) in zip(nabla_w, delta_nabla_w)]
self.weights = [(w - ((self.eta / self.mini_batch_size) * dw)) for (w, dw) in zip(self.weights, nabla_w)]
self.biases = [(b - ((self.eta / self.mini_batch_size) * db)) for (b, db) in zip(self.biases, nabla_b)]
if validation_data:
accuracy = (self.validate(validation_data) / 100.0)
print('Epoch {0}, accuracy {1} %.'.format((epoch + 1), accuracy))
else:
print('Processed epoch {0}.'.format(epoch)) | 897,726,686,871,590,900 | Fit (train) the Neural Network on provided training data. Fitting is
carried out using Stochastic Gradient Descent Algorithm.
Parameters
----------
training_data : list of tuple
A list of tuples of numpy arrays, ordered as (image, label).
validation_data : list of tuple, optional
Same as `training_data`, if provided, the network will display
validation accuracy after each epoch. | src/nn_model.py | fit | fredwangwang/webcam-sudoku-solver | python | def fit(self, training_data, validation_data=None):
'Fit (train) the Neural Network on provided training data. Fitting is\n carried out using Stochastic Gradient Descent Algorithm.\n\n Parameters\n ----------\n training_data : list of tuple\n A list of tuples of numpy arrays, ordered as (image, label).\n\n validation_data : list of tuple, optional\n Same as `training_data`, if provided, the network will display\n validation accuracy after each epoch.\n\n '
for epoch in range(self.epochs):
random.shuffle(training_data)
mini_batches = [training_data[k:(k + self.mini_batch_size)] for k in range(0, len(training_data), self.mini_batch_size)]
for mini_batch in mini_batches:
nabla_b = [np.zeros(bias.shape) for bias in self.biases]
nabla_w = [np.zeros(weight.shape) for weight in self.weights]
for (x, y) in mini_batch:
self._forward_prop(x)
(delta_nabla_b, delta_nabla_w) = self._back_prop(x, y)
nabla_b = [(nb + dnb) for (nb, dnb) in zip(nabla_b, delta_nabla_b)]
nabla_w = [(nw + dnw) for (nw, dnw) in zip(nabla_w, delta_nabla_w)]
self.weights = [(w - ((self.eta / self.mini_batch_size) * dw)) for (w, dw) in zip(self.weights, nabla_w)]
self.biases = [(b - ((self.eta / self.mini_batch_size) * db)) for (b, db) in zip(self.biases, nabla_b)]
if validation_data:
accuracy = (self.validate(validation_data) / 100.0)
print('Epoch {0}, accuracy {1} %.'.format((epoch + 1), accuracy))
else:
print('Processed epoch {0}.'.format(epoch)) |
def validate(self, validation_data):
'Validate the Neural Network on provided validation data. It uses the\n number of correctly predicted examples as validation accuracy metric.\n\n Parameters\n ----------\n validation_data : list of tuple\n\n Returns\n -------\n int\n Number of correctly predicted images.\n\n '
validation_results = [(self.predict(x) == y) for (x, y) in validation_data]
return sum((result for result in validation_results)) | 791,219,755,680,909,700 | Validate the Neural Network on provided validation data. It uses the
number of correctly predicted examples as validation accuracy metric.
Parameters
----------
validation_data : list of tuple
Returns
-------
int
Number of correctly predicted images. | src/nn_model.py | validate | fredwangwang/webcam-sudoku-solver | python | def validate(self, validation_data):
'Validate the Neural Network on provided validation data. It uses the\n number of correctly predicted examples as validation accuracy metric.\n\n Parameters\n ----------\n validation_data : list of tuple\n\n Returns\n -------\n int\n Number of correctly predicted images.\n\n '
validation_results = [(self.predict(x) == y) for (x, y) in validation_data]
return sum((result for result in validation_results)) |
def predict(self, x):
'Predict the label of a single test example (image).\n\n Parameters\n ----------\n x : numpy.array\n\n Returns\n -------\n int\n Predicted label of example (image).\n\n '
self._forward_prop(x)
return np.argmax(self._activations[(- 1)]) | 112,982,234,359,509,470 | Predict the label of a single test example (image).
Parameters
----------
x : numpy.array
Returns
-------
int
Predicted label of example (image). | src/nn_model.py | predict | fredwangwang/webcam-sudoku-solver | python | def predict(self, x):
'Predict the label of a single test example (image).\n\n Parameters\n ----------\n x : numpy.array\n\n Returns\n -------\n int\n Predicted label of example (image).\n\n '
self._forward_prop(x)
return np.argmax(self._activations[(- 1)]) |
def load(self, filename='model.npz'):
'Prepare a neural network from a compressed binary containing weights\n and biases arrays. Size of layers are derived from dimensions of\n numpy arrays.\n\n Parameters\n ----------\n filename : str, optional\n Name of the ``.npz`` compressed binary in models directory.\n\n '
npz_members = np.load(os.path.join(os.curdir, 'models', filename))
self.weights = list(npz_members['weights'])
self.biases = list(npz_members['biases'])
self.sizes = [b.shape[0] for b in self.biases]
self.num_layers = len(self.sizes)
self._zs = [np.zeros(bias.shape) for bias in self.biases]
self._activations = [np.zeros(bias.shape) for bias in self.biases]
self.mini_batch_size = int(npz_members['mini_batch_size'])
self.epochs = int(npz_members['epochs'])
self.eta = float(npz_members['eta']) | -6,871,594,500,702,750,000 | Prepare a neural network from a compressed binary containing weights
and biases arrays. Size of layers are derived from dimensions of
numpy arrays.
Parameters
----------
filename : str, optional
Name of the ``.npz`` compressed binary in models directory. | src/nn_model.py | load | fredwangwang/webcam-sudoku-solver | python | def load(self, filename='model.npz'):
'Prepare a neural network from a compressed binary containing weights\n and biases arrays. Size of layers are derived from dimensions of\n numpy arrays.\n\n Parameters\n ----------\n filename : str, optional\n Name of the ``.npz`` compressed binary in models directory.\n\n '
npz_members = np.load(os.path.join(os.curdir, 'models', filename))
self.weights = list(npz_members['weights'])
self.biases = list(npz_members['biases'])
self.sizes = [b.shape[0] for b in self.biases]
self.num_layers = len(self.sizes)
self._zs = [np.zeros(bias.shape) for bias in self.biases]
self._activations = [np.zeros(bias.shape) for bias in self.biases]
self.mini_batch_size = int(npz_members['mini_batch_size'])
self.epochs = int(npz_members['epochs'])
self.eta = float(npz_members['eta']) |
def save(self, filename='model.npz'):
"Save weights, biases and hyperparameters of neural network to a\n compressed binary. This ``.npz`` binary is saved in 'models' directory.\n\n Parameters\n ----------\n filename : str, optional\n Name of the ``.npz`` compressed binary in to be saved.\n\n "
np.savez_compressed(file=os.path.join(os.curdir, 'models', filename), weights=self.weights, biases=self.biases, mini_batch_size=self.mini_batch_size, epochs=self.epochs, eta=self.eta) | 5,541,264,440,548,084,000 | Save weights, biases and hyperparameters of neural network to a
compressed binary. This ``.npz`` binary is saved in 'models' directory.
Parameters
----------
filename : str, optional
Name of the ``.npz`` compressed binary to be saved. | src/nn_model.py | save | fredwangwang/webcam-sudoku-solver | python | def save(self, filename='model.npz'):
"Save weights, biases and hyperparameters of neural network to a\n compressed binary. This ``.npz`` binary is saved in 'models' directory.\n\n Parameters\n ----------\n filename : str, optional\n Name of the ``.npz`` compressed binary in to be saved.\n\n "
np.savez_compressed(file=os.path.join(os.curdir, 'models', filename), weights=self.weights, biases=self.biases, mini_batch_size=self.mini_batch_size, epochs=self.epochs, eta=self.eta) |
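# End-to-end usage sketch for the model above; the class name `NeuralNetwork`
# and MNIST-style (image, label) tuples are assumptions for illustration.
net = NeuralNetwork(sizes=[784, 30, 10], learning_rate=3.0, mini_batch_size=16, epochs=10)
net.fit(training_data, validation_data)        # SGD over shuffled mini-batches
correct = net.validate(validation_data)        # number of correct predictions
net.save('model.npz')                          # written under ./models/
restored = NeuralNetwork()
restored.load('model.npz')                     # sizes are rebuilt from the bias shapes
label = restored.predict(test_image)           # argmax of the output activations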
def reset(self):
'\n\t\tReset current position to beginning.\n\t\t'
self.arm.reset()
self.current = self.arm.end_effector_position() | -1,015,991,858,288,929,900 | Reset current position to beginning. | environments/robot_arm/robot_arm.py | reset | callaunchpad/MOR | python | def reset(self):
'\n\t\t\n\t\t'
self.arm.reset()
self.current = self.arm.end_effector_position() |
def act(self, location, population, params, master):
'\n\t\tMove end effector to the given location\n\t\t'
valid = True
past = self.current
self.current = location
if (((population % self.config['record_iterations']) == 0) and master):
print('Recording')
try:
self.arm.ikine(location)
timestamp = datetime.now().strftime('%m-%d-%Y_%H-%M-%S')
training_path = (self.training_directory + '/records/')
try:
os.makedirs(training_path)
except OSError as e:
if (e.errno != errno.EEXIST):
raise
record_path = (((training_path + 'pop_') + str(population)) + '.npy')
video_path = (((training_path + 'pop_') + str(population)) + '.mp4')
self.arm.save_path(record_path)
self.env.animate(duration=5.0, save_path=video_path)
np.save(((training_path + 'net_') + str(population)), params)
except ValueError as e:
valid = False
logging.warn('Could not solve IK for position: {}'.format(location[0]))
return valid | -2,383,995,251,774,368,300 | Move end effector to the given location | environments/robot_arm/robot_arm.py | act | callaunchpad/MOR | python | def act(self, location, population, params, master):
'\n\t\t\n\t\t'
valid = True
past = self.current
self.current = location
if (((population % self.config['record_iterations']) == 0) and master):
print('Recording')
try:
self.arm.ikine(location)
timestamp = datetime.now().strftime('%m-%d-%Y_%H-%M-%S')
training_path = (self.training_directory + '/records/')
try:
os.makedirs(training_path)
except OSError as e:
if (e.errno != errno.EEXIST):
raise
record_path = (((training_path + 'pop_') + str(population)) + '.npy')
video_path = (((training_path + 'pop_') + str(population)) + '.mp4')
self.arm.save_path(record_path)
self.env.animate(duration=5.0, save_path=video_path)
np.save(((training_path + 'net_') + str(population)), params)
except ValueError as e:
valid = False
logging.warn('Could not solve IK for position: {}'.format(location[0]))
return valid |
def inputs(self, t):
'\n\t\tReturn the inputs for the neural network\n\t\t'
inputs = [self.current[0], self.current[1], self.current[2], self.target[0], self.target[1], self.target[2], (t + 1)]
return inputs | -5,793,087,367,901,682,000 | Return the inputs for the neural network | environments/robot_arm/robot_arm.py | inputs | callaunchpad/MOR | python | def inputs(self, t):
'\n\t\t\n\t\t'
inputs = [self.current[0], self.current[1], self.current[2], self.target[0], self.target[1], self.target[2], (t + 1)]
return inputs |
def reward_params(self, valid):
'\n\t\tReturn the parameters for the proposed reward function\n\t\t'
params = (self.current, self.target)
return params | 692,410,265,936,325,900 | Return the parameters for the proposed reward function | environments/robot_arm/robot_arm.py | reward_params | callaunchpad/MOR | python | def reward_params(self, valid):
'\n\t\t\n\t\t'
params = (self.current, self.target)
return params |
def pre_processing(self):
'\n\t\tComplete any pending post processing tasks\n\t\t'
pass | -7,139,161,499,287,091,000 | Complete any pending post processing tasks | environments/robot_arm/robot_arm.py | pre_processing | callaunchpad/MOR | python | def pre_processing(self):
'\n\t\t\n\t\t'
pass |
def post_processing(self):
'\n\t\tComplete any pending post processing tasks\n\t\t'
pass | -1,660,335,344,934,753,500 | Complete any pending post processing tasks | environments/robot_arm/robot_arm.py | post_processing | callaunchpad/MOR | python | def post_processing(self):
'\n\t\t\n\t\t'
pass |
def cluster(df: pd.DataFrame, header_prefix: List[str]=None, maxsize: int=20, method: str='single', numeric_cat: List[str]=None, plot=False) -> List[List[str]]:
'\n Given an input dataframe, extract clusters of similar headers\n based on a set of heuristics.\n\n Args:\n df: The dataframe to cluster headers from.\n header_prefix: List of columns to remove before cluster generation.\n maxsize: The max number of header clusters to generate\n from the input dataframe.\n method: Linkage method used to compute header cluster\n distances. For more information please refer to the scipy\n docs, https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html#scipy-cluster-hierarchy-linkage.\n numeric_cat: A list of fields to define as categorical. The header\n clustering code will automatically define pandas "object" and\n "category" columns as categorical. The ``numeric_cat`` parameter\n may be used to define additional categorical fields that may\n not automatically get identified as such.\n plot: Plot header list as a dendrogram.\n '
def prepare_response(col_list: List[List[str]], prefix: List[str]=None) -> List[List[str]]:
if (prefix is not None):
col_list[0] = (prefix + col_list[0])
return col_list
if (numeric_cat is None):
numeric_cat = []
if (header_prefix is not None):
try:
df = df.drop(header_prefix, axis=1)
except KeyError as err:
raise ValueError('Header prefixes do not all exist in source DF') from err
if (df.shape[1] == 1):
return prepare_response([list(df.columns)], header_prefix)
corr_matrix = _get_correlation_matrix(df, numeric_cat)
X = (1 - np.array((1 - abs(corr_matrix))))
L = sch.linkage(X, method=method)
Lopt = sch.optimal_leaf_ordering(L, X)
columns = df.columns
start = (len(Lopt) - 1)
clusters = _traverse_node(Lopt, start, maxsize, len(columns))
col_list = _merge_clusters(clusters, maxsize, columns, Lopt, plot)
return prepare_response(col_list, header_prefix) | 4,085,142,219,848,973,300 | Given an input dataframe, extract clusters of similar headers
based on a set of heuristics.
Args:
df: The dataframe to cluster headers from.
header_prefix: List of columns to remove before cluster generation.
maxsize: The max number of header clusters to generate
from the input dataframe.
method: Linkage method used to compute header cluster
distances. For more information please refer to the scipy
docs, https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html#scipy-cluster-hierarchy-linkage.
numeric_cat: A list of fields to define as categorical. The header
clustering code will automatically define pandas "object" and
"category" columns as categorical. The ``numeric_cat`` parameter
may be used to define additional categorical fields that may
not automatically get identified as such.
plot: Plot header list as a dendrogram. | src/gretel_synthetics/utils/header_clusters.py | cluster | andrewnc/gretel-synthetics | python | def cluster(df: pd.DataFrame, header_prefix: List[str]=None, maxsize: int=20, method: str='single', numeric_cat: List[str]=None, plot=False) -> List[List[str]]:
'\n Given an input dataframe, extract clusters of similar headers\n based on a set of heuristics.\n\n Args:\n df: The dataframe to cluster headers from.\n header_prefix: List of columns to remove before cluster generation.\n maxsize: The max number of header clusters to generate\n from the input dataframe.\n method: Linkage method used to compute header cluster\n distances. For more information please refer to the scipy\n docs, https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html#scipy-cluster-hierarchy-linkage.\n numeric_cat: A list of fields to define as categorical. The header\n clustering code will automatically define pandas "object" and\n "category" columns as categorical. The ``numeric_cat`` parameter\n may be used to define additional categorical fields that may\n not automatically get identified as such.\n plot: Plot header list as a dendrogram.\n '
def prepare_response(col_list: List[List[str]], prefix: List[str]=None) -> List[List[str]]:
if (prefix is not None):
col_list[0] = (prefix + col_list[0])
return col_list
if (numeric_cat is None):
numeric_cat = []
if (header_prefix is not None):
try:
df = df.drop(header_prefix, axis=1)
except KeyError as err:
raise ValueError('Header prefixes do not all exist in source DF') from err
if (df.shape[1] == 1):
return prepare_response([list(df.columns)], header_prefix)
corr_matrix = _get_correlation_matrix(df, numeric_cat)
X = (1 - np.array((1 - abs(corr_matrix))))
L = sch.linkage(X, method=method)
Lopt = sch.optimal_leaf_ordering(L, X)
columns = df.columns
start = (len(Lopt) - 1)
clusters = _traverse_node(Lopt, start, maxsize, len(columns))
col_list = _merge_clusters(clusters, maxsize, columns, Lopt, plot)
return prepare_response(col_list, header_prefix) |
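# Usage sketch with a tiny synthetic frame; the grouping shown in the comment is
# illustrative only, since the actual clusters depend on the computed correlations.
import pandas as pd
df = pd.DataFrame({'age': [25, 32, 47, 51], 'height': [1.7, 1.8, 1.6, 1.75],
                   'city': ['a', 'b', 'a', 'b'], 'country': ['x', 'y', 'x', 'y']})
header_clusters = cluster(df, maxsize=2)       # e.g. [['age', 'height'], ['city', 'country']]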
def __init__(self, more_items_remaining=None, total_item_count=None, continuation_token=None, items=None):
'\n Keyword args:\n more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.\n total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.\n continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).\n items (list[PolicyRuleSmbClient]): Returns a list of all items after filtering. The values are displayed for each name where meaningful.\n '
if (more_items_remaining is not None):
self.more_items_remaining = more_items_remaining
if (total_item_count is not None):
self.total_item_count = total_item_count
if (continuation_token is not None):
self.continuation_token = continuation_token
if (items is not None):
self.items = items | -7,882,988,585,856,706,000 | Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[PolicyRuleSmbClient]): Returns a list of all items after filtering. The values are displayed for each name where meaningful. | pypureclient/flasharray/FA_2_5/models/policy_rule_smb_client_get_response.py | __init__ | Flav-STOR-WL/py-pure-client | python | def __init__(self, more_items_remaining=None, total_item_count=None, continuation_token=None, items=None):
'\n Keyword args:\n more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.\n total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.\n continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).\n items (list[PolicyRuleSmbClient]): Returns a list of all items after filtering. The values are displayed for each name where meaningful.\n '
if (more_items_remaining is not None):
self.more_items_remaining = more_items_remaining
if (total_item_count is not None):
self.total_item_count = total_item_count
if (continuation_token is not None):
self.continuation_token = continuation_token
if (items is not None):
self.items = items |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(PolicyRuleSmbClientGetResponse, dict):
for (key, value) in self.items():
result[key] = value
return result | 8,830,407,163,213,029,000 | Returns the model properties as a dict | pypureclient/flasharray/FA_2_5/models/policy_rule_smb_client_get_response.py | to_dict | Flav-STOR-WL/py-pure-client | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(PolicyRuleSmbClientGetResponse, dict):
for (key, value) in self.items():
result[key] = value
return result |
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | pypureclient/flasharray/FA_2_5/models/policy_rule_smb_client_get_response.py | to_str | Flav-STOR-WL/py-pure-client | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | pypureclient/flasharray/FA_2_5/models/policy_rule_smb_client_get_response.py | __repr__ | Flav-STOR-WL/py-pure-client | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, PolicyRuleSmbClientGetResponse)):
return False
return (self.__dict__ == other.__dict__) | 7,993,875,800,545,782,000 | Returns true if both objects are equal | pypureclient/flasharray/FA_2_5/models/policy_rule_smb_client_get_response.py | __eq__ | Flav-STOR-WL/py-pure-client | python | def __eq__(self, other):
if (not isinstance(other, PolicyRuleSmbClientGetResponse)):
return False
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | 7,764,124,047,908,058,000 | Returns true if both objects are not equal | pypureclient/flasharray/FA_2_5/models/policy_rule_smb_client_get_response.py | __ne__ | Flav-STOR-WL/py-pure-client | python | def __ne__(self, other):
return (not (self == other)) |
def combine_all_subsets(subsets):
"Merges N subsets into one. Strips top level 'name' and 'fields' keys as well as non-ECS field options since we can't know how to merge those."
merged_subset = {}
for subset in subsets:
strip_non_ecs_options(subset['fields'])
merge_subsets(merged_subset, subset['fields'])
return merged_subset | -5,836,347,463,631,856,000 | Merges N subsets into one. Strips top level 'name' and 'fields' keys as well as non-ECS field options since we can't know how to merge those. | scripts/schema/subset_filter.py | combine_all_subsets | 6un9-h0-Dan/ecs | python | def combine_all_subsets(subsets):
merged_subset = {}
for subset in subsets:
strip_non_ecs_options(subset['fields'])
merge_subsets(merged_subset, subset['fields'])
return merged_subset |
def eval_globs(globs):
'Accepts an array of glob patterns or file names, returns the array of actual files'
all_files = []
for g in globs:
new_files = glob.glob(g)
if (len(new_files) == 0):
warn('{} did not match any files'.format(g))
else:
all_files.extend(new_files)
return all_files | -7,410,213,171,083,965,000 | Accepts an array of glob patterns or file names, returns the array of actual files | scripts/schema/subset_filter.py | eval_globs | 6un9-h0-Dan/ecs | python | def eval_globs(globs):
all_files = []
for g in globs:
new_files = glob.glob(g)
if (len(new_files) == 0):
warn('{} did not match any files'.format(g))
else:
all_files.extend(new_files)
return all_files |
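# Usage sketch: patterns that match nothing trigger warn('... did not match any files')
# rather than raising; the glob patterns below are illustrative only.
schema_files = eval_globs(['schemas/*.yml', 'custom/schemas/*.yml'])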