body (string, 26 to 98.2k chars) | body_hash (int64) | docstring (string, 1 to 16.8k chars) | path (string, 5 to 230 chars) | name (string, 1 to 96 chars) | repository_name (string, 7 to 89 chars) | lang (string, 1 class: python) | body_without_docstring (string, 20 to 98.2k chars) |
---|---|---|---|---|---|---|---|
@property
def available(self) -> bool:
'Return if bulb is available.'
return self._available | -8,593,444,446,007,529,000 | Return if bulb is available. | homeassistant/components/light/yeelight.py | available | DevRGT/home-assistant | python | @property
def available(self) -> bool:
return self._available |
@property
def supported_features(self) -> int:
'Flag supported features.'
return self._supported_features | 8,102,951,252,997,921,000 | Flag supported features. | homeassistant/components/light/yeelight.py | supported_features | DevRGT/home-assistant | python | @property
def supported_features(self) -> int:
return self._supported_features |
@property
def effect_list(self):
'Return the list of supported effects.'
return YEELIGHT_EFFECT_LIST | -6,125,693,931,358,221,000 | Return the list of supported effects. | homeassistant/components/light/yeelight.py | effect_list | DevRGT/home-assistant | python | @property
def effect_list(self):
return YEELIGHT_EFFECT_LIST |
@property
def color_temp(self) -> int:
'Return the color temperature.'
return self._color_temp | 9,105,838,033,052,904,000 | Return the color temperature. | homeassistant/components/light/yeelight.py | color_temp | DevRGT/home-assistant | python | @property
def color_temp(self) -> int:
return self._color_temp |
@property
def name(self) -> str:
'Return the name of the device if any.'
return self._name | -7,564,036,760,381,367,000 | Return the name of the device if any. | homeassistant/components/light/yeelight.py | name | DevRGT/home-assistant | python | @property
def name(self) -> str:
return self._name |
@property
def is_on(self) -> bool:
'Return true if device is on.'
return self._is_on | 2,519,804,288,039,148,000 | Return true if device is on. | homeassistant/components/light/yeelight.py | is_on | DevRGT/home-assistant | python | @property
def is_on(self) -> bool:
return self._is_on |
@property
def brightness(self) -> int:
'Return the brightness of this light between 1..255.'
return self._brightness | -1,115,853,844,080,985,100 | Return the brightness of this light between 1..255. | homeassistant/components/light/yeelight.py | brightness | DevRGT/home-assistant | python | @property
def brightness(self) -> int:
return self._brightness |
@property
def min_mireds(self):
'Return minimum supported color temperature.'
if (self.supported_features & SUPPORT_COLOR_TEMP):
return kelvin_to_mired(YEELIGHT_RGB_MAX_KELVIN)
return kelvin_to_mired(YEELIGHT_MAX_KELVIN) | 4,766,083,804,337,532,000 | Return minimum supported color temperature. | homeassistant/components/light/yeelight.py | min_mireds | DevRGT/home-assistant | python | @property
def min_mireds(self):
if (self.supported_features & SUPPORT_COLOR_TEMP):
return kelvin_to_mired(YEELIGHT_RGB_MAX_KELVIN)
return kelvin_to_mired(YEELIGHT_MAX_KELVIN) |
@property
def max_mireds(self):
'Return maximum supported color temperature.'
if (self.supported_features & SUPPORT_COLOR_TEMP):
return kelvin_to_mired(YEELIGHT_RGB_MIN_KELVIN)
return kelvin_to_mired(YEELIGHT_MIN_KELVIN) | 7,928,850,946,347,256,000 | Return maximum supported color temperature. | homeassistant/components/light/yeelight.py | max_mireds | DevRGT/home-assistant | python | @property
def max_mireds(self):
if (self.supported_features & SUPPORT_COLOR_TEMP):
return kelvin_to_mired(YEELIGHT_RGB_MIN_KELVIN)
return kelvin_to_mired(YEELIGHT_MIN_KELVIN) |
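The min_mireds and max_mireds properties above rely on kelvin_to_mired, which Home Assistant imports from its color utilities; the conversion is simply one million divided by the Kelvin value, which is why the minimum mired bound comes from the maximum Kelvin constant and vice versa. A minimal standalone sketch of that relationship (the limit values below are illustrative placeholders, not the component's actual constants):

```python
def kelvin_to_mired(kelvin: float) -> float:
    """Convert a color temperature in Kelvin to mireds (1,000,000 / K)."""
    return 1_000_000 / kelvin

# Illustrative limits only; the real YEELIGHT_*_KELVIN constants are defined
# in the component and may differ.
YEELIGHT_MIN_KELVIN = 1700
YEELIGHT_MAX_KELVIN = 6500

print(kelvin_to_mired(YEELIGHT_MAX_KELVIN))  # min_mireds: coolest color -> smallest mired value
print(kelvin_to_mired(YEELIGHT_MIN_KELVIN))  # max_mireds: warmest color -> largest mired value
```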
@property
def hs_color(self) -> tuple:
'Return the color property.'
return self._hs | 6,843,634,616,928,289,000 | Return the color property. | homeassistant/components/light/yeelight.py | hs_color | DevRGT/home-assistant | python | @property
def hs_color(self) -> tuple:
return self._hs |
def set_music_mode(self, mode) -> None:
'Set the music mode on or off.'
if mode:
self._bulb.start_music()
else:
self._bulb.stop_music() | 5,503,438,018,378,298,000 | Set the music mode on or off. | homeassistant/components/light/yeelight.py | set_music_mode | DevRGT/home-assistant | python | def set_music_mode(self, mode) -> None:
if mode:
self._bulb.start_music()
else:
self._bulb.stop_music() |
def update(self) -> None:
'Update properties from the bulb.'
import yeelight
try:
self._bulb.get_properties()
if (self._bulb_device.bulb_type == yeelight.BulbType.Color):
self._supported_features = SUPPORT_YEELIGHT_RGB
self._is_on = (self._properties.get('power') == 'on')
bright = self._properties.get('bright', None)
if bright:
self._brightness = round((255 * (int(bright) / 100)))
temp_in_k = self._properties.get('ct', None)
if temp_in_k:
self._color_temp = kelvin_to_mired(int(temp_in_k))
self._hs = self._get_hs_from_properties()
self._available = True
except yeelight.BulbException as ex:
if self._available:
_LOGGER.error('Unable to update bulb status: %s', ex)
self._available = False | 6,698,611,751,465,550,000 | Update properties from the bulb. | homeassistant/components/light/yeelight.py | update | DevRGT/home-assistant | python | def update(self) -> None:
import yeelight
try:
self._bulb.get_properties()
if (self._bulb_device.bulb_type == yeelight.BulbType.Color):
self._supported_features = SUPPORT_YEELIGHT_RGB
self._is_on = (self._properties.get('power') == 'on')
bright = self._properties.get('bright', None)
if bright:
self._brightness = round((255 * (int(bright) / 100)))
temp_in_k = self._properties.get('ct', None)
if temp_in_k:
self._color_temp = kelvin_to_mired(int(temp_in_k))
self._hs = self._get_hs_from_properties()
self._available = True
except yeelight.BulbException as ex:
if self._available:
_LOGGER.error('Unable to update bulb status: %s', ex)
self._available = False |
@_cmd
def set_brightness(self, brightness, duration) -> None:
'Set bulb brightness.'
if brightness:
_LOGGER.debug('Setting brightness: %s', brightness)
self._bulb.set_brightness(((brightness / 255) * 100), duration=duration) | 8,159,060,235,782,080,000 | Set bulb brightness. | homeassistant/components/light/yeelight.py | set_brightness | DevRGT/home-assistant | python | @_cmd
def set_brightness(self, brightness, duration) -> None:
if brightness:
_LOGGER.debug('Setting brightness: %s', brightness)
self._bulb.set_brightness(((brightness / 255) * 100), duration=duration) |
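Both update() and set_brightness() above translate between Home Assistant's 0-255 brightness scale and the bulb's 0-100 percent scale. A short self-contained sketch of those two conversions:

```python
def ha_to_bulb_percent(brightness: int) -> float:
    """Map Home Assistant brightness (0-255) to a Yeelight percentage (0-100)."""
    return (brightness / 255) * 100

def bulb_percent_to_ha(bright: int) -> int:
    """Map the bulb's 'bright' property (0-100) back to the 0-255 scale."""
    return round(255 * (bright / 100))

assert round(ha_to_bulb_percent(128)) == 50
assert bulb_percent_to_ha(50) == 128   # 255 * 0.5 = 127.5, which rounds to 128
```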
@_cmd
def set_rgb(self, rgb, duration) -> None:
"Set bulb's color."
if (rgb and (self.supported_features & SUPPORT_COLOR)):
_LOGGER.debug('Setting RGB: %s', rgb)
self._bulb.set_rgb(rgb[0], rgb[1], rgb[2], duration=duration) | -9,165,789,828,462,820,000 | Set bulb's color. | homeassistant/components/light/yeelight.py | set_rgb | DevRGT/home-assistant | python | @_cmd
def set_rgb(self, rgb, duration) -> None:
if (rgb and (self.supported_features & SUPPORT_COLOR)):
_LOGGER.debug('Setting RGB: %s', rgb)
self._bulb.set_rgb(rgb[0], rgb[1], rgb[2], duration=duration) |
@_cmd
def set_colortemp(self, colortemp, duration) -> None:
"Set bulb's color temperature."
if (colortemp and (self.supported_features & SUPPORT_COLOR_TEMP)):
temp_in_k = mired_to_kelvin(colortemp)
_LOGGER.debug('Setting color temp: %s K', temp_in_k)
self._bulb.set_color_temp(temp_in_k, duration=duration) | -5,806,106,555,384,193,000 | Set bulb's color temperature. | homeassistant/components/light/yeelight.py | set_colortemp | DevRGT/home-assistant | python | @_cmd
def set_colortemp(self, colortemp, duration) -> None:
if (colortemp and (self.supported_features & SUPPORT_COLOR_TEMP)):
temp_in_k = mired_to_kelvin(colortemp)
_LOGGER.debug('Setting color temp: %s K', temp_in_k)
self._bulb.set_color_temp(temp_in_k, duration=duration) |
@_cmd
def set_default(self) -> None:
'Set current options as default.'
self._bulb.set_default() | -2,304,011,003,329,184,800 | Set current options as default. | homeassistant/components/light/yeelight.py | set_default | DevRGT/home-assistant | python | @_cmd
def set_default(self) -> None:
self._bulb.set_default() |
@_cmd
def set_flash(self, flash) -> None:
'Activate flash.'
if flash:
from yeelight import RGBTransition, SleepTransition, Flow, BulbException
if (self._bulb.last_properties['color_mode'] != 1):
_LOGGER.error('Flash supported currently only in RGB mode.')
return
transition = int(self.config[CONF_TRANSITION])
if (flash == FLASH_LONG):
count = 1
duration = (transition * 5)
if (flash == FLASH_SHORT):
count = 1
duration = (transition * 2)
(red, green, blue) = color_util.color_hs_to_RGB(*self._hs)
transitions = list()
transitions.append(RGBTransition(255, 0, 0, brightness=10, duration=duration))
transitions.append(SleepTransition(duration=transition))
transitions.append(RGBTransition(red, green, blue, brightness=self.brightness, duration=duration))
flow = Flow(count=count, transitions=transitions)
try:
self._bulb.start_flow(flow)
except BulbException as ex:
_LOGGER.error('Unable to set flash: %s', ex) | 5,451,685,536,072,715,000 | Activate flash. | homeassistant/components/light/yeelight.py | set_flash | DevRGT/home-assistant | python | @_cmd
def set_flash(self, flash) -> None:
if flash:
from yeelight import RGBTransition, SleepTransition, Flow, BulbException
if (self._bulb.last_properties['color_mode'] != 1):
_LOGGER.error('Flash supported currently only in RGB mode.')
return
transition = int(self.config[CONF_TRANSITION])
if (flash == FLASH_LONG):
count = 1
duration = (transition * 5)
if (flash == FLASH_SHORT):
count = 1
duration = (transition * 2)
(red, green, blue) = color_util.color_hs_to_RGB(*self._hs)
transitions = list()
transitions.append(RGBTransition(255, 0, 0, brightness=10, duration=duration))
transitions.append(SleepTransition(duration=transition))
transitions.append(RGBTransition(red, green, blue, brightness=self.brightness, duration=duration))
flow = Flow(count=count, transitions=transitions)
try:
self._bulb.start_flow(flow)
except BulbException as ex:
_LOGGER.error('Unable to set flash: %s', ex) |
@_cmd
def set_effect(self, effect) -> None:
'Activate effect.'
if effect:
from yeelight import Flow, BulbException
from yeelight.transitions import disco, temp, strobe, pulse, strobe_color, alarm, police, police2, christmas, rgb, randomloop, slowdown
if (effect == EFFECT_STOP):
self._bulb.stop_flow()
return
if (effect == EFFECT_DISCO):
flow = Flow(count=0, transitions=disco())
if (effect == EFFECT_TEMP):
flow = Flow(count=0, transitions=temp())
if (effect == EFFECT_STROBE):
flow = Flow(count=0, transitions=strobe())
if (effect == EFFECT_STROBE_COLOR):
flow = Flow(count=0, transitions=strobe_color())
if (effect == EFFECT_ALARM):
flow = Flow(count=0, transitions=alarm())
if (effect == EFFECT_POLICE):
flow = Flow(count=0, transitions=police())
if (effect == EFFECT_POLICE2):
flow = Flow(count=0, transitions=police2())
if (effect == EFFECT_CHRISTMAS):
flow = Flow(count=0, transitions=christmas())
if (effect == EFFECT_RGB):
flow = Flow(count=0, transitions=rgb())
if (effect == EFFECT_RANDOM_LOOP):
flow = Flow(count=0, transitions=randomloop())
if (effect == EFFECT_FAST_RANDOM_LOOP):
flow = Flow(count=0, transitions=randomloop(duration=250))
if (effect == EFFECT_SLOWDOWN):
flow = Flow(count=0, transitions=slowdown())
if (effect == EFFECT_WHATSAPP):
flow = Flow(count=2, transitions=pulse(37, 211, 102))
if (effect == EFFECT_FACEBOOK):
flow = Flow(count=2, transitions=pulse(59, 89, 152))
if (effect == EFFECT_TWITTER):
flow = Flow(count=2, transitions=pulse(0, 172, 237))
try:
self._bulb.start_flow(flow)
except BulbException as ex:
_LOGGER.error('Unable to set effect: %s', ex) | -6,212,819,468,885,633,000 | Activate effect. | homeassistant/components/light/yeelight.py | set_effect | DevRGT/home-assistant | python | @_cmd
def set_effect(self, effect) -> None:
if effect:
from yeelight import Flow, BulbException
from yeelight.transitions import disco, temp, strobe, pulse, strobe_color, alarm, police, police2, christmas, rgb, randomloop, slowdown
if (effect == EFFECT_STOP):
self._bulb.stop_flow()
return
if (effect == EFFECT_DISCO):
flow = Flow(count=0, transitions=disco())
if (effect == EFFECT_TEMP):
flow = Flow(count=0, transitions=temp())
if (effect == EFFECT_STROBE):
flow = Flow(count=0, transitions=strobe())
if (effect == EFFECT_STROBE_COLOR):
flow = Flow(count=0, transitions=strobe_color())
if (effect == EFFECT_ALARM):
flow = Flow(count=0, transitions=alarm())
if (effect == EFFECT_POLICE):
flow = Flow(count=0, transitions=police())
if (effect == EFFECT_POLICE2):
flow = Flow(count=0, transitions=police2())
if (effect == EFFECT_CHRISTMAS):
flow = Flow(count=0, transitions=christmas())
if (effect == EFFECT_RGB):
flow = Flow(count=0, transitions=rgb())
if (effect == EFFECT_RANDOM_LOOP):
flow = Flow(count=0, transitions=randomloop())
if (effect == EFFECT_FAST_RANDOM_LOOP):
flow = Flow(count=0, transitions=randomloop(duration=250))
if (effect == EFFECT_SLOWDOWN):
flow = Flow(count=0, transitions=slowdown())
if (effect == EFFECT_WHATSAPP):
flow = Flow(count=2, transitions=pulse(37, 211, 102))
if (effect == EFFECT_FACEBOOK):
flow = Flow(count=2, transitions=pulse(59, 89, 152))
if (effect == EFFECT_TWITTER):
flow = Flow(count=2, transitions=pulse(0, 172, 237))
try:
self._bulb.start_flow(flow)
except BulbException as ex:
_LOGGER.error('Unable to set effect: %s', ex) |
def turn_on(self, **kwargs) -> None:
'Turn the bulb on.'
import yeelight
brightness = kwargs.get(ATTR_BRIGHTNESS)
colortemp = kwargs.get(ATTR_COLOR_TEMP)
hs_color = kwargs.get(ATTR_HS_COLOR)
rgb = (color_util.color_hs_to_RGB(*hs_color) if hs_color else None)
flash = kwargs.get(ATTR_FLASH)
effect = kwargs.get(ATTR_EFFECT)
duration = int(self.config[CONF_TRANSITION])
if (ATTR_TRANSITION in kwargs):
duration = int((kwargs.get(ATTR_TRANSITION) * 1000))
try:
self._bulb.turn_on(duration=duration)
except yeelight.BulbException as ex:
_LOGGER.error('Unable to turn the bulb on: %s', ex)
return
if (self.config[CONF_MODE_MUSIC] and (not self._bulb.music_mode)):
try:
self.set_music_mode(self.config[CONF_MODE_MUSIC])
except yeelight.BulbException as ex:
_LOGGER.error('Unable to turn on music mode, consider disabling it: %s', ex)
try:
self.set_rgb(rgb, duration)
self.set_colortemp(colortemp, duration)
self.set_brightness(brightness, duration)
self.set_flash(flash)
self.set_effect(effect)
except yeelight.BulbException as ex:
_LOGGER.error('Unable to set bulb properties: %s', ex)
return
if (self.config[CONF_SAVE_ON_CHANGE] and (brightness or colortemp or rgb)):
try:
self.set_default()
except yeelight.BulbException as ex:
_LOGGER.error('Unable to set the defaults: %s', ex)
return | -3,381,828,197,371,370,500 | Turn the bulb on. | homeassistant/components/light/yeelight.py | turn_on | DevRGT/home-assistant | python | def turn_on(self, **kwargs) -> None:
import yeelight
brightness = kwargs.get(ATTR_BRIGHTNESS)
colortemp = kwargs.get(ATTR_COLOR_TEMP)
hs_color = kwargs.get(ATTR_HS_COLOR)
rgb = (color_util.color_hs_to_RGB(*hs_color) if hs_color else None)
flash = kwargs.get(ATTR_FLASH)
effect = kwargs.get(ATTR_EFFECT)
duration = int(self.config[CONF_TRANSITION])
if (ATTR_TRANSITION in kwargs):
duration = int((kwargs.get(ATTR_TRANSITION) * 1000))
try:
self._bulb.turn_on(duration=duration)
except yeelight.BulbException as ex:
_LOGGER.error('Unable to turn the bulb on: %s', ex)
return
if (self.config[CONF_MODE_MUSIC] and (not self._bulb.music_mode)):
try:
self.set_music_mode(self.config[CONF_MODE_MUSIC])
except yeelight.BulbException as ex:
_LOGGER.error('Unable to turn on music mode, consider disabling it: %s', ex)
try:
self.set_rgb(rgb, duration)
self.set_colortemp(colortemp, duration)
self.set_brightness(brightness, duration)
self.set_flash(flash)
self.set_effect(effect)
except yeelight.BulbException as ex:
_LOGGER.error('Unable to set bulb properties: %s', ex)
return
if (self.config[CONF_SAVE_ON_CHANGE] and (brightness or colortemp or rgb)):
try:
self.set_default()
except yeelight.BulbException as ex:
_LOGGER.error('Unable to set the defaults: %s', ex)
return |
def turn_off(self, **kwargs) -> None:
'Turn off.'
import yeelight
duration = int(self.config[CONF_TRANSITION])
if (ATTR_TRANSITION in kwargs):
duration = int((kwargs.get(ATTR_TRANSITION) * 1000))
try:
self._bulb.turn_off(duration=duration)
except yeelight.BulbException as ex:
_LOGGER.error('Unable to turn the bulb off: %s', ex) | -5,587,446,905,218,160,000 | Turn off. | homeassistant/components/light/yeelight.py | turn_off | DevRGT/home-assistant | python | def turn_off(self, **kwargs) -> None:
import yeelight
duration = int(self.config[CONF_TRANSITION])
if (ATTR_TRANSITION in kwargs):
duration = int((kwargs.get(ATTR_TRANSITION) * 1000))
try:
self._bulb.turn_off(duration=duration)
except yeelight.BulbException as ex:
_LOGGER.error('Unable to turn the bulb off: %s', ex) |
def set_mode(self, mode: str):
'Set a power mode.'
import yeelight
try:
self._bulb.set_power_mode(yeelight.enums.PowerMode[mode.upper()])
except yeelight.BulbException as ex:
_LOGGER.error('Unable to set the power mode: %s', ex) | 1,404,244,294,184,249,000 | Set a power mode. | homeassistant/components/light/yeelight.py | set_mode | DevRGT/home-assistant | python | def set_mode(self, mode: str):
import yeelight
try:
self._bulb.set_power_mode(yeelight.enums.PowerMode[mode.upper()])
except yeelight.BulbException as ex:
_LOGGER.error('Unable to set the power mode: %s', ex) |
def addSymbol(self, char):
'Displays the inputted char onto the display'
self.stringContents += char
self.displayStr.set(self.stringContents) | -116,713,786,399,674,820 | Displays the inputted char onto the display | ProgrammingInPython/proj08_daniel_campos.py | addSymbol | spacemanidol/RPICS | python | def addSymbol(self, char):
self.stringContents += char
self.displayStr.set(self.stringContents) |
def addKeyboardSymbol(self, event):
'Displays the inputted char onto the display'
self.stringContents += str(repr(event.char))[1:(- 1)]
self.displayStr.set(self.stringContents) | 8,320,440,915,511,281,000 | Displays the inputted char onto the display | ProgrammingInPython/proj08_daniel_campos.py | addKeyboardSymbol | spacemanidol/RPICS | python | def addKeyboardSymbol(self, event):
self.stringContents += str(repr(event.char))[1:(- 1)]
self.displayStr.set(self.stringContents) |
def evaluate(self, evt=None):
'Evaluates the expression'
try:
self.displayStr.set(eval(self.stringContents))
self.stringContents = str(eval(self.stringContents))
except Exception as e:
self.displayStr.set('Error')
self.stringContents = '' | 5,710,872,660,080,229,000 | Evaluates the expression | ProgrammingInPython/proj08_daniel_campos.py | evaluate | spacemanidol/RPICS | python | def evaluate(self, evt=None):
try:
self.displayStr.set(eval(self.stringContents))
self.stringContents = str(eval(self.stringContents))
except Exception as e:
self.displayStr.set('Error')
self.stringContents = ''
def clear(self, evt=None):
'Clears the expression'
self.stringContents = ''
self.displayStr.set(self.stringContents) | 3,363,923,291,867,862,000 | Clears the expression | ProgrammingInPython/proj08_daniel_campos.py | clear | spacemanidol/RPICS | python | def clear(self, evt=None):
self.stringContents = ''
self.displayStr.set(self.stringContents) |
def backSpace(self, evt=None):
'Backspace on expression'
self.stringContents = self.stringContents[:(- 1)]
self.displayStr.set(self.stringContents) | 7,594,805,476,417,825,000 | Backspace on expression | ProgrammingInPython/proj08_daniel_campos.py | backSpace | spacemanidol/RPICS | python | def backSpace(self, evt=None):
self.stringContents = self.stringContents[:(- 1)]
self.displayStr.set(self.stringContents) |
def count_samples(ns_run, **kwargs):
'Number of samples in run.\n\n Unlike most estimators this does not require log weights, but for\n convenience will not throw an error if they are specified.\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n\n Returns\n -------\n int\n '
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0] | 4,457,394,597,630,097,400 | Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int | nestcheck/estimators.py | count_samples | ThomasEdwardRiley/nestcheck | python | def count_samples(ns_run, **kwargs):
'Number of samples in run.\n\n Unlike most estimators this does not require log weights, but for\n convenience will not throw an error if they are specified.\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n\n Returns\n -------\n int\n '
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0] |
def logz(ns_run, logw=None, simulate=False):
'Natural log of Bayesian evidence :math:`\\log \\mathcal{Z}`.\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n\n Returns\n -------\n float\n '
if (logw is None):
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw) | -5,927,102,853,806,405,000 | Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float | nestcheck/estimators.py | logz | ThomasEdwardRiley/nestcheck | python | def logz(ns_run, logw=None, simulate=False):
'Natural log of Bayesian evidence :math:`\\log \\mathcal{Z}`.\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n\n Returns\n -------\n float\n '
if (logw is None):
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw) |
def evidence(ns_run, logw=None, simulate=False):
'Bayesian evidence :math:`\\log \\mathcal{Z}`.\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n\n Returns\n -------\n float\n '
if (logw is None):
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return np.exp(scipy.special.logsumexp(logw)) | 2,010,785,813,126,751,500 | Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float | nestcheck/estimators.py | evidence | ThomasEdwardRiley/nestcheck | python | def evidence(ns_run, logw=None, simulate=False):
'Bayesian evidence :math:`\\log \\mathcal{Z}`.\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n\n Returns\n -------\n float\n '
if (logw is None):
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return np.exp(scipy.special.logsumexp(logw)) |
def param_mean(ns_run, logw=None, simulate=False, param_ind=0, handle_indexerror=False):
"Mean of a single parameter (single component of theta).\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n param_ind: int, optional\n Index of parameter for which the mean should be calculated. This\n corresponds to the column of ns_run['theta'] which contains the\n parameter.\n handle_indexerror: bool, optional\n Make the function function return nan rather than raising an\n IndexError if param_ind >= ndim. This is useful when applying\n the same list of estimators to data sets of different dimensions.\n\n Returns\n -------\n float\n "
if (logw is None):
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp((logw - logw.max()))
try:
return (np.sum((w_relative * ns_run['theta'][:, param_ind])) / np.sum(w_relative))
except IndexError:
if handle_indexerror:
return np.nan
else:
raise | 3,105,416,283,611,357,700 | Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
Make the function function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float | nestcheck/estimators.py | param_mean | ThomasEdwardRiley/nestcheck | python | def param_mean(ns_run, logw=None, simulate=False, param_ind=0, handle_indexerror=False):
"Mean of a single parameter (single component of theta).\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n param_ind: int, optional\n Index of parameter for which the mean should be calculated. This\n corresponds to the column of ns_run['theta'] which contains the\n parameter.\n handle_indexerror: bool, optional\n Make the function function return nan rather than raising an\n IndexError if param_ind >= ndim. This is useful when applying\n the same list of estimators to data sets of different dimensions.\n\n Returns\n -------\n float\n "
if (logw is None):
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp((logw - logw.max()))
try:
return (np.sum((w_relative * ns_run['theta'][:, param_ind])) / np.sum(w_relative))
except IndexError:
if handle_indexerror:
return np.nan
else:
raise |
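param_mean and the other estimators in this file share one pattern: samples arrive with log-weights, which are shifted by their maximum before exponentiating so the normalisation never overflows. A self-contained sketch of that weighting step with toy numbers (standing in for ns_run_utils.get_logw(ns_run) and ns_run['theta'][:, param_ind]):

```python
import numpy as np

logw = np.array([-10.0, -9.0, -8.5, -12.0])   # toy log-weights
theta = np.array([1.0, 2.0, 3.0, 4.0])        # toy parameter values

# Subtracting the maximum makes the largest relative weight exactly 1,
# so np.exp never overflows regardless of the absolute scale of logw.
w_relative = np.exp(logw - logw.max())
posterior_mean = np.sum(w_relative * theta) / np.sum(w_relative)
print(posterior_mean)
```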
def param_cred(ns_run, logw=None, simulate=False, probability=0.5, param_ind=0):
"One-tailed credible interval on the value of a single parameter\n (component of theta).\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n probability: float, optional\n Quantile to estimate - must be in open interval (0, 1).\n For example, use 0.5 for the median and 0.84 for the upper\n 84% quantile. Passed to weighted_quantile.\n param_ind: int, optional\n Index of parameter for which the credible interval should be\n calculated. This corresponds to the column of ns_run['theta']\n which contains the parameter.\n\n Returns\n -------\n float\n "
if (logw is None):
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp((logw - logw.max()))
return weighted_quantile(probability, ns_run['theta'][:, param_ind], w_relative) | 4,726,149,972,506,099,000 | One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float | nestcheck/estimators.py | param_cred | ThomasEdwardRiley/nestcheck | python | def param_cred(ns_run, logw=None, simulate=False, probability=0.5, param_ind=0):
"One-tailed credible interval on the value of a single parameter\n (component of theta).\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n probability: float, optional\n Quantile to estimate - must be in open interval (0, 1).\n For example, use 0.5 for the median and 0.84 for the upper\n 84% quantile. Passed to weighted_quantile.\n param_ind: int, optional\n Index of parameter for which the credible interval should be\n calculated. This corresponds to the column of ns_run['theta']\n which contains the parameter.\n\n Returns\n -------\n float\n "
if (logw is None):
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp((logw - logw.max()))
return weighted_quantile(probability, ns_run['theta'][:, param_ind], w_relative) |
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"Mean of the square of single parameter (second moment of its\n posterior distribution).\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n param_ind: int, optional\n Index of parameter for which the second moment should be\n calculated. This corresponds to the column of ns_run['theta']\n which contains the parameter.\n\n Returns\n -------\n float\n "
if (logw is None):
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp((logw - logw.max()))
w_relative /= np.sum(w_relative)
return np.sum((w_relative * (ns_run['theta'][:, param_ind] ** 2))) | 1,526,012,662,973,918,200 | Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float | nestcheck/estimators.py | param_squared_mean | ThomasEdwardRiley/nestcheck | python | def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"Mean of the square of single parameter (second moment of its\n posterior distribution).\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n param_ind: int, optional\n Index of parameter for which the second moment should be\n calculated. This corresponds to the column of ns_run['theta']\n which contains the parameter.\n\n Returns\n -------\n float\n "
if (logw is None):
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp((logw - logw.max()))
w_relative /= np.sum(w_relative)
return np.sum((w_relative * (ns_run['theta'][:, param_ind] ** 2))) |
def r_mean(ns_run, logw=None, simulate=False):
'Mean of the radial coordinate (magnitude of theta vector).\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n\n Returns\n -------\n float\n '
if (logw is None):
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp((logw - logw.max()))
r = np.sqrt(np.sum((ns_run['theta'] ** 2), axis=1))
return (np.sum((w_relative * r)) / np.sum(w_relative)) | -438,064,546,922,247,500 | Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float | nestcheck/estimators.py | r_mean | ThomasEdwardRiley/nestcheck | python | def r_mean(ns_run, logw=None, simulate=False):
'Mean of the radial coordinate (magnitude of theta vector).\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n\n Returns\n -------\n float\n '
if (logw is None):
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp((logw - logw.max()))
r = np.sqrt(np.sum((ns_run['theta'] ** 2), axis=1))
return (np.sum((w_relative * r)) / np.sum(w_relative)) |
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
'One-tailed credible interval on the value of the radial coordinate\n (magnitude of theta vector).\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n probability: float, optional\n Quantile to estimate - must be in open interval (0, 1).\n For example, use 0.5 for the median and 0.84 for the upper\n 84% quantile. Passed to weighted_quantile.\n\n Returns\n -------\n float\n '
if (logw is None):
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp((logw - logw.max()))
r = np.sqrt(np.sum((ns_run['theta'] ** 2), axis=1))
return weighted_quantile(probability, r, w_relative) | -7,417,632,296,846,792,000 | One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float | nestcheck/estimators.py | r_cred | ThomasEdwardRiley/nestcheck | python | def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
'One-tailed credible interval on the value of the radial coordinate\n (magnitude of theta vector).\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n probability: float, optional\n Quantile to estimate - must be in open interval (0, 1).\n For example, use 0.5 for the median and 0.84 for the upper\n 84% quantile. Passed to weighted_quantile.\n\n Returns\n -------\n float\n '
if (logw is None):
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp((logw - logw.max()))
r = np.sqrt(np.sum((ns_run['theta'] ** 2), axis=1))
return weighted_quantile(probability, r, w_relative) |
def get_latex_name(func_in, **kwargs):
'\n Produce a latex formatted name for each function for use in labelling\n results.\n\n Parameters\n ----------\n func_in: function\n kwargs: dict, optional\n Kwargs for function.\n\n Returns\n -------\n latex_name: str\n Latex formatted name for the function.\n '
if isinstance(func_in, functools.partial):
func = func_in.func
assert (not (set(func_in.keywords) & set(kwargs))), 'kwargs={0} and func_in.keywords={1} contain repeated keys'.format(kwargs, func_in.keywords)
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = (('{\\hat{' + str((param_ind + 1))) + '}}')
latex_name_dict = {'count_samples': 'samples', 'logz': '$\\mathrm{log} \\mathcal{Z}$', 'evidence': '$\\mathcal{Z}$', 'r_mean': '$\\overline{|\\theta|}$', 'param_mean': (('$\\overline{\\theta_' + ind_str) + '}$'), 'param_squared_mean': (('$\\overline{\\theta^2_' + ind_str) + '}$')}
if (probability == 0.5):
cred_str = '$\\mathrm{median}('
else:
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = (('$\\mathrm{C.I.}_{' + percent_str) + '\\%}(')
latex_name_dict['param_cred'] = (((cred_str + '\\theta_') + ind_str) + ')$')
latex_name_dict['r_cred'] = (cred_str + '|\\theta|)$')
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = (err.args + (('get_latex_name not yet set up for ' + func.__name__),))
raise | 6,843,338,948,465,915,000 | Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function. | nestcheck/estimators.py | get_latex_name | ThomasEdwardRiley/nestcheck | python | def get_latex_name(func_in, **kwargs):
'\n Produce a latex formatted name for each function for use in labelling\n results.\n\n Parameters\n ----------\n func_in: function\n kwargs: dict, optional\n Kwargs for function.\n\n Returns\n -------\n latex_name: str\n Latex formatted name for the function.\n '
if isinstance(func_in, functools.partial):
func = func_in.func
assert (not (set(func_in.keywords) & set(kwargs))), 'kwargs={0} and func_in.keywords={1} contain repeated keys'.format(kwargs, func_in.keywords)
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = (('{\\hat{' + str((param_ind + 1))) + '}}')
latex_name_dict = {'count_samples': 'samples', 'logz': '$\\mathrm{log} \\mathcal{Z}$', 'evidence': '$\\mathcal{Z}$', 'r_mean': '$\\overline{|\\theta|}$', 'param_mean': (('$\\overline{\\theta_' + ind_str) + '}$'), 'param_squared_mean': (('$\\overline{\\theta^2_' + ind_str) + '}$')}
if (probability == 0.5):
cred_str = '$\\mathrm{median}('
else:
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = (('$\\mathrm{C.I.}_{' + percent_str) + '\\%}(')
latex_name_dict['param_cred'] = (((cred_str + '\\theta_') + ind_str) + ')$')
latex_name_dict['r_cred'] = (cred_str + '|\\theta|)$')
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = (err.args + (('get_latex_name not yet set up for ' + func.__name__),))
raise |
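get_latex_name unwraps functools.partial objects so an estimator preconfigured with keyword arguments (for example param_cred fixed at probability=0.84) still resolves to the underlying function's name and merged keywords. A small sketch of just that unwrapping pattern, using a stub function rather than the real estimator:

```python
import functools

def param_cred(ns_run, probability=0.5, param_ind=0):
    """Stub standing in for the real estimator."""

func_in = functools.partial(param_cred, probability=0.84, param_ind=1)

# Recover the wrapped function and the keywords frozen into the partial,
# mirroring the first branch of get_latex_name above.
if isinstance(func_in, functools.partial):
    func = func_in.func
    kwargs = dict(func_in.keywords)
else:
    func, kwargs = func_in, {}

print(func.__name__, kwargs)   # param_cred {'probability': 0.84, 'param_ind': 1}
```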
def weighted_quantile(probability, values, weights):
'\n Get quantile estimate for input probability given weighted samples using\n linear interpolation.\n\n Parameters\n ----------\n probability: float\n Quantile to estimate - must be in open interval (0, 1).\n For example, use 0.5 for the median and 0.84 for the upper\n 84% quantile.\n values: 1d numpy array\n Sample values.\n weights: 1d numpy array\n Corresponding sample weights (same shape as values).\n\n Returns\n -------\n quantile: float\n '
assert (1 > probability > 0), (('credible interval prob= ' + str(probability)) + ' not in (0, 1)')
assert (values.shape == weights.shape)
assert (values.ndim == 1)
assert (weights.ndim == 1)
sorted_inds = np.argsort(values)
quantiles = (np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds]))
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds]) | -5,182,723,951,505,794,000 | Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float | nestcheck/estimators.py | weighted_quantile | ThomasEdwardRiley/nestcheck | python | def weighted_quantile(probability, values, weights):
'\n Get quantile estimate for input probability given weighted samples using\n linear interpolation.\n\n Parameters\n ----------\n probability: float\n Quantile to estimate - must be in open interval (0, 1).\n For example, use 0.5 for the median and 0.84 for the upper\n 84% quantile.\n values: 1d numpy array\n Sample values.\n weights: 1d numpy array\n Corresponding sample weights (same shape as values).\n\n Returns\n -------\n quantile: float\n '
assert (1 > probability > 0), (('credible interval prob= ' + str(probability)) + ' not in (0, 1)')
assert (values.shape == weights.shape)
assert (values.ndim == 1)
assert (weights.ndim == 1)
sorted_inds = np.argsort(values)
quantiles = (np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds]))
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds]) |
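weighted_quantile builds an empirical CDF by sorting the samples, taking the cumulative weight minus half of each sample's own weight, normalising to one, and then linearly interpolating the requested probability. The same steps with toy data, outside the nestcheck run machinery:

```python
import numpy as np

values = np.array([3.0, 1.0, 2.0, 4.0])
weights = np.array([0.1, 0.4, 0.3, 0.2])

order = np.argsort(values)
# Midpoint-of-step CDF: cumulative weight minus half of each sample's weight.
cdf = np.cumsum(weights[order]) - 0.5 * weights[order]
cdf /= np.sum(weights)
print(np.interp(0.5, cdf, values[order]))   # weighted median
print(np.interp(0.84, cdf, values[order]))  # upper 84% quantile
```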
def _is_sqlite_json1_enabled():
'Check if SQLite implementation includes JSON1 extension.'
con = sqlite3.connect(':memory:')
try:
con.execute("SELECT json_valid('123')")
except sqlite3.OperationalError:
return False
finally:
con.close()
return True | 7,702,032,116,213,631,000 | Check if SQLite implementation includes JSON1 extension. | toron/_node_schema.py | _is_sqlite_json1_enabled | shawnbrown/gpn | python | def _is_sqlite_json1_enabled():
con = sqlite3.connect(':memory:')
try:
con.execute("SELECT json_valid('123')")
except sqlite3.OperationalError:
return False
finally:
con.close()
return True |
def _is_wellformed_json(x):
'Return 1 if *x* is well-formed JSON or return 0 if *x* is not\n well-formed. This function should be registered with SQLite (via\n the create_function() method) when the JSON1 extension is not\n available.\n\n This function mimics the JSON1 json_valid() function, see:\n https://www.sqlite.org/json1.html#jvalid\n '
try:
_loads(x)
except (ValueError, TypeError):
return 0
return 1 | -7,860,604,478,455,344,000 | Return 1 if *x* is well-formed JSON or return 0 if *x* is not
well-formed. This function should be registered with SQLite (via
the create_function() method) when the JSON1 extension is not
available.
This function mimics the JSON1 json_valid() function, see:
https://www.sqlite.org/json1.html#jvalid | toron/_node_schema.py | _is_wellformed_json | shawnbrown/gpn | python | def _is_wellformed_json(x):
'Return 1 if *x* is well-formed JSON or return 0 if *x* is not\n well-formed. This function should be registered with SQLite (via\n the create_function() method) when the JSON1 extension is not\n available.\n\n This function mimics the JSON1 json_valid() function, see:\n https://www.sqlite.org/json1.html#jvalid\n '
try:
_loads(x)
except (ValueError, TypeError):
return 0
return 1 |
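_is_wellformed_json is designed to be registered as an application-defined SQL function when SQLite's JSON1 extension is unavailable, mimicking json_valid(). A minimal sketch of that registration with the standard sqlite3 module (the helper below is a local stand-in rather than an import from toron):

```python
import json
import sqlite3

def is_wellformed_json(x):
    """Return 1 for parseable JSON text, 0 otherwise (mimics JSON1's json_valid())."""
    try:
        json.loads(x)
    except (ValueError, TypeError):
        return 0
    return 1

con = sqlite3.connect(':memory:')
con.create_function('is_wellformed_json', 1, is_wellformed_json)
print(con.execute("SELECT is_wellformed_json('{\"a\": 1}')").fetchone())  # (1,)
print(con.execute("SELECT is_wellformed_json('not json')").fetchone())    # (0,)
con.close()
```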
def _make_trigger_for_json(insert_or_update, table, column):
'Return a SQL statement for creating a temporary trigger. The\n trigger is used to validate the contents of TEXT_JSON type columns.\n The trigger will pass without error if the JSON is wellformed.\n '
if (insert_or_update.upper() not in {'INSERT', 'UPDATE'}):
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f'''
NEW.{column} IS NOT NULL
AND json_valid(NEW.{column}) = 0
'''.rstrip()
else:
when_clause = f'''
NEW.{column} IS NOT NULL
AND is_wellformed_json(NEW.{column}) = 0
'''.rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON');
END;
''' | -861,732,227,289,730,800 | Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_JSON type columns.
The trigger will pass without error if the JSON is wellformed. | toron/_node_schema.py | _make_trigger_for_json | shawnbrown/gpn | python | def _make_trigger_for_json(insert_or_update, table, column):
'Return a SQL statement for creating a temporary trigger. The\n trigger is used to validate the contents of TEXT_JSON type columns.\n The trigger will pass without error if the JSON is wellformed.\n '
if (insert_or_update.upper() not in {'INSERT', 'UPDATE'}):
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f'''
NEW.{column} IS NOT NULL
AND json_valid(NEW.{column}) = 0
'''.rstrip()
else:
when_clause = f'''
NEW.{column} IS NOT NULL
AND is_wellformed_json(NEW.{column}) = 0
'.rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON');
END;
''' |
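The CREATE TEMPORARY TRIGGER text returned by _make_trigger_for_json aborts an INSERT or UPDATE whose JSON column holds malformed text. A compact demonstration of the same idea against an in-memory database, assuming a SQLite build with the JSON1 json_valid() function available (the table and trigger names here are invented for the demo, not toron's schema):

```python
import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE demo (value TEXT)')
con.execute("""
    CREATE TEMPORARY TRIGGER trigger_check_insert_demo_value
    BEFORE INSERT ON demo FOR EACH ROW
    WHEN NEW.value IS NOT NULL AND json_valid(NEW.value) = 0
    BEGIN
        SELECT RAISE(ABORT, 'demo.value must be wellformed JSON');
    END;
""")
con.execute("INSERT INTO demo VALUES ('[1, 2, 3]')")     # passes the trigger
try:
    con.execute("INSERT INTO demo VALUES ('not json')")  # aborted by the trigger
except sqlite3.DatabaseError as err:                     # RAISE(ABORT, ...) surfaces as a DatabaseError subclass
    print(err)                                           # demo.value must be wellformed JSON
con.close()
```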
def _is_wellformed_user_properties(x):
"Check if *x* is a wellformed TEXT_USERPROPERTIES value.\n A wellformed TEXT_USERPROPERTIES value is a string containing\n a JSON formatted object. Returns 1 if *x* is valid or 0 if\n it's not.\n\n This function should be registered as an application-defined\n SQL function and used in queries when SQLite's JSON1 extension\n is not enabled.\n "
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if isinstance(obj, dict):
return 1
return 0 | -7,747,161,462,159,716,000 | Check if *x* is a wellformed TEXT_USERPROPERTIES value.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object. Returns 1 if *x* is valid or 0 if
it's not.
This function should be registered as an application-defined
SQL function and used in queries when SQLite's JSON1 extension
is not enabled. | toron/_node_schema.py | _is_wellformed_user_properties | shawnbrown/gpn | python | def _is_wellformed_user_properties(x):
"Check if *x* is a wellformed TEXT_USERPROPERTIES value.\n A wellformed TEXT_USERPROPERTIES value is a string containing\n a JSON formatted object. Returns 1 if *x* is valid or 0 if\n it's not.\n\n This function should be registered as an application-defined\n SQL function and used in queries when SQLite's JSON1 extension\n is not enabled.\n "
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if isinstance(obj, dict):
return 1
return 0 |
def _make_trigger_for_user_properties(insert_or_update, table, column):
'Return a CREATE TRIGGER statement to check TEXT_USERPROPERTIES\n values. This trigger is used to check values before they are saved\n in the database.\n\n A wellformed TEXT_USERPROPERTIES value is a string containing\n a JSON formatted object.\n\n The trigger will pass without error if the value is wellformed.\n '
if SQLITE_JSON1_ENABLED:
user_properties_check = f"(json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object')"
else:
user_properties_check = f'is_wellformed_user_properties(NEW.{column}) = 0'
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN
NEW.{column} IS NOT NULL
AND {user_properties_check}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON object');
END;
''' | 6,090,807,620,620,976,000 | Return a CREATE TRIGGER statement to check TEXT_USERPROPERTIES
values. This trigger is used to check values before they are saved
in the database.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object.
The trigger will pass without error if the value is wellformed. | toron/_node_schema.py | _make_trigger_for_user_properties | shawnbrown/gpn | python | def _make_trigger_for_user_properties(insert_or_update, table, column):
'Return a CREATE TRIGGER statement to check TEXT_USERPROPERTIES\n values. This trigger is used to check values before they are saved\n in the database.\n\n A wellformed TEXT_USERPROPERTIES value is a string containing\n a JSON formatted object.\n\n The trigger will pass without error if the value is wellformed.\n '
if SQLITE_JSON1_ENABLED:
user_properties_check = f"(json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object')"
else:
user_properties_check = f'is_wellformed_user_properties(NEW.{column}) = 0'
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN
NEW.{column} IS NOT NULL
AND {user_properties_check}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON object');
END;
''' |
def _is_wellformed_attributes(x):
'Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column\n value else returns 0. TEXT_ATTRIBUTES should be flat, JSON\n object strings. This function should be registered with SQLite\n (via the create_function() method) when the JSON1 extension\n is not available.\n '
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if (not isinstance(obj, dict)):
return 0
for value in obj.values():
if (not isinstance(value, str)):
return 0
return 1 | -2,626,542,635,610,831,000 | Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column
value else returns 0. TEXT_ATTRIBUTES should be flat, JSON
object strings. This function should be registered with SQLite
(via the create_function() method) when the JSON1 extension
is not available. | toron/_node_schema.py | _is_wellformed_attributes | shawnbrown/gpn | python | def _is_wellformed_attributes(x):
'Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column\n value else returns 0. TEXT_ATTRIBUTES should be flat, JSON\n object strings. This function should be registered with SQLite\n (via the create_function() method) when the JSON1 extension\n is not available.\n '
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if (not isinstance(obj, dict)):
return 0
for value in obj.values():
if (not isinstance(value, str)):
return 0
return 1 |
def _make_trigger_for_attributes(insert_or_update, table, column):
'Return a SQL statement for creating a temporary trigger. The\n trigger is used to validate the contents of TEXT_ATTRIBUTES\n type columns.\n\n The trigger will pass without error if the JSON is a wellformed\n "object" containing "text" values.\n\n The trigger will raise an error if the value is:\n\n * not wellformed JSON\n * not an "object" type\n * an "object" type that contains one or more "integer", "real",\n "true", "false", "null", "object" or "array" types\n '
if (insert_or_update.upper() not in {'INSERT', 'UPDATE'}):
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f'''
NEW.{column} IS NOT NULL
AND (json_valid(NEW.{column}) = 0
OR json_type(NEW.{column}) != 'object'
OR (SELECT COUNT(*)
FROM json_each(NEW.{column})
WHERE json_each.type != 'text') != 0)
'''.rstrip()
else:
when_clause = f'''
NEW.{column} IS NOT NULL
AND is_wellformed_attributes(NEW.{column}) = 0
'''.rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be a JSON object with text values');
END;
''' | 8,810,595,410,914,199,000 | Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_ATTRIBUTES
type columns.
The trigger will pass without error if the JSON is a wellformed
"object" containing "text" values.
The trigger will raise an error if the value is:
* not wellformed JSON
* not an "object" type
* an "object" type that contains one or more "integer", "real",
"true", "false", "null", "object" or "array" types | toron/_node_schema.py | _make_trigger_for_attributes | shawnbrown/gpn | python | def _make_trigger_for_attributes(insert_or_update, table, column):
'Return a SQL statement for creating a temporary trigger. The\n trigger is used to validate the contents of TEXT_ATTRIBUTES\n type columns.\n\n The trigger will pass without error if the JSON is a wellformed\n "object" containing "text" values.\n\n The trigger will raise an error if the value is:\n\n * not wellformed JSON\n * not an "object" type\n * an "object" type that contains one or more "integer", "real",\n "true", "false", "null", "object" or "array" types\n '
if (insert_or_update.upper() not in {'INSERT', 'UPDATE'}):
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f'''
NEW.{column} IS NOT NULL
AND (json_valid(NEW.{column}) = 0
OR json_type(NEW.{column}) != 'object'
OR (SELECT COUNT(*)
FROM json_each(NEW.{column})
WHERE json_each.type != 'text') != 0)
'''.rstrip()
else:
when_clause = f'''
NEW.{column} IS NOT NULL
AND is_wellformed_attributes(NEW.{column}) = 0
'''.rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be a JSON object with text values');
END;
''' |
def _add_functions_and_triggers(connection):
'Create triggers and application-defined functions *connection*.\n\n Note: This function must not be executed on an empty connection.\n The table schema must exist before triggers can be created.\n '
if (not SQLITE_JSON1_ENABLED):
try:
connection.create_function('is_wellformed_json', 1, _is_wellformed_json, deterministic=True)
connection.create_function('is_wellformed_user_properties', 1, _is_wellformed_user_properties, deterministic=True)
connection.create_function('is_wellformed_attributes', 1, _is_wellformed_attributes, deterministic=True)
except TypeError:
connection.create_function('is_wellformed_json', 1, _is_wellformed_json)
connection.create_function('is_wellformed_user_properties', 1, _is_wellformed_user_properties)
connection.create_function('is_wellformed_attributes', 1, _is_wellformed_attributes)
connection.execute(_make_trigger_for_json('INSERT', 'property', 'value'))
connection.execute(_make_trigger_for_json('UPDATE', 'property', 'value'))
connection.execute(_make_trigger_for_user_properties('INSERT', 'edge', 'user_properties'))
connection.execute(_make_trigger_for_user_properties('UPDATE', 'edge', 'user_properties'))
jsonflatobj_columns = [('edge', 'type_info'), ('quantity', 'attributes'), ('weight', 'type_info')]
for (table, column) in jsonflatobj_columns:
connection.execute(_make_trigger_for_attributes('INSERT', table, column))
connection.execute(_make_trigger_for_attributes('UPDATE', table, column)) | -3,662,904,084,164,747,300 | Create triggers and application-defined functions *connection*.
Note: This function must not be executed on an empty connection.
The table schema must exist before triggers can be created. | toron/_node_schema.py | _add_functions_and_triggers | shawnbrown/gpn | python | def _add_functions_and_triggers(connection):
'Create triggers and application-defined functions *connection*.\n\n Note: This function must not be executed on an empty connection.\n The table schema must exist before triggers can be created.\n '
if (not SQLITE_JSON1_ENABLED):
try:
connection.create_function('is_wellformed_json', 1, _is_wellformed_json, deterministic=True)
connection.create_function('is_wellformed_user_properties', 1, _is_wellformed_user_properties, deterministic=True)
connection.create_function('is_wellformed_attributes', 1, _is_wellformed_attributes, deterministic=True)
except TypeError:
connection.create_function('is_wellformed_json', 1, _is_wellformed_json)
connection.create_function('is_wellformed_user_properties', 1, _is_wellformed_user_properties)
connection.create_function('is_wellformed_attributes', 1, _is_wellformed_attributes)
connection.execute(_make_trigger_for_json('INSERT', 'property', 'value'))
connection.execute(_make_trigger_for_json('UPDATE', 'property', 'value'))
connection.execute(_make_trigger_for_user_properties('INSERT', 'edge', 'user_properties'))
connection.execute(_make_trigger_for_user_properties('UPDATE', 'edge', 'user_properties'))
jsonflatobj_columns = [('edge', 'type_info'), ('quantity', 'attributes'), ('weight', 'type_info')]
for (table, column) in jsonflatobj_columns:
connection.execute(_make_trigger_for_attributes('INSERT', table, column))
connection.execute(_make_trigger_for_attributes('UPDATE', table, column)) |
def _path_to_sqlite_uri(path):
"Convert a path into a SQLite compatible URI.\n\n Unlike pathlib's URI handling, SQLite accepts relative URI paths.\n For details, see:\n\n https://www.sqlite.org/uri.html#the_uri_path\n "
if (os.name == 'nt'):
if re.match('^[a-zA-Z]:', path):
path = os.path.abspath(path)
drive_prefix = f'/{path[:2]}'
path = path[2:]
else:
drive_prefix = ''
path = path.replace('\\', '/')
path = urllib_parse_quote(path)
path = f'{drive_prefix}{path}'
else:
path = urllib_parse_quote(path)
path = re.sub('/+', '/', path)
return f'file:{path}' | -3,435,782,394,609,266,700 | Convert a path into a SQLite compatible URI.
Unlike pathlib's URI handling, SQLite accepts relative URI paths.
For details, see:
https://www.sqlite.org/uri.html#the_uri_path | toron/_node_schema.py | _path_to_sqlite_uri | shawnbrown/gpn | python | def _path_to_sqlite_uri(path):
"Convert a path into a SQLite compatible URI.\n\n Unlike pathlib's URI handling, SQLite accepts relative URI paths.\n For details, see:\n\n https://www.sqlite.org/uri.html#the_uri_path\n "
if (os.name == 'nt'):
if re.match('^[a-zA-Z]:', path):
path = os.path.abspath(path)
drive_prefix = f'/{path[:2]}'
path = path[2:]
else:
            drive_prefix = ''
path = path.replace('\\', '/')
path = urllib_parse_quote(path)
path = f'{drive_prefix}{path}'
else:
path = urllib_parse_quote(path)
path = re.sub('/+', '/', path)
return f'file:{path}' |
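
A quick illustration of the conversion on a POSIX platform (the Windows branch additionally hoists the drive letter into the URI path); the file names are placeholders.

print(_path_to_sqlite_uri('my node.toron'))           # file:my%20node.toron
print(_path_to_sqlite_uri('/data//nodes/pop.toron'))  # file:/data/nodes/pop.toron
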
def connect(path, mode='rwc'):
'Returns a sqlite3 connection to a Toron node file.'
uri_path = _path_to_sqlite_uri(path)
uri_path = f'{uri_path}?mode={mode}'
try:
get_connection = (lambda : sqlite3.connect(database=uri_path, detect_types=sqlite3.PARSE_DECLTYPES, isolation_level=None, uri=True))
if os.path.exists(path):
con = get_connection()
else:
con = get_connection()
con.executescript(_schema_script)
except sqlite3.OperationalError as err:
msg = str(err).replace('database file', f'node file {path!r}')
raise ToronError(msg)
try:
_add_functions_and_triggers(con)
except (sqlite3.OperationalError, sqlite3.DatabaseError):
con.close()
raise ToronError(f'Path is not a Toron node: {path!r}')
cur = con.execute("SELECT value FROM property WHERE key='schema_version'")
(schema_version, *_) = (cur.fetchone() or (None,))
cur.close()
if (schema_version != 1):
msg = f'Unsupported Toron node format: schema version {schema_version!r}'
raise ToronError(msg)
return con | 244,127,551,233,007,550 | Returns a sqlite3 connection to a Toron node file. | toron/_node_schema.py | connect | shawnbrown/gpn | python | def connect(path, mode='rwc'):
uri_path = _path_to_sqlite_uri(path)
uri_path = f'{uri_path}?mode={mode}'
try:
get_connection = (lambda : sqlite3.connect(database=uri_path, detect_types=sqlite3.PARSE_DECLTYPES, isolation_level=None, uri=True))
if os.path.exists(path):
con = get_connection()
else:
con = get_connection()
con.executescript(_schema_script)
except sqlite3.OperationalError as err:
msg = str(err).replace('database file', f'node file {path!r}')
raise ToronError(msg)
try:
_add_functions_and_triggers(con)
except (sqlite3.OperationalError, sqlite3.DatabaseError):
con.close()
raise ToronError(f'Path is not a Toron node: {path!r}')
cur = con.execute("SELECT value FROM property WHERE key='schema_version'")
(schema_version, *_) = (cur.fetchone() or (None,))
cur.close()
if (schema_version != 1):
msg = f'Unsupported Toron node format: schema version {schema_version!r}'
raise ToronError(msg)
return con |
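
A hedged usage sketch: with the default mode 'rwc' a brand-new file gets the schema created for it, after which the connection behaves like any sqlite3 connection; 'example.toron' is a placeholder path.

con = connect('example.toron')
try:
    cur = con.execute("SELECT value FROM property WHERE key='schema_version'")
    print(cur.fetchone())  # the stored schema version (1 for this format)
finally:
    con.close()
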
@contextmanager
def transaction(path_or_connection, mode=None):
'A context manager that yields a cursor that runs in an\n isolated transaction. If the context manager exits without\n errors, the transaction is committed. If an exception is\n raised, all changes are rolled-back.\n '
if isinstance(path_or_connection, sqlite3.Connection):
connection = path_or_connection
connection_close = (lambda : None)
else:
connection = connect(path_or_connection, mode=mode)
connection_close = connection.close
cursor = connection.cursor()
try:
with savepoint(cursor):
(yield cursor)
finally:
cursor.close()
connection_close() | 4,488,582,226,618,593,300 | A context manager that yields a cursor that runs in an
isolated transaction. If the context manager exits without
errors, the transaction is committed. If an exception is
raised, all changes are rolled-back. | toron/_node_schema.py | transaction | shawnbrown/gpn | python | @contextmanager
def transaction(path_or_connection, mode=None):
'A context manager that yields a cursor that runs in an\n isolated transaction. If the context manager exits without\n errors, the transaction is committed. If an exception is\n raised, all changes are rolled-back.\n '
if isinstance(path_or_connection, sqlite3.Connection):
connection = path_or_connection
connection_close = (lambda : None)
else:
connection = connect(path_or_connection, mode=mode)
connection_close = connection.close
cursor = connection.cursor()
try:
with savepoint(cursor):
(yield cursor)
finally:
cursor.close()
connection_close() |
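
A hedged usage sketch of the context manager; the value column of the property table is JSON-validated by the triggers above, so the stored value is written as a JSON string. If the block raises, the savepoint rolls the change back.

with transaction('example.toron', mode='rw') as cur:
    cur.execute(
        "INSERT OR REPLACE INTO property (key, value) VALUES (?, ?)",
        ('title', '"Population node"'),  # value must be well-formed JSON
    )
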
def run(cmd, *args, **kwargs):
'Echo a command before running it'
log.info(('> ' + list2cmdline(cmd)))
kwargs['shell'] = (sys.platform == 'win32')
return check_call(cmd, *args, **kwargs) | -821,275,233,338,806,500 | Echo a command before running it | setupbase.py | run | bualpha/jupyterlab | python | def run(cmd, *args, **kwargs):
log.info(('> ' + list2cmdline(cmd)))
kwargs['shell'] = (sys.platform == 'win32')
return check_call(cmd, *args, **kwargs) |
def find_packages():
'\n Find all of the packages.\n '
packages = []
for (dir, subdirs, files) in os.walk('jupyterlab'):
if ('node_modules' in subdirs):
subdirs.remove('node_modules')
package = dir.replace(osp.sep, '.')
if ('__init__.py' not in files):
continue
packages.append(package)
return packages | 8,569,758,962,851,079,000 | Find all of the packages. | setupbase.py | find_packages | bualpha/jupyterlab | python | def find_packages():
'\n \n '
packages = []
for (dir, subdirs, files) in os.walk('jupyterlab'):
if ('node_modules' in subdirs):
subdirs.remove('node_modules')
package = dir.replace(osp.sep, '.')
if ('__init__.py' not in files):
continue
packages.append(package)
return packages |
def find_package_data():
'\n Find package_data.\n '
theme_dirs = []
for (dir, subdirs, files) in os.walk(pjoin('jupyterlab', 'themes')):
slice_len = len(('jupyterlab' + os.sep))
theme_dirs.append(pjoin(dir[slice_len:], '*'))
schema_dirs = []
for (dir, subdirs, files) in os.walk(pjoin('jupyterlab', 'schemas')):
slice_len = len(('jupyterlab' + os.sep))
schema_dirs.append(pjoin(dir[slice_len:], '*'))
return {'jupyterlab': ((['build/*', '*.js', 'package.app.json', 'yarn.lock', 'yarn.app.lock', '.yarnrc'] + theme_dirs) + schema_dirs)} | -2,900,111,418,617,660,400 | Find package_data. | setupbase.py | find_package_data | bualpha/jupyterlab | python | def find_package_data():
'\n \n '
theme_dirs = []
for (dir, subdirs, files) in os.walk(pjoin('jupyterlab', 'themes')):
slice_len = len(('jupyterlab' + os.sep))
theme_dirs.append(pjoin(dir[slice_len:], '*'))
schema_dirs = []
for (dir, subdirs, files) in os.walk(pjoin('jupyterlab', 'schemas')):
slice_len = len(('jupyterlab' + os.sep))
schema_dirs.append(pjoin(dir[slice_len:], '*'))
return {'jupyterlab': ((['build/*', '*.js', 'package.app.json', 'yarn.lock', 'yarn.app.lock', '.yarnrc'] + theme_dirs) + schema_dirs)} |
def find_data_files():
'\n Find data_files.\n '
if (not os.path.exists(pjoin('jupyterlab', 'build'))):
return []
files = []
static_files = os.listdir(pjoin('jupyterlab', 'build'))
files.append(('share/jupyter/lab/static', [('jupyterlab/build/%s' % f) for f in static_files]))
for (dir, subdirs, fnames) in os.walk(pjoin('jupyterlab', 'schemas')):
dir = dir.replace(os.sep, '/')
schema_files = []
for fname in fnames:
schema_files.append(('%s/%s' % (dir, fname)))
slice_len = len('jupyterlab/')
files.append((('share/jupyter/lab/%s' % dir[slice_len:]), schema_files))
for (dir, subdirs, fnames) in os.walk(pjoin('jupyterlab', 'themes')):
dir = dir.replace(os.sep, '/')
themes_files = []
for fname in fnames:
themes_files.append(('%s/%s' % (dir, fname)))
slice_len = len('jupyterlab/')
files.append((('share/jupyter/lab/%s' % dir[slice_len:]), themes_files))
return files | -157,557,035,968,105,020 | Find data_files. | setupbase.py | find_data_files | bualpha/jupyterlab | python | def find_data_files():
'\n \n '
if (not os.path.exists(pjoin('jupyterlab', 'build'))):
return []
files = []
static_files = os.listdir(pjoin('jupyterlab', 'build'))
files.append(('share/jupyter/lab/static', [('jupyterlab/build/%s' % f) for f in static_files]))
for (dir, subdirs, fnames) in os.walk(pjoin('jupyterlab', 'schemas')):
dir = dir.replace(os.sep, '/')
schema_files = []
for fname in fnames:
schema_files.append(('%s/%s' % (dir, fname)))
slice_len = len('jupyterlab/')
files.append((('share/jupyter/lab/%s' % dir[slice_len:]), schema_files))
for (dir, subdirs, fnames) in os.walk(pjoin('jupyterlab', 'themes')):
dir = dir.replace(os.sep, '/')
themes_files = []
for fname in fnames:
themes_files.append(('%s/%s' % (dir, fname)))
slice_len = len('jupyterlab/')
files.append((('share/jupyter/lab/%s' % dir[slice_len:]), themes_files))
return files |
def js_prerelease(command, strict=False):
'decorator for building minified js/css prior to another command'
class DecoratedCommand(command):
def run(self):
jsdeps = self.distribution.get_command_obj('jsdeps')
if ((not is_repo) and all((osp.exists(t) for t in jsdeps.targets))):
command.run(self)
return
try:
self.distribution.run_command('jsdeps')
except Exception as e:
missing = [t for t in jsdeps.targets if (not osp.exists(t))]
if (strict or missing):
log.warn('js check failed')
if missing:
log.error(('missing files: %s' % missing))
raise e
else:
log.warn('js check failed (not a problem)')
log.warn(str(e))
command.run(self)
return DecoratedCommand | -9,103,179,315,340,882,000 | decorator for building minified js/css prior to another command | setupbase.py | js_prerelease | bualpha/jupyterlab | python | def js_prerelease(command, strict=False):
class DecoratedCommand(command):
def run(self):
jsdeps = self.distribution.get_command_obj('jsdeps')
if ((not is_repo) and all((osp.exists(t) for t in jsdeps.targets))):
command.run(self)
return
try:
self.distribution.run_command('jsdeps')
except Exception as e:
missing = [t for t in jsdeps.targets if (not osp.exists(t))]
if (strict or missing):
log.warn('js check failed')
if missing:
log.error(('missing files: %s' % missing))
raise e
else:
log.warn('js check failed (not a problem)')
log.warn(str(e))
command.run(self)
return DecoratedCommand |
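
A sketch of how the decorator is typically wired into setup(), assuming the stock setuptools command classes; 'jsdeps' would be a separate custom Command exposing the 'targets' list the decorator inspects.

from setuptools.command.build_py import build_py
from setuptools.command.sdist import sdist

cmdclass = {
    'build_py': js_prerelease(build_py),
    'sdist': js_prerelease(sdist, strict=True),
}
# setup(..., cmdclass=cmdclass)
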
def update_package_data(distribution):
'update build_py options to get package_data changes'
build_py = distribution.get_command_obj('build_py')
build_py.finalize_options() | -7,966,824,714,045,736,000 | update build_py options to get package_data changes | setupbase.py | update_package_data | bualpha/jupyterlab | python | def update_package_data(distribution):
build_py = distribution.get_command_obj('build_py')
build_py.finalize_options() |
@classmethod
def _find_playlist_info(cls, response):
'\n Finds playlist info (type, id) in HTTP response.\n\n :param response: Response object.\n :returns: Dictionary with type and id.\n '
values = {}
matches = cls._playlist_info_re.search(response.text)
if matches:
values['type'] = matches.group(1)
values['id'] = matches.group(2)
return values | -5,611,080,266,467,536,000 | Finds playlist info (type, id) in HTTP response.
:param response: Response object.
:returns: Dictionary with type and id. | src/streamlink/plugins/ceskatelevize.py | _find_playlist_info | Erk-/streamlink | python | @classmethod
def _find_playlist_info(cls, response):
'\n Finds playlist info (type, id) in HTTP response.\n\n :param response: Response object.\n :returns: Dictionary with type and id.\n '
values = {}
matches = cls._playlist_info_re.search(response.text)
if matches:
values['type'] = matches.group(1)
values['id'] = matches.group(2)
return values |
@classmethod
def _find_player_url(cls, response):
'\n Finds embedded player url in HTTP response.\n\n :param response: Response object.\n :returns: Player url (str).\n '
url = ''
matches = cls._player_re.search(response.text)
if matches:
tmp_url = matches.group(0).replace('&', '&')
if ('hash' not in tmp_url):
matches = cls._hash_re.search(response.text)
if matches:
url = ((tmp_url + '&hash=') + matches.group(1))
else:
url = tmp_url
return ('http://ceskatelevize.cz/' + url) | 9,190,241,898,436,455,000 | Finds embedded player url in HTTP response.
:param response: Response object.
:returns: Player url (str). | src/streamlink/plugins/ceskatelevize.py | _find_player_url | Erk-/streamlink | python | @classmethod
def _find_player_url(cls, response):
'\n Finds embedded player url in HTTP response.\n\n :param response: Response object.\n :returns: Player url (str).\n '
    url = ''
matches = cls._player_re.search(response.text)
if matches:
tmp_url = matches.group(0).replace('&', '&')
if ('hash' not in tmp_url):
matches = cls._hash_re.search(response.text)
if matches:
url = ((tmp_url + '&hash=') + matches.group(1))
else:
url = tmp_url
return ('http://ceskatelevize.cz/' + url) |
def main():
'Main routine'
debug = False
try:
argparser = ArgumentParser(description=modules[__name__].__doc__)
argparser.add_argument('device', nargs='?', default='ftdi:///?', help='serial port device name')
argparser.add_argument('-x', '--hexdump', action='store_true', help='dump EEPROM content as ASCII')
argparser.add_argument('-X', '--hexblock', type=int, help='dump EEPROM as indented hexa blocks')
argparser.add_argument('-i', '--input', type=FileType('rt'), help='input ini file to load EEPROM content')
argparser.add_argument('-l', '--load', default='all', choices=('all', 'raw', 'values'), help='section(s) to load from input file')
argparser.add_argument('-o', '--output', type=FileType('wt'), help='output ini file to save EEPROM content')
argparser.add_argument('-s', '--serial-number', help='set serial number')
argparser.add_argument('-m', '--manufacturer', help='set manufacturer name')
argparser.add_argument('-p', '--product', help='set product name')
argparser.add_argument('-c', '--config', action='append', help='change/configure a property as key=value pair')
argparser.add_argument('-e', '--erase', action='store_true', help='erase the whole EEPROM content')
argparser.add_argument('-u', '--update', action='store_true', help='perform actual update, use w/ care')
argparser.add_argument('-P', '--vidpid', action='append', help='specify a custom VID:PID device ID, may be repeated')
argparser.add_argument('-V', '--virtual', type=FileType('r'), help='use a virtual device, specified as YaML')
argparser.add_argument('-v', '--verbose', action='count', default=0, help='increase verbosity')
argparser.add_argument('-d', '--debug', action='store_true', help='enable debug mode')
args = argparser.parse_args()
debug = args.debug
if (not args.device):
argparser.error('Serial device not specified')
loglevel = max(DEBUG, (ERROR - (10 * args.verbose)))
loglevel = min(ERROR, loglevel)
if debug:
formatter = Formatter('%(asctime)s.%(msecs)03d %(name)-20s %(message)s', '%H:%M:%S')
else:
formatter = Formatter('%(message)s')
FtdiLogger.set_formatter(formatter)
FtdiLogger.set_level(loglevel)
FtdiLogger.log.addHandler(StreamHandler(stderr))
if args.virtual:
from pyftdi.usbtools import UsbTools
UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt',)
backend = UsbTools.find_backend()
loader = backend.create_loader()()
loader.load(args.virtual)
try:
add_custom_devices(Ftdi, args.vidpid, force_hex=True)
except ValueError as exc:
argparser.error(str(exc))
eeprom = FtdiEeprom()
eeprom.open(args.device)
if args.erase:
eeprom.erase()
if args.input:
eeprom.load_config(args.input, args.load)
if args.serial_number:
eeprom.set_serial_number(args.serial_number)
if args.manufacturer:
eeprom.set_manufacturer_name(args.manufacturer)
if args.product:
eeprom.set_product_name(args.product)
for conf in (args.config or []):
if (conf == '?'):
helpstr = ', '.join(sorted(eeprom.properties))
print(fill(helpstr, initial_indent=' ', subsequent_indent=' '))
exit(1)
for sep in ':=':
if (sep in conf):
(name, value) = conf.split(sep, 1)
if (not value):
argparser.error(('Configuration %s without value' % conf))
helpio = StringIO()
eeprom.set_property(name, value, helpio)
helpstr = helpio.getvalue()
if helpstr:
print(fill(helpstr, initial_indent=' ', subsequent_indent=' '))
exit(1)
break
else:
argparser.error(('Missing name:value separator in %s' % conf))
if args.hexdump:
print(hexdump(eeprom.data))
if (args.hexblock is not None):
indent = (' ' * args.hexblock)
for pos in range(0, len(eeprom.data), 16):
hexa = ' '.join([('%02x' % x) for x in eeprom.data[pos:(pos + 16)]])
print(indent, hexa, sep='')
if args.update:
if eeprom.commit(False):
eeprom.reset_device()
if (args.verbose > 0):
eeprom.dump_config()
if args.output:
eeprom.save_config(args.output)
except (ImportError, IOError, NotImplementedError, ValueError) as exc:
print(('\nError: %s' % exc), file=stderr)
if debug:
print(format_exc(chain=False), file=stderr)
exit(1)
except KeyboardInterrupt:
exit(2) | 5,579,499,725,529,677,000 | Main routine | bin/ftconf.py | main | andrario/API_Estacao | python | def main():
debug = False
try:
argparser = ArgumentParser(description=modules[__name__].__doc__)
argparser.add_argument('device', nargs='?', default='ftdi:///?', help='serial port device name')
argparser.add_argument('-x', '--hexdump', action='store_true', help='dump EEPROM content as ASCII')
argparser.add_argument('-X', '--hexblock', type=int, help='dump EEPROM as indented hexa blocks')
argparser.add_argument('-i', '--input', type=FileType('rt'), help='input ini file to load EEPROM content')
argparser.add_argument('-l', '--load', default='all', choices=('all', 'raw', 'values'), help='section(s) to load from input file')
argparser.add_argument('-o', '--output', type=FileType('wt'), help='output ini file to save EEPROM content')
argparser.add_argument('-s', '--serial-number', help='set serial number')
argparser.add_argument('-m', '--manufacturer', help='set manufacturer name')
argparser.add_argument('-p', '--product', help='set product name')
argparser.add_argument('-c', '--config', action='append', help='change/configure a property as key=value pair')
argparser.add_argument('-e', '--erase', action='store_true', help='erase the whole EEPROM content')
argparser.add_argument('-u', '--update', action='store_true', help='perform actual update, use w/ care')
argparser.add_argument('-P', '--vidpid', action='append', help='specify a custom VID:PID device ID, may be repeated')
argparser.add_argument('-V', '--virtual', type=FileType('r'), help='use a virtual device, specified as YaML')
argparser.add_argument('-v', '--verbose', action='count', default=0, help='increase verbosity')
argparser.add_argument('-d', '--debug', action='store_true', help='enable debug mode')
args = argparser.parse_args()
debug = args.debug
if (not args.device):
argparser.error('Serial device not specified')
loglevel = max(DEBUG, (ERROR - (10 * args.verbose)))
loglevel = min(ERROR, loglevel)
if debug:
formatter = Formatter('%(asctime)s.%(msecs)03d %(name)-20s %(message)s', '%H:%M:%S')
else:
formatter = Formatter('%(message)s')
FtdiLogger.set_formatter(formatter)
FtdiLogger.set_level(loglevel)
FtdiLogger.log.addHandler(StreamHandler(stderr))
if args.virtual:
from pyftdi.usbtools import UsbTools
UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt',)
backend = UsbTools.find_backend()
loader = backend.create_loader()()
loader.load(args.virtual)
try:
add_custom_devices(Ftdi, args.vidpid, force_hex=True)
except ValueError as exc:
argparser.error(str(exc))
eeprom = FtdiEeprom()
eeprom.open(args.device)
if args.erase:
eeprom.erase()
if args.input:
eeprom.load_config(args.input, args.load)
if args.serial_number:
eeprom.set_serial_number(args.serial_number)
if args.manufacturer:
eeprom.set_manufacturer_name(args.manufacturer)
if args.product:
eeprom.set_product_name(args.product)
for conf in (args.config or []):
if (conf == '?'):
helpstr = ', '.join(sorted(eeprom.properties))
print(fill(helpstr, initial_indent=' ', subsequent_indent=' '))
exit(1)
for sep in ':=':
if (sep in conf):
(name, value) = conf.split(sep, 1)
if (not value):
argparser.error(('Configuration %s without value' % conf))
helpio = StringIO()
eeprom.set_property(name, value, helpio)
helpstr = helpio.getvalue()
if helpstr:
print(fill(helpstr, initial_indent=' ', subsequent_indent=' '))
exit(1)
break
else:
argparser.error(('Missing name:value separator in %s' % conf))
if args.hexdump:
print(hexdump(eeprom.data))
if (args.hexblock is not None):
indent = (' ' * args.hexblock)
for pos in range(0, len(eeprom.data), 16):
hexa = ' '.join([('%02x' % x) for x in eeprom.data[pos:(pos + 16)]])
                print(indent, hexa, sep='')
if args.update:
if eeprom.commit(False):
eeprom.reset_device()
if (args.verbose > 0):
eeprom.dump_config()
if args.output:
eeprom.save_config(args.output)
except (ImportError, IOError, NotImplementedError, ValueError) as exc:
print(('\nError: %s' % exc), file=stderr)
if debug:
print(format_exc(chain=False), file=stderr)
exit(1)
except KeyboardInterrupt:
exit(2) |
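
A hedged, non-interactive equivalent of a typical invocation, restricted to the FtdiEeprom calls that appear in main() above; the device URL, serial number and product name are placeholders.

from pyftdi.eeprom import FtdiEeprom

eeprom = FtdiEeprom()
eeprom.open('ftdi://ftdi:232h/1')
eeprom.set_serial_number('FT000001')
eeprom.set_product_name('My Probe')
eeprom.dump_config()           # review the pending configuration
if eeprom.commit(False):       # write the EEPROM; use with care
    eeprom.reset_device()
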
def _analyze_result_columns(self, query: Query):
'Given info on a list of SELECTs, determine whether to warn.'
if (not query.selectables):
return
for selectable in query.selectables:
self.logger.debug(f'Analyzing query: {selectable.selectable.raw}')
for wildcard in selectable.get_wildcard_info():
if wildcard.tables:
for wildcard_table in wildcard.tables:
                    self.logger.debug(f'Wildcard: {wildcard.segment.raw} has target {wildcard_table}')
alias_info = selectable.find_alias(wildcard_table)
if alias_info:
self._handle_alias(selectable, alias_info, query)
else:
cte = query.lookup_cte(wildcard_table)
if cte:
self._analyze_result_columns(cte)
else:
self.logger.debug(f'Query target {wildcard_table} is external. Generating warning.')
raise RuleFailure(selectable.selectable)
else:
query_list = SelectCrawler.get(query, query.selectables[0].selectable)
for o in query_list:
if isinstance(o, Query):
self._analyze_result_columns(o)
return
self.logger.debug(f'Query target "{query.selectables[0].selectable.raw}" has no targets. Generating warning.')
raise RuleFailure(query.selectables[0].selectable) | -8,042,192,610,915,090,000 | Given info on a list of SELECTs, determine whether to warn. | src/sqlfluff/rules/L044.py | _analyze_result_columns | R7L208/sqlfluff | python | def _analyze_result_columns(self, query: Query):
if (not query.selectables):
return
for selectable in query.selectables:
self.logger.debug(f'Analyzing query: {selectable.selectable.raw}')
for wildcard in selectable.get_wildcard_info():
if wildcard.tables:
for wildcard_table in wildcard.tables:
                    self.logger.debug(f'Wildcard: {wildcard.segment.raw} has target {wildcard_table}')
alias_info = selectable.find_alias(wildcard_table)
if alias_info:
self._handle_alias(selectable, alias_info, query)
else:
cte = query.lookup_cte(wildcard_table)
if cte:
self._analyze_result_columns(cte)
else:
self.logger.debug(f'Query target {wildcard_table} is external. Generating warning.')
raise RuleFailure(selectable.selectable)
else:
query_list = SelectCrawler.get(query, query.selectables[0].selectable)
for o in query_list:
if isinstance(o, Query):
self._analyze_result_columns(o)
return
self.logger.debug(f'Query target "{query.selectables[0].selectable.raw}" has no targets. Generating warning.')
raise RuleFailure(query.selectables[0].selectable) |
def _eval(self, context: RuleContext) -> Optional[LintResult]:
'Outermost query should produce known number of columns.'
start_types = ['select_statement', 'set_expression', 'with_compound_statement']
if (context.segment.is_type(*start_types) and (not context.functional.parent_stack.any(sp.is_type(*start_types)))):
crawler = SelectCrawler(context.segment, context.dialect)
if crawler.query_tree:
try:
return self._analyze_result_columns(crawler.query_tree)
except RuleFailure as e:
return LintResult(anchor=e.anchor)
return None | -4,919,894,665,587,562,000 | Outermost query should produce known number of columns. | src/sqlfluff/rules/L044.py | _eval | R7L208/sqlfluff | python | def _eval(self, context: RuleContext) -> Optional[LintResult]:
start_types = ['select_statement', 'set_expression', 'with_compound_statement']
if (context.segment.is_type(*start_types) and (not context.functional.parent_stack.any(sp.is_type(*start_types)))):
crawler = SelectCrawler(context.segment, context.dialect)
if crawler.query_tree:
try:
return self._analyze_result_columns(crawler.query_tree)
except RuleFailure as e:
return LintResult(anchor=e.anchor)
return None |
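
A hedged illustration of the behaviour this rule enforces, assuming sqlfluff's simple API (sqlfluff.lint) is available: the first query's wildcard resolves to the named columns of its inner SELECT, while the second selects * from an external table and so produces an unknown number of columns.

import sqlfluff

resolvable = "SELECT * FROM (SELECT a, b FROM my_table)"
unresolvable = "SELECT * FROM my_table"
print([v for v in sqlfluff.lint(resolvable) if v['code'] == 'L044'])    # []
print([v for v in sqlfluff.lint(unresolvable) if v['code'] == 'L044'])  # one violation
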
def imread(filename, *args, **kwargs):
'Read and decode an image to an NDArray.\n\n Note: `imread` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.\n\n Parameters\n ----------\n filename : str\n Name of the image file to be loaded.\n flag : {0, 1}, default 1\n 1 for three channel color output. 0 for grayscale output.\n to_rgb : bool, default True\n True for RGB formatted output (MXNet default).\n False for BGR formatted output (OpenCV default).\n out : NDArray, optional\n Output buffer. Use `None` for automatic allocation.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the image.\n\n Example\n -------\n >>> mx.img.imread("flower.jpg")\n <NDArray 224x224x3 @cpu(0)>\n\n Set `flag` parameter to 0 to get grayscale output\n\n >>> mx.img.imread("flower.jpg", flag=0)\n <NDArray 224x224x1 @cpu(0)>\n\n Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)\n\n >>> mx.img.imread("flower.jpg", to_rgb=0)\n <NDArray 224x224x3 @cpu(0)>\n '
return _internal._cvimread(filename, *args, **kwargs) | 5,817,983,858,285,566,000 | Read and decode an image to an NDArray.
Note: `imread` uses OpenCV (not the CV2 Python library).
MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.
Parameters
----------
filename : str
Name of the image file to be loaded.
flag : {0, 1}, default 1
1 for three channel color output. 0 for grayscale output.
to_rgb : bool, default True
True for RGB formatted output (MXNet default).
False for BGR formatted output (OpenCV default).
out : NDArray, optional
Output buffer. Use `None` for automatic allocation.
Returns
-------
NDArray
An `NDArray` containing the image.
Example
-------
>>> mx.img.imread("flower.jpg")
<NDArray 224x224x3 @cpu(0)>
Set `flag` parameter to 0 to get grayscale output
>>> mx.img.imread("flower.jpg", flag=0)
<NDArray 224x224x1 @cpu(0)>
Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
>>> mx.img.imread("flower.jpg", to_rgb=0)
<NDArray 224x224x3 @cpu(0)> | python/mxnet/image/image.py | imread | Vikas89/private-mxnet | python | def imread(filename, *args, **kwargs):
'Read and decode an image to an NDArray.\n\n Note: `imread` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.\n\n Parameters\n ----------\n filename : str\n Name of the image file to be loaded.\n flag : {0, 1}, default 1\n 1 for three channel color output. 0 for grayscale output.\n to_rgb : bool, default True\n True for RGB formatted output (MXNet default).\n False for BGR formatted output (OpenCV default).\n out : NDArray, optional\n Output buffer. Use `None` for automatic allocation.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the image.\n\n Example\n -------\n >>> mx.img.imread("flower.jpg")\n <NDArray 224x224x3 @cpu(0)>\n\n Set `flag` parameter to 0 to get grayscale output\n\n >>> mx.img.imread("flower.jpg", flag=0)\n <NDArray 224x224x1 @cpu(0)>\n\n Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)\n\n >>> mx.img.imread("flower.jpg", to_rgb=0)\n <NDArray 224x224x3 @cpu(0)>\n '
return _internal._cvimread(filename, *args, **kwargs) |
def imdecode(buf, *args, **kwargs):
'Decode an image to an NDArray.\n\n Note: `imdecode` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.\n\n Parameters\n ----------\n buf : str/bytes or numpy.ndarray\n Binary image data as string or numpy ndarray.\n flag : int, optional, default=1\n 1 for three channel color output. 0 for grayscale output.\n to_rgb : int, optional, default=1\n 1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).\n out : NDArray, optional\n Output buffer. Use `None` for automatic allocation.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the image.\n\n Example\n -------\n >>> with open("flower.jpg", \'rb\') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 224x224x3 @cpu(0)>\n\n Set `flag` parameter to 0 to get grayscale output\n\n >>> with open("flower.jpg", \'rb\') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image, flag=0)\n >>> image\n <NDArray 224x224x1 @cpu(0)>\n\n Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)\n\n >>> with open("flower.jpg", \'rb\') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image, to_rgb=0)\n >>> image\n <NDArray 224x224x3 @cpu(0)>\n '
if (not isinstance(buf, nd.NDArray)):
buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)
return _internal._cvimdecode(buf, *args, **kwargs) | -3,713,905,794,887,272,400 | Decode an image to an NDArray.
Note: `imdecode` uses OpenCV (not the CV2 Python library).
MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.
Parameters
----------
buf : str/bytes or numpy.ndarray
Binary image data as string or numpy ndarray.
flag : int, optional, default=1
1 for three channel color output. 0 for grayscale output.
to_rgb : int, optional, default=1
1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).
out : NDArray, optional
Output buffer. Use `None` for automatic allocation.
Returns
-------
NDArray
An `NDArray` containing the image.
Example
-------
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 224x224x3 @cpu(0)>
Set `flag` parameter to 0 to get grayscale output
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, flag=0)
>>> image
<NDArray 224x224x1 @cpu(0)>
Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, to_rgb=0)
>>> image
<NDArray 224x224x3 @cpu(0)> | python/mxnet/image/image.py | imdecode | Vikas89/private-mxnet | python | def imdecode(buf, *args, **kwargs):
'Decode an image to an NDArray.\n\n Note: `imdecode` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.\n\n Parameters\n ----------\n buf : str/bytes or numpy.ndarray\n Binary image data as string or numpy ndarray.\n flag : int, optional, default=1\n 1 for three channel color output. 0 for grayscale output.\n to_rgb : int, optional, default=1\n 1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).\n out : NDArray, optional\n Output buffer. Use `None` for automatic allocation.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the image.\n\n Example\n -------\n >>> with open("flower.jpg", \'rb\') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 224x224x3 @cpu(0)>\n\n Set `flag` parameter to 0 to get grayscale output\n\n >>> with open("flower.jpg", \'rb\') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image, flag=0)\n >>> image\n <NDArray 224x224x1 @cpu(0)>\n\n Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)\n\n >>> with open("flower.jpg", \'rb\') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image, to_rgb=0)\n >>> image\n <NDArray 224x224x3 @cpu(0)>\n '
if (not isinstance(buf, nd.NDArray)):
buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)
return _internal._cvimdecode(buf, *args, **kwargs) |
def scale_down(src_size, size):
"Scales down crop size if it's larger than image size.\n\n If width/height of the crop is larger than the width/height of the image,\n sets the width/height to the width/height of the image.\n\n Parameters\n ----------\n src_size : tuple of int\n Size of the image in (width, height) format.\n size : tuple of int\n Size of the crop in (width, height) format.\n\n Returns\n -------\n tuple of int\n A tuple containing the scaled crop size in (width, height) format.\n\n Example\n --------\n >>> src_size = (640,480)\n >>> size = (720,120)\n >>> new_size = mx.img.scale_down(src_size, size)\n >>> new_size\n (640,106)\n "
(w, h) = size
(sw, sh) = src_size
if (sh < h):
(w, h) = ((float((w * sh)) / h), sh)
if (sw < w):
(w, h) = (sw, (float((h * sw)) / w))
return (int(w), int(h)) | -209,325,311,614,370,940 | Scales down crop size if it's larger than image size.
If width/height of the crop is larger than the width/height of the image,
sets the width/height to the width/height of the image.
Parameters
----------
src_size : tuple of int
Size of the image in (width, height) format.
size : tuple of int
Size of the crop in (width, height) format.
Returns
-------
tuple of int
A tuple containing the scaled crop size in (width, height) format.
Example
--------
>>> src_size = (640,480)
>>> size = (720,120)
>>> new_size = mx.img.scale_down(src_size, size)
>>> new_size
(640,106) | python/mxnet/image/image.py | scale_down | Vikas89/private-mxnet | python | def scale_down(src_size, size):
"Scales down crop size if it's larger than image size.\n\n If width/height of the crop is larger than the width/height of the image,\n sets the width/height to the width/height of the image.\n\n Parameters\n ----------\n src_size : tuple of int\n Size of the image in (width, height) format.\n size : tuple of int\n Size of the crop in (width, height) format.\n\n Returns\n -------\n tuple of int\n A tuple containing the scaled crop size in (width, height) format.\n\n Example\n --------\n >>> src_size = (640,480)\n >>> size = (720,120)\n >>> new_size = mx.img.scale_down(src_size, size)\n >>> new_size\n (640,106)\n "
(w, h) = size
(sw, sh) = src_size
if (sh < h):
(w, h) = ((float((w * sh)) / h), sh)
if (sw < w):
(w, h) = (sw, (float((h * sw)) / w))
return (int(w), int(h)) |
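
The crop is shrunk to fit inside the source image while preserving its own aspect ratio; a quick check of the docstring example plus a crop that already fits:

print(scale_down((640, 480), (720, 120)))  # (640, 106)
print(scale_down((640, 480), (100, 100)))  # (100, 100), unchanged
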
def _get_interp_method(interp, sizes=()):
'Get the interpolation method for resize functions.\n The major purpose of this function is to wrap a random interp method selection\n and a auto-estimation method.\n\n Parameters\n ----------\n interp : int\n interpolation method for all resizing operations\n\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n sizes : tuple of int\n (old_height, old_width, new_height, new_width), if None provided, auto(9)\n will return Area(2) anyway.\n\n Returns\n -------\n int\n interp method from 0 to 4\n '
if (interp == 9):
if sizes:
assert (len(sizes) == 4)
(oh, ow, nh, nw) = sizes
if ((nh > oh) and (nw > ow)):
return 2
elif ((nh < oh) and (nw < ow)):
return 3
else:
return 1
else:
return 2
if (interp == 10):
return random.randint(0, 4)
if (interp not in (0, 1, 2, 3, 4)):
raise ValueError(('Unknown interp method %d' % interp))
return interp | 4,209,777,761,857,167,000 | Get the interpolation method for resize functions.
The major purpose of this function is to wrap a random interp method selection
and a auto-estimation method.
Parameters
----------
interp : int
interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Random select from interpolation method metioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
sizes : tuple of int
(old_height, old_width, new_height, new_width), if None provided, auto(9)
will return Area(2) anyway.
Returns
-------
int
interp method from 0 to 4 | python/mxnet/image/image.py | _get_interp_method | Vikas89/private-mxnet | python | def _get_interp_method(interp, sizes=()):
'Get the interpolation method for resize functions.\n The major purpose of this function is to wrap a random interp method selection\n and a auto-estimation method.\n\n Parameters\n ----------\n interp : int\n interpolation method for all resizing operations\n\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n sizes : tuple of int\n (old_height, old_width, new_height, new_width), if None provided, auto(9)\n will return Area(2) anyway.\n\n Returns\n -------\n int\n interp method from 0 to 4\n '
if (interp == 9):
if sizes:
assert (len(sizes) == 4)
(oh, ow, nh, nw) = sizes
if ((nh > oh) and (nw > ow)):
return 2
elif ((nh < oh) and (nw < ow)):
return 3
else:
return 1
else:
return 2
if (interp == 10):
return random.randint(0, 4)
if (interp not in (0, 1, 2, 3, 4)):
raise ValueError(('Unknown interp method %d' % interp))
return interp |
def resize_short(src, size, interp=2):
'Resizes shorter edge to size.\n\n Note: `resize_short` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with OpenCV for `resize_short` to work.\n\n Resizes the original image by setting the shorter edge to size\n and setting the longer edge accordingly.\n Resizing function is called from OpenCV.\n\n Parameters\n ----------\n src : NDArray\n The original image.\n size : int\n The length to be set for the shorter edge.\n interp : int, optional, default=2\n Interpolation method used for resizing the image.\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n\n Returns\n -------\n NDArray\n An \'NDArray\' containing the resized image.\n\n Example\n -------\n >>> with open("flower.jpeg", \'rb\') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> size = 640\n >>> new_image = mx.img.resize_short(image, size)\n >>> new_image\n <NDArray 2321x3482x3 @cpu(0)>\n '
(h, w, _) = src.shape
if (h > w):
(new_h, new_w) = (((size * h) // w), size)
else:
(new_h, new_w) = (size, ((size * w) // h))
return imresize(src, new_w, new_h, interp=_get_interp_method(interp, (h, w, new_h, new_w))) | -4,620,161,702,122,637,000 | Resizes shorter edge to size.
Note: `resize_short` uses OpenCV (not the CV2 Python library).
MXNet must have been built with OpenCV for `resize_short` to work.
Resizes the original image by setting the shorter edge to size
and setting the longer edge accordingly.
Resizing function is called from OpenCV.
Parameters
----------
src : NDArray
The original image.
size : int
The length to be set for the shorter edge.
interp : int, optional, default=2
Interpolation method used for resizing the image.
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Random select from interpolation method metioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
Returns
-------
NDArray
An 'NDArray' containing the resized image.
Example
-------
>>> with open("flower.jpeg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> size = 640
>>> new_image = mx.img.resize_short(image, size)
>>> new_image
<NDArray 2321x3482x3 @cpu(0)> | python/mxnet/image/image.py | resize_short | Vikas89/private-mxnet | python | def resize_short(src, size, interp=2):
'Resizes shorter edge to size.\n\n Note: `resize_short` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with OpenCV for `resize_short` to work.\n\n Resizes the original image by setting the shorter edge to size\n and setting the longer edge accordingly.\n Resizing function is called from OpenCV.\n\n Parameters\n ----------\n src : NDArray\n The original image.\n size : int\n The length to be set for the shorter edge.\n interp : int, optional, default=2\n Interpolation method used for resizing the image.\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n\n Returns\n -------\n NDArray\n An \'NDArray\' containing the resized image.\n\n Example\n -------\n >>> with open("flower.jpeg", \'rb\') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> size = 640\n >>> new_image = mx.img.resize_short(image, size)\n >>> new_image\n <NDArray 2321x3482x3 @cpu(0)>\n '
(h, w, _) = src.shape
if (h > w):
(new_h, new_w) = (((size * h) // w), size)
else:
(new_h, new_w) = (size, ((size * w) // h))
return imresize(src, new_w, new_h, interp=_get_interp_method(interp, (h, w, new_h, new_w))) |
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
'Crop src at fixed location, and (optionally) resize it to size.\n\n Parameters\n ----------\n src : NDArray\n Input image\n x0 : int\n Left boundary of the cropping area\n y0 : int\n Top boundary of the cropping area\n w : int\n Width of the cropping area\n h : int\n Height of the cropping area\n size : tuple of (w, h)\n Optional, resize to new size after cropping\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n '
out = nd.crop(src, begin=(y0, x0, 0), end=((y0 + h), (x0 + w), int(src.shape[2])))
if ((size is not None) and ((w, h) != size)):
sizes = (h, w, size[1], size[0])
out = imresize(out, *size, interp=_get_interp_method(interp, sizes))
return out | -4,619,619,017,581,396,000 | Crop src at fixed location, and (optionally) resize it to size.
Parameters
----------
src : NDArray
Input image
x0 : int
Left boundary of the cropping area
y0 : int
Top boundary of the cropping area
w : int
Width of the cropping area
h : int
Height of the cropping area
size : tuple of (w, h)
Optional, resize to new size after cropping
interp : int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image. | python/mxnet/image/image.py | fixed_crop | Vikas89/private-mxnet | python | def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
'Crop src at fixed location, and (optionally) resize it to size.\n\n Parameters\n ----------\n src : NDArray\n Input image\n x0 : int\n Left boundary of the cropping area\n y0 : int\n Top boundary of the cropping area\n w : int\n Width of the cropping area\n h : int\n Height of the cropping area\n size : tuple of (w, h)\n Optional, resize to new size after cropping\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n '
out = nd.crop(src, begin=(y0, x0, 0), end=((y0 + h), (x0 + w), int(src.shape[2])))
if ((size is not None) and ((w, h) != size)):
sizes = (h, w, size[1], size[0])
out = imresize(out, *size, interp=_get_interp_method(interp, sizes))
return out |
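
A hedged usage sketch: crop a 100x100 patch whose top-left corner is at (10, 20) and resize it to 224x224; 'flower.jpg' is a placeholder path.

img = imread('flower.jpg')                                        # HxWxC NDArray
patch = fixed_crop(img, x0=10, y0=20, w=100, h=100, size=(224, 224))
print(patch.shape)                                                # (224, 224, 3)
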
def random_crop(src, size, interp=2):
'Randomly crop `src` with `size` (width, height).\n Upsample result if `src` is smaller than `size`.\n\n Parameters\n ----------\n src: Source image `NDArray`\n size: Size of the crop formatted as (width, height). If the `size` is larger\n than the image, then the source image is upsampled to `size` and returned.\n interp: int, optional, default=2\n Interpolation method. See resize_short for details.\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n Tuple\n A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the\n original image and (width, height) are the dimensions of the cropped image.\n\n Example\n -------\n >>> im = mx.nd.array(cv2.imread("flower.jpg"))\n >>> cropped_im, rect = mx.image.random_crop(im, (100, 100))\n >>> print cropped_im\n <NDArray 100x100x1 @cpu(0)>\n >>> print rect\n (20, 21, 100, 100)\n '
(h, w, _) = src.shape
(new_w, new_h) = scale_down((w, h), size)
x0 = random.randint(0, (w - new_w))
y0 = random.randint(0, (h - new_h))
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return (out, (x0, y0, new_w, new_h)) | -4,275,306,880,614,285,000 | Randomly crop `src` with `size` (width, height).
Upsample result if `src` is smaller than `size`.
Parameters
----------
src: Source image `NDArray`
size: Size of the crop formatted as (width, height). If the `size` is larger
than the image, then the source image is upsampled to `size` and returned.
interp: int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
Tuple
A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
original image and (width, height) are the dimensions of the cropped image.
Example
-------
>>> im = mx.nd.array(cv2.imread("flower.jpg"))
>>> cropped_im, rect = mx.image.random_crop(im, (100, 100))
>>> print cropped_im
<NDArray 100x100x1 @cpu(0)>
>>> print rect
(20, 21, 100, 100) | python/mxnet/image/image.py | random_crop | Vikas89/private-mxnet | python | def random_crop(src, size, interp=2):
'Randomly crop `src` with `size` (width, height).\n Upsample result if `src` is smaller than `size`.\n\n Parameters\n ----------\n src: Source image `NDArray`\n size: Size of the crop formatted as (width, height). If the `size` is larger\n than the image, then the source image is upsampled to `size` and returned.\n interp: int, optional, default=2\n Interpolation method. See resize_short for details.\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n Tuple\n A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the\n original image and (width, height) are the dimensions of the cropped image.\n\n Example\n -------\n >>> im = mx.nd.array(cv2.imread("flower.jpg"))\n >>> cropped_im, rect = mx.image.random_crop(im, (100, 100))\n >>> print cropped_im\n <NDArray 100x100x1 @cpu(0)>\n >>> print rect\n (20, 21, 100, 100)\n '
(h, w, _) = src.shape
(new_w, new_h) = scale_down((w, h), size)
x0 = random.randint(0, (w - new_w))
y0 = random.randint(0, (h - new_h))
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return (out, (x0, y0, new_w, new_h)) |
def center_crop(src, size, interp=2):
'Crops the image `src` to the given `size` by trimming on all four\n sides and preserving the center of the image. Upsamples if `src` is smaller\n than `size`.\n\n .. note:: This requires MXNet to be compiled with USE_OPENCV.\n\n Parameters\n ----------\n src : NDArray\n Binary source image data.\n size : list or tuple of int\n The desired output image size.\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n\n Returns\n -------\n NDArray\n The cropped image.\n Tuple\n (x, y, width, height) where x, y are the positions of the crop in the\n original image and width, height the dimensions of the crop.\n\n Example\n -------\n >>> with open("flower.jpg", \'rb\') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.image.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))\n >>> cropped_image\n <NDArray 500x1000x3 @cpu(0)>\n >>> x, y, width, height\n (1241, 910, 1000, 500)\n '
(h, w, _) = src.shape
(new_w, new_h) = scale_down((w, h), size)
x0 = int(((w - new_w) / 2))
y0 = int(((h - new_h) / 2))
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return (out, (x0, y0, new_w, new_h)) | 6,517,003,938,661,321,000 | Crops the image `src` to the given `size` by trimming on all four
sides and preserving the center of the image. Upsamples if `src` is smaller
than `size`.
.. note:: This requires MXNet to be compiled with USE_OPENCV.
Parameters
----------
src : NDArray
Binary source image data.
size : list or tuple of int
The desired output image size.
interp : int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
The cropped image.
Tuple
(x, y, width, height) where x, y are the positions of the crop in the
original image and width, height the dimensions of the crop.
Example
-------
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.image.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))
>>> cropped_image
<NDArray 500x1000x3 @cpu(0)>
>>> x, y, width, height
(1241, 910, 1000, 500) | python/mxnet/image/image.py | center_crop | Vikas89/private-mxnet | python | def center_crop(src, size, interp=2):
'Crops the image `src` to the given `size` by trimming on all four\n sides and preserving the center of the image. Upsamples if `src` is smaller\n than `size`.\n\n .. note:: This requires MXNet to be compiled with USE_OPENCV.\n\n Parameters\n ----------\n src : NDArray\n Binary source image data.\n size : list or tuple of int\n The desired output image size.\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n\n Returns\n -------\n NDArray\n The cropped image.\n Tuple\n (x, y, width, height) where x, y are the positions of the crop in the\n original image and width, height the dimensions of the crop.\n\n Example\n -------\n >>> with open("flower.jpg", \'rb\') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.image.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))\n >>> cropped_image\n <NDArray 500x1000x3 @cpu(0)>\n >>> x, y, width, height\n (1241, 910, 1000, 500)\n '
(h, w, _) = src.shape
(new_w, new_h) = scale_down((w, h), size)
x0 = int(((w - new_w) / 2))
y0 = int(((h - new_h) / 2))
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return (out, (x0, y0, new_w, new_h)) |
def color_normalize(src, mean, std=None):
'Normalize src with mean and std.\n\n Parameters\n ----------\n src : NDArray\n Input image\n mean : NDArray\n RGB mean to be subtracted\n std : NDArray\n RGB standard deviation to be divided\n\n Returns\n -------\n NDArray\n An `NDArray` containing the normalized image.\n '
if (mean is not None):
src -= mean
if (std is not None):
src /= std
return src | 1,318,848,124,312,677,400 | Normalize src with mean and std.
Parameters
----------
src : NDArray
Input image
mean : NDArray
RGB mean to be subtracted
std : NDArray
RGB standard deviation to be divided
Returns
-------
NDArray
An `NDArray` containing the normalized image. | python/mxnet/image/image.py | color_normalize | Vikas89/private-mxnet | python | def color_normalize(src, mean, std=None):
'Normalize src with mean and std.\n\n Parameters\n ----------\n src : NDArray\n Input image\n mean : NDArray\n RGB mean to be subtracted\n std : NDArray\n RGB standard deviation to be divided\n\n Returns\n -------\n NDArray\n An `NDArray` containing the normalized image.\n '
if (mean is not None):
src -= mean
if (std is not None):
src /= std
return src |
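A short usage sketch for color_normalize, since this entry has no example of its own. It uses the ImageNet mean/std values that CreateAugmenter below uses as defaults; the constant NDArray is only a stand-in for a decoded image.
>>> import mxnet as mx
>>> img = mx.nd.ones((224, 224, 3)) * 128                  # stand-in for a decoded HWC image
>>> mean = mx.nd.array([123.68, 116.28, 103.53])           # ImageNet channel means
>>> std = mx.nd.array([58.395, 57.12, 57.375])             # ImageNet channel standard deviations
>>> out = mx.image.color_normalize(img, mean, std)         # subtracts mean and divides by std in place
>>> out.shape
(224, 224, 3)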
def random_size_crop(src, size, area, ratio, interp=2, **kwargs):
'Randomly crop src with size. Randomize area and aspect ratio.\n\n Parameters\n ----------\n src : NDArray\n Input image\n size : tuple of (int, int)\n Size of the crop formatted as (width, height).\n area : float in (0, 1] or tuple of (float, float)\n If tuple, minimum area and maximum area to be maintained after cropping\n If float, minimum area to be maintained after cropping, maximum area is set to 1.0\n ratio : tuple of (float, float)\n Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)\n interp: int, optional, default=2\n Interpolation method. See resize_short for details.\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n Tuple\n A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the\n original image and (width, height) are the dimensions of the cropped image.\n\n '
(h, w, _) = src.shape
src_area = (h * w)
if ('min_area' in kwargs):
warnings.warn('`min_area` is deprecated. Please use `area` instead.', DeprecationWarning)
area = kwargs.pop('min_area')
assert (not kwargs), 'unexpected keyword arguments for `random_size_crop`.'
if isinstance(area, numeric_types):
area = (area, 1.0)
for _ in range(10):
target_area = (random.uniform(area[0], area[1]) * src_area)
new_ratio = random.uniform(*ratio)
new_w = int(round(np.sqrt((target_area * new_ratio))))
new_h = int(round(np.sqrt((target_area / new_ratio))))
if (random.random() < 0.5):
(new_h, new_w) = (new_w, new_h)
if ((new_w <= w) and (new_h <= h)):
x0 = random.randint(0, (w - new_w))
y0 = random.randint(0, (h - new_h))
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return (out, (x0, y0, new_w, new_h))
return center_crop(src, size, interp) | 6,472,939,180,749,413,000 | Randomly crop src with size. Randomize area and aspect ratio.
Parameters
----------
src : NDArray
Input image
size : tuple of (int, int)
Size of the crop formatted as (width, height).
area : float in (0, 1] or tuple of (float, float)
If a tuple, the minimum and maximum area to be maintained after cropping.
If a float, the minimum area to be maintained after cropping; the maximum area is set to 1.0.
ratio : tuple of (float, float)
Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
interp: int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
Tuple
A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
original image and (width, height) are the dimensions of the cropped image. | python/mxnet/image/image.py | random_size_crop | Vikas89/private-mxnet | python | def random_size_crop(src, size, area, ratio, interp=2, **kwargs):
'Randomly crop src with size. Randomize area and aspect ratio.\n\n Parameters\n ----------\n src : NDArray\n Input image\n size : tuple of (int, int)\n Size of the crop formatted as (width, height).\n area : float in (0, 1] or tuple of (float, float)\n If tuple, minimum area and maximum area to be maintained after cropping\n If float, minimum area to be maintained after cropping, maximum area is set to 1.0\n ratio : tuple of (float, float)\n Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)\n interp: int, optional, default=2\n Interpolation method. See resize_short for details.\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n Tuple\n A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the\n original image and (width, height) are the dimensions of the cropped image.\n\n '
(h, w, _) = src.shape
src_area = (h * w)
if ('min_area' in kwargs):
warnings.warn('`min_area` is deprecated. Please use `area` instead.', DeprecationWarning)
area = kwargs.pop('min_area')
assert (not kwargs), 'unexpected keyword arguments for `random_size_crop`.'
if isinstance(area, numeric_types):
area = (area, 1.0)
for _ in range(10):
target_area = (random.uniform(area[0], area[1]) * src_area)
new_ratio = random.uniform(*ratio)
new_w = int(round(np.sqrt((target_area * new_ratio))))
new_h = int(round(np.sqrt((target_area / new_ratio))))
if (random.random() < 0.5):
(new_h, new_w) = (new_w, new_h)
if ((new_w <= w) and (new_h <= h)):
x0 = random.randint(0, (w - new_w))
y0 = random.randint(0, (h - new_h))
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return (out, (x0, y0, new_w, new_h))
return center_crop(src, size, interp) |
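A short usage sketch for random_size_crop. The file name is a placeholder; the area and ratio values mirror the defaults CreateAugmenter below passes to RandomSizedCropAug.
>>> import mxnet as mx
>>> with open('flower.jpg', 'rb') as fp:                   # placeholder image file
...     img = mx.image.imdecode(fp.read())
...
>>> crop, (x, y, w, h) = mx.image.random_size_crop(img, size=(224, 224),
...                                                area=(0.08, 1.0),
...                                                ratio=(3.0 / 4.0, 4.0 / 3.0))
>>> crop.shape
(224, 224, 3)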
def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False, mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0, pca_noise=0, rand_gray=0, inter_method=2):
'Creates an augmenter list.\n\n Parameters\n ----------\n data_shape : tuple of int\n Shape for output data\n resize : int\n Resize shorter edge if larger than 0 at the begining\n rand_crop : bool\n Whether to enable random cropping other than center crop\n rand_resize : bool\n Whether to enable random sized cropping, require rand_crop to be enabled\n rand_gray : float\n [0, 1], probability to convert to grayscale for all channels, the number\n of channels will not be reduced to 1\n rand_mirror : bool\n Whether to apply horizontal flip to image with probability 0.5\n mean : np.ndarray or None\n Mean pixel values for [r, g, b]\n std : np.ndarray or None\n Standard deviations for [r, g, b]\n brightness : float\n Brightness jittering range (percent)\n contrast : float\n Contrast jittering range (percent)\n saturation : float\n Saturation jittering range (percent)\n hue : float\n Hue jittering range (percent)\n pca_noise : float\n Pca noise level (percent)\n inter_method : int, default=2(Area-based)\n Interpolation method for all resizing operations\n\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n\n Examples\n --------\n >>> # An example of creating multiple augmenters\n >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,\n ... mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,\n ... saturation=0.125, pca_noise=0.05, inter_method=10)\n >>> # dump the details\n >>> for aug in augs:\n ... aug.dumps()\n '
auglist = []
if (resize > 0):
auglist.append(ResizeAug(resize, inter_method))
crop_size = (data_shape[2], data_shape[1])
if rand_resize:
assert rand_crop
auglist.append(RandomSizedCropAug(crop_size, 0.08, ((3.0 / 4.0), (4.0 / 3.0)), inter_method))
elif rand_crop:
auglist.append(RandomCropAug(crop_size, inter_method))
else:
auglist.append(CenterCropAug(crop_size, inter_method))
if rand_mirror:
auglist.append(HorizontalFlipAug(0.5))
auglist.append(CastAug())
if (brightness or contrast or saturation):
auglist.append(ColorJitterAug(brightness, contrast, saturation))
if hue:
auglist.append(HueJitterAug(hue))
if (pca_noise > 0):
eigval = np.array([55.46, 4.794, 1.148])
eigvec = np.array([[(- 0.5675), 0.7192, 0.4009], [(- 0.5808), (- 0.0045), (- 0.814)], [(- 0.5836), (- 0.6948), 0.4203]])
auglist.append(LightingAug(pca_noise, eigval, eigvec))
if (rand_gray > 0):
auglist.append(RandomGrayAug(rand_gray))
if (mean is True):
mean = nd.array([123.68, 116.28, 103.53])
elif (mean is not None):
assert (isinstance(mean, (np.ndarray, nd.NDArray)) and (mean.shape[0] in [1, 3]))
if (std is True):
std = nd.array([58.395, 57.12, 57.375])
elif (std is not None):
assert (isinstance(std, (np.ndarray, nd.NDArray)) and (std.shape[0] in [1, 3]))
if ((mean is not None) or (std is not None)):
auglist.append(ColorNormalizeAug(mean, std))
return auglist | 1,781,855,416,623,279,600 | Creates an augmenter list.
Parameters
----------
data_shape : tuple of int
Shape for output data
resize : int
Resize the shorter edge if larger than 0 at the beginning
rand_crop : bool
Whether to enable random cropping other than center crop
rand_resize : bool
Whether to enable random-sized cropping; requires rand_crop to be enabled
rand_gray : float
[0, 1], probability of converting the image to grayscale across all channels; the number
of channels will not be reduced to 1
rand_mirror : bool
Whether to apply horizontal flip to image with probability 0.5
mean : np.ndarray or None
Mean pixel values for [r, g, b]
std : np.ndarray or None
Standard deviations for [r, g, b]
brightness : float
Brightness jittering range (percent)
contrast : float
Contrast jittering range (percent)
saturation : float
Saturation jittering range (percent)
hue : float
Hue jittering range (percent)
pca_noise : float
PCA noise level (percent)
inter_method : int, default=2(Area-based)
Interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
Examples
--------
>>> # An example of creating multiple augmenters
>>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,
... mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,
... saturation=0.125, pca_noise=0.05, inter_method=10)
>>> # dump the details
>>> for aug in augs:
... aug.dumps() | python/mxnet/image/image.py | CreateAugmenter | Vikas89/private-mxnet | python | def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False, mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0, pca_noise=0, rand_gray=0, inter_method=2):
'Creates an augmenter list.\n\n Parameters\n ----------\n data_shape : tuple of int\n Shape for output data\n resize : int\n Resize shorter edge if larger than 0 at the begining\n rand_crop : bool\n Whether to enable random cropping other than center crop\n rand_resize : bool\n Whether to enable random sized cropping, require rand_crop to be enabled\n rand_gray : float\n [0, 1], probability to convert to grayscale for all channels, the number\n of channels will not be reduced to 1\n rand_mirror : bool\n Whether to apply horizontal flip to image with probability 0.5\n mean : np.ndarray or None\n Mean pixel values for [r, g, b]\n std : np.ndarray or None\n Standard deviations for [r, g, b]\n brightness : float\n Brightness jittering range (percent)\n contrast : float\n Contrast jittering range (percent)\n saturation : float\n Saturation jittering range (percent)\n hue : float\n Hue jittering range (percent)\n pca_noise : float\n Pca noise level (percent)\n inter_method : int, default=2(Area-based)\n Interpolation method for all resizing operations\n\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n\n Examples\n --------\n >>> # An example of creating multiple augmenters\n >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,\n ... mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,\n ... saturation=0.125, pca_noise=0.05, inter_method=10)\n >>> # dump the details\n >>> for aug in augs:\n ... aug.dumps()\n '
auglist = []
if (resize > 0):
auglist.append(ResizeAug(resize, inter_method))
crop_size = (data_shape[2], data_shape[1])
if rand_resize:
assert rand_crop
auglist.append(RandomSizedCropAug(crop_size, 0.08, ((3.0 / 4.0), (4.0 / 3.0)), inter_method))
elif rand_crop:
auglist.append(RandomCropAug(crop_size, inter_method))
else:
auglist.append(CenterCropAug(crop_size, inter_method))
if rand_mirror:
auglist.append(HorizontalFlipAug(0.5))
auglist.append(CastAug())
if (brightness or contrast or saturation):
auglist.append(ColorJitterAug(brightness, contrast, saturation))
if hue:
auglist.append(HueJitterAug(hue))
if (pca_noise > 0):
eigval = np.array([55.46, 4.794, 1.148])
eigvec = np.array([[(- 0.5675), 0.7192, 0.4009], [(- 0.5808), (- 0.0045), (- 0.814)], [(- 0.5836), (- 0.6948), 0.4203]])
auglist.append(LightingAug(pca_noise, eigval, eigvec))
if (rand_gray > 0):
auglist.append(RandomGrayAug(rand_gray))
if (mean is True):
mean = nd.array([123.68, 116.28, 103.53])
elif (mean is not None):
assert (isinstance(mean, (np.ndarray, nd.NDArray)) and (mean.shape[0] in [1, 3]))
if (std is True):
std = nd.array([58.395, 57.12, 57.375])
elif (std is not None):
assert (isinstance(std, (np.ndarray, nd.NDArray)) and (std.shape[0] in [1, 3]))
if ((mean is not None) or (std is not None)):
auglist.append(ColorNormalizeAug(mean, std))
return auglist |
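The docstring example above only dumps the augmenter list; the sketch below also applies it to a decoded image. The file name is a placeholder, and the shape shown follows from data_shape=(3, 224, 224).
>>> import mxnet as mx
>>> augs = mx.image.CreateAugmenter(data_shape=(3, 224, 224), rand_crop=True,
...                                 rand_mirror=True, mean=True, std=True)
>>> with open('flower.jpg', 'rb') as fp:                   # placeholder image file
...     img = mx.image.imdecode(fp.read())
...
>>> for aug in augs:                                       # crop, flip, cast, normalize (in order)
...     img = aug(img)
...
>>> img.shape                                              # still HWC; transpose to CHW before feeding a model
(224, 224, 3)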
def dumps(self):
'Saves the Augmenter to string\n\n Returns\n -------\n str\n JSON formatted string that describes the Augmenter.\n '
return json.dumps([self.__class__.__name__.lower(), self._kwargs]) | 5,340,473,756,469,926,000 | Saves the Augmenter to string
Returns
-------
str
JSON formatted string that describes the Augmenter. | python/mxnet/image/image.py | dumps | Vikas89/private-mxnet | python | def dumps(self):
'Saves the Augmenter to string\n\n Returns\n -------\n str\n JSON formatted string that describes the Augmenter.\n '
return json.dumps([self.__class__.__name__.lower(), self._kwargs]) |
def __call__(self, src):
'Abstract implementation body'
raise NotImplementedError('Must override implementation.') | 6,341,831,232,067,915,000 | Abstract implementation body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
raise NotImplementedError('Must override implementation.') |
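The abstract __call__ above is the extension point for user-defined augmenters. A minimal sketch, assuming the mx.image.Augmenter base class records constructor keyword arguments for dumps(); ClipAug and its parameters are hypothetical.
>>> import mxnet as mx
>>> class ClipAug(mx.image.Augmenter):
...     """Hypothetical augmenter that clips pixel values to [low, high]."""
...     def __init__(self, low=0.0, high=255.0):
...         super(ClipAug, self).__init__(low=low, high=high)   # recorded for dumps()
...         self.low = low
...         self.high = high
...     def __call__(self, src):
...         return mx.nd.clip(src, self.low, self.high)
...
>>> desc = ClipAug().dumps()                               # JSON description provided by the base class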
def dumps(self):
'Override the default to avoid duplicate dump.'
return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]] | 5,817,320,955,584,513,000 | Override the default to avoid duplicate dump. | python/mxnet/image/image.py | dumps | Vikas89/private-mxnet | python | def dumps(self):
return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]] |
def __call__(self, src):
'Augmenter body'
for aug in self.ts:
src = aug(src)
return src | -1,729,443,728,950,751,500 | Augmenter body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
for aug in self.ts:
src = aug(src)
return src |
def __call__(self, src):
'Augmenter body'
return resize_short(src, self.size, self.interp) | -8,370,831,105,102,411,000 | Augmenter body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
return resize_short(src, self.size, self.interp) |
def __call__(self, src):
'Augmenter body'
sizes = (src.shape[0], src.shape[1], self.size[1], self.size[0])
return imresize(src, *self.size, interp=_get_interp_method(self.interp, sizes)) | -7,340,788,848,155,299,000 | Augmenter body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
sizes = (src.shape[0], src.shape[1], self.size[1], self.size[0])
return imresize(src, *self.size, interp=_get_interp_method(self.interp, sizes)) |
def __call__(self, src):
'Augmenter body'
return random_crop(src, self.size, self.interp)[0] | 2,023,972,826,688,800,000 | Augmenter body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
return random_crop(src, self.size, self.interp)[0] |
def __call__(self, src):
'Augmenter body'
return random_size_crop(src, self.size, self.area, self.ratio, self.interp)[0] | -7,275,055,935,349,442,000 | Augmenter body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
return random_size_crop(src, self.size, self.area, self.ratio, self.interp)[0] |
def __call__(self, src):
'Augmenter body'
return center_crop(src, self.size, self.interp)[0] | -6,424,092,096,283,396,000 | Augmenter body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
return center_crop(src, self.size, self.interp)[0] |
def dumps(self):
'Override the default to avoid duplicate dump.'
return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]] | 5,817,320,955,584,513,000 | Override the default to avoid duplicate dump. | python/mxnet/image/image.py | dumps | Vikas89/private-mxnet | python | def dumps(self):
return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]] |
def __call__(self, src):
'Augmenter body'
random.shuffle(self.ts)
for t in self.ts:
src = t(src)
return src | 3,099,077,576,856,897,500 | Augmenter body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
random.shuffle(self.ts)
for t in self.ts:
src = t(src)
return src |
def __call__(self, src):
'Augmenter body'
alpha = (1.0 + random.uniform((- self.brightness), self.brightness))
src *= alpha
return src | -4,187,481,208,873,638,000 | Augmenter body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
alpha = (1.0 + random.uniform((- self.brightness), self.brightness))
src *= alpha
return src |
def __call__(self, src):
'Augmenter body'
alpha = (1.0 + random.uniform((- self.contrast), self.contrast))
gray = (src * self.coef)
gray = (((3.0 * (1.0 - alpha)) / gray.size) * nd.sum(gray))
src *= alpha
src += gray
return src | 7,416,574,687,388,400,000 | Augmenter body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
alpha = (1.0 + random.uniform((- self.contrast), self.contrast))
gray = (src * self.coef)
gray = (((3.0 * (1.0 - alpha)) / gray.size) * nd.sum(gray))
src *= alpha
src += gray
return src |
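The contrast jitter above blends every pixel toward the image's mean luma: new = alpha * src + (1 - alpha) * mean_luma. A NumPy sketch of the same arithmetic, assuming self.coef holds the BT.601 luma weights; the image and alpha are illustrative only.
>>> import numpy as np
>>> coef = np.array([0.299, 0.587, 0.114])                 # assumed BT.601 luma weights
>>> img = np.full((4, 4, 3), 100.0)                        # tiny stand-in image, HWC
>>> alpha = 1.3                                            # one draw from U(1 - contrast, 1 + contrast)
>>> mean_luma = (img * coef).sum() * 3.0 / img.size        # matches the gray term in the code above
>>> jittered = alpha * img + (1.0 - alpha) * mean_luma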
def __call__(self, src):
'Augmenter body'
alpha = (1.0 + random.uniform((- self.saturation), self.saturation))
gray = (src * self.coef)
gray = nd.sum(gray, axis=2, keepdims=True)
gray *= (1.0 - alpha)
src *= alpha
src += gray
return src | -9,177,407,812,149,575,000 | Augmenter body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
alpha = (1.0 + random.uniform((- self.saturation), self.saturation))
gray = (src * self.coef)
gray = nd.sum(gray, axis=2, keepdims=True)
gray *= (1.0 - alpha)
src *= alpha
src += gray
return src |
def __call__(self, src):
'Augmenter body.\n Using approximate linear transfomation described in:\n https://beesbuzz.biz/code/hsv_color_transforms.php\n '
alpha = random.uniform((- self.hue), self.hue)
u = np.cos((alpha * np.pi))
w = np.sin((alpha * np.pi))
bt = np.array([[1.0, 0.0, 0.0], [0.0, u, (- w)], [0.0, w, u]])
t = np.dot(np.dot(self.ityiq, bt), self.tyiq).T
src = nd.dot(src, nd.array(t))
return src | -8,270,626,956,080,227,000 | Augmenter body.
Using the approximate linear transformation described in:
https://beesbuzz.biz/code/hsv_color_transforms.php | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
'Augmenter body.\n Using approximate linear transfomation described in:\n https://beesbuzz.biz/code/hsv_color_transforms.php\n '
alpha = random.uniform((- self.hue), self.hue)
u = np.cos((alpha * np.pi))
w = np.sin((alpha * np.pi))
bt = np.array([[1.0, 0.0, 0.0], [0.0, u, (- w)], [0.0, w, u]])
t = np.dot(np.dot(self.ityiq, bt), self.tyiq).T
src = nd.dot(src, nd.array(t))
return src |
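The hue jitter above is a rotation in the I/Q plane of YIQ space, folded into a single 3x3 matrix applied to each RGB pixel. A NumPy sketch follows; the tyiq/ityiq values are the standard RGB-to-YIQ and YIQ-to-RGB matrices and are assumptions here, since the entry does not show how self.tyiq and self.ityiq are initialised.
>>> import numpy as np
>>> tyiq = np.array([[0.299, 0.587, 0.114],
...                  [0.596, -0.274, -0.321],
...                  [0.211, -0.523, 0.311]])              # RGB -> YIQ (assumed values)
>>> ityiq = np.array([[1.0, 0.956, 0.621],
...                   [1.0, -0.272, -0.647],
...                   [1.0, -1.107, 1.705]])               # YIQ -> RGB (assumed values)
>>> alpha = 0.05                                           # one draw from U(-hue, hue)
>>> u, w = np.cos(alpha * np.pi), np.sin(alpha * np.pi)
>>> bt = np.array([[1.0, 0.0, 0.0],
...                [0.0, u, -w],
...                [0.0, w, u]])                           # rotate the I/Q components
>>> t = ityiq.dot(bt).dot(tyiq).T                          # same combined matrix as in the code above
>>> shifted = np.array([200.0, 120.0, 80.0]).dot(t)        # hue-shifted RGB pixel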
def __call__(self, src):
'Augmenter body'
alpha = np.random.normal(0, self.alphastd, size=(3,))
rgb = np.dot((self.eigvec * alpha), self.eigval)
src += nd.array(rgb)
return src | -2,768,567,695,815,835,600 | Augmenter body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
alpha = np.random.normal(0, self.alphastd, size=(3,))
rgb = np.dot((self.eigvec * alpha), self.eigval)
src += nd.array(rgb)
return src |
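The lighting augmenter implements AlexNet-style PCA colour noise: one random RGB offset, eigvec dotted with (alpha * eigval), is added to every pixel. A NumPy sketch using the ImageNet eigenvalues and eigenvectors that CreateAugmenter above passes in; alphastd = 0.05 is just an illustrative pca_noise value.
>>> import numpy as np
>>> eigval = np.array([55.46, 4.794, 1.148])
>>> eigvec = np.array([[-0.5675, 0.7192, 0.4009],
...                    [-0.5808, -0.0045, -0.814],
...                    [-0.5836, -0.6948, 0.4203]])
>>> alpha = np.random.normal(0, 0.05, size=(3,))           # alphastd = pca_noise
>>> rgb_shift = (eigvec * alpha).dot(eigval)               # single RGB offset for the whole image
>>> rgb_shift.shape
(3,)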
def __call__(self, src):
'Augmenter body'
return color_normalize(src, self.mean, self.std) | 8,233,329,245,456,983,000 | Augmenter body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
return color_normalize(src, self.mean, self.std) |
def __call__(self, src):
'Augmenter body'
if (random.random() < self.p):
src = nd.dot(src, self.mat)
return src | -7,419,213,170,269,568,000 | Augmenter body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
if (random.random() < self.p):
src = nd.dot(src, self.mat)
return src |
def __call__(self, src):
'Augmenter body'
if (random.random() < self.p):
src = nd.flip(src, axis=1)
return src | -8,938,693,278,912,147,000 | Augmenter body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
if (random.random() < self.p):
src = nd.flip(src, axis=1)
return src |
def __call__(self, src):
'Augmenter body'
src = src.astype(self.typ)
return src | -4,804,264,324,144,117,000 | Augmenter body | python/mxnet/image/image.py | __call__ | Vikas89/private-mxnet | python | def __call__(self, src):
src = src.astype(self.typ)
return src |
def reset(self):
'Resets the iterator to the beginning of the data.'
if ((self.seq is not None) and self.shuffle):
random.shuffle(self.seq)
if ((self.last_batch_handle != 'roll_over') or (self._cache_data is None)):
if (self.imgrec is not None):
self.imgrec.reset()
self.cur = 0
if (self._allow_read is False):
self._allow_read = True | -7,756,397,515,869,751,000 | Resets the iterator to the beginning of the data. | python/mxnet/image/image.py | reset | Vikas89/private-mxnet | python | def reset(self):
if ((self.seq is not None) and self.shuffle):
random.shuffle(self.seq)
if ((self.last_batch_handle != 'roll_over') or (self._cache_data is None)):
if (self.imgrec is not None):
self.imgrec.reset()
self.cur = 0
if (self._allow_read is False):
self._allow_read = True |
def hard_reset(self):
'Resets the iterator and ignore roll over data'
if ((self.seq is not None) and self.shuffle):
random.shuffle(self.seq)
if (self.imgrec is not None):
self.imgrec.reset()
self.cur = 0
self._allow_read = True
self._cache_data = None
self._cache_label = None
self._cache_idx = None | 1,775,562,946,994,153,500 | Resets the iterator and ignore roll over data | python/mxnet/image/image.py | hard_reset | Vikas89/private-mxnet | python | def hard_reset(self):
if ((self.seq is not None) and self.shuffle):
random.shuffle(self.seq)
if (self.imgrec is not None):
self.imgrec.reset()
self.cur = 0
self._allow_read = True
self._cache_data = None
self._cache_label = None
self._cache_idx = None |
def next_sample(self):
'Helper function for reading in next sample.'
if (self._allow_read is False):
raise StopIteration
if (self.seq is not None):
if (self.cur < self.num_image):
idx = self.seq[self.cur]
else:
if (self.last_batch_handle != 'discard'):
self.cur = 0
raise StopIteration
self.cur += 1
if (self.imgrec is not None):
s = self.imgrec.read_idx(idx)
(header, img) = recordio.unpack(s)
if (self.imglist is None):
return (header.label, img)
else:
return (self.imglist[idx][0], img)
else:
(label, fname) = self.imglist[idx]
return (label, self.read_image(fname))
else:
s = self.imgrec.read()
if (s is None):
if (self.last_batch_handle != 'discard'):
self.imgrec.reset()
raise StopIteration
(header, img) = recordio.unpack(s)
return (header.label, img) | -5,655,445,514,747,744,000 | Helper function for reading in next sample. | python/mxnet/image/image.py | next_sample | Vikas89/private-mxnet | python | def next_sample(self):
if (self._allow_read is False):
raise StopIteration
if (self.seq is not None):
if (self.cur < self.num_image):
idx = self.seq[self.cur]
else:
if (self.last_batch_handle != 'discard'):
self.cur = 0
raise StopIteration
self.cur += 1
if (self.imgrec is not None):
s = self.imgrec.read_idx(idx)
(header, img) = recordio.unpack(s)
if (self.imglist is None):
return (header.label, img)
else:
return (self.imglist[idx][0], img)
else:
(label, fname) = self.imglist[idx]
return (label, self.read_image(fname))
else:
s = self.imgrec.read()
if (s is None):
if (self.last_batch_handle != 'discard'):
self.imgrec.reset()
raise StopIteration
(header, img) = recordio.unpack(s)
return (header.label, img) |
def _batchify(self, batch_data, batch_label, start=0):
'Helper function for batchifying data'
i = start
batch_size = self.batch_size
try:
while (i < batch_size):
(label, s) = self.next_sample()
data = self.imdecode(s)
try:
self.check_valid_image(data)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
data = self.augmentation_transform(data)
assert (i < batch_size), 'Batch size must be multiples of augmenter output length'
batch_data[i] = self.postprocess_data(data)
batch_label[i] = label
i += 1
except StopIteration:
if (not i):
raise StopIteration
return i | 3,709,117,745,405,608,400 | Helper function for batchifying data | python/mxnet/image/image.py | _batchify | Vikas89/private-mxnet | python | def _batchify(self, batch_data, batch_label, start=0):
i = start
batch_size = self.batch_size
try:
while (i < batch_size):
(label, s) = self.next_sample()
data = self.imdecode(s)
try:
self.check_valid_image(data)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
data = self.augmentation_transform(data)
assert (i < batch_size), 'Batch size must be multiples of augmenter output length'
batch_data[i] = self.postprocess_data(data)
batch_label[i] = label
i += 1
except StopIteration:
if (not i):
raise StopIteration
return i |
def next(self):
'Returns the next batch of data.'
batch_size = self.batch_size
(c, h, w) = self.data_shape
if (self._cache_data is not None):
assert (self._cache_label is not None), "_cache_label didn't have values"
assert (self._cache_idx is not None), "_cache_idx didn't have values"
batch_data = self._cache_data
batch_label = self._cache_label
i = self._cache_idx
else:
batch_data = nd.empty((batch_size, c, h, w))
batch_label = nd.empty(self.provide_label[0][1])
i = self._batchify(batch_data, batch_label)
pad = (batch_size - i)
if (pad != 0):
if (self.last_batch_handle == 'discard'):
raise StopIteration
elif ((self.last_batch_handle == 'roll_over') and (self._cache_data is None)):
self._cache_data = batch_data
self._cache_label = batch_label
self._cache_idx = i
raise StopIteration
else:
_ = self._batchify(batch_data, batch_label, i)
if (self.last_batch_handle == 'pad'):
self._allow_read = False
else:
self._cache_data = None
self._cache_label = None
self._cache_idx = None
return io.DataBatch([batch_data], [batch_label], pad=pad) | 25,468,607,389,459,910 | Returns the next batch of data. | python/mxnet/image/image.py | next | Vikas89/private-mxnet | python | def next(self):
batch_size = self.batch_size
(c, h, w) = self.data_shape
if (self._cache_data is not None):
assert (self._cache_label is not None), "_cache_label didn't have values"
assert (self._cache_idx is not None), "_cache_idx didn't have values"
batch_data = self._cache_data
batch_label = self._cache_label
i = self._cache_idx
else:
batch_data = nd.empty((batch_size, c, h, w))
batch_label = nd.empty(self.provide_label[0][1])
i = self._batchify(batch_data, batch_label)
pad = (batch_size - i)
if (pad != 0):
if (self.last_batch_handle == 'discard'):
raise StopIteration
elif ((self.last_batch_handle == 'roll_over') and (self._cache_data is None)):
self._cache_data = batch_data
self._cache_label = batch_label
self._cache_idx = i
raise StopIteration
else:
_ = self._batchify(batch_data, batch_label, i)
if (self.last_batch_handle == 'pad'):
self._allow_read = False
else:
self._cache_data = None
self._cache_label = None
self._cache_idx = None
return io.DataBatch([batch_data], [batch_label], pad=pad) |
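Tying the iterator methods together: a training loop pulls batches from next() until StopIteration, then calls reset() for the next epoch. A sketch, assuming a RecordIO dataset at the hypothetical paths data/train.rec and data/train.idx and the default label width of 1.
>>> import mxnet as mx
>>> train_iter = mx.image.ImageIter(batch_size=32, data_shape=(3, 224, 224),
...                                 path_imgrec='data/train.rec',      # hypothetical paths
...                                 path_imgidx='data/train.idx',
...                                 shuffle=True, last_batch_handle='discard')
>>> for batch in train_iter:
...     data = batch.data[0]                               # (32, 3, 224, 224) NDArray
...     label = batch.label[0]                             # (32,) NDArray
...     # forward/backward pass goes here
...
>>> train_iter.reset()                                     # start the next epoch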
def check_data_shape(self, data_shape):
'Checks if the input data shape is valid'
if (not (len(data_shape) == 3)):
raise ValueError('data_shape should have length 3, with dimensions CxHxW')
if (not (data_shape[0] == 3)):
raise ValueError('This iterator expects inputs to have 3 channels.') | 8,563,661,476,355,520,000 | Checks if the input data shape is valid | python/mxnet/image/image.py | check_data_shape | Vikas89/private-mxnet | python | def check_data_shape(self, data_shape):
if (not (len(data_shape) == 3)):
raise ValueError('data_shape should have length 3, with dimensions CxHxW')
if (not (data_shape[0] == 3)):
raise ValueError('This iterator expects inputs to have 3 channels.') |
def check_valid_image(self, data):
'Checks if the input data is valid'
if (len(data[0].shape) == 0):
raise RuntimeError('Data shape is wrong') | 5,022,741,528,319,668,000 | Checks if the input data is valid | python/mxnet/image/image.py | check_valid_image | Vikas89/private-mxnet | python | def check_valid_image(self, data):
if (len(data[0].shape) == 0):
raise RuntimeError('Data shape is wrong') |
def imdecode(self, s):
'Decodes a string or byte string to an NDArray.\n See mx.img.imdecode for more details.'
def locate():
'Locate the image file/index if decode fails.'
if (self.seq is not None):
idx = self.seq[((self.cur % self.num_image) - 1)]
else:
idx = ((self.cur % self.num_image) - 1)
if (self.imglist is not None):
(_, fname) = self.imglist[idx]
msg = 'filename: {}'.format(fname)
else:
msg = 'index: {}'.format(idx)
return ('Broken image ' + msg)
try:
img = imdecode(s)
except Exception as e:
raise RuntimeError('{}, {}'.format(locate(), e))
return img | 7,497,748,351,332,963,000 | Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details. | python/mxnet/image/image.py | imdecode | Vikas89/private-mxnet | python | def imdecode(self, s):
'Decodes a string or byte string to an NDArray.\n See mx.img.imdecode for more details.'
def locate():
'Locate the image file/index if decode fails.'
if (self.seq is not None):
idx = self.seq[((self.cur % self.num_image) - 1)]
else:
idx = ((self.cur % self.num_image) - 1)
if (self.imglist is not None):
(_, fname) = self.imglist[idx]
msg = 'filename: {}'.format(fname)
else:
msg = 'index: {}'.format(idx)
return ('Broken image ' + msg)
try:
img = imdecode(s)
except Exception as e:
raise RuntimeError('{}, {}'.format(locate(), e))
return img |
def read_image(self, fname):
"Reads an input image `fname` and returns the decoded raw bytes.\n Example usage:\n ----------\n >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.\n "
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return img | -6,715,990,902,793,112,000 | Reads an input image `fname` and returns the raw image bytes (without decoding them).
Example usage:
----------
>>> dataIter.read_image('Face.jpg') # returns raw image bytes. | python/mxnet/image/image.py | read_image | Vikas89/private-mxnet | python | def read_image(self, fname):
"Reads an input image `fname` and returns the decoded raw bytes.\n Example usage:\n ----------\n >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.\n "
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return img |
def augmentation_transform(self, data):
'Transforms input data with specified augmentation.'
for aug in self.auglist:
data = aug(data)
return data | 8,575,387,383,950,764,000 | Transforms input data with specified augmentation. | python/mxnet/image/image.py | augmentation_transform | Vikas89/private-mxnet | python | def augmentation_transform(self, data):
for aug in self.auglist:
data = aug(data)
return data |
def postprocess_data(self, datum):
'Final postprocessing step before image is loaded into the batch.'
return nd.transpose(datum, axes=(2, 0, 1)) | 2,554,523,868,221,964,300 | Final postprocessing step before image is loaded into the batch. | python/mxnet/image/image.py | postprocess_data | Vikas89/private-mxnet | python | def postprocess_data(self, datum):
return nd.transpose(datum, axes=(2, 0, 1)) |
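postprocess_data converts an image from the HWC layout used by decoding and augmentation to the CHW layout most MXNet models expect, so a batch ends up with shape (batch_size, C, H, W). A one-line illustration of the transpose:
>>> import mxnet as mx
>>> mx.nd.transpose(mx.nd.ones((224, 224, 3)), axes=(2, 0, 1)).shape
(3, 224, 224)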