body_hash | body | docstring | path | name | repository_name | repository_stars | lang | body_without_docstring | unified
---|---|---|---|---|---|---|---|---|---|
f7f0c818a5da1640ccf6d801090eeed5c8c27a3b10bfab535fbf5c73828af3bf | def save_calibration_file(self, meas_type, path):
'Create a OL756-compatible calibration file.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 3 - Irradiance Calibration\n * 4 - Radiance Calibration\n * 5 - Transmittance Calibration\n\n path : :class:`str`\n The path to save the calibration file to.\n '
if (meas_type not in [3, 4, 5]):
raise ValueError('Invalid measurement type {}. Must be 3, 4 or 5'.format(meas_type))
ret = self.lib.SaveCalibrationFile(meas_type, path)
self._check(ret, (_Error.FILE_IO_FAILED,)) | Create an OL756-compatible calibration file.
Parameters
----------
meas_type : :class:`int`
The measurement type.
* 3 - Irradiance Calibration
* 4 - Radiance Calibration
* 5 - Transmittance Calibration
path : :class:`str`
The path to save the calibration file to. | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | save_calibration_file | MSLNZ/msl-equipment | 9 | python | def save_calibration_file(self, meas_type, path):
'Create a OL756-compatible calibration file.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 3 - Irradiance Calibration\n * 4 - Radiance Calibration\n * 5 - Transmittance Calibration\n\n path : :class:`str`\n The path to save the calibration file to.\n '
if (meas_type not in [3, 4, 5]):
raise ValueError('Invalid measurement type {}. Must be 3, 4 or 5'.format(meas_type))
ret = self.lib.SaveCalibrationFile(meas_type, path)
self._check(ret, (_Error.FILE_IO_FAILED,)) | def save_calibration_file(self, meas_type, path):
'Create a OL756-compatible calibration file.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 3 - Irradiance Calibration\n * 4 - Radiance Calibration\n * 5 - Transmittance Calibration\n\n path : :class:`str`\n The path to save the calibration file to.\n '
if (meas_type not in [3, 4, 5]):
raise ValueError('Invalid measurement type {}. Must be 3, 4 or 5'.format(meas_type))
ret = self.lib.SaveCalibrationFile(meas_type, path)
self._check(ret, (_Error.FILE_IO_FAILED,))<|docstring|>Create an OL756-compatible calibration file.
Parameters
----------
meas_type : :class:`int`
The measurement type.
* 3 - Irradiance Calibration
* 4 - Radiance Calibration
* 5 - Transmittance Calibration
path : :class:`str`
The path to save the calibration file to.<|endoftext|> |
7713445eba8d05916e564f98225d0ec2e37469e1751578774a7a0f0728af414c | def save_measurement_data(self, meas_type, path):
'Save the measurement data to a OL756-compatible data file.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 0 - Irradiance\n * 1 - Radiance\n * 2 - Transmittance\n\n path : :class:`str`\n The path to save the data to.\n '
if (meas_type not in [0, 1, 2]):
raise ValueError('Invalid measurement type {}. Must be 0, 1 or 2'.format(meas_type))
self.lib.SaveMeasurementData(meas_type, path) | Save the measurement data to a OL756-compatible data file.
Parameters
----------
meas_type : :class:`int`
The measurement type.
* 0 - Irradiance
* 1 - Radiance
* 2 - Transmittance
path : :class:`str`
The path to save the data to. | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | save_measurement_data | MSLNZ/msl-equipment | 9 | python | def save_measurement_data(self, meas_type, path):
'Save the measurement data to a OL756-compatible data file.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 0 - Irradiance\n * 1 - Radiance\n * 2 - Transmittance\n\n path : :class:`str`\n The path to save the data to.\n '
if (meas_type not in [0, 1, 2]):
raise ValueError('Invalid measurement type {}. Must be 0, 1 or 2'.format(meas_type))
self.lib.SaveMeasurementData(meas_type, path) | def save_measurement_data(self, meas_type, path):
'Save the measurement data to a OL756-compatible data file.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 0 - Irradiance\n * 1 - Radiance\n * 2 - Transmittance\n\n path : :class:`str`\n The path to save the data to.\n '
if (meas_type not in [0, 1, 2]):
raise ValueError('Invalid measurement type {}. Must be 0, 1 or 2'.format(meas_type))
self.lib.SaveMeasurementData(meas_type, path)<|docstring|>Save the measurement data to a OL756-compatible data file.
Parameters
----------
meas_type : :class:`int`
The measurement type.
* 0 - Irradiance
* 1 - Radiance
* 2 - Transmittance
path : :class:`str`
The path to save the data to.<|endoftext|> |
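The two save methods above take raw integer type codes. A minimal usage sketch, assuming `ol` is an already-connected OL756 wrapper instance; `ol`, the file names, and the name-to-code dicts are illustrative assumptions, not part of the dataset rows:

```python
# Hypothetical name-to-code maps for readability; the wrapper itself
# takes the raw integers documented in the docstrings above.
CALIBRATION_TYPES = {'irradiance': 3, 'radiance': 4, 'transmittance': 5}
MEASUREMENT_TYPES = {'irradiance': 0, 'radiance': 1, 'transmittance': 2}

ol.save_calibration_file(CALIBRATION_TYPES['irradiance'], 'irradiance.cal')
ol.save_measurement_data(MEASUREMENT_TYPES['irradiance'], 'irradiance.dat')
```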
b5a213dc4bdb823c3df886475d88d3ee1d12a9cbb6176dd1f48275bde42a987e | def send_down_parameters(self, scan_mode):
'Sends down the parameters to the system.\n\n This needs to be called whenever parameters dealing with the PMT or\n integration time and gain has changed. Needs to be called once before\n doing any measurements or other signal acquisition including dark\n current.\n\n The following methods affect the parameters:\n :meth:`.set_pmt_flux_overload_voltage`\n :meth:`.set_gain`\n :meth:`.set_integration_time`\n :meth:`.set_pmt_hi_voltage`\n :meth:`.set_settling_time`\n :meth:`.set_scan_range`\n :meth:`.set_adaptive_integration_time`\n\n Parameters\n ----------\n scan_mode : :class:`int`\n The scan mode.\n\n * 0 - Point to point\n * 1 - Quick scan\n\n '
if (scan_mode not in [0, 1]):
raise ValueError('Invalid scan mode {}. Must be 0 or 1'.format(scan_mode))
ret = self.lib.SendDownParameters(scan_mode)
self._check(ret, (_Error.SYSTEM_BUSY,)) | Sends down the parameters to the system.
This needs to be called whenever parameters dealing with the PMT or
integration time and gain have changed. It needs to be called once before
doing any measurements or other signal acquisition, including dark
current.
The following methods affect the parameters:
:meth:`.set_pmt_flux_overload_voltage`
:meth:`.set_gain`
:meth:`.set_integration_time`
:meth:`.set_pmt_hi_voltage`
:meth:`.set_settling_time`
:meth:`.set_scan_range`
:meth:`.set_adaptive_integration_time`
Parameters
----------
scan_mode : :class:`int`
The scan mode.
* 0 - Point to point
* 1 - Quick scan | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | send_down_parameters | MSLNZ/msl-equipment | 9 | python | def send_down_parameters(self, scan_mode):
'Sends down the parameters to the system.\n\n This needs to be called whenever parameters dealing with the PMT or\n integration time and gain has changed. Needs to be called once before\n doing any measurements or other signal acquisition including dark\n current.\n\n The following methods affect the parameters:\n :meth:`.set_pmt_flux_overload_voltage`\n :meth:`.set_gain`\n :meth:`.set_integration_time`\n :meth:`.set_pmt_hi_voltage`\n :meth:`.set_settling_time`\n :meth:`.set_scan_range`\n :meth:`.set_adaptive_integration_time`\n\n Parameters\n ----------\n scan_mode : :class:`int`\n The scan mode.\n\n * 0 - Point to point\n * 1 - Quick scan\n\n '
if (scan_mode not in [0, 1]):
raise ValueError('Invalid scan mode {}. Must be 0 or 1'.format(scan_mode))
ret = self.lib.SendDownParameters(scan_mode)
self._check(ret, (_Error.SYSTEM_BUSY,)) | def send_down_parameters(self, scan_mode):
'Sends down the parameters to the system.\n\n This needs to be called whenever parameters dealing with the PMT or\n integration time and gain has changed. Needs to be called once before\n doing any measurements or other signal acquisition including dark\n current.\n\n The following methods affect the parameters:\n :meth:`.set_pmt_flux_overload_voltage`\n :meth:`.set_gain`\n :meth:`.set_integration_time`\n :meth:`.set_pmt_hi_voltage`\n :meth:`.set_settling_time`\n :meth:`.set_scan_range`\n :meth:`.set_adaptive_integration_time`\n\n Parameters\n ----------\n scan_mode : :class:`int`\n The scan mode.\n\n * 0 - Point to point\n * 1 - Quick scan\n\n '
if (scan_mode not in [0, 1]):
raise ValueError('Invalid scan mode {}. Must be 0 or 1'.format(scan_mode))
ret = self.lib.SendDownParameters(scan_mode)
self._check(ret, (_Error.SYSTEM_BUSY,))<|docstring|>Sends down the parameters to the system.
This needs to be called whenever parameters dealing with the PMT or
integration time and gain have changed. It needs to be called once before
doing any measurements or other signal acquisition, including dark
current.
The following methods affect the parameters:
:meth:`.set_pmt_flux_overload_voltage`
:meth:`.set_gain`
:meth:`.set_integration_time`
:meth:`.set_pmt_hi_voltage`
:meth:`.set_settling_time`
:meth:`.set_scan_range`
:meth:`.set_adaptive_integration_time`
Parameters
----------
scan_mode : :class:`int`
The scan mode.
* 0 - Point to point
* 1 - Quick scan<|endoftext|> |
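A minimal sketch of the call order this docstring implies, assuming `ol` is a connected OL756 instance; the specific wavelengths, indices, and measurement type are illustrative:

```python
ol.set_scan_range(250.0, 800.0, 5)     # wavelengths and increment index are assumptions
ol.set_gain(0, 7)                      # point-to-point mode, auto gain ranging
ol.set_integration_time(0, 10)         # point-to-point mode, adaptive timing
ol.send_down_parameters(0)             # push parameters before any acquisition
ol.take_point_to_point_measurement(0)  # 0 = irradiance
```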
7df660aae844bd6fb24877d00ff203f84d18a1436fafa5342d7e7f2650abe14a | def set_adaptive_integration_time(self, gain_index, speed_index):
'Sets the scan speed of the scan at a particular gain range.\n\n Adaptive integration time is used solely for point to point\n scans in auto-gain ranging.\n\n Parameters\n ----------\n gain_index : :class:`int`\n The index of the gain to use to set the integration time.\n\n * 0 - 1.0E-5\n * 1 - 1.0E-6\n * 2 - 1.0E-7\n * 3 - 1.0E-8\n * 4 - 1.0E-9\n * 5 - 1.0E-10\n * 6 - 1.0E-11\n\n speed_index : :class:`int`\n The scan speed index [0..12] -- 0=Slowest, 12=Fastest.\n '
ret = self.lib.SetAdaptiveIntegrationTime(gain_index, speed_index)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_GAIN, _Error.PARAM_ERR_INT_TIME)) | Sets the scan speed of the scan at a particular gain range.
Adaptive integration time is used solely for point to point
scans in auto-gain ranging.
Parameters
----------
gain_index : :class:`int`
The index of the gain to use to set the integration time.
* 0 - 1.0E-5
* 1 - 1.0E-6
* 2 - 1.0E-7
* 3 - 1.0E-8
* 4 - 1.0E-9
* 5 - 1.0E-10
* 6 - 1.0E-11
speed_index : :class:`int`
The scan speed index [0..12] -- 0=Slowest, 12=Fastest. | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | set_adaptive_integration_time | MSLNZ/msl-equipment | 9 | python | def set_adaptive_integration_time(self, gain_index, speed_index):
'Sets the scan speed of the scan at a particular gain range.\n\n Adaptive integration time is used solely for point to point\n scans in auto-gain ranging.\n\n Parameters\n ----------\n gain_index : :class:`int`\n The index of the gain to use to set the integration time.\n\n * 0 - 1.0E-5\n * 1 - 1.0E-6\n * 2 - 1.0E-7\n * 3 - 1.0E-8\n * 4 - 1.0E-9\n * 5 - 1.0E-10\n * 6 - 1.0E-11\n\n speed_index : :class:`int`\n The scan speed index [0..12] -- 0=Slowest, 12=Fastest.\n '
ret = self.lib.SetAdaptiveIntegrationTime(gain_index, speed_index)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_GAIN, _Error.PARAM_ERR_INT_TIME)) | def set_adaptive_integration_time(self, gain_index, speed_index):
'Sets the scan speed of the scan at a particular gain range.\n\n Adaptive integration time is used solely for point to point\n scans in auto-gain ranging.\n\n Parameters\n ----------\n gain_index : :class:`int`\n The index of the gain to use to set the integration time.\n\n * 0 - 1.0E-5\n * 1 - 1.0E-6\n * 2 - 1.0E-7\n * 3 - 1.0E-8\n * 4 - 1.0E-9\n * 5 - 1.0E-10\n * 6 - 1.0E-11\n\n speed_index : :class:`int`\n The scan speed index [0..12] -- 0=Slowest, 12=Fastest.\n '
ret = self.lib.SetAdaptiveIntegrationTime(gain_index, speed_index)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_GAIN, _Error.PARAM_ERR_INT_TIME))<|docstring|>Sets the scan speed of the scan at a particular gain range.
Adaptive integration time is used solely for point to point
scans in auto-gain ranging.
Parameters
----------
gain_index : :class:`int`
The index of the gain to use to set the integration time.
* 0 - 1.0E-5
* 1 - 1.0E-6
* 2 - 1.0E-7
* 3 - 1.0E-8
* 4 - 1.0E-9
* 5 - 1.0E-10
* 6 - 1.0E-11
speed_index : :class:`int`
The scan speed index [0..12] -- 0=Slowest, 12=Fastest.<|endoftext|> |
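The gain indices above map to fixed gain values; a hypothetical lookup helper (not part of the wrapper) makes the table explicit:

```python
# Gain index -> gain value, as listed in the docstring above.
GAINS = {0: 1e-5, 1: 1e-6, 2: 1e-7, 3: 1e-8, 4: 1e-9, 5: 1e-10, 6: 1e-11}

def gain_index_for(value):
    """Return the index for ``value``; exact float match is fine for these literals."""
    for index, gain in GAINS.items():
        if gain == value:
            return index
    raise ValueError('no gain index for {!r}'.format(value))
```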
836e0baf3deae0ef61115edb3d6180030d194e2226cbb5db9a2e5f4e91fd5c82 | def set_averaging_number_of_scan(self, num_avg_scans):
'Set the number of scans to average.\n\n Parameters\n ----------\n num_avg_scans : :class:`int`\n The number of scans to average.\n '
ret = self.lib.SetAveragingNumberOfScan(num_avg_scans)
self._check(ret, ()) | Set the number of scans to average.
Parameters
----------
num_avg_scans : :class:`int`
The number of scans to average. | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | set_averaging_number_of_scan | MSLNZ/msl-equipment | 9 | python | def set_averaging_number_of_scan(self, num_avg_scans):
'Set the number of scans to average.\n\n Parameters\n ----------\n num_avg_scans : :class:`int`\n The number of scans to average.\n '
ret = self.lib.SetAveragingNumberOfScan(num_avg_scans)
self._check(ret, ()) | def set_averaging_number_of_scan(self, num_avg_scans):
'Set the number of scans to average.\n\n Parameters\n ----------\n num_avg_scans : :class:`int`\n The number of scans to average.\n '
ret = self.lib.SetAveragingNumberOfScan(num_avg_scans)
self._check(ret, ())<|docstring|>Set the number of scans to average.
Parameters
----------
num_avg_scans : :class:`int`
The number of scans to average.<|endoftext|> |
7d65155e55b53ff89ceda19ff783c19cb0194a5b2ca8935fbadb67168ce459c1 | def set_dark_current_params(self, mode, wavelength):
'Sets the mode and the wavelength to use for a dark-current measurement.\n\n Parameters\n ----------\n mode : :class:`int`\n The mode to use to acquire a dark-current measurement\n\n * 0 - wavelength\n * 1 - shutter\n\n wavelength : :class:`float`\n The wavelength, in nanometers, to use for a dark-current measurement.\n '
ret = self.lib.SetDarkCurrentParams(mode, wavelength)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_DCMODE, _Error.PARAM_ERR_DCWAVE)) | Sets the mode and the wavelength to use for a dark-current measurement.
Parameters
----------
mode : :class:`int`
The mode to use to acquire a dark-current measurement.
* 0 - wavelength
* 1 - shutter
wavelength : :class:`float`
The wavelength, in nanometers, to use for a dark-current measurement. | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | set_dark_current_params | MSLNZ/msl-equipment | 9 | python | def set_dark_current_params(self, mode, wavelength):
'Sets the mode and the wavelength to use for a dark-current measurement.\n\n Parameters\n ----------\n mode : :class:`int`\n The mode to use to acquire a dark-current measurement\n\n * 0 - wavelength\n * 1 - shutter\n\n wavelength : :class:`float`\n The wavelength, in nanometers, to use for a dark-current measurement.\n '
ret = self.lib.SetDarkCurrentParams(mode, wavelength)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_DCMODE, _Error.PARAM_ERR_DCWAVE)) | def set_dark_current_params(self, mode, wavelength):
'Sets the mode and the wavelength to use for a dark-current measurement.\n\n Parameters\n ----------\n mode : :class:`int`\n The mode to use to acquire a dark-current measurement\n\n * 0 - wavelength\n * 1 - shutter\n\n wavelength : :class:`float`\n The wavelength, in nanometers, to use for a dark-current measurement.\n '
ret = self.lib.SetDarkCurrentParams(mode, wavelength)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_DCMODE, _Error.PARAM_ERR_DCWAVE))<|docstring|>Sets the mode and the wavelength to use for a dark-current measurement.
Parameters
----------
mode : :class:`int`
The mode to use to acquire a dark-current measurement.
* 0 - wavelength
* 1 - shutter
wavelength : :class:`float`
The wavelength, in nanometers, to use for a dark-current measurement.<|endoftext|> |
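A short usage sketch of the two modes, again assuming a connected `ol` instance; the 250 nm value is an arbitrary example:

```python
ol.set_dark_current_params(1, 0.0)    # mode 1: shutter; wavelength presumably ignored (assumption)
ol.set_dark_current_params(0, 250.0)  # mode 0: take dark current at a specific wavelength
```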
8d97696e0ab463b4c2801e7eae7191d378f25b218dbcf79a7d02a39b3e543fe3 | def set_gain(self, scan_mode, gain_index):
'Sets the index of the gain that will be applied\n when the parameters are to be sent down.\n\n Applies to both quick scan and point to point scans.\n\n Parameters\n ----------\n scan_mode : :class:`int`\n The scan mode\n\n * 0 - Point to Point\n * 1 - Quick Scan\n\n gain_index : :class:`int`\n The gain index\n\n * 0 - 1.0E-5\n * 1 - 1.0E-6\n * 2 - 1.0E-7\n * 3 - 1.0E-8\n * 4 - 1.0E-9\n * 5 - 1.0E-10 (available only in Point to Point mode)\n * 6 - 1.0E-11 (available only in Point to Point mode)\n * 7 - Auto Gain Ranging (available only in Point to Point mode)\n\n '
ret = self.lib.SetGain(scan_mode, gain_index)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_GAIN, _Error.PARAM_ERR_SCAN_MODE)) | Sets the index of the gain that will be applied
when the parameters are to be sent down.
Applies to both quick scan and point to point scans.
Parameters
----------
scan_mode : :class:`int`
The scan mode
* 0 - Point to Point
* 1 - Quick Scan
gain_index : :class:`int`
The gain index
* 0 - 1.0E-5
* 1 - 1.0E-6
* 2 - 1.0E-7
* 3 - 1.0E-8
* 4 - 1.0E-9
* 5 - 1.0E-10 (available only in Point to Point mode)
* 6 - 1.0E-11 (available only in Point to Point mode)
* 7 - Auto Gain Ranging (available only in Point to Point mode) | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | set_gain | MSLNZ/msl-equipment | 9 | python | def set_gain(self, scan_mode, gain_index):
'Sets the index of the gain that will be applied\n when the parameters are to be sent down.\n\n Applies to both quick scan and point to point scans.\n\n Parameters\n ----------\n scan_mode : :class:`int`\n The scan mode\n\n * 0 - Point to Point\n * 1 - Quick Scan\n\n gain_index : :class:`int`\n The gain index\n\n * 0 - 1.0E-5\n * 1 - 1.0E-6\n * 2 - 1.0E-7\n * 3 - 1.0E-8\n * 4 - 1.0E-9\n * 5 - 1.0E-10 (available only in Point to Point mode)\n * 6 - 1.0E-11 (available only in Point to Point mode)\n * 7 - Auto Gain Ranging (available only in Point to Point mode)\n\n '
ret = self.lib.SetGain(scan_mode, gain_index)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_GAIN, _Error.PARAM_ERR_SCAN_MODE)) | def set_gain(self, scan_mode, gain_index):
'Sets the index of the gain that will be applied\n when the parameters are to be sent down.\n\n Applies to both quick scan and point to point scans.\n\n Parameters\n ----------\n scan_mode : :class:`int`\n The scan mode\n\n * 0 - Point to Point\n * 1 - Quick Scan\n\n gain_index : :class:`int`\n The gain index\n\n * 0 - 1.0E-5\n * 1 - 1.0E-6\n * 2 - 1.0E-7\n * 3 - 1.0E-8\n * 4 - 1.0E-9\n * 5 - 1.0E-10 (available only in Point to Point mode)\n * 6 - 1.0E-11 (available only in Point to Point mode)\n * 7 - Auto Gain Ranging (available only in Point to Point mode)\n\n '
ret = self.lib.SetGain(scan_mode, gain_index)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_GAIN, _Error.PARAM_ERR_SCAN_MODE))<|docstring|>Sets the index of the gain that will be applied
when the parameters are to be sent down.
Applies to both quick scan and point to point scans.
Parameters
----------
scan_mode : :class:`int`
The scan mode
* 0 - Point to Point
* 1 - Quick Scan
gain_index : :class:`int`
The gain index
* 0 - 1.0E-5
* 1 - 1.0E-6
* 2 - 1.0E-7
* 3 - 1.0E-8
* 4 - 1.0E-9
* 5 - 1.0E-10 (available only in Point to Point mode)
* 6 - 1.0E-11 (available only in Point to Point mode)
* 7 - Auto Gain Ranging (available only in Point to Point mode)<|endoftext|> |
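A sketch of the mode-dependent validity rules stated above; the DLL reports `PARAM_ERR_GAIN` itself, so this client-side pre-check is purely illustrative:

```python
def check_gain(scan_mode, gain_index):
    """Raise ValueError if ``gain_index`` is invalid for ``scan_mode``."""
    limit = 8 if scan_mode == 0 else 5  # indices 5-7 need point-to-point mode
    if not (0 <= gain_index < limit):
        raise ValueError('gain index {} invalid for scan mode {}'.format(
            gain_index, scan_mode))
```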
11b7fed82e7c30193b21f4c1474808b41bfbdc72faebea01f6f4892c71d0c703 | def set_integration_time(self, scan_mode, scan_speed):
'Sets the index of the scan speed used.\n\n Applies to both quick scan and point to point scans.\n In quick scan, the speed will vary based on the scan range and increments.\n\n Parameters\n ----------\n scan_mode : :class:`int`\n The scan mode\n\n * 0 - Point to Point\n * 1 - Quick Scan\n\n scan_speed : :class:`int`\n Index to the integration time array\n\n Point to Point mode\n\n * 0 - 1.000 sec\n * 1 - 0.500 sec\n * 2 - 0.200 sec\n * 3 - 0.100 sec\n * 4 - 0.050 sec\n * 5 - 0.020 sec\n * 6 - 0.010 sec\n * 7 - 0.005 sec\n * 8 - 0.002 sec\n * 9 - 0.001 sec\n * 10 - Adaptive\t(Point To Point mode only)\n * 11 - User defined (Point To Point mode only)\n\n Quick Scan mode\n\n * 0 - slowest\n * 10 - fastest\n\n '
ret = self.lib.SetIntegrationTime(scan_mode, scan_speed)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_SCAN_MODE, _Error.PARAM_ERR_INT_TIME)) | Sets the index of the scan speed used.
Applies to both quick scan and point to point scans.
In quick scan, the speed will vary based on the scan range and increments.
Parameters
----------
scan_mode : :class:`int`
The scan mode
* 0 - Point to Point
* 1 - Quick Scan
scan_speed : :class:`int`
Index to the integration time array
Point to Point mode
* 0 - 1.000 sec
* 1 - 0.500 sec
* 2 - 0.200 sec
* 3 - 0.100 sec
* 4 - 0.050 sec
* 5 - 0.020 sec
* 6 - 0.010 sec
* 7 - 0.005 sec
* 8 - 0.002 sec
* 9 - 0.001 sec
* 10 - Adaptive (Point To Point mode only)
* 11 - User defined (Point To Point mode only)
Quick Scan mode
* 0 - slowest
* 10 - fastest | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | set_integration_time | MSLNZ/msl-equipment | 9 | python | def set_integration_time(self, scan_mode, scan_speed):
'Sets the index of the scan speed used.\n\n Applies to both quick scan and point to point scans.\n In quick scan, the speed will vary based on the scan range and increments.\n\n Parameters\n ----------\n scan_mode : :class:`int`\n The scan mode\n\n * 0 - Point to Point\n * 1 - Quick Scan\n\n scan_speed : :class:`int`\n Index to the integration time array\n\n Point to Point mode\n\n * 0 - 1.000 sec\n * 1 - 0.500 sec\n * 2 - 0.200 sec\n * 3 - 0.100 sec\n * 4 - 0.050 sec\n * 5 - 0.020 sec\n * 6 - 0.010 sec\n * 7 - 0.005 sec\n * 8 - 0.002 sec\n * 9 - 0.001 sec\n * 10 - Adaptive\t(Point To Point mode only)\n * 11 - User defined (Point To Point mode only)\n\n Quick Scan mode\n\n * 0 - slowest\n * 10 - fastest\n\n '
ret = self.lib.SetIntegrationTime(scan_mode, scan_speed)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_SCAN_MODE, _Error.PARAM_ERR_INT_TIME)) | def set_integration_time(self, scan_mode, scan_speed):
'Sets the index of the scan speed used.\n\n Applies to both quick scan and point to point scans.\n In quick scan, the speed will vary based on the scan range and increments.\n\n Parameters\n ----------\n scan_mode : :class:`int`\n The scan mode\n\n * 0 - Point to Point\n * 1 - Quick Scan\n\n scan_speed : :class:`int`\n Index to the integration time array\n\n Point to Point mode\n\n * 0 - 1.000 sec\n * 1 - 0.500 sec\n * 2 - 0.200 sec\n * 3 - 0.100 sec\n * 4 - 0.050 sec\n * 5 - 0.020 sec\n * 6 - 0.010 sec\n * 7 - 0.005 sec\n * 8 - 0.002 sec\n * 9 - 0.001 sec\n * 10 - Adaptive\t(Point To Point mode only)\n * 11 - User defined (Point To Point mode only)\n\n Quick Scan mode\n\n * 0 - slowest\n * 10 - fastest\n\n '
ret = self.lib.SetIntegrationTime(scan_mode, scan_speed)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_SCAN_MODE, _Error.PARAM_ERR_INT_TIME))<|docstring|>Sets the index of the scan speed used.
Applies to both quick scan and point to point scans.
In quick scan, the speed will vary based on the scan range and increments.
Parameters
----------
scan_mode : :class:`int`
The scan mode
* 0 - Point to Point
* 1 - Quick Scan
scan_speed : :class:`int`
Index to the integration time array
Point to Point mode
* 0 - 1.000 sec
* 1 - 0.500 sec
* 2 - 0.200 sec
* 3 - 0.100 sec
* 4 - 0.050 sec
* 5 - 0.020 sec
* 6 - 0.010 sec
* 7 - 0.005 sec
* 8 - 0.002 sec
* 9 - 0.001 sec
* 10 - Adaptive (Point To Point mode only)
* 11 - User defined (Point To Point mode only)
Quick Scan mode
* 0 - slowest
* 10 - fastest<|endoftext|> |
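The point-to-point speed indices map to fixed integration times; a hypothetical lookup for them (indices 10 and 11 select adaptive and user-defined modes rather than a fixed time):

```python
P2P_INTEGRATION_SEC = [1.0, 0.5, 0.2, 0.1, 0.05, 0.02, 0.01, 0.005, 0.002, 0.001]

def integration_seconds(scan_speed):
    """Return the fixed integration time for a point-to-point speed index."""
    if 0 <= scan_speed < len(P2P_INTEGRATION_SEC):
        return P2P_INTEGRATION_SEC[scan_speed]
    raise ValueError('index {} is adaptive/user-defined or out of range'.format(scan_speed))
```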
bd25bf4c7b22343956b34d1cfc367594276e6b830b72211123801e4c144488d6 | def set_pmt_flux_overload_voltage(self, overload_voltage):
'Sets the value to use for the photomultiplier tube flux overload.\n\n Parameters\n ----------\n overload_voltage : :class:`float`\n Voltage that the PMT will determine to be at the overload point.\n Software only, because PMT has built-in protection also.\n '
ret = self.lib.SetPMTFluxOverloadVoltage(overload_voltage)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_OVERLOAD)) | Sets the value to use for the photomultiplier tube flux overload.
Parameters
----------
overload_voltage : :class:`float`
Voltage that the PMT will determine to be at the overload point.
Software only, because PMT has built-in protection also. | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | set_pmt_flux_overload_voltage | MSLNZ/msl-equipment | 9 | python | def set_pmt_flux_overload_voltage(self, overload_voltage):
'Sets the value to use for the photomultiplier tube flux overload.\n\n Parameters\n ----------\n overload_voltage : :class:`float`\n Voltage that the PMT will determine to be at the overload point.\n Software only, because PMT has built-in protection also.\n '
ret = self.lib.SetPMTFluxOverloadVoltage(overload_voltage)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_OVERLOAD)) | def set_pmt_flux_overload_voltage(self, overload_voltage):
'Sets the value to use for the photomultiplier tube flux overload.\n\n Parameters\n ----------\n overload_voltage : :class:`float`\n Voltage that the PMT will determine to be at the overload point.\n Software only, because PMT has built-in protection also.\n '
ret = self.lib.SetPMTFluxOverloadVoltage(overload_voltage)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_OVERLOAD))<|docstring|>Sets the value to use for the photomultiplier tube flux overload.
Parameters
----------
overload_voltage : :class:`float`
Voltage that the PMT will determine to be at the overload point.
Software only, because PMT has built-in protection also.<|endoftext|> |
fc995f649526518b3121559824d3f80c9f98d88e44a1ef1cfe0efdab4a0964c5 | def set_pmt_hi_voltage(self, hi_voltage):
'Sets the value to be determined to be a flux overload by the software.\n\n Parameters\n ----------\n hi_voltage : :class:`float`\n Voltage, in volts, that the PMT will determine to be overload point.\n '
ret = self.lib.SetPMTHiVoltage(hi_voltage)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_OVERLOAD)) | Sets the value that the software will treat as a flux overload.
Parameters
----------
hi_voltage : :class:`float`
Voltage, in volts, that the PMT will determine to be the overload point.
'Sets the value to be determined to be a flux overload by the software.\n\n Parameters\n ----------\n hi_voltage : :class:`float`\n Voltage, in volts, that the PMT will determine to be overload point.\n '
ret = self.lib.SetPMTHiVoltage(hi_voltage)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_OVERLOAD)) | def set_pmt_hi_voltage(self, hi_voltage):
'Sets the value to be determined to be a flux overload by the software.\n\n Parameters\n ----------\n hi_voltage : :class:`float`\n Voltage, in volts, that the PMT will determine to be overload point.\n '
ret = self.lib.SetPMTHiVoltage(hi_voltage)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_OVERLOAD))<|docstring|>Sets the value that the software will treat as a flux overload.
Parameters
----------
hi_voltage : :class:`float`
Voltage, in volts, that the PMT will determine to be the overload point.
e7514016dcd820460d46de1a71f5f53fec4692f02bac2ae13d14ae5bf20f5d3c | def set_reference_white_point(self, white, user_def_x, user_def_y):
'Sets the value of the reference illuminant.\n\n Parameters\n ----------\n white : :class:`int`\n The reference white point\n\n * 0 - Incandescent(A)\n * 1 - Direct Sunlight(B)\n * 2 - Indirect Sunlight(C)\n * 3 - Natural Daylight(D65)\n * 4 - Normalized Reference(E)\n * 5 - User Defined\n\n user_def_x : :class:`float`\n User defined x on CIE chart.\n user_def_y : :class:`float`\n User defined y on CIE chart.\n '
ret = self.lib.SetReferenceWhitePoint(white, user_def_x, user_def_y)
self._check(ret, ()) | Sets the value of the reference illuminant.
Parameters
----------
white : :class:`int`
The reference white point
* 0 - Incandescent(A)
* 1 - Direct Sunlight(B)
* 2 - Indirect Sunlight(C)
* 3 - Natural Daylight(D65)
* 4 - Normalized Reference(E)
* 5 - User Defined
user_def_x : :class:`float`
User defined x on CIE chart.
user_def_y : :class:`float`
User defined y on CIE chart. | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | set_reference_white_point | MSLNZ/msl-equipment | 9 | python | def set_reference_white_point(self, white, user_def_x, user_def_y):
'Sets the value of the reference illuminant.\n\n Parameters\n ----------\n white : :class:`int`\n The reference white point\n\n * 0 - Incandescent(A)\n * 1 - Direct Sunlight(B)\n * 2 - Indirect Sunlight(C)\n * 3 - Natural Daylight(D65)\n * 4 - Normalized Reference(E)\n * 5 - User Defined\n\n user_def_x : :class:`float`\n User defined x on CIE chart.\n user_def_y : :class:`float`\n User defined y on CIE chart.\n '
ret = self.lib.SetReferenceWhitePoint(white, user_def_x, user_def_y)
self._check(ret, ()) | def set_reference_white_point(self, white, user_def_x, user_def_y):
'Sets the value of the reference illuminant.\n\n Parameters\n ----------\n white : :class:`int`\n The reference white point\n\n * 0 - Incandescent(A)\n * 1 - Direct Sunlight(B)\n * 2 - Indirect Sunlight(C)\n * 3 - Natural Daylight(D65)\n * 4 - Normalized Reference(E)\n * 5 - User Defined\n\n user_def_x : :class:`float`\n User defined x on CIE chart.\n user_def_y : :class:`float`\n User defined y on CIE chart.\n '
ret = self.lib.SetReferenceWhitePoint(white, user_def_x, user_def_y)
self._check(ret, ())<|docstring|>Sets the value of the reference illuminant.
Parameters
----------
white : :class:`int`
The reference white point
* 0 - Incandescent(A)
* 1 - Direct Sunlight(B)
* 2 - Indirect Sunlight(C)
* 3 - Natural Daylight(D65)
* 4 - Normalized Reference(E)
* 5 - User Defined
user_def_x : :class:`float`
User defined x on CIE chart.
user_def_y : :class:`float`
User defined y on CIE chart.<|endoftext|> |
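A hypothetical readability map for the white-point codes above; presumably the x/y arguments are only consulted for the user-defined code (an assumption, the docstring does not say):

```python
WHITE_POINTS = {'A': 0, 'B': 1, 'C': 2, 'D65': 3, 'E': 4, 'user': 5}

ol.set_reference_white_point(WHITE_POINTS['D65'], 0.0, 0.0)         # standard illuminant
ol.set_reference_white_point(WHITE_POINTS['user'], 0.3127, 0.3290)  # custom CIE x, y
```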
0bd151f8336cc0109094154701f25a54e838073e78b92cfe48c04617ada4af36 | def set_scan_range(self, start, end, inc_index):
'Sets the wavelength scan range.\n\n Parameters\n ----------\n start : :class:`float`\n Starting wavelength, in nanometers.\n end : :class:`float`\n Ending wavelength, in nanometers.\n inc_index : :class:`int`\n Increment index, in nanometers.\n '
ret = self.lib.SetScanRange(start, end, inc_index)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_WAVE_RANGE, _Error.PARAM_ERR_WAVE_INC)) | Sets the wavelength scan range.
Parameters
----------
start : :class:`float`
Starting wavelength, in nanometers.
end : :class:`float`
Ending wavelength, in nanometers.
inc_index : :class:`int`
Increment index, in nanometers. | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | set_scan_range | MSLNZ/msl-equipment | 9 | python | def set_scan_range(self, start, end, inc_index):
'Sets the wavelength scan range.\n\n Parameters\n ----------\n start : :class:`float`\n Starting wavelength, in nanometers.\n end : :class:`float`\n Ending wavelength, in nanometers.\n inc_index : :class:`int`\n Increment index, in nanometers.\n '
ret = self.lib.SetScanRange(start, end, inc_index)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_WAVE_RANGE, _Error.PARAM_ERR_WAVE_INC)) | def set_scan_range(self, start, end, inc_index):
'Sets the wavelength scan range.\n\n Parameters\n ----------\n start : :class:`float`\n Starting wavelength, in nanometers.\n end : :class:`float`\n Ending wavelength, in nanometers.\n inc_index : :class:`int`\n Increment index, in nanometers.\n '
ret = self.lib.SetScanRange(start, end, inc_index)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.PARAM_ERR_WAVE_RANGE, _Error.PARAM_ERR_WAVE_INC))<|docstring|>Sets the wavelength scan range.
Parameters
----------
start : :class:`float`
Starting wavelength, in nanometers.
end : :class:`float`
Ending wavelength, in nanometers.
inc_index : :class:`int`
Increment index, in nanometers.<|endoftext|> |
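Scan range, settling time, and the mandatory parameter download combine as in this hedged sketch; values are arbitrary, and `inc_index` is treated here as an index into the instrument's increment table, as its name suggests:

```python
ol.set_scan_range(300.0, 700.0, 2)  # 300-700 nm; increment index 2 is an assumption
ol.set_settling_time(0.1)           # pause 100 ms at each target wavelength
ol.send_down_parameters(0)          # parameters take effect only after this call
```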
a5bcb254ec1dadb1aa9abced47de7177c011908f4c411b70ea99412d34404778 | def set_settling_time(self, time):
'Set the settling time.\n\n Settling time is the time that the wavelength drive pauses once\n it reaches its target wavelength.\n\n Parameters\n ----------\n time : :class:`float`\n Settling Time in seconds to be sent down or has already\n been sent to the system.\n '
ret = self.lib.SetSettlingTime(time)
self._check(ret, (_Error.SYSTEM_BUSY,)) | Set the settling time.
Settling time is the time that the wavelength drive pauses once
it reaches its target wavelength.
Parameters
----------
time : :class:`float`
Settling Time in seconds to be sent down or has already
been sent to the system. | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | set_settling_time | MSLNZ/msl-equipment | 9 | python | def set_settling_time(self, time):
'Set the settling time.\n\n Settling time is the time that the wavelength drive pauses once\n it reaches its target wavelength.\n\n Parameters\n ----------\n time : :class:`float`\n Settling Time in seconds to be sent down or has already\n been sent to the system.\n '
ret = self.lib.SetSettlingTime(time)
self._check(ret, (_Error.SYSTEM_BUSY,)) | def set_settling_time(self, time):
'Set the settling time.\n\n Settling time is the time that the wavelength drive pauses once\n it reaches its target wavelength.\n\n Parameters\n ----------\n time : :class:`float`\n Settling Time in seconds to be sent down or has already\n been sent to the system.\n '
ret = self.lib.SetSettlingTime(time)
self._check(ret, (_Error.SYSTEM_BUSY,))<|docstring|>Set the settling time.
Settling time is the time that the wavelength drive pauses once
it reaches its target wavelength.
Parameters
----------
time : :class:`float`
Settling Time in seconds to be sent down or has already
been sent to the system.<|endoftext|> |
63dc6bedc2f935bcdf5e7d1e7476f51701e656c8ebbdd4c6df35c01446c3e782 | def set_tab_delimited_mode(self, enable):
'Purpose of function is to set what mode to write the data files as.\n\n Setting the tab delimited to true will write the data in a tab\n delimited format, else a false will write in a comma delimited format.\n Tab delimited files will not be compatible with some versions of the\n software. If you want data files to be compatible with v1.32 software\n and below, leave the mode to :data:`False`.\n\n Parameters\n ----------\n enable : :class:`bool`\n Whether to use the new file format using TABs as a delimited or\n the old file format compatible with v1.32 and below.\n '
self.lib.SetTabDelimitedMode(enable) | Sets the mode in which data files are written.
Setting tab-delimited mode to true writes the data in a tab-delimited
format; false writes it in a comma-delimited format. Tab-delimited files
will not be compatible with some versions of the software. If you want
data files to be compatible with v1.32 software and below, leave the
mode set to :data:`False`.
Parameters
----------
enable : :class:`bool`
Whether to use the new file format using TABs as a delimiter or
the old file format compatible with v1.32 and below.
'Purpose of function is to set what mode to write the data files as.\n\n Setting the tab delimited to true will write the data in a tab\n delimited format, else a false will write in a comma delimited format.\n Tab delimited files will not be compatible with some versions of the\n software. If you want data files to be compatible with v1.32 software\n and below, leave the mode to :data:`False`.\n\n Parameters\n ----------\n enable : :class:`bool`\n Whether to use the new file format using TABs as a delimited or\n the old file format compatible with v1.32 and below.\n '
self.lib.SetTabDelimitedMode(enable) | def set_tab_delimited_mode(self, enable):
'Purpose of function is to set what mode to write the data files as.\n\n Setting the tab delimited to true will write the data in a tab\n delimited format, else a false will write in a comma delimited format.\n Tab delimited files will not be compatible with some versions of the\n software. If you want data files to be compatible with v1.32 software\n and below, leave the mode to :data:`False`.\n\n Parameters\n ----------\n enable : :class:`bool`\n Whether to use the new file format using TABs as a delimited or\n the old file format compatible with v1.32 and below.\n '
self.lib.SetTabDelimitedMode(enable)<|docstring|>Sets the mode in which data files are written.
Setting tab-delimited mode to true writes the data in a tab-delimited
format; false writes it in a comma-delimited format. Tab-delimited files
will not be compatible with some versions of the software. If you want
data files to be compatible with v1.32 software and below, leave the
mode set to :data:`False`.
Parameters
----------
enable : :class:`bool`
Whether to use the new file format using TABs as a delimiter or
the old file format compatible with v1.32 and below.
14afea26bee9db3f4b3cc05a6e636040202e6120685cd2ef88a796b6ecd5118f | def set_user_defined_integration_time(self, time):
'Sets the user defined integration time to be used only in point to\n point scans and only if the user sets the integration time mode.\n\n Parameters\n ----------\n time : :class:`float`\n Integration time in seconds.\n '
ret = self.lib.SetUserDefinedIntegrationTime(time)
self._check(ret, (_Error.PARAM_ERR_INT_TIME,)) | Sets the user-defined integration time, used only in point to
point scans and only when the user-defined integration time mode is selected.
Parameters
----------
time : :class:`float`
Integration time in seconds. | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | set_user_defined_integration_time | MSLNZ/msl-equipment | 9 | python | def set_user_defined_integration_time(self, time):
'Sets the user defined integration time to be used only in point to\n point scans and only if the user sets the integration time mode.\n\n Parameters\n ----------\n time : :class:`float`\n Integration time in seconds.\n '
ret = self.lib.SetUserDefinedIntegrationTime(time)
self._check(ret, (_Error.PARAM_ERR_INT_TIME,)) | def set_user_defined_integration_time(self, time):
'Sets the user defined integration time to be used only in point to\n point scans and only if the user sets the integration time mode.\n\n Parameters\n ----------\n time : :class:`float`\n Integration time in seconds.\n '
ret = self.lib.SetUserDefinedIntegrationTime(time)
self._check(ret, (_Error.PARAM_ERR_INT_TIME,))<|docstring|>Sets the user-defined integration time, used only in point to
point scans and only when the user-defined integration time mode is selected.
Parameters
----------
time : :class:`float`
Integration time in seconds.<|endoftext|> |
d9f215dd4e18e20195a9bd003f54beee477d71c713944c70069fa471601f9f6f | def stop_measurement(self):
'Stops a measurement.\n\n Applies only to Point to Point measurements. Quick scans are done\n so quickly that there is no need to stop a measurement once it starts.\n '
ret = self.lib.StopMeasurement()
self._check(ret, ()) | Stops a measurement.
Applies only to Point to Point measurements. Quick scans are done
so quickly that there is no need to stop a measurement once it starts. | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | stop_measurement | MSLNZ/msl-equipment | 9 | python | def stop_measurement(self):
'Stops a measurement.\n\n Applies only to Point to Point measurements. Quick scans are done\n so quickly that there is no need to stop a measurement once it starts.\n '
ret = self.lib.StopMeasurement()
self._check(ret, ()) | def stop_measurement(self):
'Stops a measurement.\n\n Applies only to Point to Point measurements. Quick scans are done\n so quickly that there is no need to stop a measurement once it starts.\n '
ret = self.lib.StopMeasurement()
self._check(ret, ())<|docstring|>Stops a measurement.
Applies only to Point to Point measurements. Quick scans are done
so quickly that there is no need to stop a measurement once it starts.<|endoftext|> |
0c38dd139646b5a4f3eb1fa6d155dea2311e1ee4350eb83ca84f8f5eafdd6623 | def take_point_to_point_calibration(self, meas_type):
'Takes a calibration in point to point mode.\n\n Need to have called :meth:`.send_down_parameters` at least once before\n calling any of the measurement functions or data acquisition functions.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 3 - Irradiance Calibration\n * 4 - Radiance Calibration\n * 5 - Transmittance Calibration\n\n '
ret = self.lib.TakePointToPointCalibration(meas_type)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.SYSTEM_NOT_CONNECTED, _Error.SCAN_PARAMSNOTSENT, _Error.SCAN_CANCELLED, _Error.SCAN_FLUXOVERLOAD, _Error.PARAM_ERR_MEASTYPE)) | Takes a calibration in point to point mode.
Need to have called :meth:`.send_down_parameters` at least once before
calling any of the measurement functions or data acquisition functions.
Parameters
----------
meas_type : :class:`int`
The measurement type.
* 3 - Irradiance Calibration
* 4 - Radiance Calibration
* 5 - Transmittance Calibration | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | take_point_to_point_calibration | MSLNZ/msl-equipment | 9 | python | def take_point_to_point_calibration(self, meas_type):
'Takes a calibration in point to point mode.\n\n Need to have called :meth:`.send_down_parameters` at least once before\n calling any of the measurement functions or data acquisition functions.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 3 - Irradiance Calibration\n * 4 - Radiance Calibration\n * 5 - Transmittance Calibration\n\n '
ret = self.lib.TakePointToPointCalibration(meas_type)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.SYSTEM_NOT_CONNECTED, _Error.SCAN_PARAMSNOTSENT, _Error.SCAN_CANCELLED, _Error.SCAN_FLUXOVERLOAD, _Error.PARAM_ERR_MEASTYPE)) | def take_point_to_point_calibration(self, meas_type):
'Takes a calibration in point to point mode.\n\n Need to have called :meth:`.send_down_parameters` at least once before\n calling any of the measurement functions or data acquisition functions.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 3 - Irradiance Calibration\n * 4 - Radiance Calibration\n * 5 - Transmittance Calibration\n\n '
ret = self.lib.TakePointToPointCalibration(meas_type)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.SYSTEM_NOT_CONNECTED, _Error.SCAN_PARAMSNOTSENT, _Error.SCAN_CANCELLED, _Error.SCAN_FLUXOVERLOAD, _Error.PARAM_ERR_MEASTYPE))<|docstring|>Takes a calibration in point to point mode.
Need to have called :meth:`.send_down_parameters` at least once before
calling any of the measurement functions or data acquisition functions.
Parameters
----------
meas_type : :class:`int`
The measurement type.
* 3 - Irradiance Calibration
* 4 - Radiance Calibration
* 5 - Transmittance Calibration<|endoftext|> |
26199795c82bdce1e130c253aeeb92ac16e3b43852df08afba44e23a73c44bf7 | def take_point_to_point_measurement(self, meas_type):
'Takes a measurement in point to point mode.\n\n Need to have called :meth:`.send_down_parameters` at least once before\n calling any of the measurement functions or data acquisition functions.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 0 - Irradiance\n * 1 - Radiance\n * 2 - Transmittance\n\n '
ret = self.lib.TakePointToPointMeasurement(meas_type)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.SYSTEM_NOT_CONNECTED, _Error.SCAN_PARAMSNOTSENT, _Error.SCAN_CANCELLED, _Error.SCAN_FLUXOVERLOAD, _Error.PARAM_ERR_MEASTYPE)) | Takes a measurement in point to point mode.
Need to have called :meth:`.send_down_parameters` at least once before
calling any of the measurement functions or data acquisition functions.
Parameters
----------
meas_type : :class:`int`
The measurement type.
* 0 - Irradiance
* 1 - Radiance
* 2 - Transmittance | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | take_point_to_point_measurement | MSLNZ/msl-equipment | 9 | python | def take_point_to_point_measurement(self, meas_type):
'Takes a measurement in point to point mode.\n\n Need to have called :meth:`.send_down_parameters` at least once before\n calling any of the measurement functions or data acquisition functions.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 0 - Irradiance\n * 1 - Radiance\n * 2 - Transmittance\n\n '
ret = self.lib.TakePointToPointMeasurement(meas_type)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.SYSTEM_NOT_CONNECTED, _Error.SCAN_PARAMSNOTSENT, _Error.SCAN_CANCELLED, _Error.SCAN_FLUXOVERLOAD, _Error.PARAM_ERR_MEASTYPE)) | def take_point_to_point_measurement(self, meas_type):
'Takes a measurement in point to point mode.\n\n Need to have called :meth:`.send_down_parameters` at least once before\n calling any of the measurement functions or data acquisition functions.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 0 - Irradiance\n * 1 - Radiance\n * 2 - Transmittance\n\n '
ret = self.lib.TakePointToPointMeasurement(meas_type)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.SYSTEM_NOT_CONNECTED, _Error.SCAN_PARAMSNOTSENT, _Error.SCAN_CANCELLED, _Error.SCAN_FLUXOVERLOAD, _Error.PARAM_ERR_MEASTYPE))<|docstring|>Takes a measurement in point to point mode.
Need to have called :meth:`.send_down_parameters` at least once before
calling any of the measurement functions or data acquisition functions.
Parameters
----------
meas_type : :class:`int`
The measurement type.
* 0 - Irradiance
* 1 - Radiance
* 2 - Transmittance<|endoftext|> |
ec2ee03a07f2884373d9530d0b43e80a4aea5de1a1b590ca9cc9165fe3579107 | def take_quick_scan_calibration(self, meas_type):
'Takes a calibration in quick scan mode.\n\n Need to have called :meth:`.send_down_parameters` at least once before\n calling any of the measurement functions or data acquisition functions.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 3 - Irradiance Calibration\n * 4 - Radiance Calibration\n * 5 - Transmittance Calibration\n\n '
if (meas_type not in [3, 4, 5]):
raise ValueError('Invalid measurement type {}. Must be 3, 4 or 5'.format(meas_type))
ret = self.lib.TakeQuickScanCalibration(meas_type)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.SYSTEM_NOT_CONNECTED, _Error.SCAN_PARAMSNOTSENT)) | Takes a calibration in quick scan mode.
Need to have called :meth:`.send_down_parameters` at least once before
calling any of the measurement functions or data acquisition functions.
Parameters
----------
meas_type : :class:`int`
The measurement type.
* 3 - Irradiance Calibration
* 4 - Radiance Calibration
* 5 - Transmittance Calibration | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | take_quick_scan_calibration | MSLNZ/msl-equipment | 9 | python | def take_quick_scan_calibration(self, meas_type):
'Takes a calibration in quick scan mode.\n\n Need to have called :meth:`.send_down_parameters` at least once before\n calling any of the measurement functions or data acquisition functions.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 3 - Irradiance Calibration\n * 4 - Radiance Calibration\n * 5 - Transmittance Calibration\n\n '
if (meas_type not in [3, 4, 5]):
raise ValueError('Invalid measurement type {}. Must be 3, 4 or 5'.format(meas_type))
ret = self.lib.TakeQuickScanCalibration(meas_type)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.SYSTEM_NOT_CONNECTED, _Error.SCAN_PARAMSNOTSENT)) | def take_quick_scan_calibration(self, meas_type):
'Takes a calibration in quick scan mode.\n\n Need to have called :meth:`.send_down_parameters` at least once before\n calling any of the measurement functions or data acquisition functions.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 3 - Irradiance Calibration\n * 4 - Radiance Calibration\n * 5 - Transmittance Calibration\n\n '
if (meas_type not in [3, 4, 5]):
raise ValueError('Invalid measurement type {}. Must be 3, 4 or 5'.format(meas_type))
ret = self.lib.TakeQuickScanCalibration(meas_type)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.SYSTEM_NOT_CONNECTED, _Error.SCAN_PARAMSNOTSENT))<|docstring|>Takes a calibration in quick scan mode.
Need to have called :meth:`.send_down_parameters` at least once before
calling any of the measurement functions or data acquisition functions.
Parameters
----------
meas_type : :class:`int`
The measurement type.
* 3 - Irradiance Calibration
* 4 - Radiance Calibration
* 5 - Transmittance Calibration<|endoftext|> |
ffbb71ec727883f866a2d0acd96edcce9c63f27acf7812587bbdb05544baa76a | def take_quick_scan_measurement(self, meas_type):
'Takes a measurement in quick scan mode.\n\n Need to have called :meth:`.send_down_parameters` at least once before\n calling any of the measurement functions or data acquisition functions.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 0 - Irradiance\n * 1 - Radiance\n * 2 - Transmittance\n\n '
if (meas_type not in [0, 1, 2]):
raise ValueError('Invalid measurement type {}. Must be 0, 1 or 2'.format(meas_type))
ret = self.lib.TakeQuickScanMeasurement(meas_type)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.SYSTEM_NOT_CONNECTED, _Error.SCAN_PARAMSNOTSENT)) | Takes a measurement in quick scan mode.
Need to have called :meth:`.send_down_parameters` at least once before
calling any of the measurement functions or data acquisition functions.
Parameters
----------
meas_type : :class:`int`
The measurement type.
* 0 - Irradiance
* 1 - Radiance
* 2 - Transmittance | msl/equipment/resources/optronic_laboratories/ol756ocx_32.py | take_quick_scan_measurement | MSLNZ/msl-equipment | 9 | python | def take_quick_scan_measurement(self, meas_type):
'Takes a measurement in quick scan mode.\n\n Need to have called :meth:`.send_down_parameters` at least once before\n calling any of the measurement functions or data acquisition functions.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 0 - Irradiance\n * 1 - Radiance\n * 2 - Transmittance\n\n '
if (meas_type not in [0, 1, 2]):
raise ValueError('Invalid measurement type {}. Must be 0, 1 or 2'.format(meas_type))
ret = self.lib.TakeQuickScanMeasurement(meas_type)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.SYSTEM_NOT_CONNECTED, _Error.SCAN_PARAMSNOTSENT)) | def take_quick_scan_measurement(self, meas_type):
'Takes a measurement in quick scan mode.\n\n Need to have called :meth:`.send_down_parameters` at least once before\n calling any of the measurement functions or data acquisition functions.\n\n Parameters\n ----------\n meas_type : :class:`int`\n The measurement type.\n\n * 0 - Irradiance\n * 1 - Radiance\n * 2 - Transmittance\n\n '
if (meas_type not in [0, 1, 2]):
raise ValueError('Invalid measurement type {}. Must be 0, 1 or 2'.format(meas_type))
ret = self.lib.TakeQuickScanMeasurement(meas_type)
self._check(ret, (_Error.SYSTEM_BUSY, _Error.SYSTEM_NOT_CONNECTED, _Error.SCAN_PARAMSNOTSENT))<|docstring|>Takes a measurement in quick scan mode.
Need to have called :meth:`.send_down_parameters` at least once before
calling any of the measurement functions or data acquisition functions.
Parameters
----------
meas_type : :class:`int`
The measurement type.
* 0 - Irradiance
* 1 - Radiance
* 2 - Transmittance<|endoftext|> |
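An end-to-end quick-scan sketch following the call order the docstrings describe; `ol` and all the values remain assumptions:

```python
ol.set_gain(1, 2)                        # quick scan mode, 1.0E-7 gain
ol.set_integration_time(1, 5)            # quick scan mode, mid-range speed
ol.send_down_parameters(1)               # 1 = quick scan
ol.take_quick_scan_measurement(0)        # 0 = irradiance
ol.save_measurement_data(0, 'scan.dat')  # hypothetical output path
```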
189f6ab34c443c05c8211c48d456e0cf8d27539fb934ae19b4f24443d055a102 | def die(self: Player) -> None:
'Mark player as dead.\n\n Args:\n self (Player): Itself.\n\n Returns:\n Player: Itself.\n '
Shooter.die(self)
raise End('You died!') | Mark player as dead.
Args:
self (Player): Itself.
Raises:
End: Always, because the player died.
'Mark player as dead.\n\n Args:\n self (Player): Itself.\n\n Returns:\n Player: Itself.\n '
Shooter.die(self)
raise End('You died!') | def die(self: Player) -> None:
'Mark player as dead.\n\n Args:\n self (Player): Itself.\n\n Returns:\n Player: Itself.\n '
Shooter.die(self)
raise End('You died!')<|docstring|>Mark player as dead.
Args:
self (Player): Itself.
Raises:
End: Always, because the player died.
3fcdf825fa144217dab1493c2bb956e93520874fd803dc0a5111256ae73eeee3 | def _frame(self: Player, time_diff: float, events: list, surface: pygame.Surface, scale: Cords) -> Player:
'Handle frame.\n\n Args:\n self (Player): Itself.\n time_diff (float): Time difference.\n events (list): Event list.\n surface (pygame.Surface): Game surface.\n scale (Cords): Scale of the drawing.\n\n Returns:\n Player: Itself.\n '
shoot = False
for event in events:
if (event.type == pygame.KEYDOWN):
if ((event.key == pygame.K_LEFT) or (event.key == ord('a'))):
self._movement.left = True
if ((event.key == pygame.K_RIGHT) or (event.key == ord('d'))):
self._movement.right = True
if ((event.key == pygame.K_UP) or (event.key == ord('w'))):
self._movement.up = True
if ((event.key == pygame.K_DOWN) or (event.key == ord('s'))):
self._movement.down = True
if (event.key == pygame.K_SPACE):
shoot = True
if (event.type == pygame.KEYUP):
if ((event.key == pygame.K_LEFT) or (event.key == ord('a'))):
self._movement.left = False
if ((event.key == pygame.K_RIGHT) or (event.key == ord('d'))):
self._movement.right = False
if ((event.key == pygame.K_UP) or (event.key == ord('w'))):
self._movement.up = False
if ((event.key == pygame.K_DOWN) or (event.key == ord('s'))):
self._movement.down = False
Shooter._frame(self, time_diff, events, surface, scale)
if shoot:
self._shoot(scale)
return self | Handle frame.
Args:
self (Player): Itself.
time_diff (float): Time difference.
events (list): Event list.
surface (pygame.Surface): Game surface.
scale (Cords): Scale of the drawing.
Returns:
Player: Itself. | AIShooter/classes/Elements/Player.py | _frame | SanteriHetekivi/AIShooter | 0 | python | def _frame(self: Player, time_diff: float, events: list, surface: pygame.Surface, scale: Cords) -> Player:
'Handle frame.\n\n Args:\n self (Player): Itself.\n time_diff (float): Time difference.\n events (list): Event list.\n surface (pygame.Surface): Game surface.\n scale (Cords): Scale of the drawing.\n\n Returns:\n Player: Itself.\n '
shoot = False
for event in events:
if (event.type == pygame.KEYDOWN):
if ((event.key == pygame.K_LEFT) or (event.key == ord('a'))):
self._movement.left = True
if ((event.key == pygame.K_RIGHT) or (event.key == ord('d'))):
self._movement.right = True
if ((event.key == pygame.K_UP) or (event.key == ord('w'))):
self._movement.up = True
if ((event.key == pygame.K_DOWN) or (event.key == ord('s'))):
self._movement.down = True
if (event.key == pygame.K_SPACE):
shoot = True
if (event.type == pygame.KEYUP):
if ((event.key == pygame.K_LEFT) or (event.key == ord('a'))):
self._movement.left = False
if ((event.key == pygame.K_RIGHT) or (event.key == ord('d'))):
self._movement.right = False
if ((event.key == pygame.K_UP) or (event.key == ord('w'))):
self._movement.up = False
if ((event.key == pygame.K_DOWN) or (event.key == ord('s'))):
self._movement.down = False
Shooter._frame(self, time_diff, events, surface, scale)
if shoot:
self._shoot(scale)
return self | def _frame(self: Player, time_diff: float, events: list, surface: pygame.Surface, scale: Cords) -> Player:
'Handle frame.\n\n Args:\n self (Player): Itself.\n time_diff (float): Time difference.\n events (list): Event list.\n surface (pygame.Surface): Game surface.\n scale (Cords): Scale of the drawing.\n\n Returns:\n Player: Itself.\n '
shoot = False
for event in events:
if (event.type == pygame.KEYDOWN):
if ((event.key == pygame.K_LEFT) or (event.key == ord('a'))):
self._movement.left = True
if ((event.key == pygame.K_RIGHT) or (event.key == ord('d'))):
self._movement.right = True
if ((event.key == pygame.K_UP) or (event.key == ord('w'))):
self._movement.up = True
if ((event.key == pygame.K_DOWN) or (event.key == ord('s'))):
self._movement.down = True
if (event.key == pygame.K_SPACE):
shoot = True
if (event.type == pygame.KEYUP):
if ((event.key == pygame.K_LEFT) or (event.key == ord('a'))):
self._movement.left = False
if ((event.key == pygame.K_RIGHT) or (event.key == ord('d'))):
self._movement.right = False
if ((event.key == pygame.K_UP) or (event.key == ord('w'))):
self._movement.up = False
if ((event.key == pygame.K_DOWN) or (event.key == ord('s'))):
self._movement.down = False
Shooter._frame(self, time_diff, events, surface, scale)
if shoot:
self._shoot(scale)
return self<|docstring|>Handle frame.
Args:
self (Player): Itself.
time_diff (float): Time difference.
events (list): Event list.
surface (pygame.Surface): Game surface.
scale (Cords): Scale of the drawing.
Returns:
Player: Itself.<|endoftext|> |
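The `_frame` handler above uses the standard pygame pattern of setting a movement flag on `KEYDOWN` and clearing it on `KEYUP`; a minimal standalone sketch of that pattern, where the `Movement` class and key map are illustrative stand-ins for the game's own objects:

```python
import pygame

class Movement:
    left = right = up = down = False

KEYMAP = {pygame.K_LEFT: 'left', pygame.K_RIGHT: 'right',
          pygame.K_UP: 'up', pygame.K_DOWN: 'down'}

def handle(movement, event):
    """Set a direction flag on KEYDOWN, clear it on KEYUP."""
    if event.type in (pygame.KEYDOWN, pygame.KEYUP):
        name = KEYMAP.get(event.key)
        if name is not None:
            setattr(movement, name, event.type == pygame.KEYDOWN)
```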
1d54a4f9caa2936d0fe065114487982c92f6a79b0cb800faac14df364a5b35db | def method():
    'A method' | A method | src/nti/schema/tests/test_jsonschema.py | method | NextThought/nti.schema | 0 | python | def method():
    pass | def method():
    pass<|docstring|>A method<|endoftext|>
f5007ec9581e85174d3ec6ff36ba479ca8cbc41985b833c6e7e20d96652cfd57 | def init(args=None):
'\n Imports and initializes all plugins from auto and from given arguments.\n\n @param args arguments object like argparse.Namespace\n @param args.PLUGINS list of Python modules or classes to import,\n as ["my.module", "other.module.SomeClass", ]\n '
for f in sorted(glob.glob(os.path.join(os.path.dirname(__file__), 'auto', '*'))):
if (not f.lower().endswith(('.py', '.pyc'))):
continue
name = os.path.splitext(os.path.split(f)[(- 1)])[0]
if (name.startswith('__') or (name in PLUGINS)):
continue
modulename = ('%s.auto.%s' % (__package__, name))
try:
plugin = import_item(modulename)
if callable(getattr(plugin, 'init', None)):
plugin.init(args)
PLUGINS[name] = plugin
except Exception:
ConsolePrinter.error('Error loading plugin %s.', modulename)
if args:
configure(args)
populate_known_plugins()
populate_write_formats() | Imports and initializes all plugins from auto and from given arguments.
@param args arguments object like argparse.Namespace
@param args.PLUGINS list of Python modules or classes to import,
as ["my.module", "other.module.SomeClass", ] | src/grepros/plugins/__init__.py | init | suurjaak/grepros | 10 | python | def init(args=None):
'\n Imports and initializes all plugins from auto and from given arguments.\n\n @param args arguments object like argparse.Namespace\n @param args.PLUGINS list of Python modules or classes to import,\n as ["my.module", "other.module.SomeClass", ]\n '
for f in sorted(glob.glob(os.path.join(os.path.dirname(__file__), 'auto', '*'))):
if (not f.lower().endswith(('.py', '.pyc'))):
continue
name = os.path.splitext(os.path.split(f)[(- 1)])[0]
if (name.startswith('__') or (name in PLUGINS)):
continue
modulename = ('%s.auto.%s' % (__package__, name))
try:
plugin = import_item(modulename)
if callable(getattr(plugin, 'init', None)):
plugin.init(args)
PLUGINS[name] = plugin
except Exception:
ConsolePrinter.error('Error loading plugin %s.', modulename)
if args:
configure(args)
populate_known_plugins()
populate_write_formats() | def init(args=None):
'\n Imports and initializes all plugins from auto and from given arguments.\n\n @param args arguments object like argparse.Namespace\n @param args.PLUGINS list of Python modules or classes to import,\n as ["my.module", "other.module.SomeClass", ]\n '
for f in sorted(glob.glob(os.path.join(os.path.dirname(__file__), 'auto', '*'))):
if (not f.lower().endswith(('.py', '.pyc'))):
continue
name = os.path.splitext(os.path.split(f)[(- 1)])[0]
if (name.startswith('__') or (name in PLUGINS)):
continue
modulename = ('%s.auto.%s' % (__package__, name))
try:
plugin = import_item(modulename)
if callable(getattr(plugin, 'init', None)):
plugin.init(args)
PLUGINS[name] = plugin
except Exception:
ConsolePrinter.error('Error loading plugin %s.', modulename)
if args:
configure(args)
populate_known_plugins()
populate_write_formats()<|docstring|>Imports and initializes all plugins from auto and from given arguments.
@param args arguments object like argparse.Namespace
@param args.PLUGINS list of Python modules or classes to import,
as ["my.module", "other.module.SomeClass", ]<|endoftext|> |
6d2b2e14088c34f1e72513ff7c2dc694cfbaef3123ef405d0afbf69e18b78189 | def configure(args):
'\n Imports plugin Python packages, invokes init(args) if any, raises on error.\n\n @param args arguments object like argparse.Namespace\n @param args.PLUGINS list of Python modules or classes to import,\n as ["my.module", "other.module.SomeClass", ]\n '
for name in (n for n in args.PLUGINS if (n not in PLUGINS)):
try:
plugin = import_item(name)
if callable(getattr(plugin, 'init', None)):
plugin.init(args)
PLUGINS[name] = plugin
except ImportWarning:
raise
except Exception:
ConsolePrinter.error('Error loading plugin %s.', name)
raise | Imports plugin Python packages, invokes init(args) if any, raises on error.
@param args arguments object like argparse.Namespace
@param args.PLUGINS list of Python modules or classes to import,
as ["my.module", "other.module.SomeClass", ] | src/grepros/plugins/__init__.py | configure | suurjaak/grepros | 10 | python | def configure(args):
'\n Imports plugin Python packages, invokes init(args) if any, raises on error.\n\n @param args arguments object like argparse.Namespace\n @param args.PLUGINS list of Python modules or classes to import,\n as ["my.module", "other.module.SomeClass", ]\n '
for name in (n for n in args.PLUGINS if (n not in PLUGINS)):
try:
plugin = import_item(name)
if callable(getattr(plugin, 'init', None)):
plugin.init(args)
PLUGINS[name] = plugin
except ImportWarning:
raise
except Exception:
ConsolePrinter.error('Error loading plugin %s.', name)
raise | def configure(args):
'\n Imports plugin Python packages, invokes init(args) if any, raises on error.\n\n @param args arguments object like argparse.Namespace\n @param args.PLUGINS list of Python modules or classes to import,\n as ["my.module", "other.module.SomeClass", ]\n '
for name in (n for n in args.PLUGINS if (n not in PLUGINS)):
try:
plugin = import_item(name)
if callable(getattr(plugin, 'init', None)):
plugin.init(args)
PLUGINS[name] = plugin
except ImportWarning:
raise
except Exception:
ConsolePrinter.error('Error loading plugin %s.', name)
raise<|docstring|>Imports plugin Python packages, invokes init(args) if any, raises on error.
@param args arguments object like argparse.Namespace
@param args.PLUGINS list of Python modules or classes to import,
as ["my.module", "other.module.SomeClass", ]<|endoftext|> |
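The loaders above only require a module (or class) exposing optional init(args) and load(category, args) callables, which they look up via getattr. A minimal sketch of such a plugin module; the module name and SampleSink are made up, with SampleSink standing in for a real SinkBase subclass:

# my_plugin.py -- hypothetical module loadable via "--plugin my_plugin"
class SampleSink:
    # Stand-in for a real SinkBase subclass; illustration only.
    def __init__(self, args):
        self.args = args

def init(args=None):
    # One-time setup; args is the argparse.Namespace, or None.
    pass

def load(category, args):
    # Return an instance for a category this plugin handles, None otherwise.
    return SampleSink(args) if category == 'sink' else None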
925cbaee43b986c710ee2c1f64a0c93c7ad863d663cdf256e4ef0cf570c24bf3 | def load(category, args, collect=False):
'\n Returns a plugin category instance loaded from any configured plugin, or None.\n\n @param category item category like "source", "sink", or "search"\n @param args arguments object like argparse.Namespace\n @param collect if true, returns a list of instances,\n using all plugins that return something\n '
result = []
for (name, plugin) in PLUGINS.items():
if callable(getattr(plugin, 'load', None)):
try:
instance = plugin.load(category, args)
if (instance is not None):
result.append(instance)
if (not collect):
break
except Exception:
ConsolePrinter.error('Error invoking %s.load(%r, args).', name, category)
raise
return (result if collect else (result[0] if result else None)) | Returns a plugin category instance loaded from any configured plugin, or None.
@param category item category like "source", "sink", or "search"
@param args arguments object like argparse.Namespace
@param collect if true, returns a list of instances,
using all plugins that return something | src/grepros/plugins/__init__.py | load | suurjaak/grepros | 10 | python | def load(category, args, collect=False):
'\n Returns a plugin category instance loaded from any configured plugin, or None.\n\n @param category item category like "source", "sink", or "search"\n @param args arguments object like argparse.Namespace\n @param collect if true, returns a list of instances,\n using all plugins that return something\n '
result = []
for (name, plugin) in PLUGINS.items():
if callable(getattr(plugin, 'load', None)):
try:
instance = plugin.load(category, args)
if (instance is not None):
result.append(instance)
if (not collect):
break
except Exception:
ConsolePrinter.error('Error invoking %s.load(%r, args).', name, category)
raise
return (result if collect else (result[0] if result else None)) | def load(category, args, collect=False):
'\n Returns a plugin category instance loaded from any configured plugin, or None.\n\n @param category item category like "source", "sink", or "search"\n @param args arguments object like argparse.Namespace\n @param collect if true, returns a list of instances,\n using all plugins that return something\n '
result = []
for (name, plugin) in PLUGINS.items():
if callable(getattr(plugin, 'load', None)):
try:
instance = plugin.load(category, args)
if (instance is not None):
result.append(instance)
if (not collect):
break
except Exception:
ConsolePrinter.error('Error invoking %s.load(%r, args).', name, category)
raise
return (result if collect else (result[0] if result else None))<|docstring|>Returns a plugin category instance loaded from any configured plugin, or None.
@param category item category like "source", "sink", or "search"
@param args arguments object like argparse.Namespace
@param collect if true, returns a list of instances,
using all plugins that return something<|endoftext|> |
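A hedged usage sketch for load(), assuming plugins were already registered through init() or configure(); the Namespace contents are placeholders:

import argparse

args = argparse.Namespace(PLUGINS=[])           # minimal stand-in arguments
sink = load('sink', args)                       # first plugin that returns one
searches = load('search', args, collect=True)   # every plugin that returns one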
a009fc04d8474e36767efeb1f0d9dae303d0f5774522d943fe8e48f97c6c72d1 | def add_write_format(name, cls, label=None, options=()):
'\n Adds plugin to `--write` in main.ARGUMENTS and MultiSink formats.\n\n @param name format name like "csv", added to `--write .. format=FORMAT`\n @param cls class providing SinkBase interface\n @param label plugin label; if multiple plugins add the same option,\n "label output" in help text is replaced with "label1/label2/.. output"\n @param options a sequence of (name, help) to add to --write help, like\n [("template=/my/path.tpl", "custom template to use for HTML output")]\n '
MultiSink.FORMAT_CLASSES[name] = cls
if options:
WRITE_OPTIONS.setdefault(label, []).extend(options) | Adds plugin to `--write` in main.ARGUMENTS and MultiSink formats.
@param name format name like "csv", added to `--write .. format=FORMAT`
@param cls class providing SinkBase interface
@param label plugin label; if multiple plugins add the same option,
"label output" in help text is replaced with "label1/label2/.. output"
@param options a sequence of (name, help) to add to --write help, like
[("template=/my/path.tpl", "custom template to use for HTML output")] | src/grepros/plugins/__init__.py | add_write_format | suurjaak/grepros | 10 | python | def add_write_format(name, cls, label=None, options=()):
'\n Adds plugin to `--write` in main.ARGUMENTS and MultiSink formats.\n\n @param name format name like "csv", added to `--write .. format=FORMAT`\n @param cls class providing SinkBase interface\n @param label plugin label; if multiple plugins add the same option,\n "label output" in help text is replaced with "label1/label2/.. output"\n @param options a sequence of (name, help) to add to --write help, like\n [("template=/my/path.tpl", "custom template to use for HTML output")]\n '
MultiSink.FORMAT_CLASSES[name] = cls
if options:
WRITE_OPTIONS.setdefault(label, []).extend(options) | def add_write_format(name, cls, label=None, options=()):
'\n Adds plugin to `--write` in main.ARGUMENTS and MultiSink formats.\n\n @param name format name like "csv", added to `--write .. format=FORMAT`\n @param cls class providing SinkBase interface\n @param label plugin label; if multiple plugins add the same option,\n "label output" in help text is replaced with "label1/label2/.. output"\n @param options a sequence of (name, help) to add to --write help, like\n [("template=/my/path.tpl", "custom template to use for HTML output")]\n '
MultiSink.FORMAT_CLASSES[name] = cls
if options:
WRITE_OPTIONS.setdefault(label, []).extend(options)<|docstring|>Adds plugin to `--write` in main.ARGUMENTS and MultiSink formats.
@param name format name like "csv", added to `--write .. format=FORMAT`
@param cls class providing SinkBase interface
@param label plugin label; if multiple plugins add the same option,
"label output" in help text is replaced with "label1/label2/.. output"
@param options a sequence of (name, help) to add to --write help, like
[("template=/my/path.tpl", "custom template to use for HTML output")]<|endoftext|> |
72a1872d9244d494918226e49066bdc21cfddc4e4c68e05783fb32ed5ddf1a8a | def get_argument(name, group=None):
'\n Returns a command-line argument dictionary, or None if not found.\n\n @param name argument name like "--write"\n @param group argument group like "Output control", if any\n '
from .. import main
if group:
return next((d for d in main.ARGUMENTS.get('groups', {}).get(group, []) if (name in d.get('args'))), None)
return next((d for d in main.ARGUMENTS.get('arguments', []) if (name in d.get('args'))), None) | Returns a command-line argument dictionary, or None if not found.
@param name argument name like "--write"
@param group argument group like "Output control", if any | src/grepros/plugins/__init__.py | get_argument | suurjaak/grepros | 10 | python | def get_argument(name, group=None):
'\n Returns a command-line argument dictionary, or None if not found.\n\n @param name argument name like "--write"\n @param group argument group like "Output control", if any\n '
from .. import main
if group:
return next((d for d in main.ARGUMENTS.get('groups', {}).get(group, []) if (name in d.get('args'))), None)
return next((d for d in main.ARGUMENTS.get('arguments', []) if (name in d.get('args'))), None) | def get_argument(name, group=None):
'\n Returns a command-line argument dictionary, or None if not found.\n\n @param name argument name like "--write"\n @param group argument group like "Output control", if any\n '
from .. import main
if group:
return next((d for d in main.ARGUMENTS.get('groups', {}).get(group, []) if (name in d.get('args'))), None)
return next((d for d in main.ARGUMENTS.get('arguments', []) if (name in d.get('args'))), None)<|docstring|>Returns a command-line argument dictionary, or None if not found.
@param name argument name like "--write"
@param group argument group like "Output control", if any<|endoftext|> |
b8123773754e663b80219fc791cf69d0c516cf33872daac34ba43632f2caa520 | def populate_known_plugins():
'Adds known non-auto plugins to `--plugin` argument help.'
plugins = []
for f in sorted(glob.glob(os.path.join(os.path.dirname(__file__), '*'))):
if (not f.lower().endswith(('.py', '.pyc'))):
continue
name = os.path.splitext(os.path.split(f)[(- 1)])[0]
if (not name.startswith('__')):
plugins.append(('%s.%s' % (__package__, name)))
pluginarg = get_argument('--plugin')
if (pluginarg and plugins):
MAXLINELEN = 60
lines = ['load a Python module or class as plugin', '(built-in plugins: ']
for (i, name) in enumerate(plugins):
if (not i):
lines[(- 1)] += name
elif (len(((lines[(- 1)] + ', ') + name)) > MAXLINELEN):
lines[(- 1)] += ', '
lines.append((' ' + name))
else:
lines[(- 1)] += (', ' + name)
lines[(- 1)] += ')'
pluginarg['help'] = '\n'.join(lines) | Adds known non-auto plugins to `--plugin` argument help. | src/grepros/plugins/__init__.py | populate_known_plugins | suurjaak/grepros | 10 | python | def populate_known_plugins():
plugins = []
for f in sorted(glob.glob(os.path.join(os.path.dirname(__file__), '*'))):
if (not f.lower().endswith(('.py', '.pyc'))):
continue
name = os.path.splitext(os.path.split(f)[(- 1)])[0]
if (not name.startswith('__')):
plugins.append(('%s.%s' % (__package__, name)))
pluginarg = get_argument('--plugin')
if (pluginarg and plugins):
MAXLINELEN = 60
lines = ['load a Python module or class as plugin', '(built-in plugins: ']
for (i, name) in enumerate(plugins):
if (not i):
lines[(- 1)] += name
elif (len(((lines[(- 1)] + ', ') + name)) > MAXLINELEN):
lines[(- 1)] += ', '
lines.append((' ' + name))
else:
lines[(- 1)] += (', ' + name)
lines[(- 1)] += ')'
pluginarg['help'] = '\n'.join(lines) | def populate_known_plugins():
plugins = []
for f in sorted(glob.glob(os.path.join(os.path.dirname(__file__), '*'))):
if (not f.lower().endswith(('.py', '.pyc'))):
continue
name = os.path.splitext(os.path.split(f)[(- 1)])[0]
if (not name.startswith('__')):
plugins.append(('%s.%s' % (__package__, name)))
pluginarg = get_argument('--plugin')
if (pluginarg and plugins):
MAXLINELEN = 60
lines = ['load a Python module or class as plugin', '(built-in plugins: ']
for (i, name) in enumerate(plugins):
if (not i):
lines[(- 1)] += name
elif (len(((lines[(- 1)] + ', ') + name)) > MAXLINELEN):
lines[(- 1)] += ', '
lines.append((' ' + name))
else:
lines[(- 1)] += (', ' + name)
lines[(- 1)] += ')'
pluginarg['help'] = '\n'.join(lines)<|docstring|>Adds known non-auto plugins to `--plugin` argument help.<|endoftext|> |
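The greedy wrap above can be approximated with the standard library. A rough textwrap equivalent, with illustrative plugin names and slightly different indentation than the hand-rolled loop:

import textwrap

plugins = ['grepros.plugins.embag', 'grepros.plugins.mcap']   # illustrative
wrapped = textwrap.fill('(built-in plugins: ' + ', '.join(plugins) + ')',
                        width=60, subsequent_indent=' ')
help_text = 'load a Python module or class as plugin\n' + wrapped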
da07a4c0a819b1653cdc3429e9490f729b44f810ff3f00819088b0f23f610ce0 | def populate_write_formats():
'Populates main.ARGUMENTS with added write formats and options.'
writearg = get_argument('--write')
if (not writearg):
return
formats = sorted(set(MultiSink.FORMAT_CLASSES))
writearg['metavar'] = ('TARGET [format=%s] [KEY=VALUE ...]' % '|'.join(formats))
if (not WRITE_OPTIONS):
return
MAXNAME = 24
LEADING = ' '
texts = {}
inters = {}
namelabels = {}
namelens = {}
for (label, opts) in WRITE_OPTIONS.items():
for (name, help) in opts:
texts.setdefault(name, help)
namelabels.setdefault(name, []).append(label)
namelens[name] = len(name)
maxname = max(((x if (x <= MAXNAME) else 0) for x in namelens.values()))
for (label, opts) in WRITE_OPTIONS.items():
for (name, help) in opts:
inters[name] = ('\n' if (len(name) > MAXNAME) else (' ' * ((maxname - len(name)) + 2)))
indent = ((LEADING + ' ') + (' ' * (maxname or MAXNAME)))
PLACEHOLDER = '<plugin label replacement>'
for name in list(texts):
if (len(namelabels[name]) > 1):
for label in namelabels[name]:
texts[name] = texts[name].replace(('%s output' % label), PLACEHOLDER)
labels = '/'.join(sorted(filter(bool, namelabels[name]), key=(lambda x: x.lower())))
texts[name] = texts[name].replace(PLACEHOLDER, (labels + ' output'))
fmt = (lambda n, h: '\n'.join((((indent if (i or ('\n' == inters[n])) else '') + l) for (i, l) in enumerate(h.splitlines()))))
text = '\n'.join(sorted((''.join((LEADING, n, inters[n], fmt(n, h))) for (n, h) in texts.items())))
writearg['help'] += ('\n' + text) | Populates main.ARGUMENTS with added write formats and options. | src/grepros/plugins/__init__.py | populate_write_formats | suurjaak/grepros | 10 | python | def populate_write_formats():
writearg = get_argument('--write')
if (not writearg):
return
formats = sorted(set(MultiSink.FORMAT_CLASSES))
writearg['metavar'] = ('TARGET [format=%s] [KEY=VALUE ...]' % '|'.join(formats))
if (not WRITE_OPTIONS):
return
MAXNAME = 24
LEADING = ' '
texts = {}
inters = {}
namelabels = {}
namelens = {}
for (label, opts) in WRITE_OPTIONS.items():
for (name, help) in opts:
texts.setdefault(name, help)
namelabels.setdefault(name, []).append(label)
namelens[name] = len(name)
maxname = max(((x if (x <= MAXNAME) else 0) for x in namelens.values()))
for (label, opts) in WRITE_OPTIONS.items():
for (name, help) in opts:
inters[name] = ('\n' if (len(name) > MAXNAME) else (' ' * ((maxname - len(name)) + 2)))
indent = ((LEADING + ' ') + (' ' * (maxname or MAXNAME)))
PLACEHOLDER = '<plugin label replacement>'
for name in list(texts):
if (len(namelabels[name]) > 1):
for label in namelabels[name]:
texts[name] = texts[name].replace(('%s output' % label), PLACEHOLDER)
labels = '/'.join(sorted(filter(bool, namelabels[name]), key=(lambda x: x.lower())))
texts[name] = texts[name].replace(PLACEHOLDER, (labels + ' output'))
    fmt = (lambda n, h: '\n'.join((((indent if (i or ('\n' == inters[n])) else '') + l) for (i, l) in enumerate(h.splitlines()))))
    text = '\n'.join(sorted((''.join((LEADING, n, inters[n], fmt(n, h))) for (n, h) in texts.items())))
writearg['help'] += ('\n' + text) | def populate_write_formats():
writearg = get_argument('--write')
if (not writearg):
return
formats = sorted(set(MultiSink.FORMAT_CLASSES))
writearg['metavar'] = ('TARGET [format=%s] [KEY=VALUE ...]' % '|'.join(formats))
if (not WRITE_OPTIONS):
return
MAXNAME = 24
LEADING = ' '
texts = {}
inters = {}
namelabels = {}
namelens = {}
for (label, opts) in WRITE_OPTIONS.items():
for (name, help) in opts:
texts.setdefault(name, help)
namelabels.setdefault(name, []).append(label)
namelens[name] = len(name)
maxname = max(((x if (x <= MAXNAME) else 0) for x in namelens.values()))
for (label, opts) in WRITE_OPTIONS.items():
for (name, help) in opts:
inters[name] = ('\n' if (len(name) > MAXNAME) else (' ' * ((maxname - len(name)) + 2)))
indent = ((LEADING + ' ') + (' ' * (maxname or MAXNAME)))
PLACEHOLDER = '<plugin label replacement>'
for name in list(texts):
if (len(namelabels[name]) > 1):
for label in namelabels[name]:
texts[name] = texts[name].replace(('%s output' % label), PLACEHOLDER)
labels = '/'.join(sorted(filter(bool, namelabels[name]), key=(lambda x: x.lower())))
texts[name] = texts[name].replace(PLACEHOLDER, (labels + ' output'))
    fmt = (lambda n, h: '\n'.join((((indent if (i or ('\n' == inters[n])) else '') + l) for (i, l) in enumerate(h.splitlines()))))
    text = '\n'.join(sorted((''.join((LEADING, n, inters[n], fmt(n, h))) for (n, h) in texts.items())))
writearg['help'] += ('\n' + text)<|docstring|>Populates main.ARGUMENTS with added write formats and options.<|endoftext|> |
ba8627d0d3d1cb6aeb5a434d32ff2b256c6eaa99e11ae7efda764d2f20ace147 | def conv3x3(in_planes, out_planes, stride=1):
'3x3 convolution with padding'
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) | 3x3 convolution with padding | lib/models/multi_resnetbNrelu.py | conv3x3 | sumaliqinghua/DSPNet | 5 | python | def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) | def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)<|docstring|>3x3 convolution with padding<|endoftext|> |
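A usage sketch for the conv3x3 helper above (the record assumes `import torch.nn as nn`); the shapes are chosen for illustration:

import torch
import torch.nn as nn   # conv3x3 above relies on nn being imported

conv = conv3x3(in_planes=64, out_planes=128, stride=2)
x = torch.randn(1, 64, 56, 56)
y = conv(x)             # padding=1, so only the stride reduces H and W
print(y.shape)          # torch.Size([1, 128, 28, 28])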
a58b6c585b57dcd42061e5ac841e93cab186e3a524234c526e3a5a2a07de7f0b | def do_search(self, public_object_search_request, **kwargs):
'do_search # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.do_search(public_object_search_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param PublicObjectSearchRequest public_object_search_request: (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: CollectionResponseWithTotalSimplePublicObjectForwardPaging\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.do_search_with_http_info(public_object_search_request, **kwargs) | do_search # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.do_search(public_object_search_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param PublicObjectSearchRequest public_object_search_request: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: CollectionResponseWithTotalSimplePublicObjectForwardPaging
If the method is called asynchronously,
returns the request thread. | hubspot/crm/contacts/api/search_api.py | do_search | Ronfer/hubspot-api-python | 117 | python | def do_search(self, public_object_search_request, **kwargs):
'do_search # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.do_search(public_object_search_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param PublicObjectSearchRequest public_object_search_request: (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: CollectionResponseWithTotalSimplePublicObjectForwardPaging\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.do_search_with_http_info(public_object_search_request, **kwargs) | def do_search(self, public_object_search_request, **kwargs):
'do_search # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.do_search(public_object_search_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param PublicObjectSearchRequest public_object_search_request: (required)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: CollectionResponseWithTotalSimplePublicObjectForwardPaging\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
return self.do_search_with_http_info(public_object_search_request, **kwargs)<|docstring|>do_search # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.do_search(public_object_search_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param PublicObjectSearchRequest public_object_search_request: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: CollectionResponseWithTotalSimplePublicObjectForwardPaging
If the method is called asynchronously,
returns the request thread.<|endoftext|> |
82227d75045e66264b154291591983c943d75fce66862418df99d4bc6bd8ee05 | def do_search_with_http_info(self, public_object_search_request, **kwargs):
'do_search # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.do_search_with_http_info(public_object_search_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param PublicObjectSearchRequest public_object_search_request: (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(CollectionResponseWithTotalSimplePublicObjectForwardPaging, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['public_object_search_request']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method do_search" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('public_object_search_request' not in local_var_params) or (local_var_params['public_object_search_request'] is None))):
raise ApiValueError('Missing the required parameter `public_object_search_request` when calling `do_search`')
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('public_object_search_request' in local_var_params):
body_params = local_var_params['public_object_search_request']
header_params['Accept'] = self.api_client.select_header_accept(['application/json', '*/*'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['hapikey', 'oauth2', 'oauth2_legacy']
return self.api_client.call_api('/crm/v3/objects/contacts/search', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CollectionResponseWithTotalSimplePublicObjectForwardPaging', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) | do_search # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.do_search_with_http_info(public_object_search_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param PublicObjectSearchRequest public_object_search_request: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(CollectionResponseWithTotalSimplePublicObjectForwardPaging, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread. | hubspot/crm/contacts/api/search_api.py | do_search_with_http_info | Ronfer/hubspot-api-python | 117 | python | def do_search_with_http_info(self, public_object_search_request, **kwargs):
'do_search # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.do_search_with_http_info(public_object_search_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param PublicObjectSearchRequest public_object_search_request: (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(CollectionResponseWithTotalSimplePublicObjectForwardPaging, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['public_object_search_request']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method do_search" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('public_object_search_request' not in local_var_params) or (local_var_params['public_object_search_request'] is None))):
raise ApiValueError('Missing the required parameter `public_object_search_request` when calling `do_search`')
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('public_object_search_request' in local_var_params):
body_params = local_var_params['public_object_search_request']
header_params['Accept'] = self.api_client.select_header_accept(['application/json', '*/*'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['hapikey', 'oauth2', 'oauth2_legacy']
return self.api_client.call_api('/crm/v3/objects/contacts/search', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CollectionResponseWithTotalSimplePublicObjectForwardPaging', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) | def do_search_with_http_info(self, public_object_search_request, **kwargs):
'do_search # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.do_search_with_http_info(public_object_search_request, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param PublicObjectSearchRequest public_object_search_request: (required)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(CollectionResponseWithTotalSimplePublicObjectForwardPaging, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['public_object_search_request']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise ApiTypeError(("Got an unexpected keyword argument '%s' to method do_search" % key))
local_var_params[key] = val
del local_var_params['kwargs']
if (self.api_client.client_side_validation and (('public_object_search_request' not in local_var_params) or (local_var_params['public_object_search_request'] is None))):
raise ApiValueError('Missing the required parameter `public_object_search_request` when calling `do_search`')
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('public_object_search_request' in local_var_params):
body_params = local_var_params['public_object_search_request']
header_params['Accept'] = self.api_client.select_header_accept(['application/json', '*/*'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['hapikey', 'oauth2', 'oauth2_legacy']
return self.api_client.call_api('/crm/v3/objects/contacts/search', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CollectionResponseWithTotalSimplePublicObjectForwardPaging', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)<|docstring|>do_search # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.do_search_with_http_info(public_object_search_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param PublicObjectSearchRequest public_object_search_request: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(CollectionResponseWithTotalSimplePublicObjectForwardPaging, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.<|endoftext|> |
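A hedged usage sketch for the generated search endpoint above. The import paths and access-token constructor follow recent hubspot-api-python releases and may differ between versions; the record itself still lists hapikey among its auth settings:

from hubspot import HubSpot
from hubspot.crm.contacts import PublicObjectSearchRequest

client = HubSpot(access_token='YOUR_PRIVATE_APP_TOKEN')   # placeholder token
request = PublicObjectSearchRequest(
    query='example.com',   # free-text search across default searchable properties
    limit=10,
)
page = client.crm.contacts.search_api.do_search(
    public_object_search_request=request)
print(page.total, [r.id for r in page.results])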
9d65325bfa9c7d60f30f588477352ef97dde428bbffa35d2c3c12b9c89a5e1c3 | def erode(images, random_state, parents, hooks):
'Apply erosion to images'
size = 3
kernel = np.ones((size, size), np.uint8)
center = radius = ((size - 1) / 2)
for x in range(size):
for y in range(size):
if ((((x - center) ** 2) + ((y - center) ** 2)) > (radius ** 2)):
kernel[x][y] = 0
img_erosion = []
for image in images:
img_erosion.append(cv2.erode(image, kernel, iterations=1))
return img_erosion | Apply erosion to images | codes/yeastcell.py | erode | DoubleMuL/ML_project_2 | 1 | python | def erode(images, random_state, parents, hooks):
size = 3
kernel = np.ones((size, size), np.uint8)
center = radius = ((size - 1) / 2)
for x in range(size):
for y in range(size):
if ((((x - center) ** 2) + ((y - center) ** 2)) > (radius ** 2)):
kernel[x][y] = 0
img_erosion = []
for image in images:
img_erosion.append(cv2.erode(image, kernel, iterations=1))
return img_erosion | def erode(images, random_state, parents, hooks):
size = 3
kernel = np.ones((size, size), np.uint8)
center = radius = ((size - 1) / 2)
for x in range(size):
for y in range(size):
if ((((x - center) ** 2) + ((y - center) ** 2)) > (radius ** 2)):
kernel[x][y] = 0
img_erosion = []
for image in images:
img_erosion.append(cv2.erode(image, kernel, iterations=1))
return img_erosion<|docstring|>Apply erosion to images<|endoftext|> |
85946071dd29d2a13c0cf9f4482d8e8468242916dad30e57f1252a46fa9299a2 | def dilate(image, size=3):
'Apply dilation to the image'
kernel = np.ones((size, size), np.uint8)
center = radius = ((size - 1) / 2)
for x in range(size):
for y in range(size):
if ((((x - center) ** 2) + ((y - center) ** 2)) > (radius ** 2)):
kernel[x][y] = 0
image = image.astype('int16')
image = cv2.UMat(image)
img_dilation = cv2.dilate(image, kernel, iterations=1)
img_dilation = img_dilation.get()
return img_dilation | Apply dilation to the image | codes/yeastcell.py | dilate | DoubleMuL/ML_project_2 | 1 | python | def dilate(image, size=3):
kernel = np.ones((size, size), np.uint8)
center = radius = ((size - 1) / 2)
for x in range(size):
for y in range(size):
if ((((x - center) ** 2) + ((y - center) ** 2)) > (radius ** 2)):
kernel[x][y] = 0
image = image.astype('int16')
image = cv2.UMat(image)
img_dilation = cv2.dilate(image, kernel, iterations=1)
img_dilation = img_dilation.get()
return img_dilation | def dilate(image, size=3):
kernel = np.ones((size, size), np.uint8)
center = radius = ((size - 1) / 2)
for x in range(size):
for y in range(size):
if ((((x - center) ** 2) + ((y - center) ** 2)) > (radius ** 2)):
kernel[x][y] = 0
image = image.astype('int16')
image = cv2.UMat(image)
img_dilation = cv2.dilate(image, kernel, iterations=1)
img_dilation = img_dilation.get()
return img_dilation<|docstring|>Apply dilation to the image<|endoftext|> |
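The loops in erode() and dilate() above hand-build a disk-shaped kernel; OpenCV ships an equivalent helper, sketched here on synthetic data:

import cv2
import numpy as np

size = 3
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size, size))
img = np.zeros((32, 32), np.uint8)
img[8:24, 8:24] = 255                              # white square on black
eroded = cv2.erode(img, kernel, iterations=1)      # shrinks the square
dilated = cv2.dilate(img, kernel, iterations=1)    # grows it back out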
6be6837121141f3e0e5a47f1dd79391491db3310892a10849c6df349047c6104 | def load_cells(self, dataset_dir):
        'Register the yeast cell images from the dataset directory.\n        dataset_dir: directory containing the image stacks (.tif files).\n        '
self.add_class('YeastCell', 1, 'cell')
frames_ids = next(os.walk(dataset_dir))[2]
for frames_id in frames_ids:
frames_id = '.'.join(frames_id.split('.')[:(- 1)])
framesize = skimage.io.imread(os.path.join(dataset_dir, '{}.tif'.format(frames_id))).shape[0]
if (frames_id == 'michael_1.2_im'):
framesize -= 1
if (frames_id == 'michael_1.2.2_im'):
framesize -= 2
for i in range(framesize):
                self.add_image('YeastCell', image_id=((frames_id + '*') + str(i)), path=os.path.join(dataset_dir, '{}.tif'.format(frames_id))) | Register the yeast cell images from the dataset directory.
dataset_dir: directory containing the image stacks (.tif files). | codes/yeastcell.py | load_cells | DoubleMuL/ML_project_2 | 1 | python | def load_cells(self, dataset_dir):
        'Register the yeast cell images from the dataset directory.\n        dataset_dir: directory containing the image stacks (.tif files).\n        '
self.add_class('YeastCell', 1, 'cell')
frames_ids = next(os.walk(dataset_dir))[2]
for frames_id in frames_ids:
frames_id = '.'.join(frames_id.split('.')[:(- 1)])
framesize = skimage.io.imread(os.path.join(dataset_dir, '{}.tif'.format(frames_id))).shape[0]
if (frames_id == 'michael_1.2_im'):
framesize -= 1
if (frames_id == 'michael_1.2.2_im'):
framesize -= 2
for i in range(framesize):
self.add_image('YeastCell', image_id=((frames_id + '*') + str(i)), path=os.path.join(dataset_dir, '{}.tif'.format(frames_id))) | def load_cells(self, dataset_dir):
        'Register the yeast cell images from the dataset directory.\n        dataset_dir: directory containing the image stacks (.tif files).\n        '
self.add_class('YeastCell', 1, 'cell')
frames_ids = next(os.walk(dataset_dir))[2]
for frames_id in frames_ids:
frames_id = '.'.join(frames_id.split('.')[:(- 1)])
framesize = skimage.io.imread(os.path.join(dataset_dir, '{}.tif'.format(frames_id))).shape[0]
if (frames_id == 'michael_1.2_im'):
framesize -= 1
if (frames_id == 'michael_1.2.2_im'):
framesize -= 2
for i in range(framesize):
                self.add_image('YeastCell', image_id=((frames_id + '*') + str(i)), path=os.path.join(dataset_dir, '{}.tif'.format(frames_id)))<|docstring|>Register the yeast cell images from the dataset directory.
dataset_dir: directory containing the image stacks (.tif files).<|endoftext|>
49d4797913aebd580a35f5cb74f9721b06e91f6a60da6fcd6ea2fb37aaf6a9f0 | def load_image(self, image_id):
'Load the specified image and return a [H,W,3] Numpy array.\n '
image_name = self.image_info[image_id]['id']
print(image_name)
frame_number = self.image_info[image_id]['id'].split('*')[1]
image = skimage.io.imread(self.image_info[image_id]['path'])[int(frame_number)]
image = image.astype('int16')
image = ((image / image.max()) * 255)
if (image.ndim != 3):
image = skimage.color.gray2rgb(image)
if (image.shape[(- 1)] == 4):
image = image[(..., :3)]
return image | Load the specified image and return a [H,W,3] Numpy array. | codes/yeastcell.py | load_image | DoubleMuL/ML_project_2 | 1 | python | def load_image(self, image_id):
image_name = self.image_info[image_id]['id']
print(image_name)
frame_number = self.image_info[image_id]['id'].split('*')[1]
image = skimage.io.imread(self.image_info[image_id]['path'])[int(frame_number)]
image = image.astype('int16')
image = ((image / image.max()) * 255)
if (image.ndim != 3):
image = skimage.color.gray2rgb(image)
if (image.shape[(- 1)] == 4):
image = image[(..., :3)]
return image | def load_image(self, image_id):
image_name = self.image_info[image_id]['id']
print(image_name)
frame_number = self.image_info[image_id]['id'].split('*')[1]
image = skimage.io.imread(self.image_info[image_id]['path'])[int(frame_number)]
image = image.astype('int16')
image = ((image / image.max()) * 255)
if (image.ndim != 3):
image = skimage.color.gray2rgb(image)
if (image.shape[(- 1)] == 4):
image = image[(..., :3)]
return image<|docstring|>Load the specified image and return a [H,W,3] Numpy array.<|endoftext|> |
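The scaling in load_image() maps each frame to [0, 255] by its own maximum before converting to RGB. The same normalization on a synthetic frame (it assumes the frame maximum is nonzero):

import numpy as np
import skimage.color

frame = np.random.randint(0, 4096, (64, 64)).astype('int16')  # synthetic frame
frame = ((frame / frame.max()) * 255)       # per-frame contrast normalization
rgb = skimage.color.gray2rgb(frame)         # (64, 64) -> (64, 64, 3)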
850e1f643d2fd003c5422d802501f388c93371b0df591c8928069afed569f83f | def load_mask(self, image_id):
'Generate instance masks for an image.\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n '
info = self.image_info[image_id]
mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), 'mask')
mask = []
name = info['id'].split('*')[0]
name = name.replace('frames', 'mask')
name = name.replace('im', 'mask')
frame_number = info['id'].split('*')[1]
m = skimage.io.imread(os.path.join(mask_dir, '{}.tif'.format(name)))[int(frame_number)]
m = m.astype('int16')
instance_number = m.max()
if (instance_number == 0):
print((((('Frame ' + name) + ' ') + str(frame_number)) + ' have no cell!'))
for i in range(1, (instance_number + 1)):
m_instance = np.where((m != i), 0, m)
m_instance = m_instance.astype(np.bool)
mask.append(m_instance)
mask = np.stack(mask, axis=(- 1))
return (mask, np.ones([mask.shape[(- 1)]], dtype=np.int32)) | Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks. | codes/yeastcell.py | load_mask | DoubleMuL/ML_project_2 | 1 | python | def load_mask(self, image_id):
'Generate instance masks for an image.\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n '
info = self.image_info[image_id]
mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), 'mask')
mask = []
name = info['id'].split('*')[0]
name = name.replace('frames', 'mask')
name = name.replace('im', 'mask')
frame_number = info['id'].split('*')[1]
m = skimage.io.imread(os.path.join(mask_dir, '{}.tif'.format(name)))[int(frame_number)]
m = m.astype('int16')
instance_number = m.max()
if (instance_number == 0):
print((((('Frame ' + name) + ' ') + str(frame_number)) + ' have no cell!'))
for i in range(1, (instance_number + 1)):
m_instance = np.where((m != i), 0, m)
m_instance = m_instance.astype(np.bool)
mask.append(m_instance)
mask = np.stack(mask, axis=(- 1))
return (mask, np.ones([mask.shape[(- 1)]], dtype=np.int32)) | def load_mask(self, image_id):
'Generate instance masks for an image.\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n '
info = self.image_info[image_id]
mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), 'mask')
mask = []
name = info['id'].split('*')[0]
name = name.replace('frames', 'mask')
name = name.replace('im', 'mask')
frame_number = info['id'].split('*')[1]
m = skimage.io.imread(os.path.join(mask_dir, '{}.tif'.format(name)))[int(frame_number)]
m = m.astype('int16')
instance_number = m.max()
if (instance_number == 0):
print((((('Frame ' + name) + ' ') + str(frame_number)) + ' have no cell!'))
for i in range(1, (instance_number + 1)):
m_instance = np.where((m != i), 0, m)
m_instance = m_instance.astype(np.bool)
mask.append(m_instance)
mask = np.stack(mask, axis=(- 1))
return (mask, np.ones([mask.shape[(- 1)]], dtype=np.int32))<|docstring|>Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.<|endoftext|> |
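A vectorized alternative to the per-instance loop in load_mask() above, assuming labels run 1..N with 0 as background; plain bool is used since the np.bool alias was removed from modern NumPy:

import numpy as np

def labels_to_masks(m):
    # Convert an (H, W) integer label image into (H, W, N) boolean masks.
    ids = np.arange(1, int(m.max()) + 1)
    return (m[..., None] == ids)            # broadcasting builds all planes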
2b5c1bb727e588d7232cd3c62d7dd1d3fc8494cac00502f4c51be99fefcd87e2 | def image_reference(self, image_id):
        'Return the id of the image (deferring to the parent class for other sources).'
info = self.image_info[image_id]
if (info['source'] == 'YeastCell'):
return info['id']
else:
            return super(self.__class__, self).image_reference(image_id) | Return the id of the image (deferring to the parent class for other sources). | codes/yeastcell.py | image_reference | DoubleMuL/ML_project_2 | 1 | python | def image_reference(self, image_id):
info = self.image_info[image_id]
if (info['source'] == 'YeastCell'):
return info['id']
else:
            return super(self.__class__, self).image_reference(image_id) | def image_reference(self, image_id):
info = self.image_info[image_id]
if (info['source'] == 'YeastCell'):
return info['id']
else:
            return super(self.__class__, self).image_reference(image_id)<|docstring|>Return the id of the image (deferring to the parent class for other sources).<|endoftext|>
1abdf0b94b520fa23c313e3c028c5df565188c28940031213d7133089d7812d9 | def start(self):
'Start the plugin life-cycle.\n\n '
super(TextMonitorPlugin, self).start()
self._rule_types = DeclaratorsCollector(workbench=self.workbench, point=RULE_TYPE_POINT, ext_class=(Rules, RuleType))
self._rule_types.start()
validator = make_extension_validator(RuleConfig, attributes=('id', 'description', 'rule_type', 'config'))
self._rule_configs = ExtensionsCollector(workbench=self.workbench, point=RULE_CONFIG_POINT, ext_class=RuleConfig, validate_ext=validator)
self._rule_configs.start()
self._update_rule_types(None)
self._update_rules(None)
defaults = [r for r in self.default_rules if (r in self.rules)]
if (defaults != self.default_rules):
msg = 'The following rules for the TextMonitor are not defined, and have been removed from the defaults : %s'
removed = (set(self.default_rules) - set(defaults))
logger.warning(msg, removed)
self.default_rules = defaults
self._bind_observers() | Start the plugin life-cycle. | exopy/measurement/monitors/text_monitor/plugin.py | start | jerjohste/exopy | 16 | python | def start(self):
super(TextMonitorPlugin, self).start()
self._rule_types = DeclaratorsCollector(workbench=self.workbench, point=RULE_TYPE_POINT, ext_class=(Rules, RuleType))
self._rule_types.start()
validator = make_extension_validator(RuleConfig, attributes=('id', 'description', 'rule_type', 'config'))
self._rule_configs = ExtensionsCollector(workbench=self.workbench, point=RULE_CONFIG_POINT, ext_class=RuleConfig, validate_ext=validator)
self._rule_configs.start()
self._update_rule_types(None)
self._update_rules(None)
defaults = [r for r in self.default_rules if (r in self.rules)]
if (defaults != self.default_rules):
msg = 'The following rules for the TextMonitor are not defined, and have been removed from the defaults : %s'
removed = (set(self.default_rules) - set(defaults))
logger.warning(msg, removed)
self.default_rules = defaults
self._bind_observers() | def start(self):
super(TextMonitorPlugin, self).start()
self._rule_types = DeclaratorsCollector(workbench=self.workbench, point=RULE_TYPE_POINT, ext_class=(Rules, RuleType))
self._rule_types.start()
validator = make_extension_validator(RuleConfig, attributes=('id', 'description', 'rule_type', 'config'))
self._rule_configs = ExtensionsCollector(workbench=self.workbench, point=RULE_CONFIG_POINT, ext_class=RuleConfig, validate_ext=validator)
self._rule_configs.start()
self._update_rule_types(None)
self._update_rules(None)
defaults = [r for r in self.default_rules if (r in self.rules)]
if (defaults != self.default_rules):
msg = 'The following rules for the TextMonitor are not defined, and have been removed from the defaults : %s'
removed = (set(self.default_rules) - set(defaults))
logger.warning(msg, removed)
self.default_rules = defaults
self._bind_observers()<|docstring|>Start the plugin life-cycle.<|endoftext|> |
303c430b4df22fa656bc64a0d8430ef236c25eb2fe7251d569c1339c302c36d7 | def stop(self):
        'Stop the plugin and clear all resources.\n\n        '
self._unbind_observers()
self.rule_types = []
self.rules = []
self._rule_types.stop()
        self._rule_configs.stop() | Stop the plugin and clear all resources. | exopy/measurement/monitors/text_monitor/plugin.py | stop | jerjohste/exopy | 16 | python | def stop(self):
self._unbind_observers()
self.rule_types = []
self.rules = []
self._rule_types.stop()
self._rule_configs.stop() | def stop(self):
self._unbind_observers()
self.rule_types = []
self.rules = []
self._rule_types.stop()
        self._rule_configs.stop()<|docstring|>Stop the plugin and clear all resources.<|endoftext|>
be0d4348e56905f89498e4e32f502fc07db0ef9317299950fef620b5ede905ac | def build_rule(self, name_or_config):
' Build rule from a dict.\n\n Parameters\n ----------\n name_or_config : unicode|dict\n Name of the rule to build or dict containing the infos to build the\n rule from scratch.\n\n Returns\n -------\n rule : BaseRule | None\n New rule properly initialized.\n\n '
if (not isinstance(name_or_config, dict)):
if (name_or_config in self._user_rules):
config = self._user_rules[name_or_config].copy()
config['id'] = name_or_config
elif (name_or_config in self._rule_configs.contributions):
rule_config = self._rule_configs.contributions[name_or_config]
config = rule_config.config.copy()
config['class_id'] = rule_config.rule_type
config['description'] = rule_config.description
config['id'] = name_or_config
else:
msg = 'Requested rule not found : {}'.format(name_or_config)
logger.warning(msg)
return
else:
config = name_or_config.copy()
class_id = config.pop('class_id')
rule_infos = self._rule_types.contributions.get(class_id)
if (rule_infos is not None):
rule = rule_infos.cls()
rule.update_members_from_preferences(config)
return rule
else:
msg = 'Requested rule class not found : {}'.format(class_id)
logger.warning(msg) | Build rule from a dict.
Parameters
----------
name_or_config : unicode|dict
Name of the rule to build or dict containing the infos to build the
rule from scratch.
Returns
-------
rule : BaseRule | None
New rule properly initialized. | exopy/measurement/monitors/text_monitor/plugin.py | build_rule | jerjohste/exopy | 16 | python | def build_rule(self, name_or_config):
' Build rule from a dict.\n\n Parameters\n ----------\n name_or_config : unicode|dict\n Name of the rule to build or dict containing the infos to build the\n rule from scratch.\n\n Returns\n -------\n rule : BaseRule | None\n New rule properly initialized.\n\n '
if (not isinstance(name_or_config, dict)):
if (name_or_config in self._user_rules):
config = self._user_rules[name_or_config].copy()
config['id'] = name_or_config
elif (name_or_config in self._rule_configs.contributions):
rule_config = self._rule_configs.contributions[name_or_config]
config = rule_config.config.copy()
config['class_id'] = rule_config.rule_type
config['description'] = rule_config.description
config['id'] = name_or_config
else:
msg = 'Requested rule not found : {}'.format(name_or_config)
logger.warning(msg)
return
else:
config = name_or_config.copy()
class_id = config.pop('class_id')
rule_infos = self._rule_types.contributions.get(class_id)
if (rule_infos is not None):
rule = rule_infos.cls()
rule.update_members_from_preferences(config)
return rule
else:
msg = 'Requested rule class not found : {}'.format(class_id)
logger.warning(msg) | def build_rule(self, name_or_config):
' Build rule from a dict.\n\n Parameters\n ----------\n name_or_config : unicode|dict\n Name of the rule to build or dict containing the infos to build the\n rule from scratch.\n\n Returns\n -------\n rule : BaseRule | None\n New rule properly initialized.\n\n '
if (not isinstance(name_or_config, dict)):
if (name_or_config in self._user_rules):
config = self._user_rules[name_or_config].copy()
config['id'] = name_or_config
elif (name_or_config in self._rule_configs.contributions):
rule_config = self._rule_configs.contributions[name_or_config]
config = rule_config.config.copy()
config['class_id'] = rule_config.rule_type
config['description'] = rule_config.description
config['id'] = name_or_config
else:
msg = 'Requested rule not found : {}'.format(name_or_config)
logger.warning(msg)
return
else:
config = name_or_config.copy()
class_id = config.pop('class_id')
rule_infos = self._rule_types.contributions.get(class_id)
if (rule_infos is not None):
rule = rule_infos.cls()
rule.update_members_from_preferences(config)
return rule
else:
msg = 'Requested rule class not found : {}'.format(class_id)
logger.warning(msg)<|docstring|>Build rule from a dict.
Parameters
----------
name_or_config : unicode|dict
Name of the rule to build or dict containing the infos to build the
rule from scratch.
Returns
-------
rule : BaseRule | None
New rule properly initialized.<|endoftext|> |
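A hedged usage sketch for build_rule(); plugin stands for a started TextMonitorPlugin instance, and the class_id value is a placeholder that must match a registered rule type id:

# "plugin" is assumed to be a started TextMonitorPlugin instance.
config = {
    'class_id': 'exopy.FormatRule',   # placeholder; must be a registered type
    'id': 'my_rule',
    'description': 'Illustrative rule built from a config dict',
}
rule = plugin.build_rule(config)      # None (plus a log entry) if unknown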
294947688eba1cbd351d44ccdcde78fd3db1460590a373357d814d4a84f8de11 | def get_rule_type(self, rule_type_id):
'Access the class corresponding to a given id.\n\n '
return self._rule_types.contributions[rule_type_id].cls | Access the class corresponding to a given id. | exopy/measurement/monitors/text_monitor/plugin.py | get_rule_type | jerjohste/exopy | 16 | python | def get_rule_type(self, rule_type_id):
return self._rule_types.contributions[rule_type_id].cls | def get_rule_type(self, rule_type_id):
return self._rule_types.contributions[rule_type_id].cls<|docstring|>Access the class corresponding to a given id.<|endoftext|> |
45fdcbb72de09d8688abeda7222034ae3457f9821fe9eab08be88a6c2d9021a4 | def get_rule_view(self, rule):
'Create a view corresponding to the given object.\n\n        '
infos = self._rule_types.contributions[rule.class_id]
is_user = (rule.id not in self._rule_configs.contributions)
return infos.view(rule=rule, plugin=self, enabled=is_user) | Create a view corresponding to the given object. | exopy/measurement/monitors/text_monitor/plugin.py | get_rule_view | jerjohste/exopy | 16 | python | def get_rule_view(self, rule):
'\n\n '
infos = self._rule_types.contributions[rule.class_id]
is_user = (rule.id not in self._rule_configs.contributions)
return infos.view(rule=rule, plugin=self, enabled=is_user) | def get_rule_view(self, rule):
'\n\n '
infos = self._rule_types.contributions[rule.class_id]
is_user = (rule.id not in self._rule_configs.contributions)
return infos.view(rule=rule, plugin=self, enabled=is_user)<|docstring|>Create a view corresponding to the given object.<|endoftext|>
3efee8d49f2b7a104251cca4ada333cb0e63da63bdae18129549350f4a170bea | def save_rule(self, rule):
'Add a rule present on a plugin to the saved rules.\n\n '
self._user_rules[rule.id] = rule.preferences_from_members()
self._update_rules(None) | Add a rule present on a plugin to the saved rules. | exopy/measurement/monitors/text_monitor/plugin.py | save_rule | jerjohste/exopy | 16 | python | def save_rule(self, rule):
'\n\n '
self._user_rules[rule.id] = rule.preferences_from_members()
self._update_rules(None) | def save_rule(self, rule):
'\n\n '
self._user_rules[rule.id] = rule.preferences_from_members()
self._update_rules(None)<|docstring|>Add a rule present on a plugin to the saved rules.<|endoftext|> |
8de5f348cc88415a5ce4223d0243027a830a0421f250b8f8f6254a1c93a6dde9 | def create_monitor(self, default=False):
' Create a new monitor.\n\n        Parameters\n        ----------\n        default : bool, optional\n            Whether or not to add the default rules to the new monitor.\n\n        Returns\n        -------\n        monitor : TextMonitor\n            New text monitor.\n\n        '
exts = [e for e in self.manifest.extensions if (e.id == 'monitors')]
decl = exts[0].get_child(Monitor)
monitor = TextMonitor(_plugin=self, declaration=decl)
if default:
rules = []
for rule_name in self.default_rules:
rule = self.build_rule(rule_name)
if rule:
rules.append(rule)
monitor.rules = rules
return monitor | Create a new monitor.
Parameters
----------
default : bool, optional
Whether or not to add the default rules to the new monitor.
Returns
-------
monitor : TextMonitor
New text monitor. | exopy/measurement/monitors/text_monitor/plugin.py | create_monitor | jerjohste/exopy | 16 | python | def create_monitor(self, default=False):
' Create a new monitor.\n\n        Parameters\n        ----------\n        default : bool, optional\n            Whether or not to add the default rules to the new monitor.\n\n        Returns\n        -------\n        monitor : TextMonitor\n            New text monitor.\n\n        '
exts = [e for e in self.manifest.extensions if (e.id == 'monitors')]
decl = exts[0].get_child(Monitor)
monitor = TextMonitor(_plugin=self, declaration=decl)
if default:
rules = []
for rule_name in self.default_rules:
rule = self.build_rule(rule_name)
if rule:
rules.append(rule)
monitor.rules = rules
return monitor | def create_monitor(self, default=False):
' Create a new monitor.\n\n        Parameters\n        ----------\n        default : bool, optional\n            Whether or not to add the default rules to the new monitor.\n\n        Returns\n        -------\n        monitor : TextMonitor\n            New text monitor.\n\n        '
exts = [e for e in self.manifest.extensions if (e.id == 'monitors')]
decl = exts[0].get_child(Monitor)
monitor = TextMonitor(_plugin=self, declaration=decl)
if default:
rules = []
for rule_name in self.default_rules:
rule = self.build_rule(rule_name)
if rule:
rules.append(rule)
monitor.rules = rules
return monitor<|docstring|>Create a new monitor.
Parameters
----------
default : bool, optional
Whether or not to add the default rules to the new monitor.
Returns
-------
monitor : TextMonitor
New text monitor.<|endoftext|> |
cb840b42ac36a6821ac17eac95f861816778773af26546d4482668ab9291d7c0 | def _update_rule_types(self, change):
'Update the public rule type class ids when new ones get registered.\n\n        '
self.rule_types = list(self._rule_types.contributions) | Update the public rule type class ids when new ones get registered. | exopy/measurement/monitors/text_monitor/plugin.py | _update_rule_types | jerjohste/exopy | 16 | python | def _update_rule_types(self, change):
'\n\n '
self.rule_types = list(self._rule_types.contributions) | def _update_rule_types(self, change):
'\n\n '
self.rule_types = list(self._rule_types.contributions)<|docstring|>Update the public rule type class ids when new ones get registered.<|endoftext|>
fb2866b650ef6b4c3b04a0a659385f3aebc373ef56c217e48d48b28ad6081788 | def _update_rules(self, change):
'Update the rule names whenever a new contributed rule or a new user\n rule is added.\n\n '
contrib = set(self._rule_configs.contributions)
users = set(self._user_rules)
self.rules = list((contrib | users)) | Update the rule names whenever a new contributed rule or a new user
rule is added. | exopy/measurement/monitors/text_monitor/plugin.py | _update_rules | jerjohste/exopy | 16 | python | def _update_rules(self, change):
'Update the rule names whenever a new contributed rule or a new user\n rule is added.\n\n '
contrib = set(self._rule_configs.contributions)
users = set(self._user_rules)
self.rules = list((contrib | users)) | def _update_rules(self, change):
'Update the rule names whenever a new contributed rule or a new user\n rule is added.\n\n '
contrib = set(self._rule_configs.contributions)
users = set(self._user_rules)
self.rules = list((contrib | users))<|docstring|>Update the rule names whenever a new contributed rule or a new user
rule is added.<|endoftext|> |
895ccd550c49d53bd509605c1db5d18b86d33613a33dafa1d0fec6885adc14a2 | def _bind_observers(self):
'Observe the collectors to update public attributes.\n\n '
self._rule_configs.observe('contributions', self._update_rules)
self._rule_types.observe('contributions', self._update_rule_types) | Observe the collectors to update public attributes. | exopy/measurement/monitors/text_monitor/plugin.py | _bind_observers | jerjohste/exopy | 16 | python | def _bind_observers(self):
'\n\n '
self._rule_configs.observe('contributions', self._update_rules)
self._rule_types.observe('contributions', self._update_rule_types) | def _bind_observers(self):
'\n\n '
self._rule_configs.observe('contributions', self._update_rules)
self._rule_types.observe('contributions', self._update_rule_types)<|docstring|>Observe the collectors to update public attributes.<|endoftext|> |
1038a46941fe4751e2eca0c415eb5d40478b07c74601bc1e22800c39c70ecb88 | def _unbind_observers(self):
'Unobserve the collectors.\n\n '
self._rule_configs.unobserve('contributions', self._update_rules)
self._rule_types.unobserve('contributions', self._update_rule_types) | Unobserve the collectors. | exopy/measurement/monitors/text_monitor/plugin.py | _unbind_observers | jerjohste/exopy | 16 | python | def _unbind_observers(self):
'\n\n '
self._rule_configs.unobserve('contributions', self._update_rules)
self._rule_types.unobserve('contributions', self._update_rule_types) | def _unbind_observers(self):
'\n\n '
self._rule_configs.unobserve('contributions', self._update_rules)
self._rule_types.unobserve('contributions', self._update_rule_types)<|docstring|>Unobserve the collectors.<|endoftext|> |
94b1724863f1b24944e295a319701dc3922d7105b43d84731a74cbe2391ba70f | def create(data_connection_name, database_name, kusto_pool_name, resource_group, workspace_name, consumer_group=None, data_format=None, event_system_properties=None, iot_hub_resource_id=None, location=None, mapping_rule_name=None, no_wait=None, shared_access_policy_name=None, table_name=None):
'\n    Create a data connection.\n\n    Required Parameters:\n    - data_connection_name -- The name of the data connection.\n    - database_name -- The name of the database in the Kusto pool.\n    - kusto_pool_name -- The name of the Kusto pool.\n    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`\n    - workspace_name -- The name of the workspace\n\n    Optional Parameters:\n    - consumer_group -- The IoT hub consumer group.\n    - data_format -- The data format of the message. Optionally the data format can be added to each message.\n    - event_system_properties -- System properties of the IoT hub.\n    - iot_hub_resource_id -- The resource ID of the IoT hub to be used to create a data connection.\n    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.\n    - mapping_rule_name -- The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.\n    - no_wait -- Do not wait for the long-running operation to finish.\n    - shared_access_policy_name -- The name of the shared access policy.\n    - table_name -- The table where the data should be ingested. Optionally the table information can be added to each message.\n    '
return _call_az('az synapse kusto data-connection iot-hub create', locals()) | Create a data connection.
Required Parameters:
- data_connection_name -- The name of the data connection.
- database_name -- The name of the database in the Kusto pool.
- kusto_pool_name -- The name of the Kusto pool.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- workspace_name -- The name of the workspace
Optional Parameters:
- consumer_group -- The IoT hub consumer group.
- data_format -- The data format of the message. Optionally the data format can be added to each message.
- event_system_properties -- System properties of the IoT hub.
- iot_hub_resource_id -- The resource ID of the IoT hub to be used to create a data connection.
- location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
- mapping_rule_name -- The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.
- no_wait -- Do not wait for the long-running operation to finish.
- shared_access_policy_name -- The name of the shared access policy.
- table_name -- The table where the data should be ingested. Optionally the table information can be added to each message. | pyaz/synapse/kusto/data_connection/iot_hub/__init__.py | create | py-az-cli/py-az-cli | 0 | python | def create(data_connection_name, database_name, kusto_pool_name, resource_group, workspace_name, consumer_group=None, data_format=None, event_system_properties=None, iot_hub_resource_id=None, location=None, mapping_rule_name=None, no_wait=None, shared_access_policy_name=None, table_name=None):
'\n    Create a data connection.\n\n    Required Parameters:\n    - data_connection_name -- The name of the data connection.\n    - database_name -- The name of the database in the Kusto pool.\n    - kusto_pool_name -- The name of the Kusto pool.\n    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`\n    - workspace_name -- The name of the workspace\n\n    Optional Parameters:\n    - consumer_group -- The IoT hub consumer group.\n    - data_format -- The data format of the message. Optionally the data format can be added to each message.\n    - event_system_properties -- System properties of the IoT hub.\n    - iot_hub_resource_id -- The resource ID of the IoT hub to be used to create a data connection.\n    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.\n    - mapping_rule_name -- The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.\n    - no_wait -- Do not wait for the long-running operation to finish.\n    - shared_access_policy_name -- The name of the shared access policy.\n    - table_name -- The table where the data should be ingested. Optionally the table information can be added to each message.\n    '
return _call_az('az synapse kusto data-connection iot-hub create', locals()) | def create(data_connection_name, database_name, kusto_pool_name, resource_group, workspace_name, consumer_group=None, data_format=None, event_system_properties=None, iot_hub_resource_id=None, location=None, mapping_rule_name=None, no_wait=None, shared_access_policy_name=None, table_name=None):
'\n    Create a data connection.\n\n    Required Parameters:\n    - data_connection_name -- The name of the data connection.\n    - database_name -- The name of the database in the Kusto pool.\n    - kusto_pool_name -- The name of the Kusto pool.\n    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`\n    - workspace_name -- The name of the workspace\n\n    Optional Parameters:\n    - consumer_group -- The IoT hub consumer group.\n    - data_format -- The data format of the message. Optionally the data format can be added to each message.\n    - event_system_properties -- System properties of the IoT hub.\n    - iot_hub_resource_id -- The resource ID of the IoT hub to be used to create a data connection.\n    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.\n    - mapping_rule_name -- The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.\n    - no_wait -- Do not wait for the long-running operation to finish.\n    - shared_access_policy_name -- The name of the shared access policy.\n    - table_name -- The table where the data should be ingested. Optionally the table information can be added to each message.\n    '
return _call_az('az synapse kusto data-connection iot-hub create', locals())<|docstring|>Create a data connection.
Required Parameters:
- data_connection_name -- The name of the data connection.
- database_name -- The name of the database in the Kusto pool.
- kusto_pool_name -- The name of the Kusto pool.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- workspace_name -- The name of the workspace
Optional Parameters:
- consumer_group -- The IoT hub consumer group.
- data_format -- The data format of the message. Optionally the data format can be added to each message.
- event_system_properties -- System properties of the IoT hub.
- iot_hub_resource_id -- The resource ID of the IoT hub to be used to create a data connection.
- location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
- mapping_rule_name -- The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.
- no_wait -- Do not wait for the long-running operation to finish.
- shared_access_policy_name -- The name of the shared access policy.
- table_name -- The table where the data should be ingested. Optionally the table information can be added to each message.<|endoftext|> |
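Because the wrapper simply forwards `locals()` to the az CLI, calling it is a plain keyword invocation. A sketch with placeholder resource names follows; the elided subscription id is deliberately left as '...'.

# Placeholder arguments; runs `az synapse kusto data-connection iot-hub create`.
result = create(
    data_connection_name='my-connection',
    database_name='my-database',
    kusto_pool_name='my-pool',
    resource_group='my-rg',
    workspace_name='my-workspace',
    iot_hub_resource_id='/subscriptions/.../resourceGroups/my-rg/providers/Microsoft.Devices/IotHubs/my-hub',
    consumer_group='$Default',
    shared_access_policy_name='iothubowner',   # placeholder policy name
    data_format='JSON',
    table_name='Events')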
84daa997d3213bd3cce2d730a06a71181958a85c197dbd27520e9df04b081bf8 | def update(data_connection_name, database_name, kusto_pool_name, resource_group, workspace_name, consumer_group=None, data_format=None, event_system_properties=None, iot_hub_resource_id=None, location=None, mapping_rule_name=None, no_wait=None, shared_access_policy_name=None, table_name=None):
'\n    Updates a data connection.\n\n    Required Parameters:\n    - data_connection_name -- The name of the data connection.\n    - database_name -- The name of the database in the Kusto pool.\n    - kusto_pool_name -- The name of the Kusto pool.\n    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`\n    - workspace_name -- The name of the workspace\n\n    Optional Parameters:\n    - consumer_group -- The IoT hub consumer group.\n    - data_format -- The data format of the message. Optionally the data format can be added to each message.\n    - event_system_properties -- System properties of the IoT hub.\n    - iot_hub_resource_id -- The resource ID of the IoT hub to be used to create a data connection.\n    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.\n    - mapping_rule_name -- The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.\n    - no_wait -- Do not wait for the long-running operation to finish.\n    - shared_access_policy_name -- The name of the shared access policy.\n    - table_name -- The table where the data should be ingested. Optionally the table information can be added to each message.\n    '
return _call_az('az synapse kusto data-connection iot-hub update', locals()) | Updates a data connection.
Required Parameters:
- data_connection_name -- The name of the data connection.
- database_name -- The name of the database in the Kusto pool.
- kusto_pool_name -- The name of the Kusto pool.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- workspace_name -- The name of the workspace
Optional Parameters:
- consumer_group -- The IoT hub consumer group.
- data_format -- The data format of the message. Optionally the data format can be added to each message.
- event_system_properties -- System properties of the IoT hub.
- iot_hub_resource_id -- The resource ID of the IoT hub to be used to create a data connection.
- location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
- mapping_rule_name -- The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.
- no_wait -- Do not wait for the long-running operation to finish.
- shared_access_policy_name -- The name of the shared access policy.
- table_name -- The table where the data should be ingested. Optionally the table information can be added to each message. | pyaz/synapse/kusto/data_connection/iot_hub/__init__.py | update | py-az-cli/py-az-cli | 0 | python | def update(data_connection_name, database_name, kusto_pool_name, resource_group, workspace_name, consumer_group=None, data_format=None, event_system_properties=None, iot_hub_resource_id=None, location=None, mapping_rule_name=None, no_wait=None, shared_access_policy_name=None, table_name=None):
'\n    Updates a data connection.\n\n    Required Parameters:\n    - data_connection_name -- The name of the data connection.\n    - database_name -- The name of the database in the Kusto pool.\n    - kusto_pool_name -- The name of the Kusto pool.\n    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`\n    - workspace_name -- The name of the workspace\n\n    Optional Parameters:\n    - consumer_group -- The IoT hub consumer group.\n    - data_format -- The data format of the message. Optionally the data format can be added to each message.\n    - event_system_properties -- System properties of the IoT hub.\n    - iot_hub_resource_id -- The resource ID of the IoT hub to be used to create a data connection.\n    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.\n    - mapping_rule_name -- The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.\n    - no_wait -- Do not wait for the long-running operation to finish.\n    - shared_access_policy_name -- The name of the shared access policy.\n    - table_name -- The table where the data should be ingested. Optionally the table information can be added to each message.\n    '
return _call_az('az synapse kusto data-connection iot-hub update', locals()) | def update(data_connection_name, database_name, kusto_pool_name, resource_group, workspace_name, consumer_group=None, data_format=None, event_system_properties=None, iot_hub_resource_id=None, location=None, mapping_rule_name=None, no_wait=None, shared_access_policy_name=None, table_name=None):
'\n    Updates a data connection.\n\n    Required Parameters:\n    - data_connection_name -- The name of the data connection.\n    - database_name -- The name of the database in the Kusto pool.\n    - kusto_pool_name -- The name of the Kusto pool.\n    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`\n    - workspace_name -- The name of the workspace\n\n    Optional Parameters:\n    - consumer_group -- The IoT hub consumer group.\n    - data_format -- The data format of the message. Optionally the data format can be added to each message.\n    - event_system_properties -- System properties of the IoT hub.\n    - iot_hub_resource_id -- The resource ID of the IoT hub to be used to create a data connection.\n    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.\n    - mapping_rule_name -- The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.\n    - no_wait -- Do not wait for the long-running operation to finish.\n    - shared_access_policy_name -- The name of the shared access policy.\n    - table_name -- The table where the data should be ingested. Optionally the table information can be added to each message.\n    '
return _call_az('az synapse kusto data-connection iot-hub update', locals())<|docstring|>Updates a data connection.
Required Parameters:
- data_connection_name -- The name of the data connection.
- database_name -- The name of the database in the Kusto pool.
- kusto_pool_name -- The name of the Kusto pool.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- workspace_name -- The name of the workspace
Optional Parameters:
- consumer_group -- The IoT hub consumer group.
- data_format -- The data format of the message. Optionally the data format can be added to each message.
- event_system_properties -- System properties of the IoT hub.
- iot_hub_resource_id -- The resource ID of the IoT hub to be used to create a data connection.
- location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
- mapping_rule_name -- The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.
- no_wait -- Do not wait for the long-running operation to finish.
- shared_access_policy_name -- The name of the shared access policy.
- table_name -- The table where the data should be ingested. Optionally the table information can be added to each message.<|endoftext|> |
9b686c90139cd72783e4a3873faf5c596c1ade556193db9340ed9b7899484ec8 | def create(configuration, key=None):
'Create an instance (called by cimon.py)'
return HueOutput(ipaddress=configuration.get('ipaddress', None), lamps=configuration.get('lamps', []), unused=configuration.get('unused', []), mappings=configuration.get('mappings', []), transitiontimeMillis=configuration.get('transitiontimeMillis', DEFAULT_TRANSITIONTIME_MILLIS)) | Create an instance (called by cimon.py) | src/hueoutput.py | create | SchweizerischeBundesbahnen/cimon | 4 | python | def create(configuration, key=None):
return HueOutput(ipaddress=configuration.get('ipaddress', None), lamps=configuration.get('lamps', []), unused=configuration.get('unused', []), mappings=configuration.get('mappings', []), transitiontimeMillis=configuration.get('transitiontimeMillis', DEFAULT_TRANSITIONTIME_MILLIS)) | def create(configuration, key=None):
return HueOutput(ipaddress=configuration.get('ipaddress', None), lamps=configuration.get('lamps', []), unused=configuration.get('unused', []), mappings=configuration.get('mappings', []), transitiontimeMillis=configuration.get('transitiontimeMillis', DEFAULT_TRANSITIONTIME_MILLIS))<|docstring|>Create an instance (called by cimon.py)<|endoftext|> |
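For illustration only, a configuration dict supplying the keys read above might look like this (all values are hypothetical):

# Hypothetical cimon configuration for the Hue output.
configuration = {'ipaddress': '192.168.1.50',   # Hue bridge address (assumed)
                 'lamps': [1, 2, 3],            # lamp ids driven by this output (assumed)
                 'unused': [4],                 # remaining lamp ids (assumed)
                 'mappings': [],                # job-to-lamp mappings (assumed)
                 'transitiontimeMillis': 400}
hue_output = create(configuration)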
d0a9c7790ed51f3eb2ab0e4f7955850377a1f3e834c1ccf15d6efe80fd0646d2 | def on_update(self, status):
'Display the given status.\n        This information seems to no longer be correct:\n        Status is a dict of status type, for instance { \'build\' : {"<job_name_1>": {"request_status" : "error" | "not_found" | "ok", "result" : "failure" | "unstable" | "other" | "success"},\n        "<job_name_2>": {...},\n        ...}\n\n        Instead, the following is delivered:\n        Status is a dict of status type, for instance { ("<url>", "<job_name1>") : {"request_status" : <RequestStatus.OK: 1> | <RequestStatus.NOT_FOUND: 2> | <RequestStatus.ERROR: 3>,\n        "health" : <Health.HEALTHY: 1> | <Health.UNWELL: 2> | <Health.SICK: 3> | <Health.OTHER: 4> | <Health.UNDEFINED: 5>,\n        "active" : True | False,\n        ... },\n        ("<url>", "<job_name2>") : {...}\n        }\n        '
logger.debug('--- HueOutput.onUpdate start ---')
logger.debug('- status contains {} entries'.format(len(status)))
states = {}
items = status.items()
logger.debug('-> Evaluating Jobs')
for (key, value) in items:
url = key[0]
job = key[1]
self.treatBuild(states, url, job, value)
logger.debug('-> Updating Lamps')
self.updateLamps(states)
logger.debug('--- HueOutput.onUpdate done ---') | Display the given status.
This information seems to no longer be correct:
Status is a dict of status type, for instance { 'build' : {"<job_name_1>": {"request_status" : "error" | "not_found" | "ok", "result" : "failure" | "unstable" | "other" | "success"},
"<job_name_2>": {...},
...}
Instead, the following is delivered:
Status is a dict of status type, for instance { ("<url>", "<job_name1>") : {"request_status" : <RequestStatus.OK: 1> | <RequestStatus.NOT_FOUND: 2> | <RequestStatus.ERROR: 3>,
"health" : <Healthy.HEALTHY: 1> | <Health.UNWELL: 2> | <Health:SICK: 3> | <Health.OTHER: 4> | <Health.UNDEFINED: 5>,
"active" : True | False,
... },
("<url>", "<job_name2>") : {...}
} | src/hueoutput.py | on_update | SchweizerischeBundesbahnen/cimon | 4 | python | def on_update(self, status):
'Display the given status.\n        This information seems to no longer be correct:\n        Status is a dict of status type, for instance { \'build\' : {"<job_name_1>": {"request_status" : "error" | "not_found" | "ok", "result" : "failure" | "unstable" | "other" | "success"},\n        "<job_name_2>": {...},\n        ...}\n\n        Instead, the following is delivered:\n        Status is a dict of status type, for instance { ("<url>", "<job_name1>") : {"request_status" : <RequestStatus.OK: 1> | <RequestStatus.NOT_FOUND: 2> | <RequestStatus.ERROR: 3>,\n        "health" : <Health.HEALTHY: 1> | <Health.UNWELL: 2> | <Health.SICK: 3> | <Health.OTHER: 4> | <Health.UNDEFINED: 5>,\n        "active" : True | False,\n        ... },\n        ("<url>", "<job_name2>") : {...}\n        }\n        '
logger.debug('--- HueOutput.onUpdate start ---')
logger.debug('- status contains {} entries'.format(len(status)))
states = {}
items = status.items()
logger.debug('-> Evaluating Jobs')
for (key, value) in items:
url = key[0]
job = key[1]
self.treatBuild(states, url, job, value)
logger.debug('-> Updating Lamps')
self.updateLamps(states)
logger.debug('--- HueOutput.onUpdate done ---') | def on_update(self, status):
'Display the given status.\n        This information seems to no longer be correct:\n        Status is a dict of status type, for instance { \'build\' : {"<job_name_1>": {"request_status" : "error" | "not_found" | "ok", "result" : "failure" | "unstable" | "other" | "success"},\n        "<job_name_2>": {...},\n        ...}\n\n        Instead, the following is delivered:\n        Status is a dict of status type, for instance { ("<url>", "<job_name1>") : {"request_status" : <RequestStatus.OK: 1> | <RequestStatus.NOT_FOUND: 2> | <RequestStatus.ERROR: 3>,\n        "health" : <Health.HEALTHY: 1> | <Health.UNWELL: 2> | <Health.SICK: 3> | <Health.OTHER: 4> | <Health.UNDEFINED: 5>,\n        "active" : True | False,\n        ... },\n        ("<url>", "<job_name2>") : {...}\n        }\n        '
logger.debug('--- HueOutput.onUpdate start ---')
logger.debug('- status contains {} entries'.format(len(status)))
states = {}
items = status.items()
logger.debug('-> Evaluating Jobs')
for (key, value) in items:
url = key[0]
job = key[1]
self.treatBuild(states, url, job, value)
logger.debug('-> Updating Lamps')
self.updateLamps(states)
logger.debug('--- HueOutput.onUpdate done ---')<|docstring|>Display the given status.
This information seems to no longer be correct:
Status is a dict of status type, for instance { 'build' : {"<job_name_1>": {"request_status" : "error" | "not_found" | "ok", "result" : "failure" | "unstable" | "other" | "success"},
"<job_name_2>": {...},
...}
Instead, the following is delivered:
Status is a dict of status type, for instance { ("<url>", "<job_name1>") : {"request_status" : <RequestStatus.OK: 1> | <RequestStatus.NOT_FOUND: 2> | <RequestStatus.ERROR: 3>,
"health" : <Healthy.HEALTHY: 1> | <Health.UNWELL: 2> | <Health:SICK: 3> | <Health.OTHER: 4> | <Health.UNDEFINED: 5>,
"active" : True | False,
... },
("<url>", "<job_name2>") : {...}
}<|endoftext|> |
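To make the second (current) format concrete, a status dict like the one described above could be built as follows; the enum classes are assumed to be importable from cimon's own modules, and all names are illustrative.

# Illustrative status in the (url, job_name)-keyed format; enum imports assumed.
status = {('https://ci.example.org', 'job-a'): {'request_status': RequestStatus.OK,
                                                'health': Health.HEALTHY,
                                                'active': False},
          ('https://ci.example.org', 'job-b'): {'request_status': RequestStatus.OK,
                                                'health': Health.SICK,
                                                'active': True}}
hue_output.on_update(status)   # evaluates each job, then updates the lamps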
869fcb4855c8f6b9db4da1cd1ed8b239101a06527410ae53bf36e130fe7f958d | def kl_loss(mu: torch.Tensor, logstd: torch.Tensor):
'Computes the KL loss for the passed arguments :obj:`mu`\n    and :obj:`logstd`.\n    Args:\n        mu (Tensor): The latent space for :math:`\\mu`.\n        logstd (Tensor): The latent space for :math:`\\log\\sigma`.\n    '
logstd = logstd.clamp(max=MAX_LOGSTD)
return ((- 0.5) * torch.mean(torch.sum((((1 + (2 * logstd)) - (mu ** 2)) - (logstd.exp() ** 2)), dim=1))) | Computes the KL loss for the passed arguments :obj:`mu`
and :obj:`logstd`.
Args:
mu (Tensor): The latent space for :math:`\mu`.
logstd (Tensor): The latent space for :math:`\log\sigma`. | mdgraph/models/graph_vae.py | kl_loss | hengma1001/pytorch-geometric-sandbox | 0 | python | def kl_loss(mu: torch.Tensor, logstd: torch.Tensor):
'Computes the KL loss for the passed arguments :obj:`mu`\n    and :obj:`logstd`.\n    Args:\n        mu (Tensor): The latent space for :math:`\\mu`.\n        logstd (Tensor): The latent space for :math:`\\log\\sigma`.\n    '
logstd = logstd.clamp(max=MAX_LOGSTD)
return ((- 0.5) * torch.mean(torch.sum((((1 + (2 * logstd)) - (mu ** 2)) - (logstd.exp() ** 2)), dim=1))) | def kl_loss(mu: torch.Tensor, logstd: torch.Tensor):
'Computes the KL loss for the passed arguments :obj:`mu`\n    and :obj:`logstd`.\n    Args:\n        mu (Tensor): The latent space for :math:`\\mu`.\n        logstd (Tensor): The latent space for :math:`\\log\\sigma`.\n    '
logstd = logstd.clamp(max=MAX_LOGSTD)
return ((- 0.5) * torch.mean(torch.sum((((1 + (2 * logstd)) - (mu ** 2)) - (logstd.exp() ** 2)), dim=1)))<|docstring|>Computes the KL loss for the passed arguments :obj:`mu`
and :obj:`logstd`.
Args:
mu (Tensor): The latent space for :math:`\mu`.
logstd (Tensor): The latent space for :math:`\log\sigma`.<|endoftext|> |
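The returned expression matches the closed form KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * (1 + 2*log(sigma) - mu^2 - sigma^2), summed over the latent dimension and averaged over the batch. A quick numerical check (not part of the module; assumes `kl_loss` is importable and that MAX_LOGSTD is 10 as in PyTorch Geometric, so the clamp is inactive for standard-normal draws):

import torch
from torch.distributions import Normal, kl_divergence

mu = torch.randn(8, 16)
logstd = torch.randn(8, 16)
reference = kl_divergence(Normal(mu, logstd.exp()),
                          Normal(torch.zeros_like(mu), torch.ones_like(mu)))
assert torch.allclose(kl_loss(mu, logstd), reference.sum(dim=1).mean(), atol=1e-5)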
f432d77f00ea509c7b2099e0ed24622449ae41ccbe964919d6b64db56ac0daae | def loss(self, phi, y):
'\n input: phi: shape (..., x_dim+u_dim) - prediction\n y: shape (..., y_dim) \n output: loss: sum of errors squared\n '
err = (y - phi)
err_quadform = (0.5 * (err ** 2))
return err_quadform | input: phi: shape (..., x_dim+u_dim) - prediction
y: shape (..., y_dim)
output: loss: sum of errors squared | models/feedforward.py | loss | StanfordASL/UP | 6 | python | def loss(self, phi, y):
'\n input: phi: shape (..., x_dim+u_dim) - prediction\n y: shape (..., y_dim) \n output: loss: sum of errors squared\n '
err = (y - phi)
err_quadform = (0.5 * (err ** 2))
return err_quadform | def loss(self, phi, y):
'\n input: phi: shape (..., x_dim+u_dim) - prediction\n y: shape (..., y_dim) \n output: loss: sum of errors squared\n '
err = (y - phi)
err_quadform = (0.5 * (err ** 2))
return err_quadform<|docstring|>input: phi: shape (..., x_dim+u_dim) - prediction
y: shape (..., y_dim)
output: loss: sum of errors squared<|endoftext|> |
25bd03cd70e59c5f88a36278ac9bf3f51ff1d57ce541fe227c423366ed30fa95 | def forward(self, x, u):
'\n input: x, u\n output: phi\n '
z = torch.cat([x, u], dim=(- 1))
phi = self.encoder(z)
return phi | input: x, u
output: phi | models/feedforward.py | forward | StanfordASL/UP | 6 | python | def forward(self, x, u):
'\n input: x, u\n output: phi\n '
z = torch.cat([x, u], dim=(- 1))
phi = self.encoder(z)
return phi | def forward(self, x, u):
'\n input: x, u\n output: phi\n '
z = torch.cat([x, u], dim=(- 1))
phi = self.encoder(z)
return phi<|docstring|>input: x, u
output: phi<|endoftext|> |
d83192a422b49b153b7e0fb96311195ab13e5e23ef1d8c8606277beae7013f70 | def __init__(self, model, summary_name='FeedForward', dt=1.0):
'\n Inputs:\n model: FeedForward object\n '
super().__init__()
self.f_nom = (lambda x, u: x)
self.model = model
self.reset()
self.ob_dim = self.model.x_dim
self.u_dim = self.model.u_dim
self.train_step = 0
self.writer = SummaryWriter(((('./runs/' + summary_name) + '_') + datetime.datetime.now().strftime('y%y_m%m_d%d_s%s')))
self.optimizer = optim.Adam(self.model.parameters(), lr=self.model.config['learning_rate'], weight_decay=self.model.config['l2_reg'])
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=1, gamma=(1 - self.model.config['lr_decay_rate']))
self.dt = dt
self.B_UP_method = 'robUP'
self.mu_0 = np.zeros(self.ob_dim)
self.Q_0 = np.zeros((self.ob_dim, self.ob_dim))
self.X0s_MC = np.zeros((0, self.ob_dim, self.ob_dim))
self.eta_x0s = 0.01 | Inputs:
model: FeedForward object | models/feedforward.py | __init__ | StanfordASL/UP | 6 | python | def __init__(self, model, summary_name='FeedForward', dt=1.0):
'\n Inputs:\n model: FeedForward object\n '
super().__init__()
self.f_nom = (lambda x, u: x)
self.model = model
self.reset()
self.ob_dim = self.model.x_dim
self.u_dim = self.model.u_dim
self.train_step = 0
self.writer = SummaryWriter(((('./runs/' + summary_name) + '_') + datetime.datetime.now().strftime('y%y_m%m_d%d_s%s')))
self.optimizer = optim.Adam(self.model.parameters(), lr=self.model.config['learning_rate'], weight_decay=self.model.config['l2_reg'])
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=1, gamma=(1 - self.model.config['lr_decay_rate']))
self.dt = dt
self.B_UP_method = 'robUP'
self.mu_0 = np.zeros(self.ob_dim)
self.Q_0 = np.zeros((self.ob_dim, self.ob_dim))
self.X0s_MC = np.zeros((0, self.ob_dim, self.ob_dim))
self.eta_x0s = 0.01 | def __init__(self, model, summary_name='FeedForward', dt=1.0):
'\n Inputs:\n model: FeedForward object\n '
super().__init__()
self.f_nom = (lambda x, u: x)
self.model = model
self.reset()
self.ob_dim = self.model.x_dim
self.u_dim = self.model.u_dim
self.train_step = 0
self.writer = SummaryWriter(((('./runs/' + summary_name) + '_') + datetime.datetime.now().strftime('y%y_m%m_d%d_s%s')))
self.optimizer = optim.Adam(self.model.parameters(), lr=self.model.config['learning_rate'], weight_decay=self.model.config['l2_reg'])
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=1, gamma=(1 - self.model.config['lr_decay_rate']))
self.dt = dt
self.B_UP_method = 'robUP'
self.mu_0 = np.zeros(self.ob_dim)
self.Q_0 = np.zeros((self.ob_dim, self.ob_dim))
self.X0s_MC = np.zeros((0, self.ob_dim, self.ob_dim))
self.eta_x0s = 0.01<|docstring|>Inputs:
model: FeedForward object<|endoftext|> |
4984e924b5b6f0c9c5ff6cbc05a2e8153ff225e9b127e9433359d25e04b73819 | def evaluate_loss(self, x, u, xp):
'\n mean over time horizon/batch (dim 1), mean over batch (dim 0)\n '
horizon = self.model.config['data_horizon']
x = x.float()
u = u.float()
xp = xp.float()
z = torch.cat([x, u], dim=(- 1))
phi = self.model.encoder(z)
y = (xp - self.f_nom(x, u))
total_loss = (self.model.loss(phi, y) / horizon)
return total_loss.mean() | mean over time horizon/batch (dim 1), mean over batch (dim 0) | models/feedforward.py | evaluate_loss | StanfordASL/UP | 6 | python | def evaluate_loss(self, x, u, xp):
'\n \n '
horizon = self.model.config['data_horizon']
x = x.float()
u = u.float()
xp = xp.float()
z = torch.cat([x, u], dim=(- 1))
phi = self.model.encoder(z)
y = (xp - self.f_nom(x, u))
total_loss = (self.model.loss(phi, y) / horizon)
return total_loss.mean() | def evaluate_loss(self, x, u, xp):
'\n \n '
horizon = self.model.config['data_horizon']
x = x.float()
u = u.float()
xp = xp.float()
z = torch.cat([x, u], dim=(- 1))
phi = self.model.encoder(z)
y = (xp - self.f_nom(x, u))
total_loss = (self.model.loss(phi, y) / horizon)
return total_loss.mean()<|docstring|>mean over time horizon/batch (dim 1), mean over batch (dim 0)<|endoftext|> |
0f103de109411b475b90f8b7be7147c97faefca65d25eb6d5729fe91cb1910fb | def to_torch(self, *args):
'\n        for every argument (assumed to be a numpy array), this function\n        puts the arguments into float32 torch.Tensors and pushes them to the same device\n        as self.model\n        '
device = next(self.model.parameters()).device
return [torch.as_tensor(x, dtype=torch.float32, device=device) for x in args] | for every argument (assumed to be a numpy array), this function
puts the arguments into float32 torch.Tensors and pushes them to the same device
as self.model | models/feedforward.py | to_torch | StanfordASL/UP | 6 | python | def to_torch(self, *args):
'\n        for every argument (assumed to be a numpy array), this function\n        puts the arguments into float32 torch.Tensors and pushes them to the same device\n        as self.model\n        '
device = next(self.model.parameters()).device
return [torch.as_tensor(x, dtype=torch.float32, device=device) for x in args] | def to_torch(self, *args):
'\n        for every argument (assumed to be a numpy array), this function\n        puts the arguments into float32 torch.Tensors and pushes them to the same device\n        as self.model\n        '
device = next(self.model.parameters()).device
return [torch.as_tensor(x, dtype=torch.float32, device=device) for x in args]<|docstring|>for every argument (assumed to be a numpy array), this function
puts the arguments into float32 torch.Tensors and pushes them to the same device
as self.model<|endoftext|> |
9dfca4e84b7859e778af5f574aa159ee8fe401c61e37a6dca4fb2db10681ae5c | def save(self, path):
'\n Saves self.model to path.\n '
torch.save({'config': self.model.config, 'state_dict': self.model.state_dict()}, path) | Saves self.model to path. | models/feedforward.py | save | StanfordASL/UP | 6 | python | def save(self, path):
'\n \n '
torch.save({'config': self.model.config, 'state_dict': self.model.state_dict()}, path) | def save(self, path):
'\n \n '
torch.save({'config': self.model.config, 'state_dict': self.model.state_dict()}, path)<|docstring|>Saves self.model to path.<|endoftext|> |
d85e6557770b19f5a8e1b1899f6cc52c230f311676b283d4ab05b36cb7aa566c | def get_model_torch(self, n_samples=10):
'\n returns function mapping x,u to mu (torch tensor)\n '
def f(x, u):
'\n assumes dim -2 of inputs is batch over model samples\n inputs must broadcast to (..., N, x/u dim)\n '
z = torch.cat([x, u], dim=(- 1))
phi = self.model.encoder(z)
mu = (phi + self.f_nom(x, u))
return mu
return f | returns function mapping x,u to mu (torch tensor) | models/feedforward.py | get_model_torch | StanfordASL/UP | 6 | python | def get_model_torch(self, n_samples=10):
'\n \n '
def f(x, u):
'\n assumes dim -2 of inputs is batch over model samples\n inputs must broadcast to (..., N, x/u dim)\n '
z = torch.cat([x, u], dim=(- 1))
phi = self.model.encoder(z)
mu = (phi + self.f_nom(x, u))
return mu
return f | def get_model_torch(self, n_samples=10):
'\n \n '
def f(x, u):
'\n assumes dim -2 of inputs is batch over model samples\n inputs must broadcast to (..., N, x/u dim)\n '
z = torch.cat([x, u], dim=(- 1))
phi = self.model.encoder(z)
mu = (phi + self.f_nom(x, u))
return mu
return f<|docstring|>returns function mapping x,u to mu (torch tensor)<|endoftext|> |
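A minimal rollout sketch using the returned closure (assuming a constructed FeedForwardAdaptor named `adaptor`; shapes follow the docstrings above):

import torch

f = adaptor.get_model_torch()
x = torch.zeros(1, adaptor.ob_dim)   # single-state batch
u = torch.zeros(1, adaptor.u_dim)    # single-control batch
x_next = f(x, u)                     # encoder(cat(x, u)) + f_nom(x, u)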
6d0ed1f466cbda045b077d3cd79e2d6f0fc6f4193a307b248b030f610f8b447f | def Xs_dparams_MC(self, Xs, Xs_dx):
'\n        Returns the Jacobian matrices of the state TRAJECTORY\n        w.r.t. all parameters\n        Inputs: Xs : (N_MC, N , n_x)\n                Xs_dx : (N_MC, N , n_x, n_x)\n        Outputs: Xs_dX0 : (N_MC, N, n_x, n_x)\n        '
(N_MC, N, n_x) = (Xs.shape[0], Xs.shape[1], Xs.shape[2])
Xs_dX0s = np.zeros((N_MC, N, n_x, n_x))
Xs_dX0s[(:, 0, :, :)] = np.repeat(np.eye(n_x)[(None, :)], N_MC, axis=0)
for j in range((N - 1)):
Xs_dX0s[(:, (j + 1), :, :)] = np.einsum('Mxy,Myz->Mxz', Xs_dx[(:, j, :, :)], Xs_dX0s[(:, j, :, :)])
return Xs_dX0s | Returns the Jacobian matrices of the state TRAJECTORY
w.r.t. all parameters
Inputs: Xs : (N_MC, N , n_x)
Xs_dx : (N_MC, N , n_x, n_x)
Outputs: Xs_dX0 : (N_MC, N, n_x, n_x) | models/feedforward.py | Xs_dparams_MC | StanfordASL/UP | 6 | python | def Xs_dparams_MC(self, Xs, Xs_dx):
'\n        Returns the Jacobian matrices of the state TRAJECTORY\n        w.r.t. all parameters\n        Inputs: Xs : (N_MC, N , n_x)\n                Xs_dx : (N_MC, N , n_x, n_x)\n        Outputs: Xs_dX0 : (N_MC, N, n_x, n_x)\n        '
(N_MC, N, n_x) = (Xs.shape[0], Xs.shape[1], Xs.shape[2])
Xs_dX0s = np.zeros((N_MC, N, n_x, n_x))
Xs_dX0s[(:, 0, :, :)] = np.repeat(np.eye(n_x)[(None, :)], N_MC, axis=0)
for j in range((N - 1)):
Xs_dX0s[(:, (j + 1), :, :)] = np.einsum('Mxy,Myz->Mxz', Xs_dx[(:, j, :, :)], Xs_dX0s[(:, j, :, :)])
return Xs_dX0s | def Xs_dparams_MC(self, Xs, Xs_dx):
'\n        Returns the Jacobian matrices of the state TRAJECTORY\n        w.r.t. all parameters\n        Inputs: Xs : (N_MC, N , n_x)\n                Xs_dx : (N_MC, N , n_x, n_x)\n        Outputs: Xs_dX0 : (N_MC, N, n_x, n_x)\n        '
(N_MC, N, n_x) = (Xs.shape[0], Xs.shape[1], Xs.shape[2])
Xs_dX0s = np.zeros((N_MC, N, n_x, n_x))
Xs_dX0s[(:, 0, :, :)] = np.repeat(np.eye(n_x)[(None, :)], N_MC, axis=0)
for j in range((N - 1)):
Xs_dX0s[(:, (j + 1), :, :)] = np.einsum('Mxy,Myz->Mxz', Xs_dx[(:, j, :, :)], Xs_dX0s[(:, j, :, :)])
return Xs_dX0s<|docstring|>Returns the Jacobian matrices of the state TRAJECTORY
w.r.t. all parameters
Inputs: Xs : (N_MC, N , n_x)
Xs_dx : (N_MC, N , n_x, n_x)
Outputs: Xs_dX0 : (N_MC, N, n_x, n_x)<|endoftext|> |
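The loop above is the chain rule J_{t+1} = (dx_{t+1}/dx_t) J_t applied per Monte Carlo sample. A self-contained numpy sketch of the same recursion with random per-step Jacobians:

import numpy as np

N_MC, N, n_x = 4, 6, 3
Xs_dx = np.random.randn(N_MC, N, n_x, n_x)   # per-step Jacobians dx_{t+1}/dx_t
J = np.zeros((N_MC, N, n_x, n_x))
J[:, 0] = np.eye(n_x)                        # dx_0/dx_0 = I
for t in range(N - 1):
    J[:, t + 1] = np.einsum('Mxy,Myz->Mxz', Xs_dx[:, t], J[:, t])
assert np.allclose(J[:, 1], Xs_dx[:, 0])     # first step reduces to A_0 @ I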
3012909327cf288bbde3e463cb7a5e42431d1f8bd4ed77984fefac52bb09653a | def adv_sample_params(self, Xs, Xs_dx):
'\n resamples parameters self.X0s \n using Xs : (N_MC, N, n_x)\n Xs_dx : (N_MC, N, n_x, n_x)\n '
(N_MC, T, x_dim) = (Xs.shape[0], Xs.shape[1], Xs.shape[2])
X0s = self.X0s_MC
eta_x0s = self.eta_x0s
Cs = np.mean(Xs, 0)
Qs = np.zeros((T, x_dim, x_dim))
for t in range(1, T):
Qs[(t, :, :)] = np.linalg.inv(np.cov(Xs[(:, t, :)].T))
Jdists_dXs = np.einsum('txy,Mty->Mtx', (2 * Qs), (Xs - Cs))
Xs_dX0 = self.Xs_dparams_MC(Xs, Xs_dx)
Jdists_dX0s = np.mean(np.einsum('MTx,MTxy->MTy', Jdists_dXs, Xs_dX0), axis=1)
X0s = (self.X0s_MC + (eta_x0s * Jdists_dX0s))
mu0s = np.repeat(self.mu_0[(None, :)], N_MC, axis=0)
Q0_inv = np.linalg.inv(self.Q_0)
Q0s = np.repeat(Q0_inv[(None, :, :)], N_MC, axis=0)
X0s_deltas = ell_proj_np.proj_ell(Q0s, (X0s - mu0s), eps=0.0005)
X0s = (mu0s + X0s_deltas)
self.X0s_MC = X0s
return X0s | resamples parameters self.X0s
using Xs : (N_MC, N, n_x)
Xs_dx : (N_MC, N, n_x, n_x) | models/feedforward.py | adv_sample_params | StanfordASL/UP | 6 | python | def adv_sample_params(self, Xs, Xs_dx):
'\n resamples parameters self.X0s \n using Xs : (N_MC, N, n_x)\n Xs_dx : (N_MC, N, n_x, n_x)\n '
(N_MC, T, x_dim) = (Xs.shape[0], Xs.shape[1], Xs.shape[2])
X0s = self.X0s_MC
eta_x0s = self.eta_x0s
Cs = np.mean(Xs, 0)
Qs = np.zeros((T, x_dim, x_dim))
for t in range(1, T):
Qs[(t, :, :)] = np.linalg.inv(np.cov(Xs[(:, t, :)].T))
Jdists_dXs = np.einsum('txy,Mty->Mtx', (2 * Qs), (Xs - Cs))
Xs_dX0 = self.Xs_dparams_MC(Xs, Xs_dx)
Jdists_dX0s = np.mean(np.einsum('MTx,MTxy->MTy', Jdists_dXs, Xs_dX0), axis=1)
X0s = (self.X0s_MC + (eta_x0s * Jdists_dX0s))
mu0s = np.repeat(self.mu_0[(None, :)], N_MC, axis=0)
Q0_inv = np.linalg.inv(self.Q_0)
Q0s = np.repeat(Q0_inv[(None, :, :)], N_MC, axis=0)
X0s_deltas = ell_proj_np.proj_ell(Q0s, (X0s - mu0s), eps=0.0005)
X0s = (mu0s + X0s_deltas)
self.X0s_MC = X0s
return X0s | def adv_sample_params(self, Xs, Xs_dx):
'\n resamples parameters self.X0s \n using Xs : (N_MC, N, n_x)\n Xs_dx : (N_MC, N, n_x, n_x)\n '
(N_MC, T, x_dim) = (Xs.shape[0], Xs.shape[1], Xs.shape[2])
X0s = self.X0s_MC
eta_x0s = self.eta_x0s
Cs = np.mean(Xs, 0)
Qs = np.zeros((T, x_dim, x_dim))
for t in range(1, T):
Qs[(t, :, :)] = np.linalg.inv(np.cov(Xs[(:, t, :)].T))
Jdists_dXs = np.einsum('txy,Mty->Mtx', (2 * Qs), (Xs - Cs))
Xs_dX0 = self.Xs_dparams_MC(Xs, Xs_dx)
Jdists_dX0s = np.mean(np.einsum('MTx,MTxy->MTy', Jdists_dXs, Xs_dX0), axis=1)
X0s = (self.X0s_MC + (eta_x0s * Jdists_dX0s))
mu0s = np.repeat(self.mu_0[(None, :)], N_MC, axis=0)
Q0_inv = np.linalg.inv(self.Q_0)
Q0s = np.repeat(Q0_inv[(None, :, :)], N_MC, axis=0)
X0s_deltas = ell_proj_np.proj_ell(Q0s, (X0s - mu0s), eps=0.0005)
X0s = (mu0s + X0s_deltas)
self.X0s_MC = X0s
return X0s<|docstring|>resamples parameters self.X0s
using Xs : (N_MC, N, n_x)
Xs_dx : (N_MC, N, n_x, n_x)<|endoftext|> |
da5d597dcb57006b03fbafd8abe1c525be964ce4c9f35f2207c8f1b7e8a6ba26 | def f(x, u):
'\n assumes dim -2 of inputs is batch over model samples\n inputs must broadcast to (..., N, x/u dim)\n '
z = torch.cat([x, u], dim=(- 1))
phi = self.model.encoder(z)
mu = (phi + self.f_nom(x, u))
return mu | assumes dim -2 of inputs is batch over model samples
inputs must broadcast to (..., N, x/u dim) | models/feedforward.py | f | StanfordASL/UP | 6 | python | def f(x, u):
'\n assumes dim -2 of inputs is batch over model samples\n inputs must broadcast to (..., N, x/u dim)\n '
z = torch.cat([x, u], dim=(- 1))
phi = self.model.encoder(z)
mu = (phi + self.f_nom(x, u))
return mu | def f(x, u):
'\n assumes dim -2 of inputs is batch over model samples\n inputs must broadcast to (..., N, x/u dim)\n '
z = torch.cat([x, u], dim=(- 1))
phi = self.model.encoder(z)
mu = (phi + self.f_nom(x, u))
return mu<|docstring|>assumes dim -2 of inputs is batch over model samples
inputs must broadcast to (..., N, x/u dim)<|endoftext|> |
bdcd127581d15e58c6bfc3b72b6095e452540002f335ef3846465c12a4853286 | def get(self, request, format=None):
'\n Return the subscription status information for the authenticated user.\n '
(client_ip, is_routable) = get_client_ip(self.request)
serializer = SubscriptionRetrieveSerializer(request.user)
serialized_data = serializer.data
return Response(serialized_data) | Return the subscription status information for the authenticated user. | mikaponics/ecommerce/views/resources/subscription_crud_views.py | get | mikaponics/mikaponics-back | 2 | python | def get(self, request, format=None):
'\n \n '
(client_ip, is_routable) = get_client_ip(self.request)
serializer = SubscriptionRetrieveSerializer(request.user)
serialized_data = serializer.data
return Response(serialized_data) | def get(self, request, format=None):
'\n \n '
(client_ip, is_routable) = get_client_ip(self.request)
serializer = SubscriptionRetrieveSerializer(request.user)
serialized_data = serializer.data
return Response(serialized_data)<|docstring|>Return the subscription status information for the authenticated user.<|endoftext|> |
99fc2a8778ae8459a8af486595ba661c1e2657b23eceef429ca7859a1f50ecef | def post(self, request, format=None):
'\n Update the existing "onboarding invoice".\n '
(client_ip, is_routable) = get_client_ip(self.request)
serializer = SubscriptionUpdateSerializer(request.user, data=request.data, context={'from': client_ip, 'from_is_public': is_routable})
serializer.is_valid(raise_exception=True)
serializer.save()
serializer = SubscriptionRetrieveSerializer(request.user)
return Response(serializer.data) | Update the existing "onboarding invoice". | mikaponics/ecommerce/views/resources/subscription_crud_views.py | post | mikaponics/mikaponics-back | 2 | python | def post(self, request, format=None):
'\n \n '
(client_ip, is_routable) = get_client_ip(self.request)
serializer = SubscriptionUpdateSerializer(request.user, data=request.data, context={'from': client_ip, 'from_is_public': is_routable})
serializer.is_valid(raise_exception=True)
serializer.save()
serializer = SubscriptionRetrieveSerializer(request.user)
return Response(serializer.data) | def post(self, request, format=None):
'\n \n '
(client_ip, is_routable) = get_client_ip(self.request)
serializer = SubscriptionUpdateSerializer(request.user, data=request.data, context={'from': client_ip, 'from_is_public': is_routable})
serializer.is_valid(raise_exception=True)
serializer.save()
serializer = SubscriptionRetrieveSerializer(request.user)
return Response(serializer.data)<|docstring|>Update the existing "onboarding invoice".<|endoftext|> |
0b25458f8d5bfca0ccc44f127c208e853721f2633eb69769d8d8dc972863ea88 | def __init__(self, size):
'Initialize a new square.\n Args:\n size (int): The size of the new square.\n '
self.size = size | Initialize a new square.
Args:
size (int): The size of the new square. | 0x06-python-classes/5-square.py | __init__ | oluwaseun-ebenezer/holbertonschool-higher_level_programming | 0 | python | def __init__(self, size):
'Initialize a new square.\n Args:\n size (int): The size of the new square.\n '
self.size = size | def __init__(self, size):
'Initialize a new square.\n Args:\n size (int): The size of the new square.\n '
self.size = size<|docstring|>Initialize a new square.
Args:
size (int): The size of the new square.<|endoftext|> |
b36dcb8bd5cba20cf7fa669741fc233989c11b23db154d6eb6c4214cfa18384b | @property
def size(self):
'Get/set the current size of the square.'
return self.__size | Get/set the current size of the square. | 0x06-python-classes/5-square.py | size | oluwaseun-ebenezer/holbertonschool-higher_level_programming | 0 | python | @property
def size(self):
return self.__size | @property
def size(self):
return self.__size<|docstring|>Get/set the current size of the square.<|endoftext|> |
c048ed58ddbb321baaa4a055ba2ef01ec9917ac584fa3f28269da80605b2524c | def area(self):
'Return the current area of the square.'
return (self.__size * self.__size) | Return the current area of the square. | 0x06-python-classes/5-square.py | area | oluwaseun-ebenezer/holbertonschool-higher_level_programming | 0 | python | def area(self):
return (self.__size * self.__size) | def area(self):
return (self.__size * self.__size)<|docstring|>Return the current area of the square.<|endoftext|> |
a4b5b0e8b20382513b2236e8e39d6da90ed66e3006e6d77fb8b5a7c0e3566efa | def my_print(self):
'Print the square with the # character.'
for i in range(0, self.__size):
[print('#', end='') for j in range(self.__size)]
print('')
if (self.__size == 0):
print('') | Print the square with the # character. | 0x06-python-classes/5-square.py | my_print | oluwaseun-ebenezer/holbertonschool-higher_level_programming | 0 | python | def my_print(self):
for i in range(0, self.__size):
[print('#', end='') for j in range(self.__size)]
print('')
if (self.__size == 0):
print('') | def my_print(self):
for i in range(0, self.__size):
[print('#', end='') for j in range(self.__size)]
print('')
if (self.__size == 0):
print('')<|docstring|>Print the square with the # character.<|endoftext|>
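A quick interactive check of the method above (assumes the size setter from the full 5-square.py class, which this record does not show):

sq = Square(3)
sq.my_print()
# ###
# ###
# ###
print(sq.area())   # 9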
eb756efbb963793c939a32c4cdcac809f6b334276ec56c360a2049348526f821 | def get_final_text(token_text: str, unigram_text: str, do_lower_case: bool=True) -> str:
'Projects the token-concatenated text back to the unigram-concatenated text.\n\n  This function is branched from the original BERT `run_squad.py`.\n\n  When we created the data, we kept track of the alignment between original\n  (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n  now `unigram_text` contains the span of our original text corresponding to the\n  span that we predicted.\n\n  However, `unigram_text` may contain extra characters that we don\'t want in\n  our prediction.\n\n  For example, let\'s say:\n    token_text = steve smith\n    unigram_text = Steve Smith\'s\n\n  We don\'t want to return `unigram_text` because it contains the extra "\'s".\n\n  We don\'t want to return `token_text` because it\'s already been normalized\n  (the SQuAD eval script also does punctuation stripping/lower casing but\n  our tokenizer does additional normalization like stripping accent\n  characters).\n\n  What we really want to return is "Steve Smith".\n\n  Therefore, we have to apply a semi-complicated alignment heuristic between\n  `token_text` and `unigram_text` to get a character-to-character alignment. This\n  can fail in certain cases in which case we just return `unigram_text`.\n\n  Args:\n    token_text: The text obtained by concatenating wordpiece tokens and removing\n      \'##\' and \' ##\' symbols.\n    unigram_text: The text obtained by concatenating unigrams.\n    do_lower_case: Whether the tokenizer is doing lower case.\n\n  Returns:\n    The text corresponding to `token_text` in `unigram_text`. If unable to find\n    such correspondence, `unigram_text` is returned directly.\n  '
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if (c == ' '):
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = ''.join(ns_chars)
return (ns_text, ns_to_s_map)
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = ' '.join(tokenizer.tokenize(unigram_text))
start_position = tok_text.find(token_text)
if (start_position == (- 1)):
return unigram_text
end_position = ((start_position + len(token_text)) - 1)
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(unigram_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if (len(orig_ns_text) != len(tok_ns_text)):
return unigram_text
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if (start_position in tok_s_to_ns_map):
ns_start_position = tok_s_to_ns_map[start_position]
if (ns_start_position in orig_ns_to_s_map):
orig_start_position = orig_ns_to_s_map[ns_start_position]
if (orig_start_position is None):
return unigram_text
orig_end_position = None
if (end_position in tok_s_to_ns_map):
ns_end_position = tok_s_to_ns_map[end_position]
if (ns_end_position in orig_ns_to_s_map):
orig_end_position = orig_ns_to_s_map[ns_end_position]
if (orig_end_position is None):
return unigram_text
output_text = unigram_text[orig_start_position:(orig_end_position + 1)]
return output_text | Projects the token-concatenated text back to the unigram-concatenated text.
This function is branched from the original BERT `run_squad.py`.
When we created the data, we kept track of the alignment between original
(whitespace tokenized) tokens and our WordPiece tokenized tokens. So
now `unigram_text` contains the span of our original text corresponding to the
span that we predicted.
However, `unigram_text` may contain extra characters that we don't want in
our prediction.
For example, let's say:
token_text = steve smith
unigram_text = Steve Smith's
We don't want to return `unigram_text` because it contains the extra "'s".
We don't want to return `token_text` because it's already been normalized
(the SQuAD eval script also does punctuation stripping/lower casing but
our tokenizer does additional normalization like stripping accent
characters).
What we really want to return is "Steve Smith".
Therefore, we have to apply a semi-complicated alignment heuristic between
`token_text` and `unigram_text` to get a character-to-character alignment. This
can fail in certain cases in which case we just return `unigram_text`.
Args:
token_text: The text obtained by concatenating wordpiece tokens and removing
'##' and ' ##' symbols.
unigram_text: The text obtained by concatenating unigrams.
do_lower_case: Whether the tokenizer is doing lower case.
Returns:
The text corresponding to `token_text` in `unigram_text`. If unable to find
such correspondence, `unigram_text` is returned directly. | etcmodel/models/hotpotqa/eval_utils.py | get_final_text | wy-go/google-research | 23,901 | python | def get_final_text(token_text: str, unigram_text: str, do_lower_case: bool=True) -> str:
'Projects the token-concatenated text back to the unigram-concatenated text.\n\n  This function is branched from the original BERT `run_squad.py`.\n\n  When we created the data, we kept track of the alignment between original\n  (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n  now `unigram_text` contains the span of our original text corresponding to the\n  span that we predicted.\n\n  However, `unigram_text` may contain extra characters that we don\'t want in\n  our prediction.\n\n  For example, let\'s say:\n    token_text = steve smith\n    unigram_text = Steve Smith\'s\n\n  We don\'t want to return `unigram_text` because it contains the extra "\'s".\n\n  We don\'t want to return `token_text` because it\'s already been normalized\n  (the SQuAD eval script also does punctuation stripping/lower casing but\n  our tokenizer does additional normalization like stripping accent\n  characters).\n\n  What we really want to return is "Steve Smith".\n\n  Therefore, we have to apply a semi-complicated alignment heuristic between\n  `token_text` and `unigram_text` to get a character-to-character alignment. This\n  can fail in certain cases in which case we just return `unigram_text`.\n\n  Args:\n    token_text: The text obtained by concatenating wordpiece tokens and removing\n      \'##\' and \' ##\' symbols.\n    unigram_text: The text obtained by concatenating unigrams.\n    do_lower_case: Whether the tokenizer is doing lower case.\n\n  Returns:\n    The text corresponding to `token_text` in `unigram_text`. If unable to find\n    such correspondence, `unigram_text` is returned directly.\n  '
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if (c == ' '):
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = ''.join(ns_chars)
return (ns_text, ns_to_s_map)
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = ' '.join(tokenizer.tokenize(unigram_text))
start_position = tok_text.find(token_text)
if (start_position == (- 1)):
return unigram_text
end_position = ((start_position + len(token_text)) - 1)
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(unigram_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if (len(orig_ns_text) != len(tok_ns_text)):
return unigram_text
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if (start_position in tok_s_to_ns_map):
ns_start_position = tok_s_to_ns_map[start_position]
if (ns_start_position in orig_ns_to_s_map):
orig_start_position = orig_ns_to_s_map[ns_start_position]
if (orig_start_position is None):
return unigram_text
orig_end_position = None
if (end_position in tok_s_to_ns_map):
ns_end_position = tok_s_to_ns_map[end_position]
if (ns_end_position in orig_ns_to_s_map):
orig_end_position = orig_ns_to_s_map[ns_end_position]
if (orig_end_position is None):
return unigram_text
output_text = unigram_text[orig_start_position:(orig_end_position + 1)]
return output_text | def get_final_text(token_text: str, unigram_text: str, do_lower_case: bool=True) -> str:
'Projects the token-concatenated text back to the unigram-concatenated text.\n\n This function is branched from the original BERT `run_squad.py`.\n\n When we created the data, we kept track of the alignment between original\n (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n now `unigram_text` contains the span of our original text corresponding to the\n span that we predicted.\n\n However, `unigram_text` may contain extra characters that we don\'t want in\n our prediction.\n\n For example, let\'s say:\n token_text = steve smith\n unigram_text = Steve Smith\'s\n\n We don\'t want to return `unigram_text` because it contains the extra "\'s".\n\n We don\'t want to return `token_text` because it\'s already been normalized\n (the SQuAD eval script also does punctuation stripping/lower casing but\n our tokenizer does additional normalization like stripping accent\n characters).\n\n What we really want to return is "Steve Smith".\n\n Therefore, we have to apply a semi-complicated alignment heuristic between\n `token_text` and `unigram_text` to get a character-to-character alignment. This\n can fail in certain cases in which case we just return `unigram_text`.\n\n Args:\n token_text: The text obtained by concatenating wordpiece tokens and removing\n \'##\' and \' ##\' symbols.\n unigram_text: The text obtained by concatenating unigrams.\n do_lower_case: Whether the tokenizer is doing lower case.\n\n Returns:\n The text corresponding to `token_text` in `unigram_text`. If unable to find\n such correspondence, `unigram_text` is returned directly.\n '
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if (c == ' '):
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = ''.join(ns_chars)
return (ns_text, ns_to_s_map)
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = ' '.join(tokenizer.tokenize(unigram_text))
start_position = tok_text.find(token_text)
if (start_position == (- 1)):
return unigram_text
end_position = ((start_position + len(token_text)) - 1)
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(unigram_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if (len(orig_ns_text) != len(tok_ns_text)):
return unigram_text
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if (start_position in tok_s_to_ns_map):
ns_start_position = tok_s_to_ns_map[start_position]
if (ns_start_position in orig_ns_to_s_map):
orig_start_position = orig_ns_to_s_map[ns_start_position]
if (orig_start_position is None):
return unigram_text
orig_end_position = None
if (end_position in tok_s_to_ns_map):
ns_end_position = tok_s_to_ns_map[end_position]
if (ns_end_position in orig_ns_to_s_map):
orig_end_position = orig_ns_to_s_map[ns_end_position]
if (orig_end_position is None):
return unigram_text
output_text = unigram_text[orig_start_position:(orig_end_position + 1)]
return output_text<|docstring|>Projects the token-concatenated text back to the unigram-concatenated text.
This function is branched from the original BERT `run_squad.py`.
When we created the data, we kept track of the alignment between original
(whitespace tokenized) tokens and our WordPiece tokenized tokens. So
now `unigram_text` contains the span of our original text corresponding to the
span that we predicted.
However, `unigram_text` may contain extra characters that we don't want in
our prediction.
For example, let's say:
token_text = steve smith
unigram_text = Steve Smith's
We don't want to return `unigram_text` because it contains the extra "'s".
We don't want to return `token_text` because it's already been normalized
(the SQuAD eval script also does punctuation stripping/lower casing but
our tokenizer does additional normalization like stripping accent
characters).
What we really want to return is "Steve Smith".
Therefore, we have to apply a semi-complicated alignment heuristic between
`token_text` and `unigram_text` to get a character-to-character alignment. This
can fail in certain cases in which case we just return `unigram_text`.
Args:
token_text: The text obtained by concatenating wordpiece tokens and removing
'##' and ' ##' symbols.
unigram_text: The text obtained by concatenating unigrams.
do_lower_case: Whether the tokenizer is doing lower case.
Returns:
The text corresponding to `token_text` in `unigram_text`. If unable to find
such correspondence, `unigram_text` is returned directly.<|endoftext|> |
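A minimal usage sketch for the record above, exercising the docstring's own "Steve Smith" case; it assumes `get_final_text` and the BERT `tokenization` module from `eval_utils.py` are importable, and the expected output is inferred from the algorithm rather than taken from a logged run.

# Hedged example: round-trip the alignment heuristic on the docstring's case.
token_text = 'steve smith'        # detokenized WordPiece prediction (lowercased)
unigram_text = "Steve Smith's"    # original whitespace-delimited span
final = get_final_text(token_text, unigram_text, do_lower_case=True)
print(final)  # expected: Steve Smith  (the trailing "'s" is clipped off)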
358f2a2197ae42e7d75cd4aec0ec7688f76978091acc73cf4d6337b6c0bc18d8 | def _get_sentence_text(sentence_id: int, raw_prediction: _RawPredictionType, data_point) -> str:
'Gets the sentence (or title) text in the json data point.'
actual_paragraph_id = raw_prediction['global_paragraph_ids'][sentence_id]
if (actual_paragraph_id == (- 1)):
return ''
actual_sentence_id = raw_prediction['global_sentence_ids'][sentence_id]
(title, sentences) = data_point['context'][actual_paragraph_id]
if (actual_sentence_id == (- 1)):
return title
return sentences[actual_sentence_id] | Gets the sentence (or title) text in the json data point. | etcmodel/models/hotpotqa/eval_utils.py | _get_sentence_text | wy-go/google-research | 23,901 | python | def _get_sentence_text(sentence_id: int, raw_prediction: _RawPredictionType, data_point) -> str:
actual_paragraph_id = raw_prediction['global_paragraph_ids'][sentence_id]
if (actual_paragraph_id == (- 1)):
return ''
actual_sentence_id = raw_prediction['global_sentence_ids'][sentence_id]
(title, sentences) = data_point['context'][actual_paragraph_id]
if (actual_sentence_id == (- 1)):
return title
return sentences[actual_sentence_id] | def _get_sentence_text(sentence_id: int, raw_prediction: _RawPredictionType, data_point) -> str:
actual_paragraph_id = raw_prediction['global_paragraph_ids'][sentence_id]
if (actual_paragraph_id == (- 1)):
return ''
actual_sentence_id = raw_prediction['global_sentence_ids'][sentence_id]
(title, sentences) = data_point['context'][actual_paragraph_id]
if (actual_sentence_id == (- 1)):
return title
return sentences[actual_sentence_id]<|docstring|>Gets the sentence (or title) text in the json data point.<|endoftext|> |
12f1f5c00ea79490eff69cb2b7e7d0f333cfaa363c86fd8623c5e41f303a2578 | def _get_answer_unigram_text(token_span: _SpanType, raw_prediction: _RawPredictionType, data_point) -> str:
'Gets the original answer unigram text corresponding to the token span.'
unigram_span = tuple((raw_prediction['long_tokens_to_unigrams'][idx] for idx in token_span))
sentence_id = raw_prediction['long_sentence_ids'][token_span[0]]
sentence_text = _get_sentence_text(sentence_id, raw_prediction, data_point)
(sentence_unigrams, _, _) = data_utils.whitespace_split_with_indices(sentence_text)
answer_unigrams = sentence_unigrams[unigram_span[0]:(unigram_span[1] + 1)]
return ' '.join(answer_unigrams) | Gets the original answer unigram text corresponding to the token span. | etcmodel/models/hotpotqa/eval_utils.py | _get_answer_unigram_text | wy-go/google-research | 23,901 | python | def _get_answer_unigram_text(token_span: _SpanType, raw_prediction: _RawPredictionType, data_point) -> str:
unigram_span = tuple((raw_prediction['long_tokens_to_unigrams'][idx] for idx in token_span))
sentence_id = raw_prediction['long_sentence_ids'][token_span[0]]
sentence_text = _get_sentence_text(sentence_id, raw_prediction, data_point)
(sentence_unigrams, _, _) = data_utils.whitespace_split_with_indices(sentence_text)
answer_unigrams = sentence_unigrams[unigram_span[0]:(unigram_span[1] + 1)]
return ' '.join(answer_unigrams) | def _get_answer_unigram_text(token_span: _SpanType, raw_prediction: _RawPredictionType, data_point) -> str:
unigram_span = tuple((raw_prediction['long_tokens_to_unigrams'][idx] for idx in token_span))
sentence_id = raw_prediction['long_sentence_ids'][token_span[0]]
sentence_text = _get_sentence_text(sentence_id, raw_prediction, data_point)
(sentence_unigrams, _, _) = data_utils.whitespace_split_with_indices(sentence_text)
answer_unigrams = sentence_unigrams[unigram_span[0]:(unigram_span[1] + 1)]
return ' '.join(answer_unigrams)<|docstring|>Gets the original answer unigram text corresponding to the token span.<|endoftext|> |
7a323b2b147e7622a76f9682a489fd57eff72452140910d4ee97d3ebac5c7a8c | def _get_wordpiece_detokenized_text(token_span: _SpanType, raw_prediction: _RawPredictionType, tokenizer: tokenization.FullTokenizer) -> str:
'Gets the normalized answer token text given the token span.'
answer_tokens = tokenizer.convert_ids_to_tokens(raw_prediction['long_token_ids'][token_span[0]:(token_span[1] + 1)])
return data_utils.wordpiece_tokens_to_normalized_text(answer_tokens) | Gets the normalized answer token text given the token span. | etcmodel/models/hotpotqa/eval_utils.py | _get_wordpiece_detokenized_text | wy-go/google-research | 23,901 | python | def _get_wordpiece_detokenized_text(token_span: _SpanType, raw_prediction: _RawPredictionType, tokenizer: tokenization.FullTokenizer) -> str:
answer_tokens = tokenizer.convert_ids_to_tokens(raw_prediction['long_token_ids'][token_span[0]:(token_span[1] + 1)])
return data_utils.wordpiece_tokens_to_normalized_text(answer_tokens) | def _get_wordpiece_detokenized_text(token_span: _SpanType, raw_prediction: _RawPredictionType, tokenizer: tokenization.FullTokenizer) -> str:
answer_tokens = tokenizer.convert_ids_to_tokens(raw_prediction['long_token_ids'][token_span[0]:(token_span[1] + 1)])
return data_utils.wordpiece_tokens_to_normalized_text(answer_tokens)<|docstring|>Gets the normalized answer token text given the token span.<|endoftext|> |
52cb78d9d713dc312afb12c483c9db51540dbc4e461733efd60f800a11b45272 | def _get_wordpiece_final_text(token_span: _SpanType, raw_prediction: _RawPredictionType, data_point, tokenizer: tokenization.FullTokenizer):
'Gets final text using WordPiece tokens.'
answer_unigram_text = _get_answer_unigram_text(token_span, raw_prediction, data_point)
answer_token_text = _get_wordpiece_detokenized_text(token_span, raw_prediction, tokenizer)
return get_final_text(answer_token_text, answer_unigram_text, True) | Gets final text using WordPiece tokens. | etcmodel/models/hotpotqa/eval_utils.py | _get_wordpiece_final_text | wy-go/google-research | 23,901 | python | def _get_wordpiece_final_text(token_span: _SpanType, raw_prediction: _RawPredictionType, data_point, tokenizer: tokenization.FullTokenizer):
answer_unigram_text = _get_answer_unigram_text(token_span, raw_prediction, data_point)
answer_token_text = _get_wordpiece_detokenized_text(token_span, raw_prediction, tokenizer)
return get_final_text(answer_token_text, answer_unigram_text, True) | def _get_wordpiece_final_text(token_span: _SpanType, raw_prediction: _RawPredictionType, data_point, tokenizer: tokenization.FullTokenizer):
answer_unigram_text = _get_answer_unigram_text(token_span, raw_prediction, data_point)
answer_token_text = _get_wordpiece_detokenized_text(token_span, raw_prediction, tokenizer)
return get_final_text(answer_token_text, answer_unigram_text, True)<|docstring|>Gets final text using WordPiece tokens.<|endoftext|> |
bb131e015177a1ad664e133a03df246ba24e627f90bbb07deb11b0f5f3c05a99 | def _get_sentencepiece_detokenized_text(token_span: _SpanType, raw_prediction: _RawPredictionType, tokenizer: tokenization.FullTokenizer):
'Gets final text using SentencePiece tokens.'
long_token_ids = raw_prediction['long_token_ids']
answer_tokens = tokenizer.convert_ids_to_tokens(long_token_ids[token_span[0]:(token_span[1] + 1)].tolist())
return data_utils.sentencepiece_detokenize(answer_tokens) | Gets final text using SentencePiece tokens. | etcmodel/models/hotpotqa/eval_utils.py | _get_sentencepiece_detokenized_text | wy-go/google-research | 23,901 | python | def _get_sentencepiece_detokenized_text(token_span: _SpanType, raw_prediction: _RawPredictionType, tokenizer: tokenization.FullTokenizer):
long_token_ids = raw_prediction['long_token_ids']
answer_tokens = tokenizer.convert_ids_to_tokens(long_token_ids[token_span[0]:(token_span[1] + 1)].tolist())
return data_utils.sentencepiece_detokenize(answer_tokens) | def _get_sentencepiece_detokenized_text(token_span: _SpanType, raw_prediction: _RawPredictionType, tokenizer: tokenization.FullTokenizer):
long_token_ids = raw_prediction['long_token_ids']
answer_tokens = tokenizer.convert_ids_to_tokens(long_token_ids[token_span[0]:(token_span[1] + 1)].tolist())
return data_utils.sentencepiece_detokenize(answer_tokens)<|docstring|>Gets final text using SentencePiece tokens.<|endoftext|> |
5ff8396ef32017cf7a17069b6863759e2c71d4c8d83811e643456958d63dd2ba | def get_spans_from_bio_encoding(raw_prediction: _RawPredictionType, max_answer_length: int, supporting_facts: Sequence[bool]) -> List[Tuple[(float, _SpanType)]]:
'Gets scored candidate answer spans from BIO encoding.'
answer_bio_probs = raw_prediction['answer_bio_probs']
answer_bio_ids = raw_prediction['answer_bio_ids']
long_token_type_ids = raw_prediction['long_token_type_ids']
long_sentence_ids = raw_prediction['long_sentence_ids']
answer_spans = []
for begin in np.where((answer_bio_ids == 0))[0]:
if (long_token_type_ids[begin] not in _TITLE_AND_SENTENCE_TYPE_IDS):
continue
end = begin
while (((end + 1) < len(answer_bio_ids)) and (answer_bio_ids[(end + 1)] == 1)):
end += 1
if (long_token_type_ids[end] not in _TITLE_AND_SENTENCE_TYPE_IDS):
continue
begin_sentence_id = long_sentence_ids[begin]
end_sentence_id = long_sentence_ids[end]
if (begin_sentence_id != end_sentence_id):
continue
if (not supporting_facts[begin_sentence_id]):
continue
if (((end - begin) + 1) > max_answer_length):
continue
answer_spans.append((answer_bio_probs[begin], (begin, end)))
return answer_spans | Gets scored candidate answer spans from BIO encoding. | etcmodel/models/hotpotqa/eval_utils.py | get_spans_from_bio_encoding | wy-go/google-research | 23,901 | python | def get_spans_from_bio_encoding(raw_prediction: _RawPredictionType, max_answer_length: int, supporting_facts: Sequence[bool]) -> List[Tuple[(float, _SpanType)]]:
answer_bio_probs = raw_prediction['answer_bio_probs']
answer_bio_ids = raw_prediction['answer_bio_ids']
long_token_type_ids = raw_prediction['long_token_type_ids']
long_sentence_ids = raw_prediction['long_sentence_ids']
answer_spans = []
for begin in np.where((answer_bio_ids == 0))[0]:
if (long_token_type_ids[begin] not in _TITLE_AND_SENTENCE_TYPE_IDS):
continue
end = begin
while (((end + 1) < len(answer_bio_ids)) and (answer_bio_ids[(end + 1)] == 1)):
end += 1
if (long_token_type_ids[end] not in _TITLE_AND_SENTENCE_TYPE_IDS):
continue
begin_sentence_id = long_sentence_ids[begin]
end_sentence_id = long_sentence_ids[end]
if (begin_sentence_id != end_sentence_id):
continue
if (not supporting_facts[begin_sentence_id]):
continue
if (((end - begin) + 1) > max_answer_length):
continue
answer_spans.append((answer_bio_probs[begin], (begin, end)))
return answer_spans | def get_spans_from_bio_encoding(raw_prediction: _RawPredictionType, max_answer_length: int, supporting_facts: Sequence[bool]) -> List[Tuple[(float, _SpanType)]]:
answer_bio_probs = raw_prediction['answer_bio_probs']
answer_bio_ids = raw_prediction['answer_bio_ids']
long_token_type_ids = raw_prediction['long_token_type_ids']
long_sentence_ids = raw_prediction['long_sentence_ids']
answer_spans = []
for begin in np.where((answer_bio_ids == 0))[0]:
if (long_token_type_ids[begin] not in _TITLE_AND_SENTENCE_TYPE_IDS):
continue
end = begin
while (((end + 1) < len(answer_bio_ids)) and (answer_bio_ids[(end + 1)] == 1)):
end += 1
if (long_token_type_ids[end] not in _TITLE_AND_SENTENCE_TYPE_IDS):
continue
begin_sentence_id = long_sentence_ids[begin]
end_sentence_id = long_sentence_ids[end]
if (begin_sentence_id != end_sentence_id):
continue
if (not supporting_facts[begin_sentence_id]):
continue
if (((end - begin) + 1) > max_answer_length):
continue
answer_spans.append((answer_bio_probs[begin], (begin, end)))
return answer_spans<|docstring|>Gets scored candidate answer spans from BIO encoding.<|endoftext|>
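A self-contained sketch of the BIO decoding loop above, with the token-type and supporting-fact filters stripped out; the label convention (0 = begin, 1 = inside, 2 = outside) is inferred from the loop, and all values are invented.

import numpy as np

answer_bio_ids = np.array([2, 0, 1, 1, 2, 0, 2])                  # toy BIO labels
answer_bio_probs = np.array([0.1, 0.9, 0.8, 0.7, 0.2, 0.6, 0.1])  # toy probabilities

spans = []
for begin in np.where(answer_bio_ids == 0)[0]:                    # each B label...
    end = begin
    while end + 1 < len(answer_bio_ids) and answer_bio_ids[end + 1] == 1:
        end += 1                                                  # ...extends over I labels
    spans.append((float(answer_bio_probs[begin]), (int(begin), int(end))))

print(spans)  # [(0.9, (1, 3)), (0.6, (5, 5))] -- scored by the begin probability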
ad555ffd2a78e9c9830afb3f378af1912beaf989b6e1ff8b172bf0a6e7218b64 | def get_spans_from_bio_encoding_v2(raw_prediction: _RawPredictionType, max_answer_length: int, supporting_facts: Sequence[bool]) -> List[Tuple[(float, _SpanType)]]:
'Gets scored candidate answer spans from BIO encoding.'
answer_bio_probs = raw_prediction['answer_bio_probs']
answer_bio_ids = raw_prediction['answer_bio_ids']
long_token_type_ids = raw_prediction['long_token_type_ids']
long_sentence_ids = raw_prediction['long_sentence_ids']
span_candidates = []
curr_begin = None
for (index, bio_id) in enumerate(answer_bio_ids):
if (bio_id == 0):
if (curr_begin is not None):
span_candidates.append((curr_begin, (index - 1)))
curr_begin = index
elif (bio_id == 1):
if (curr_begin is None):
curr_begin = index
elif (curr_begin is not None):
span_candidates.append((curr_begin, (index - 1)))
curr_begin = None
answer_spans = []
for (begin, end) in span_candidates:
if ((long_token_type_ids[begin] not in _TITLE_AND_SENTENCE_TYPE_IDS) or (long_token_type_ids[end] not in _TITLE_AND_SENTENCE_TYPE_IDS)):
continue
begin_sentence_id = long_sentence_ids[begin]
end_sentence_id = long_sentence_ids[end]
if (begin_sentence_id != end_sentence_id):
continue
if (not supporting_facts[begin_sentence_id]):
continue
if (((end - begin) + 1) > max_answer_length):
continue
score = (sum(answer_bio_probs[begin:(end + 1)]) / ((end - begin) + 1))
answer_spans.append((score, (begin, end)))
return answer_spans | Gets scored candidate answer spans from BIO encoding. | etcmodel/models/hotpotqa/eval_utils.py | get_spans_from_bio_encoding_v2 | wy-go/google-research | 23,901 | python | def get_spans_from_bio_encoding_v2(raw_prediction: _RawPredictionType, max_answer_length: int, supporting_facts: Sequence[bool]) -> List[Tuple[(float, _SpanType)]]:
answer_bio_probs = raw_prediction['answer_bio_probs']
answer_bio_ids = raw_prediction['answer_bio_ids']
long_token_type_ids = raw_prediction['long_token_type_ids']
long_sentence_ids = raw_prediction['long_sentence_ids']
span_candidates = []
curr_begin = None
for (index, bio_id) in enumerate(answer_bio_ids):
if (bio_id == 0):
if (curr_begin is not None):
span_candidates.append((curr_begin, (index - 1)))
curr_begin = index
elif (bio_id == 1):
if (curr_begin is None):
curr_begin = index
elif (curr_begin is not None):
span_candidates.append((curr_begin, (index - 1)))
curr_begin = None
answer_spans = []
for (begin, end) in span_candidates:
if ((long_token_type_ids[begin] not in _TITLE_AND_SENTENCE_TYPE_IDS) or (long_token_type_ids[end] not in _TITLE_AND_SENTENCE_TYPE_IDS)):
continue
begin_sentence_id = long_sentence_ids[begin]
end_sentence_id = long_sentence_ids[end]
if (begin_sentence_id != end_sentence_id):
continue
if (not supporting_facts[begin_sentence_id]):
continue
if (((end - begin) + 1) > max_answer_length):
continue
score = (sum(answer_bio_probs[begin:(end + 1)]) / ((end - begin) + 1))
answer_spans.append((score, (begin, end)))
return answer_spans | def get_spans_from_bio_encoding_v2(raw_prediction: _RawPredictionType, max_answer_length: int, supporting_facts: Sequence[bool]) -> List[Tuple[(float, _SpanType)]]:
answer_bio_probs = raw_prediction['answer_bio_probs']
answer_bio_ids = raw_prediction['answer_bio_ids']
long_token_type_ids = raw_prediction['long_token_type_ids']
long_sentence_ids = raw_prediction['long_sentence_ids']
span_candidates = []
curr_begin = None
for (index, bio_id) in enumerate(answer_bio_ids):
if (bio_id == 0):
if (curr_begin is not None):
span_candidates.append((curr_begin, (index - 1)))
curr_begin = index
elif (bio_id == 1):
if (curr_begin is None):
curr_begin = index
elif (curr_begin is not None):
span_candidates.append((curr_begin, (index - 1)))
curr_begin = None
answer_spans = []
for (begin, end) in span_candidates:
if ((long_token_type_ids[begin] not in _TITLE_AND_SENTENCE_TYPE_IDS) or (long_token_type_ids[end] not in _TITLE_AND_SENTENCE_TYPE_IDS)):
continue
begin_sentence_id = long_sentence_ids[begin]
end_sentence_id = long_sentence_ids[end]
if (begin_sentence_id != end_sentence_id):
continue
if (not supporting_facts[begin_sentence_id]):
continue
if (((end - begin) + 1) > max_answer_length):
continue
score = (sum(answer_bio_probs[begin:(end + 1)]) / ((end - begin) + 1))
answer_spans.append((score, (begin, end)))
return answer_spans<|docstring|>Gets scored candidate answer spans from BIO encoding.<|endoftext|>
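The practical differences between the two BIO decoders, summarised: v2 also opens a span when an inside label (1) appears without a preceding begin label (0), and it ranks spans by the mean probability over the whole span rather than by the begin token's probability alone. A one-line check of that scoring rule, with invented numbers:

probs = [0.9, 0.8, 0.7]                        # answer_bio_probs[begin:end + 1]
begin, end = 1, 3
print(round(sum(probs) / (end - begin + 1), 3))  # 0.8 -- v2's mean-probability score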
e3c25b46c48981ff8e15b6e85f61cb8c3651c6ae1317769599e6a76d89e88a2f | def get_spans_from_span_encoding(raw_prediction: _RawPredictionType, max_answer_length: int, supporting_facts: Sequence[bool]) -> List[Tuple[(float, _SpanType)]]:
'Gets scored candidate answer spans from SPAN encoding.'
begin_probs = raw_prediction['answer_begin_top_probs']
begin_indices = raw_prediction['answer_begin_top_indices']
end_probs = raw_prediction['answer_end_top_probs']
end_indices = raw_prediction['answer_end_top_indices']
long_token_type_ids = raw_prediction['long_token_type_ids']
long_sentence_ids = raw_prediction['long_sentence_ids']
answer_spans = []
for (begin_prob, begin) in zip(begin_probs, begin_indices):
if (long_token_type_ids[begin] not in _TITLE_AND_SENTENCE_TYPE_IDS):
continue
for (end_prob, end) in zip(end_probs, end_indices):
if (long_token_type_ids[end] not in _TITLE_AND_SENTENCE_TYPE_IDS):
continue
begin_sentence_id = long_sentence_ids[begin]
end_sentence_id = long_sentence_ids[end]
if (begin_sentence_id != end_sentence_id):
continue
if (not supporting_facts[begin_sentence_id]):
continue
if ((begin > end) or (((end - begin) + 1) > max_answer_length)):
continue
answer_spans.append(((begin_prob * end_prob), (begin, end)))
return answer_spans | Gets scored candidate answer spans from SPAN encoding. | etcmodel/models/hotpotqa/eval_utils.py | get_spans_from_span_encoding | wy-go/google-research | 23,901 | python | def get_spans_from_span_encoding(raw_prediction: _RawPredictionType, max_answer_length: int, supporting_facts: Sequence[bool]) -> List[Tuple[(float, _SpanType)]]:
begin_probs = raw_prediction['answer_begin_top_probs']
begin_indices = raw_prediction['answer_begin_top_indices']
end_probs = raw_prediction['answer_end_top_probs']
end_indices = raw_prediction['answer_end_top_indices']
long_token_type_ids = raw_prediction['long_token_type_ids']
long_sentence_ids = raw_prediction['long_sentence_ids']
answer_spans = []
for (begin_prob, begin) in zip(begin_probs, begin_indices):
if (long_token_type_ids[begin] not in _TITLE_AND_SENTENCE_TYPE_IDS):
continue
for (end_prob, end) in zip(end_probs, end_indices):
if (long_token_type_ids[end] not in _TITLE_AND_SENTENCE_TYPE_IDS):
continue
begin_sentence_id = long_sentence_ids[begin]
end_sentence_id = long_sentence_ids[end]
if (begin_sentence_id != end_sentence_id):
continue
if (not supporting_facts[begin_sentence_id]):
continue
if ((begin > end) or (((end - begin) + 1) > max_answer_length)):
continue
answer_spans.append(((begin_prob * end_prob), (begin, end)))
return answer_spans | def get_spans_from_span_encoding(raw_prediction: _RawPredictionType, max_answer_length: int, supporting_facts: Sequence[bool]) -> List[Tuple[(float, _SpanType)]]:
begin_probs = raw_prediction['answer_begin_top_probs']
begin_indices = raw_prediction['answer_begin_top_indices']
end_probs = raw_prediction['answer_end_top_probs']
end_indices = raw_prediction['answer_end_top_indices']
long_token_type_ids = raw_prediction['long_token_type_ids']
long_sentence_ids = raw_prediction['long_sentence_ids']
answer_spans = []
for (begin_prob, begin) in zip(begin_probs, begin_indices):
if (long_token_type_ids[begin] not in _TITLE_AND_SENTENCE_TYPE_IDS):
continue
for (end_prob, end) in zip(end_probs, end_indices):
if (long_token_type_ids[end] not in _TITLE_AND_SENTENCE_TYPE_IDS):
continue
begin_sentence_id = long_sentence_ids[begin]
end_sentence_id = long_sentence_ids[end]
if (begin_sentence_id != end_sentence_id):
continue
if (not supporting_facts[begin_sentence_id]):
continue
if ((begin > end) or (((end - begin) + 1) > max_answer_length)):
continue
answer_spans.append(((begin_prob * end_prob), (begin, end)))
return answer_spans<|docstring|>Gets scored candidate answer spans from SPAN encoding.<|endoftext|>
69f02b738eef8687a03e868f2480a3db6d13051295edfa1dfe9013902868174e | def get_top1_answer(raw_prediction: _RawPredictionType, data_point, max_answer_length: int, supporting_facts: Sequence[bool], tokenizer: tokenization.FullTokenizer, use_wordpiece: bool, answer_encoding_method: str) -> str:
'Gets top-1 answer text.'
if (answer_encoding_method == 'span'):
answer_spans = get_spans_from_span_encoding(raw_prediction, max_answer_length, supporting_facts)
elif (answer_encoding_method == 'bio'):
answer_spans = get_spans_from_bio_encoding_v2(raw_prediction, max_answer_length, supporting_facts)
else:
raise ValueError(f'Invalid answer encoding method {answer_encoding_method}')
if (not answer_spans):
return ''
token_span = sorted(answer_spans)[(- 1)][1]
if use_wordpiece:
return _get_wordpiece_final_text(token_span, raw_prediction, data_point, tokenizer)
return _get_sentencepiece_detokenized_text(token_span, raw_prediction, tokenizer) | Gets top-1 answer text. | etcmodel/models/hotpotqa/eval_utils.py | get_top1_answer | wy-go/google-research | 23,901 | python | def get_top1_answer(raw_prediction: _RawPredictionType, data_point, max_answer_length: int, supporting_facts: Sequence[bool], tokenizer: tokenization.FullTokenizer, use_wordpiece: bool, answer_encoding_method: str) -> str:
if (answer_encoding_method == 'span'):
answer_spans = get_spans_from_span_encoding(raw_prediction, max_answer_length, supporting_facts)
elif (answer_encoding_method == 'bio'):
answer_spans = get_spans_from_bio_encoding_v2(raw_prediction, max_answer_length, supporting_facts)
else:
raise ValueError(f'Invalid answer encoding method {answer_encoding_method}')
if (not answer_spans):
return ''
token_span = sorted(answer_spans)[(- 1)][1]
if use_wordpiece:
return _get_wordpiece_final_text(token_span, raw_prediction, data_point, tokenizer)
return _get_sentencepiece_detokenized_text(token_span, raw_prediction, tokenizer) | def get_top1_answer(raw_prediction: _RawPredictionType, data_point, max_answer_length: int, supporting_facts: Sequence[bool], tokenizer: tokenization.FullTokenizer, use_wordpiece: bool, answer_encoding_method: str) -> str:
if (answer_encoding_method == 'span'):
answer_spans = get_spans_from_span_encoding(raw_prediction, max_answer_length, supporting_facts)
elif (answer_encoding_method == 'bio'):
answer_spans = get_spans_from_bio_encoding_v2(raw_prediction, max_answer_length, supporting_facts)
else:
raise ValueError(f'Invalid answer encoding method {answer_encoding_method}')
if (not answer_spans):
return ''
token_span = sorted(answer_spans)[(- 1)][1]
if use_wordpiece:
return _get_wordpiece_final_text(token_span, raw_prediction, data_point, tokenizer)
return _get_sentencepiece_detokenized_text(token_span, raw_prediction, tokenizer)<|docstring|>Gets top-1 answer text.<|endoftext|> |
40682bb9db7a2e965adb916af7417e6b9f62979f0e639b23a930e47bf8b2796a | def generate_prediction_json(raw_predictions: Sequence[_RawPredictionType], gold_json_data, tokenizer: tokenization.FullTokenizer, sp_threshold: float=0.5, max_answer_length: int=30, use_wordpiece: bool=False, answer_encoding_method: str='span'):
'Generates HotpotQA official format prediction json object.\n\n Args:\n raw_predictions: Raw model predict outputs.\n gold_json_data: Gold json eval data.\n tokenizer: The BERT tokenizer.\n sp_threshold: Probability threshold for predicting supporting facts.\n max_answer_length: Max number of wordpiece tokens allowed for answer.\n use_wordpiece: Whether the WordPiece tokenizer is used.\n answer_encoding_method: The answer encoding method.\n\n Returns:\n The official json format of predictions.\n '
ids_to_raw_predictions = {}
for raw_prediction in raw_predictions:
unique_id = raw_prediction['unique_ids']
if isinstance(unique_id, bytes):
unique_id = unique_id.decode('utf-8')
ids_to_raw_predictions[unique_id] = raw_prediction
answers = {}
sps = {}
for data_point in gold_json_data:
unique_id = data_point['_id']
answers[unique_id] = ''
sps[unique_id] = []
raw_prediction = ids_to_raw_predictions.get(unique_id, None)
if (raw_prediction is None):
continue
supporting_facts = (raw_prediction['supporting_facts_probs'] >= sp_threshold)
for (sp, para_id, sent_id) in zip(supporting_facts, raw_prediction['global_paragraph_ids'], raw_prediction['global_sentence_ids']):
if ((para_id != (- 1)) and (sent_id != (- 1)) and sp):
title = data_point['context'][para_id][0]
sps[unique_id].append([title, int(sent_id)])
answer_type = raw_prediction['answer_types']
if (answer_type == 0):
answers[unique_id] = get_top1_answer(raw_prediction, data_point, max_answer_length, supporting_facts, tokenizer, use_wordpiece, answer_encoding_method)
elif (answer_type == 1):
answers[unique_id] = 'yes'
else:
answers[unique_id] = 'no'
return {'answer': answers, 'sp': sps} | Generates HotpotQA official format prediction json object.
Args:
raw_predictions: Raw model predict outputs.
gold_json_data: Gold json eval data.
tokenizer: The BERT tokenizer.
sp_threshold: Probability threshold for predicting supporting facts.
max_answer_length: Max number of wordpiece tokens allowed for answer.
use_wordpiece: Whether the WordPiece tokenizer is used.
answer_encoding_method: The answer encoding method.
Returns:
The official json format of predictions. | etcmodel/models/hotpotqa/eval_utils.py | generate_prediction_json | wy-go/google-research | 23,901 | python | def generate_prediction_json(raw_predictions: Sequence[_RawPredictionType], gold_json_data, tokenizer: tokenization.FullTokenizer, sp_threshold: float=0.5, max_answer_length: int=30, use_wordpiece: bool=False, answer_encoding_method: str='span'):
'Generates HotpotQA official format prediction json object.\n\n Args:\n raw_predictions: Raw model predict outputs.\n gold_json_data: Gold json eval data.\n tokenizer: The BERT tokenizer.\n sp_threshold: Probability threshold for predicting supporting facts.\n max_answer_length: Max number of wordpiece tokens allowed for answer.\n use_wordpiece: Whether the WordPiece tokenizer is used.\n answer_encoding_method: The answer encoding method.\n\n Returns:\n The official json format of predictions.\n '
ids_to_raw_predictions = {}
for raw_prediction in raw_predictions:
unique_id = raw_prediction['unique_ids']
if isinstance(unique_id, bytes):
unique_id = unique_id.decode('utf-8')
ids_to_raw_predictions[unique_id] = raw_prediction
answers = {}
sps = {}
for data_point in gold_json_data:
unique_id = data_point['_id']
answers[unique_id] = ''
sps[unique_id] = []
raw_prediction = ids_to_raw_predictions.get(unique_id, None)
if (raw_prediction is None):
continue
supporting_facts = (raw_prediction['supporting_facts_probs'] >= sp_threshold)
for (sp, para_id, sent_id) in zip(supporting_facts, raw_prediction['global_paragraph_ids'], raw_prediction['global_sentence_ids']):
if ((para_id != (- 1)) and (sent_id != (- 1)) and sp):
title = data_point['context'][para_id][0]
sps[unique_id].append([title, int(sent_id)])
answer_type = raw_prediction['answer_types']
if (answer_type == 0):
answers[unique_id] = get_top1_answer(raw_prediction, data_point, max_answer_length, supporting_facts, tokenizer, use_wordpiece, answer_encoding_method)
elif (answer_type == 1):
answers[unique_id] = 'yes'
else:
answers[unique_id] = 'no'
return {'answer': answers, 'sp': sps} | def generate_prediction_json(raw_predictions: Sequence[_RawPredictionType], gold_json_data, tokenizer: tokenization.FullTokenizer, sp_threshold: float=0.5, max_answer_length: int=30, use_wordpiece: bool=False, answer_encoding_method: str='span'):
'Generates HotpotQA official format prediction json object.\n\n Args:\n raw_predictions: Raw model predict outputs.\n gold_json_data: Gold json eval data.\n tokenizer: The BERT tokenizer.\n sp_threshold: Probability threshold for predicting supporting facts.\n max_answer_length: Max number of wordpiece tokens allowed for answer.\n use_wordpiece: Whether the WordPiece tokenizer is used.\n answer_encoding_method: The answer encoding method.\n\n Returns:\n The official json format of predictions.\n '
ids_to_raw_predictions = {}
for raw_prediction in raw_predictions:
unique_id = raw_prediction['unique_ids']
if isinstance(unique_id, bytes):
unique_id = unique_id.decode('utf-8')
ids_to_raw_predictions[unique_id] = raw_prediction
answers = {}
sps = {}
for data_point in gold_json_data:
unique_id = data_point['_id']
answers[unique_id] = ''
sps[unique_id] = []
raw_prediction = ids_to_raw_predictions.get(unique_id, None)
if (raw_prediction is None):
continue
supporting_facts = (raw_prediction['supporting_facts_probs'] >= sp_threshold)
for (sp, para_id, sent_id) in zip(supporting_facts, raw_prediction['global_paragraph_ids'], raw_prediction['global_sentence_ids']):
if ((para_id != (- 1)) and (sent_id != (- 1)) and sp):
title = data_point['context'][para_id][0]
sps[unique_id].append([title, int(sent_id)])
answer_type = raw_prediction['answer_types']
if (answer_type == 0):
answers[unique_id] = get_top1_answer(raw_prediction, data_point, max_answer_length, supporting_facts, tokenizer, use_wordpiece, answer_encoding_method)
elif (answer_type == 1):
answers[unique_id] = 'yes'
else:
answers[unique_id] = 'no'
return {'answer': answers, 'sp': sps}<|docstring|>Generates HotpotQA official format prediction json object.
Args:
raw_predictions: Raw model predict outputs.
gold_json_data: Gold json eval data.
tokenizer: The BERT tokenizer.
sp_threshold: Probability threshold for predicting supporting facts.
max_answer_length: Max number of wordpiece tokens allowed for answer.
use_wordpiece: Whether the WordPiece tokenizer is used.
answer_encoding_method: The answer encoding method.
Returns:
The official json format of predictions.<|endoftext|> |
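A hedged end-to-end sketch of calling the function above; the vocab path, the gold file name, and the provenance of `raw_predictions` (e.g. an estimator's predict() output) are placeholders, not values taken from this repository.

import json

tokenizer = tokenization.FullTokenizer(vocab_file='vocab.txt',
                                       do_lower_case=True)
with open('hotpot_dev_distractor_v1.json') as f:
    gold_json_data = json.load(f)

raw_predictions = []  # placeholder: one dict of arrays per example, from the model
predictions = generate_prediction_json(
    raw_predictions, gold_json_data, tokenizer,
    sp_threshold=0.5, max_answer_length=30,
    use_wordpiece=True, answer_encoding_method='span')

with open('pred.json', 'w') as f:
    json.dump(predictions, f)  # {'answer': {id: text}, 'sp': {id: [[title, sent_id], ...]}}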
dad1d6a63e100672a991cd117c9e69407c7e2d05d4aa0e8753cd377abb4f9d6a | def expectation(self):
'\n E[y]\n\n Known issue: the expectation becomes NaN when beta or gamma is 0.\n '
E = sp.zeros_like(self.beta)
gig_inds = (self.gamma > 1e-200)
gam_inds = (self.gamma <= 1e-200)
sqrt_beta = sp.sqrt(self.beta[gig_inds])
sqrt_gamma = sp.sqrt(self.gamma[gig_inds])
bessel_alpha_plus = scipy.special.kve((self.alpha + 1), ((2 * sqrt_beta) * sqrt_gamma))
bessel_alpha = scipy.special.kve(self.alpha, ((2 * sqrt_beta) * sqrt_gamma))
E[gig_inds] = (((bessel_alpha_plus * sqrt_gamma) / sqrt_beta) / bessel_alpha)
E[gam_inds] = (self.alpha / self.beta[gam_inds])
return E | E[y]
Known issue: the expectation becomes NaN when beta or gamma is 0. | stats/prob_dists.py | expectation | ansvver/pylufia | 0 | python | def expectation(self):
'\n E[y]\n\n Known issue: the expectation becomes NaN when beta or gamma is 0.\n '
E = sp.zeros_like(self.beta)
gig_inds = (self.gamma > 1e-200)
gam_inds = (self.gamma <= 1e-200)
sqrt_beta = sp.sqrt(self.beta[gig_inds])
sqrt_gamma = sp.sqrt(self.gamma[gig_inds])
bessel_alpha_plus = scipy.special.kve((self.alpha + 1), ((2 * sqrt_beta) * sqrt_gamma))
bessel_alpha = scipy.special.kve(self.alpha, ((2 * sqrt_beta) * sqrt_gamma))
E[gig_inds] = (((bessel_alpha_plus * sqrt_gamma) / sqrt_beta) / bessel_alpha)
E[gam_inds] = (self.alpha / self.beta[gam_inds])
return E | def expectation(self):
'\n E[y]\n\n Known issue: the expectation becomes NaN when beta or gamma is 0.\n '
E = sp.zeros_like(self.beta)
gig_inds = (self.gamma > 1e-200)
gam_inds = (self.gamma <= 1e-200)
sqrt_beta = sp.sqrt(self.beta[gig_inds])
sqrt_gamma = sp.sqrt(self.gamma[gig_inds])
bessel_alpha_plus = scipy.special.kve((self.alpha + 1), ((2 * sqrt_beta) * sqrt_gamma))
bessel_alpha = scipy.special.kve(self.alpha, ((2 * sqrt_beta) * sqrt_gamma))
E[gig_inds] = (((bessel_alpha_plus * sqrt_gamma) / sqrt_beta) / bessel_alpha)
E[gam_inds] = (self.alpha / self.beta[gam_inds])
return E<|docstring|>E[y]
Known issue: the expectation becomes NaN when beta or gamma is 0.<|endoftext|>
400db4b157ec6fb768798921633540cf29ad801839c924147d4cabc10b1c49a3 | def inv_expectation(self):
'\n E[1/y]\n '
Einv = sp.zeros_like(self.beta)
gig_inds = (self.gamma > 1e-200)
gam_inds = (self.gamma <= 1e-200)
sqrt_beta = sp.sqrt(self.beta[gig_inds])
sqrt_gamma = sp.sqrt(self.gamma[gig_inds])
bessel_alpha_minus = scipy.special.kve((self.alpha - 1), ((2 * sqrt_beta) * sqrt_gamma))
bessel_alpha = scipy.special.kve(self.alpha, ((2 * sqrt_beta) * sqrt_gamma))
Einv[gig_inds] = (bessel_alpha_minus / ((sqrt_gamma / sqrt_beta) * bessel_alpha))
Einv[gam_inds] = (self.beta[gam_inds] / (self.alpha - 1))
return Einv | E[1/y] | stats/prob_dists.py | inv_expectation | ansvver/pylufia | 0 | python | def inv_expectation(self):
'\n \n '
Einv = sp.zeros_like(self.beta)
gig_inds = (self.gamma > 1e-200)
gam_inds = (self.gamma <= 1e-200)
sqrt_beta = sp.sqrt(self.beta[gig_inds])
sqrt_gamma = sp.sqrt(self.gamma[gig_inds])
bessel_alpha_minus = scipy.special.kve((self.alpha - 1), ((2 * sqrt_beta) * sqrt_gamma))
bessel_alpha = scipy.special.kve(self.alpha, ((2 * sqrt_beta) * sqrt_gamma))
Einv[gig_inds] = (bessel_alpha_minus / ((sqrt_gamma / sqrt_beta) * bessel_alpha))
Einv[gam_inds] = (self.beta[gam_inds] / (self.alpha - 1))
return Einv | def inv_expectation(self):
'\n \n '
Einv = sp.zeros_like(self.beta)
gig_inds = (self.gamma > 1e-200)
gam_inds = (self.gamma <= 1e-200)
sqrt_beta = sp.sqrt(self.beta[gig_inds])
sqrt_gamma = sp.sqrt(self.gamma[gig_inds])
bessel_alpha_minus = scipy.special.kve((self.alpha - 1), ((2 * sqrt_beta) * sqrt_gamma))
bessel_alpha = scipy.special.kve(self.alpha, ((2 * sqrt_beta) * sqrt_gamma))
Einv[gig_inds] = (bessel_alpha_minus / ((sqrt_gamma / sqrt_beta) * bessel_alpha))
Einv[gam_inds] = (self.beta[gam_inds] / (self.alpha - 1))
return Einv<|docstring|>E[1/y]<|endoftext|> |
ccb405d0db5c6bba6ae5bbd6a7b89049cec940ed912d75ac289f083458b99341 | def var(self):
'\n var[y]\n '
fact1 = (bessel((self.alpha + 2), sp.sqrt((self.beta * self.gamma))) / bessel(self.alpha, sp.sqrt((self.beta * self.gamma))))
fact2 = (bessel((self.alpha + 1), sp.sqrt((self.beta * self.gamma))) / bessel(self.alpha, sp.sqrt((self.beta * self.gamma))))
V = ((self.gamma / self.beta) * (fact1 - (fact2 ** 2)))
return V | var[y] | stats/prob_dists.py | var | ansvver/pylufia | 0 | python | def var(self):
'\n \n '
fact1 = (bessel((self.alpha + 2), sp.sqrt((self.beta * self.gamma))) / bessel(self.alpha, sp.sqrt((self.beta * self.gamma))))
fact2 = (bessel((self.alpha + 1), sp.sqrt((self.beta * self.gamma))) / bessel(self.alpha, sp.sqrt((self.beta * self.gamma))))
V = ((self.gamma / self.beta) * (fact1 - (fact2 ** 2)))
return V | def var(self):
'\n \n '
fact1 = (bessel((self.alpha + 2), sp.sqrt((self.beta * self.gamma))) / bessel(self.alpha, sp.sqrt((self.beta * self.gamma))))
fact2 = (bessel((self.alpha + 1), sp.sqrt((self.beta * self.gamma))) / bessel(self.alpha, sp.sqrt((self.beta * self.gamma))))
V = ((self.gamma / self.beta) * (fact1 - (fact2 ** 2)))
return V<|docstring|>var[y]<|endoftext|> |
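The three methods above are the standard generalized inverse Gaussian moments, e.g. E[y] = sqrt(gamma/beta) * K_{alpha+1}(2*sqrt(beta*gamma)) / K_alpha(2*sqrt(beta*gamma)). A scalar sanity check against scipy's sampler follows; the (p, b, scale) mapping for geninvgauss is an assumption worth verifying against scipy's documentation.

import numpy as np
from scipy.special import kve
from scipy.stats import geninvgauss

alpha, beta, gamma = 2.0, 3.0, 4.0
s = 2.0 * np.sqrt(beta * gamma)
# kve is the exponentially scaled Bessel K; the scaling cancels in the ratio.
e_closed = np.sqrt(gamma / beta) * kve(alpha + 1, s) / kve(alpha, s)

# Presumed mapping to scipy's parameterization: p = alpha, b = 2*sqrt(beta*gamma),
# scale = sqrt(gamma/beta).
draws = geninvgauss.rvs(alpha, s, scale=np.sqrt(gamma / beta),
                        size=200_000, random_state=0)
print(e_closed, draws.mean())  # should agree to roughly two decimal places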
58290afe009d3be3a9f02c6a8f3a093a6fdc43ae144ba9a69ea5a90ef2da0c8a | def handler(*events, **kwargs):
'Event handler decorator factory.\n To hook an event, decorate a method in your plugin class with this\n decorator. You may hook both per-vm-class and global events.\n .. note::\n This decorator is intended only for extensions! For regular use in the\n core, see :py:func:`vanir.events.handler`.\n :param str event: event type\n :param type vm: VM to hook (leave as None to hook all VMs)\n :param bool system: when :py:obj:`True`, hook is system-wide (not attached to any VM)\n '
def decorator(func):
func.ha_events = events
if kwargs.get('system', False):
func.ha_vm = None
elif ('vm' in kwargs):
func.ha_vm = kwargs['vm']
else:
func.ha_vm = vanir.vm.BaseVM
return func
return decorator | Event handler decorator factory.
To hook an event, decorate a method in your plugin class with this
decorator. You may hook both per-vm-class and global events.
.. note::
This decorator is intended only for extensions! For regular use in the
core, see :py:func:`vanir.events.handler`.
:param str event: event type
:param type vm: VM to hook (leave as None to hook all VMs)
:param bool system: when :py:obj:`True`, hook is system-wide (not attached to any VM) | vanir/ext/__init__.py | handler | VanirLab/VOS | 0 | python | def handler(*events, **kwargs):
'Event handler decorator factory.\n To hook an event, decorate a method in your plugin class with this\n decorator. You may hook both per-vm-class and global events.\n .. note::\n This decorator is intended only for extensions! For regular use in the\n core, see :py:func:`vanir.events.handler`.\n :param str event: event type\n :param type vm: VM to hook (leave as None to hook all VMs)\n :param bool system: when :py:obj:`True`, hook is system-wide (not attached to any VM)\n '
def decorator(func):
func.ha_events = events
if kwargs.get('system', False):
func.ha_vm = None
elif ('vm' in kwargs):
func.ha_vm = kwargs['vm']
else:
func.ha_vm = vanir.vm.BaseVM
return func
return decorator | def handler(*events, **kwargs):
'Event handler decorator factory.\n To hook an event, decorate a method in your plugin class with this\n decorator. You may hook both per-vm-class and global events.\n .. note::\n This decorator is intended only for extensions! For regular use in the\n core, see :py:func:`vanir.events.handler`.\n :param str event: event type\n :param type vm: VM to hook (leave as None to hook all VMs)\n :param bool system: when :py:obj:`True`, hook is system-wide (not attached to any VM)\n '
def decorator(func):
func.ha_events = events
if kwargs.get('system', False):
func.ha_vm = None
elif ('vm' in kwargs):
func.ha_vm = kwargs['vm']
else:
func.ha_vm = vanir.vm.BaseVM
return func
return decorator<|docstring|>Event handler decorator factory.
To hook an event, decorate a method in your plugin class with this
decorator. You may hook both per-vm-class and global events.
.. note::
This decorator is intended only for extensions! For regular use in the
core, see :py:func:`vanir.events.handler`.
:param str event: event type
:param type vm: VM to hook (leave as None to hook all VMs)
:param bool system: when :py:obj:`True`, hook is system-wide (not attached to any VM)<|endoftext|> |
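A hedged usage sketch for the decorator above inside a hypothetical extension class; the `Extension` base class and the event names follow Qubes-style conventions and are assumptions -- only `vanir.ext.handler` itself comes from the record.

import vanir.ext

class MyExtension(vanir.ext.Extension):          # assumed base class
    @vanir.ext.handler('domain-start')           # assumed event name
    def on_domain_start(self, vm, event, **kwargs):
        # per-VM hook: ha_vm defaults to BaseVM, so it fires for every VM
        vm.log.info('domain started')

    @vanir.ext.handler('pool-add', system=True)  # assumed event name
    def on_pool_add(self, subject, event, **kwargs):
        # system-wide hook: ha_vm is None, not attached to any VM
        pass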
9639018f4256fb5b0785b6c5be63a60d7c4ddaeecb74adfb2f40b8c2a790d8d8 | def run_all(name='fl', num=5, path='./results'):
'\n run experiment according to the configuration of experiment.yaml\n\n :param name: base name for the log files\n :param num: number of client processes to spawn\n :param path: directory that receives the log files\n :return:\n '
err_path = os.path.join(path, (name + '-err.txt'))
out_path = os.path.join(path, (name + '-out.txt'))
with open(out_path, 'wb+') as out, open(err_path, 'wb+') as err:
subprocess.Popen('python3 -m fltk single configs/experiment.yaml --rank=0', shell=True, stdout=out, stderr=err)
for i in range(num):
subprocess.Popen('python3 -m fltk single configs/experiment.yaml --rank={}'.format(str((i + 1))), shell=True, stdout=out, stderr=err) | run experiment according to the configuration of experiment.yaml
:param name: base name for the log files
:param num: number of client processes to spawn
:param path: directory that receives the log files
:return: | run_script.py | run_all | tudelft-eemcs-dml/fltk-testbed-group-6 | 0 | python | def run_all(name='fl', num=5, path='./results'):
'\n run experiment according to the configuration of experiment.yaml\n\n :param name: base name for the log files\n :param num: number of client processes to spawn\n :param path: directory that receives the log files\n :return:\n '
err_path = os.path.join(path, (name + '-err.txt'))
out_path = os.path.join(path, (name + '-out.txt'))
with open(out_path, 'wb+') as out, open(err_path, 'wb+') as err:
subprocess.Popen('python3 -m fltk single configs/experiment.yaml --rank=0', shell=True, stdout=out, stderr=err)
for i in range(num):
subprocess.Popen('python3 -m fltk single configs/experiment.yaml --rank={}'.format(str((i + 1))), shell=True, stdout=out, stderr=err) | def run_all(name='fl', num=5, path='./results'):
'\n run experiment according to the configuration of experiment.yaml\n\n :param name: base name for the log files\n :param num: number of client processes to spawn\n :param path: directory that receives the log files\n :return:\n '
err_path = os.path.join(path, (name + '-err.txt'))
out_path = os.path.join(path, (name + '-out.txt'))
with open(out_path, 'wb+') as out, open(err_path, 'wb+') as err:
subprocess.Popen('python3 -m fltk single configs/experiment.yaml --rank=0', shell=True, stdout=out, stderr=err)
for i in range(num):
subprocess.Popen('python3 -m fltk single configs/experiment.yaml --rank={}'.format(str((i + 1))), shell=True, stdout=out, stderr=err)<|docstring|>run experiment according to the configuration of experiment.yaml
:param name: base name for the log files
:param num: number of client processes to spawn
:param path: directory that receives the log files
:return:<|endoftext|> |
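A minimal invocation sketch. Worth noting for callers: run_all only spawns the federator (rank 0) and the clients via Popen and returns immediately; the Popen handles are discarded, so this script never waits on or reaps the children, and the results directory must already exist for the log files to open. The argument values below are illustrative.

if __name__ == '__main__':
    run_all(name='fl-experiment', num=5, path='./results')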
115ff6f2f138fd6599ebe5d8905979c03b645bfa901f6c8004e8f81d93226eec | def test_happy(self) -> None:
'Tests the happy path.'
parsed = accept_language.parse_accept_language('en-US,el;q=0.8')
self.assertEqual(parsed[0].language, 'en') | Tests the happy path. | tests/test_accept_language.py | test_happy | feke-te/osm-gimmisn | 0 | python | def test_happy(self) -> None:
parsed = accept_language.parse_accept_language('en-US,el;q=0.8')
self.assertEqual(parsed[0].language, 'en') | def test_happy(self) -> None:
parsed = accept_language.parse_accept_language('en-US,el;q=0.8')
self.assertEqual(parsed[0].language, 'en')<|docstring|>Tests the happy path.<|endoftext|> |
10b51fcb1e2b2cb7fdf1c17dda8fc4fc89fd9e1653dc2738f0431ca94b6cf6cb | def test_empty(self) -> None:
'Tests empty input.'
parsed = accept_language.parse_accept_language('')
self.assertEqual(parsed, []) | Tests empty input. | tests/test_accept_language.py | test_empty | feke-te/osm-gimmisn | 0 | python | def test_empty(self) -> None:
parsed = accept_language.parse_accept_language('')
self.assertEqual(parsed, []) | def test_empty(self) -> None:
parsed = accept_language.parse_accept_language('')
self.assertEqual(parsed, [])<|docstring|>Tests empty input.<|endoftext|> |
5fb0a62521a29e0da12a786e2dd6e46a3e4432dd3e2e3f20a73ea04ba1fb768e | def test_too_long(self) -> None:
'Tests too long input.'
with unittest.mock.patch('accept_language.MAX_HEADER_LEN', 3):
with self.assertRaises(ValueError):
accept_language.parse_accept_language('en-US') | Tests too long input. | tests/test_accept_language.py | test_too_long | feke-te/osm-gimmisn | 0 | python | def test_too_long(self) -> None:
with unittest.mock.patch('accept_language.MAX_HEADER_LEN', 3):
with self.assertRaises(ValueError):
accept_language.parse_accept_language('en-US') | def test_too_long(self) -> None:
with unittest.mock.patch('accept_language.MAX_HEADER_LEN', 3):
with self.assertRaises(ValueError):
accept_language.parse_accept_language('en-US')<|docstring|>Tests too long input.<|endoftext|> |
2c94d4d65659a218025db829e939667ce6187cf4fe2821e4b25755bee6c8863e | def test_invalid_lang(self) -> None:
'Tests the case when a language string is invalid.'
parsed = accept_language.parse_accept_language('en42-US,el;q=0.8')
self.assertEqual(parsed[0].language, 'el') | Tests the case when a language string is invalid. | tests/test_accept_language.py | test_invalid_lang | feke-te/osm-gimmisn | 0 | python | def test_invalid_lang(self) -> None:
parsed = accept_language.parse_accept_language('en42-US,el;q=0.8')
self.assertEqual(parsed[0].language, 'el') | def test_invalid_lang(self) -> None:
parsed = accept_language.parse_accept_language('en42-US,el;q=0.8')
self.assertEqual(parsed[0].language, 'el')<|docstring|>Tests the case when a language string is invalid.<|endoftext|> |
786da65a37d0f15f6bc61408966dfbaaddfc3687445d15092b2bc7c561ae8b47 | def serialise_system(self):
'Create the OpenMM system; parametrise using frost; serialise the system.'
off_molecule = Molecule.from_rdkit(self.molecule.rdkit_mol, allow_undefined_stereo=True)
off_topology = off_molecule.to_topology()
forcefield = ForceField('openff_unconstrained-1.0.0.offxml')
try:
system = forcefield.create_openmm_system(off_topology)
except (UnassignedValenceParameterException, UnassignedBondParameterException, UnassignedProperTorsionParameterException, UnassignedAngleParameterException, UnassignedMoleculeChargeException, TypeError):
new_bond = BondHandler.BondType(smirks='[*:1]~[*:2]', length='0 * angstrom', k='0.0 * angstrom**-2 * mole**-1 * kilocalorie')
new_angle = AngleHandler.AngleType(smirks='[*:1]~[*:2]~[*:3]', angle='0.0 * degree', k='0.0 * mole**-1 * radian**-2 * kilocalorie')
new_torsion = ProperTorsionHandler.ProperTorsionType(smirks='[*:1]~[*:2]~[*:3]~[*:4]', periodicity1='1', phase1='0.0 * degree', k1='0.0 * mole**-1 * kilocalorie', periodicity2='2', phase2='180.0 * degree', k2='0.0 * mole**-1 * kilocalorie', periodicity3='3', phase3='0.0 * degree', k3='0.0 * mole**-1 * kilocalorie', periodicity4='4', phase4='180.0 * degree', k4='0.0 * mole**-1 * kilocalorie', idivf1='1.0', idivf2='1.0', idivf3='1.0', idivf4='1.0')
new_vdw = vdWHandler.vdWType(smirks='[*:1]', epsilon=(0 * unit.kilocalories_per_mole), sigma=(0 * unit.angstroms))
new_generics = {'Bonds': new_bond, 'Angles': new_angle, 'ProperTorsions': new_torsion, 'vdW': new_vdw}
for (key, val) in new_generics.items():
forcefield.get_parameter_handler(key).parameters.insert(0, val)
del forcefield._parameter_handlers['ToolkitAM1BCC']
del forcefield._parameter_handlers['Electrostatics']
system = forcefield.create_openmm_system(off_topology)
self.fftype = 'generics'
with open('serialised.xml', 'w+') as out:
out.write(XmlSerializer.serializeSystem(system)) | Create the OpenMM system; parametrise using frost; serialise the system. | QUBEKit/parametrisation/openff.py | serialise_system | cole-group/QUBEK | 14 | python | def serialise_system(self):
off_molecule = Molecule.from_rdkit(self.molecule.rdkit_mol, allow_undefined_stereo=True)
off_topology = off_molecule.to_topology()
forcefield = ForceField('openff_unconstrained-1.0.0.offxml')
try:
system = forcefield.create_openmm_system(off_topology)
except (UnassignedValenceParameterException, UnassignedBondParameterException, UnassignedProperTorsionParameterException, UnassignedAngleParameterException, UnassignedMoleculeChargeException, TypeError):
new_bond = BondHandler.BondType(smirks='[*:1]~[*:2]', length='0 * angstrom', k='0.0 * angstrom**-2 * mole**-1 * kilocalorie')
new_angle = AngleHandler.AngleType(smirks='[*:1]~[*:2]~[*:3]', angle='0.0 * degree', k='0.0 * mole**-1 * radian**-2 * kilocalorie')
new_torsion = ProperTorsionHandler.ProperTorsionType(smirks='[*:1]~[*:2]~[*:3]~[*:4]', periodicity1='1', phase1='0.0 * degree', k1='0.0 * mole**-1 * kilocalorie', periodicity2='2', phase2='180.0 * degree', k2='0.0 * mole**-1 * kilocalorie', periodicity3='3', phase3='0.0 * degree', k3='0.0 * mole**-1 * kilocalorie', periodicity4='4', phase4='180.0 * degree', k4='0.0 * mole**-1 * kilocalorie', idivf1='1.0', idivf2='1.0', idivf3='1.0', idivf4='1.0')
new_vdw = vdWHandler.vdWType(smirks='[*:1]', epsilon=(0 * unit.kilocalories_per_mole), sigma=(0 * unit.angstroms))
new_generics = {'Bonds': new_bond, 'Angles': new_angle, 'ProperTorsions': new_torsion, 'vdW': new_vdw}
for (key, val) in new_generics.items():
forcefield.get_parameter_handler(key).parameters.insert(0, val)
del forcefield._parameter_handlers['ToolkitAM1BCC']
del forcefield._parameter_handlers['Electrostatics']
system = forcefield.create_openmm_system(off_topology)
self.fftype = 'generics'
with open('serialised.xml', 'w+') as out:
out.write(XmlSerializer.serializeSystem(system)) | def serialise_system(self):
off_molecule = Molecule.from_rdkit(self.molecule.rdkit_mol, allow_undefined_stereo=True)
off_topology = off_molecule.to_topology()
forcefield = ForceField('openff_unconstrained-1.0.0.offxml')
try:
system = forcefield.create_openmm_system(off_topology)
except (UnassignedValenceParameterException, UnassignedBondParameterException, UnassignedProperTorsionParameterException, UnassignedAngleParameterException, UnassignedMoleculeChargeException, TypeError):
new_bond = BondHandler.BondType(smirks='[*:1]~[*:2]', length='0 * angstrom', k='0.0 * angstrom**-2 * mole**-1 * kilocalorie')
new_angle = AngleHandler.AngleType(smirks='[*:1]~[*:2]~[*:3]', angle='0.0 * degree', k='0.0 * mole**-1 * radian**-2 * kilocalorie')
new_torsion = ProperTorsionHandler.ProperTorsionType(smirks='[*:1]~[*:2]~[*:3]~[*:4]', periodicity1='1', phase1='0.0 * degree', k1='0.0 * mole**-1 * kilocalorie', periodicity2='2', phase2='180.0 * degree', k2='0.0 * mole**-1 * kilocalorie', periodicity3='3', phase3='0.0 * degree', k3='0.0 * mole**-1 * kilocalorie', periodicity4='4', phase4='180.0 * degree', k4='0.0 * mole**-1 * kilocalorie', idivf1='1.0', idivf2='1.0', idivf3='1.0', idivf4='1.0')
new_vdw = vdWHandler.vdWType(smirks='[*:1]', epsilon=(0 * unit.kilocalories_per_mole), sigma=(0 * unit.angstroms))
new_generics = {'Bonds': new_bond, 'Angles': new_angle, 'ProperTorsions': new_torsion, 'vdW': new_vdw}
for (key, val) in new_generics.items():
forcefield.get_parameter_handler(key).parameters.insert(0, val)
del forcefield._parameter_handlers['ToolkitAM1BCC']
del forcefield._parameter_handlers['Electrostatics']
system = forcefield.create_openmm_system(off_topology)
self.fftype = 'generics'
with open('serialised.xml', 'w+') as out:
out.write(XmlSerializer.serializeSystem(system))<|docstring|>Create the OpenMM system; parametrise using frost; serialise the system.<|endoftext|> |
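A hedged counterpart to the record above: loading the serialised system back into OpenMM. Only the file name comes from the record; the import path assumes the older simtk-era OpenMM that QUBEKit targets (newer releases import `openmm` directly).

from simtk.openmm import XmlSerializer

with open('serialised.xml') as xml_in:
    system = XmlSerializer.deserialize(xml_in.read())
print(system.getNumParticles())  # quick check that the round-trip worked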
8a56442b88c32f3ca8bb47a3e8ace3d05010ad2d5c0358f449e7b4e672750e06 | def deconstruct(self) -> Tuple[(str, str, Any, Any)]:
'\n Return a 4-tuple with enough information to recreate the field.\n '
(name, path, args, kwargs) = super().deconstruct()
del kwargs['max_length']
return (name, path, args, kwargs) | Return a 4-tuple with enough information to recreate the field. | cl_sii/extras/dj_model_fields.py | deconstruct | fyntex/lib-cl-sii-python | 8 | python | def deconstruct(self) -> Tuple[(str, str, Any, Any)]:
'\n \n '
(name, path, args, kwargs) = super().deconstruct()
del kwargs['max_length']
return (name, path, args, kwargs) | def deconstruct(self) -> Tuple[(str, str, Any, Any)]:
'\n \n '
(name, path, args, kwargs) = super().deconstruct()
del kwargs['max_length']
return (name, path, args, kwargs)<|docstring|>Return a 4-tuple with enough information to recreate the field.<|endoftext|> |
b81a095265fee05cc11c9c4fbc8c994c2c50139284b1679540600b65ebf91f3d | def from_db_value(self, value: Optional[str], expression: object, connection: object) -> Optional[Rut]:
'\n Convert a value as returned by the database to a Python object.\n\n > It is the reverse of :meth:`get_prep_value`.\n\n > If present for the field subclass, :meth:`from_db_value` will be\n > called in all circumstances when the data is loaded from the\n > database, including in aggregates and ``values()`` calls.\n\n It needs to be able to process ``None``.\n\n .. seealso::\n https://docs.djangoproject.com/en/2.1/howto/custom-model-fields/#converting-values-to-python-objects\n https://docs.djangoproject.com/en/2.1/ref/models/fields/#django.db.models.Field.from_db_value\n\n '
return self.to_python(value) | Convert a value as returned by the database to a Python object.
> It is the reverse of :meth:`get_prep_value`.
> If present for the field subclass, :meth:`from_db_value` will be
> called in all circumstances when the data is loaded from the
> database, including in aggregates and ``values()`` calls.
It needs to be able to process ``None``.
.. seealso::
https://docs.djangoproject.com/en/2.1/howto/custom-model-fields/#converting-values-to-python-objects
https://docs.djangoproject.com/en/2.1/ref/models/fields/#django.db.models.Field.from_db_value | cl_sii/extras/dj_model_fields.py | from_db_value | fyntex/lib-cl-sii-python | 8 | python | def from_db_value(self, value: Optional[str], expression: object, connection: object) -> Optional[Rut]:
'\n Convert a value as returned by the database to a Python object.\n\n > It is the reverse of :meth:`get_prep_value`.\n\n > If present for the field subclass, :meth:`from_db_value` will be\n > called in all circumstances when the data is loaded from the\n > database, including in aggregates and ``values()`` calls.\n\n It needs to be able to process ``None``.\n\n .. seealso::\n https://docs.djangoproject.com/en/2.1/howto/custom-model-fields/#converting-values-to-python-objects\n https://docs.djangoproject.com/en/2.1/ref/models/fields/#django.db.models.Field.from_db_value\n\n '
return self.to_python(value) | def from_db_value(self, value: Optional[str], expression: object, connection: object) -> Optional[Rut]:
'\n Convert a value as returned by the database to a Python object.\n\n > It is the reverse of :meth:`get_prep_value`.\n\n > If present for the field subclass, :meth:`from_db_value` will be\n > called in all circumstances when the data is loaded from the\n > database, including in aggregates and ``values()`` calls.\n\n It needs to be able to process ``None``.\n\n .. seealso::\n https://docs.djangoproject.com/en/2.1/howto/custom-model-fields/#converting-values-to-python-objects\n https://docs.djangoproject.com/en/2.1/ref/models/fields/#django.db.models.Field.from_db_value\n\n '
return self.to_python(value)<|docstring|>Convert a value as returned by the database to a Python object.
> It is the reverse of :meth:`get_prep_value`.
> If present for the field subclass, :meth:`from_db_value` will be
> called in all circumstances when the data is loaded from the
> database, including in aggregates and ``values()`` calls.
It needs to be able to process ``None``.
.. seealso::
https://docs.djangoproject.com/en/2.1/howto/custom-model-fields/#converting-values-to-python-objects
https://docs.djangoproject.com/en/2.1/ref/models/fields/#django.db.models.Field.from_db_value<|endoftext|> |
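Because from_db_value runs on every row the database returns, it must tolerate None (nullable columns, some aggregates). A sketch with an illustrative RUT string; RutField is the assumed class name as above:

.. code-block:: python

    from cl_sii.extras.dj_model_fields import RutField  # assumed class name
    from cl_sii.rut import Rut

    field = RutField()

    assert field.from_db_value(None, None, None) is None   # NULL passes through
    value = field.from_db_value('60803000-K', None, None)  # the driver hands back a str
    assert isinstance(value, Rut)                          # ...converted to a Rut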
c0b79db8d0c3f98a6c4082c6cbcef9467947eb41bea2603c7906e1bdf2d752d4 | def get_prep_value(self, value: Optional[object]) -> Optional[str]:
"\n Convert the model's attribute value to a format suitable for the DB.\n\n i.e. prepared for use as a parameter in a query.\n It is the reverse of :meth:`from_db_value`.\n\n However, these are preliminary non-DB specific value checks and\n conversions (otherwise customize :meth:`get_db_prep_value`).\n\n Note: Before returning, ``value`` will be passed to :meth:`to_python` so that, if needed, it\n will be converted to an instance of :class:`Rut`, which is very convenient in cases such\n as when the type of ``value`` is :class:`str`.\n "
value = super().get_prep_value(value)
value_rut: Optional[Rut] = self.to_python(value)
return (value_rut if (value_rut is None) else value_rut.canonical) | Convert the model's attribute value to a format suitable for the DB.
i.e. prepared for use as a parameter in a query.
It is the reverse of :meth:`from_db_value`.
However, these are preliminary non-DB specific value checks and
conversions (otherwise customize :meth:`get_db_prep_value`).
Note: Before returning, ``value`` will be passed to :meth:`to_python` so that, if needed, it
will be converted to an instance of :class:`Rut`, which is very convenient in cases such
as when the type of ``value`` is :class:`str`. | cl_sii/extras/dj_model_fields.py | get_prep_value | fyntex/lib-cl-sii-python | 8 | python | def get_prep_value(self, value: Optional[object]) -> Optional[str]:
"\n Convert the model's attribute value to a format suitable for the DB.\n\n i.e. prepared for use as a parameter in a query.\n It is the reverse of :meth:`from_db_value`.\n\n However, these are preliminary non-DB specific value checks and\n conversions (otherwise customize :meth:`get_db_prep_value`).\n\n Note: Before returning, ``value`` will be passed to :meth:`to_python` so that, if needed, it\n will be converted to an instance of :class:`Rut`, which is very convenient in cases such\n as when the type of ``value`` is :class:`str`.\n "
value = super().get_prep_value(value)
value_rut: Optional[Rut] = self.to_python(value)
return (value_rut if (value_rut is None) else value_rut.canonical) | def get_prep_value(self, value: Optional[object]) -> Optional[str]:
"\n Convert the model's attribute value to a format suitable for the DB.\n\n i.e. prepared for use as a parameter in a query.\n It is the reverse of :meth:`from_db_value`.\n\n However, these are preliminary non-DB specific value checks and\n conversions (otherwise customize :meth:`get_db_prep_value`).\n\n Note: Before returning, ``value`` will be passed to :meth:`to_python` so that, if needed, it\n will be converted to an instance of :class:`Rut`, which is very convenient in cases such\n as when the type of ``value`` is :class:`str`.\n "
value = super().get_prep_value(value)
value_rut: Optional[Rut] = self.to_python(value)
return (value_rut if (value_rut is None) else value_rut.canonical)<|docstring|>Convert the model's attribute value to a format suitable for the DB.
i.e. prepared for use as a parameter in a query.
It is the reverse of :meth:`from_db_value`.
However, these are preliminary non-DB specific value checks and
conversions (otherwise customize :meth:`get_db_prep_value`).
Note: Before returning, ``value`` will be passed to :meth:`to_python` so that, if needed, it
will be converted to an instance of :class:`Rut`, which is very convenient in cases such
as when the type of ``value`` is :class:`str`.<|endoftext|> |
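Since get_prep_value routes everything through to_python before taking .canonical, str and Rut inputs converge on the same database string, which is what lets query parameters be given in either form. A sketch with the same assumed names:

.. code-block:: python

    from cl_sii.extras.dj_model_fields import RutField  # assumed class name
    from cl_sii.rut import Rut

    field = RutField()

    assert field.get_prep_value(None) is None
    assert field.get_prep_value('60803000-K') == '60803000-K'       # str input
    assert field.get_prep_value(Rut('60803000-K')) == '60803000-K'  # Rut input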
8dcdc35deacce5268708bb120008be61d561821f5451bf1362aa09b9d946e2ea | def to_python(self, value: Optional[object]) -> Optional[Rut]:
"\n Convert the input value to the correct Python object (:class:`Rut`).\n\n > It acts as the reverse of :meth:`value_to_string`, and is also\n called in :meth`clean`.\n\n It needs to be able to process ``None``.\n\n .. seealso::\n https://docs.djangoproject.com/en/2.1/howto/custom-model-fields/#converting-values-to-python-objects\n https://docs.djangoproject.com/en/2.1/ref/models/fields/#django.db.models.Field.to_python\n\n :raises django.core.exceptions.ValidationError:\n if the data can't be converted\n\n "
if ((value is None) or isinstance(value, Rut)):
converted_value = value
else:
try:
converted_value = Rut(value, validate_dv=False)
except (AttributeError, TypeError, ValueError):
raise django.core.exceptions.ValidationError(self.error_messages['invalid'], code='invalid', params={'value': value})
return converted_value | Convert the input value to the correct Python object (:class:`Rut`).
> It acts as the reverse of :meth:`value_to_string`, and is also
called in :meth:`clean`.
It needs to be able to process ``None``.
.. seealso::
https://docs.djangoproject.com/en/2.1/howto/custom-model-fields/#converting-values-to-python-objects
https://docs.djangoproject.com/en/2.1/ref/models/fields/#django.db.models.Field.to_python
:raises django.core.exceptions.ValidationError:
if the data can't be converted | cl_sii/extras/dj_model_fields.py | to_python | fyntex/lib-cl-sii-python | 8 | python | def to_python(self, value: Optional[object]) -> Optional[Rut]:
"\n Convert the input value to the correct Python object (:class:`Rut`).\n\n > It acts as the reverse of :meth:`value_to_string`, and is also\n called in :meth`clean`.\n\n It needs to be able to process ``None``.\n\n .. seealso::\n https://docs.djangoproject.com/en/2.1/howto/custom-model-fields/#converting-values-to-python-objects\n https://docs.djangoproject.com/en/2.1/ref/models/fields/#django.db.models.Field.to_python\n\n :raises django.core.exceptions.ValidationError:\n if the data can't be converted\n\n "
if ((value is None) or isinstance(value, Rut)):
converted_value = value
else:
try:
converted_value = Rut(value, validate_dv=False)
except (AttributeError, TypeError, ValueError):
raise django.core.exceptions.ValidationError(self.error_messages['invalid'], code='invalid', params={'value': value})
return converted_value | def to_python(self, value: Optional[object]) -> Optional[Rut]:
"\n Convert the input value to the correct Python object (:class:`Rut`).\n\n > It acts as the reverse of :meth:`value_to_string`, and is also\n called in :meth`clean`.\n\n It needs to be able to process ``None``.\n\n .. seealso::\n https://docs.djangoproject.com/en/2.1/howto/custom-model-fields/#converting-values-to-python-objects\n https://docs.djangoproject.com/en/2.1/ref/models/fields/#django.db.models.Field.to_python\n\n :raises django.core.exceptions.ValidationError:\n if the data can't be converted\n\n "
if ((value is None) or isinstance(value, Rut)):
converted_value = value
else:
try:
converted_value = Rut(value, validate_dv=False)
except (AttributeError, TypeError, ValueError):
raise django.core.exceptions.ValidationError(self.error_messages['invalid'], code='invalid', params={'value': value})
return converted_value<|docstring|>Convert the input value to the correct Python object (:class:`Rut`).
> It acts as the reverse of :meth:`value_to_string`, and is also
called in :meth:`clean`.
It needs to be able to process ``None``.
.. seealso::
https://docs.djangoproject.com/en/2.1/howto/custom-model-fields/#converting-values-to-python-objects
https://docs.djangoproject.com/en/2.1/ref/models/fields/#django.db.models.Field.to_python
:raises django.core.exceptions.ValidationError:
if the data can't be converted<|endoftext|> |
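Because clean() calls to_python, bad input surfaces as a ValidationError with code 'invalid' rather than a bare TypeError/ValueError. A sketch under the same RutField assumption; the failing input is illustrative:

.. code-block:: python

    from django.core.exceptions import ValidationError

    from cl_sii.extras.dj_model_fields import RutField  # assumed class name
    from cl_sii.rut import Rut

    field = RutField()

    assert field.to_python(None) is None                   # None passes through
    assert isinstance(field.to_python('60803000-K'), Rut)  # str -> Rut
    try:
        field.to_python(object())                          # unconvertible input
    except ValidationError as exc:
        assert exc.code == 'invalid'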
c25ca2175ae69d16ef64b7553df816e29581a651a3b5e80b2ce8f750efdbfdd4 | def value_to_string(self, obj: django.db.models.Model) -> str:
'\n Convert to a string the field value of model instance ``obj``.\n\n Used to serialize the value of the field.\n\n .. seealso::\n https://docs.djangoproject.com/en/2.1/howto/custom-model-fields/#converting-field-data-for-serialization\n\n '
value: Optional[Rut] = self.value_from_object(obj)
return ('' if (value is None) else value.canonical) | Convert to a string the field value of model instance ``obj``.
Used to serialize the value of the field.
.. seealso::
https://docs.djangoproject.com/en/2.1/howto/custom-model-fields/#converting-field-data-for-serialization | cl_sii/extras/dj_model_fields.py | value_to_string | fyntex/lib-cl-sii-python | 8 | python | def value_to_string(self, obj: django.db.models.Model) -> str:
'\n Convert to a string the field value of model instance ``obj``.\n\n Used to serialize the value of the field.\n\n .. seealso::\n https://docs.djangoproject.com/en/2.1/howto/custom-model-fields/#converting-field-data-for-serialization\n\n '
value: Optional[Rut] = self.value_from_object(obj)
return ('' if (value is None) else value.canonical) | def value_to_string(self, obj: django.db.models.Model) -> str:
'\n Convert to a string the field value of model instance ``obj``.\n\n Used to serialize the value of the field.\n\n .. seealso::\n https://docs.djangoproject.com/en/2.1/howto/custom-model-fields/#converting-field-data-for-serialization\n\n '
value: Optional[Rut] = self.value_from_object(obj)
return ('' if (value is None) else value.canonical)<|docstring|>Convert to a string the field value of model instance ``obj``.
Used to serialize the value of the field.
.. seealso::
https://docs.djangoproject.com/en/2.1/howto/custom-model-fields/#converting-field-data-for-serialization<|endoftext|> |
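value_to_string is the hook Django's serializers use, so a dumped fixture carries the canonical string ('' when the column is NULL). A sketch assuming a hypothetical model with a rut = RutField(...) column and a saved instance called person:

.. code-block:: python

    import json

    from django.core import serializers

    payload = serializers.serialize('json', [person])  # person is hypothetical
    fields = json.loads(payload)[0]['fields']
    print(fields['rut'])  # e.g. '60803000-K', or '' when the value was NULL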
dcab9de67823a23aabc7a2349c61f419d2714f53d9a2f2e14a4198d56e15899e | def test_basic_addition(self):
'\n Complex mathematical proof.\n '
self.assertEqual((1 + 1), 2) | Complex mathematical proof. | frepple/custom/customer/tests.py | test_basic_addition | ursais/frepple-template | 0 | python | def test_basic_addition(self):
'\n \n '
self.assertEqual((1 + 1), 2) | def test_basic_addition(self):
'\n \n '
self.assertEqual((1 + 1), 2)<|docstring|>Complex mathematical proof.<|endoftext|> |
80dae8c48ded253ae112fc4e8407a155f5278ffbdcd42259395098856c4a7cf5 | def tlist_grouped(type, group_type=False, dbif=None):
"List of temporal elements grouped by mapsets.\n\n Returns a dictionary where the keys are mapset\n names and the values are lists of space time datasets in that\n mapset. Example:\n\n .. code-block:: python\n\n >>> import grass.temporalas tgis\n >>> tgis.tlist_grouped('strds')['PERMANENT']\n ['precipitation', 'temperature']\n\n :param type: element type (strds, str3ds, stvds)\n :param group_type: TBD\n\n :return: directory of mapsets/elements\n "
result = {}
(dbif, connected) = init_dbif(dbif)
mapset = None
if (type == 'stds'):
types = ['strds', 'str3ds', 'stvds']
else:
types = [type]
for type in types:
try:
tlist_result = tlist(type=type, dbif=dbif)
except gscript.ScriptError as e:
warning(e)
continue
for line in tlist_result:
try:
(name, mapset) = line.split('@')
except ValueError:
warning((_("Invalid element '%s'") % line))
continue
if (mapset not in result):
if group_type:
result[mapset] = {}
else:
result[mapset] = []
if group_type:
if (type in result[mapset]):
result[mapset][type].append(name)
else:
result[mapset][type] = [name]
else:
result[mapset].append(name)
if (connected is True):
dbif.close()
return result | List of temporal elements grouped by mapsets.
Returns a dictionary where the keys are mapset
names and the values are lists of space time datasets in that
mapset. Example:
.. code-block:: python
>>> import grass.temporal as tgis
>>> tgis.tlist_grouped('strds')['PERMANENT']
['precipitation', 'temperature']
:param type: element type (strds, str3ds, stvds)
:param group_type: if True, group map names by dataset type within each mapset
:return: dictionary of mapsets/elements | temporal/gui_support.py | tlist_grouped | karttur/geoimagine03-grass | 0 | python | def tlist_grouped(type, group_type=False, dbif=None):
"List of temporal elements grouped by mapsets.\n\n Returns a dictionary where the keys are mapset\n names and the values are lists of space time datasets in that\n mapset. Example:\n\n .. code-block:: python\n\n >>> import grass.temporalas tgis\n >>> tgis.tlist_grouped('strds')['PERMANENT']\n ['precipitation', 'temperature']\n\n :param type: element type (strds, str3ds, stvds)\n :param group_type: TBD\n\n :return: directory of mapsets/elements\n "
result = {}
(dbif, connected) = init_dbif(dbif)
mapset = None
if (type == 'stds'):
types = ['strds', 'str3ds', 'stvds']
else:
types = [type]
for type in types:
try:
tlist_result = tlist(type=type, dbif=dbif)
except gscript.ScriptError as e:
warning(e)
continue
for line in tlist_result:
try:
(name, mapset) = line.split('@')
except ValueError:
warning((_("Invalid element '%s'") % line))
continue
if (mapset not in result):
if group_type:
result[mapset] = {}
else:
result[mapset] = []
if group_type:
if (type in result[mapset]):
result[mapset][type].append(name)
else:
result[mapset][type] = [name]
else:
result[mapset].append(name)
if (connected is True):
dbif.close()
return result | def tlist_grouped(type, group_type=False, dbif=None):
"List of temporal elements grouped by mapsets.\n\n Returns a dictionary where the keys are mapset\n names and the values are lists of space time datasets in that\n mapset. Example:\n\n .. code-block:: python\n\n >>> import grass.temporalas tgis\n >>> tgis.tlist_grouped('strds')['PERMANENT']\n ['precipitation', 'temperature']\n\n :param type: element type (strds, str3ds, stvds)\n :param group_type: TBD\n\n :return: directory of mapsets/elements\n "
result = {}
(dbif, connected) = init_dbif(dbif)
mapset = None
if (type == 'stds'):
types = ['strds', 'str3ds', 'stvds']
else:
types = [type]
for type in types:
try:
tlist_result = tlist(type=type, dbif=dbif)
except gscript.ScriptError as e:
warning(e)
continue
for line in tlist_result:
try:
(name, mapset) = line.split('@')
except ValueError:
warning((_("Invalid element '%s'") % line))
continue
if (mapset not in result):
if group_type:
result[mapset] = {}
else:
result[mapset] = []
if group_type:
if (type in result[mapset]):
result[mapset][type].append(name)
else:
result[mapset][type] = [name]
else:
result[mapset].append(name)
if (connected is True):
dbif.close()
return result<|docstring|>List of temporal elements grouped by mapsets.
Returns a dictionary where the keys are mapset
names and the values are lists of space time datasets in that
mapset. Example:
.. code-block:: python
>>> import grass.temporal as tgis
>>> tgis.tlist_grouped('strds')['PERMANENT']
['precipitation', 'temperature']
:param type: element type (strds, str3ds, stvds)
:param group_type: if True, group map names by dataset type within each mapset
:return: dictionary of mapsets/elements<|endoftext|>
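With group_type=True the per-mapset value becomes a dict keyed by element type, as the insertion logic in the body shows. A sketch against a hypothetical temporal database (dataset names illustrative; requires a running GRASS session):

.. code-block:: python

    import grass.temporal as tgis

    tgis.init()  # open the temporal database for the current mapset

    grouped = tgis.tlist_grouped('stds', group_type=True)
    # e.g. {'PERMANENT': {'strds': ['precipitation'], 'stvds': ['stations']}}
    for mapset, by_type in grouped.items():
        for element_type, names in by_type.items():
            print(mapset, element_type, names)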
f07acde8a96718e5e1335cf84bb5596e74621ce3c79c3496361bcfd3add31291 | def tlist(type, dbif=None):
'Return a list of space time datasets of absolute and relative time\n\n :param type: element type (strds, str3ds, stvds)\n\n :return: a list of space time dataset ids\n '
id = None
sp = dataset_factory(type, id)
(dbif, connected) = init_dbif(dbif)
mapsets = get_available_temporal_mapsets()
output = []
temporal_type = ['absolute', 'relative']
for type in temporal_type:
for mapset in mapsets.keys():
if (type == 'absolute'):
table = (sp.get_type() + '_view_abs_time')
else:
table = (sp.get_type() + '_view_rel_time')
sql = ('SELECT id FROM ' + table)
sql += (" WHERE mapset = '%s'" % mapset)
sql += ' ORDER BY id'
dbif.execute(sql, mapset=mapset)
rows = dbif.fetchall(mapset=mapset)
for row in rows:
for col in row:
output.append(str(col))
if (connected is True):
dbif.close()
return output | Return a list of space time datasets of absolute and relative time
:param type: element type (strds, str3ds, stvds)
:return: a list of space time dataset ids | temporal/gui_support.py | tlist | karttur/geoimagine03-grass | 0 | python | def tlist(type, dbif=None):
'Return a list of space time datasets of absolute and relative time\n\n :param type: element type (strds, str3ds, stvds)\n\n :return: a list of space time dataset ids\n '
id = None
sp = dataset_factory(type, id)
(dbif, connected) = init_dbif(dbif)
mapsets = get_available_temporal_mapsets()
output = []
temporal_type = ['absolute', 'relative']
for type in temporal_type:
for mapset in mapsets.keys():
if (type == 'absolute'):
table = (sp.get_type() + '_view_abs_time')
else:
table = (sp.get_type() + '_view_rel_time')
sql = ('SELECT id FROM ' + table)
sql += (" WHERE mapset = '%s'" % mapset)
sql += ' ORDER BY id'
dbif.execute(sql, mapset=mapset)
rows = dbif.fetchall(mapset=mapset)
for row in rows:
for col in row:
output.append(str(col))
if (connected is True):
dbif.close()
return output | def tlist(type, dbif=None):
'Return a list of space time datasets of absolute and relative time\n\n :param type: element type (strds, str3ds, stvds)\n\n :return: a list of space time dataset ids\n '
id = None
sp = dataset_factory(type, id)
(dbif, connected) = init_dbif(dbif)
mapsets = get_available_temporal_mapsets()
output = []
temporal_type = ['absolute', 'relative']
for type in temporal_type:
for mapset in mapsets.keys():
if (type == 'absolute'):
table = (sp.get_type() + '_view_abs_time')
else:
table = (sp.get_type() + '_view_rel_time')
sql = ('SELECT id FROM ' + table)
sql += (" WHERE mapset = '%s'" % mapset)
sql += ' ORDER BY id'
dbif.execute(sql, mapset=mapset)
rows = dbif.fetchall(mapset=mapset)
for row in rows:
for col in row:
output.append(str(col))
if (connected is True):
dbif.close()
return output<|docstring|>Return a list of space time datasets of absolute and relative time
:param type: element type (strds, str3ds, stvds)
:return: a list of space time dataset ids<|endoftext|> |
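tlist returns fully qualified ids (name@mapset) gathered across both the absolute- and relative-time tables. A short usage sketch, output illustrative (requires a running GRASS session):

.. code-block:: python

    import grass.temporal as tgis

    tgis.init()
    ids = tgis.tlist('strds')
    print(ids)  # e.g. ['precipitation@PERMANENT', 'temperature@PERMANENT']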