This excerpt comes from a code-with-docstrings dataset; the column schema is:

| column | dtype | values |
|---|---|---|
| `body_hash` | string | length 64 |
| `body` | string | length 23–109k |
| `docstring` | string | length 1–57k |
| `path` | string | length 4–198 |
| `name` | string | length 1–115 |
| `repository_name` | string | length 7–111 |
| `repository_stars` | float64 | 0–191k |
| `lang` | string | 1 class (`python`) |
| `body_without_docstring` | string | length 14–108k |
| `unified` | string | length 45–133k |

Each record below gives the function name, source path, repository, star count, and language, followed by the function body with its docstring.
**`get_data_magnification`** — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `6b4e25695b40673bfcf1bc66a8fa5f29e3a7528caca966a2f23a8727eadd40f8`

```python
def get_data_magnification(self, bad=False):
    """
    Calculates the model magnification for each data point.

    Arguments :
        bad: *boolean*
            If *True*, calculates the magnification for all points.
            If *False*, only calculates the magnification for good data
            points. Values for bad data points are set to 0. Default is
            *False*.

    Returns :
        data_magnification: *np.ndarray*
            The model magnification evaluated for each datapoint. If there
            is more than one source, the magnification of each source is
            reported separately.
    """
    self._calculate_magnifications(bad=bad)
    return self._data_magnification
```
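A minimal usage sketch (hedged: the photometry file name and model parameter values below are placeholders, not taken from this record):

```python
import MulensModel as mm

data = mm.MulensData(file_name='phot.dat')  # placeholder photometry file
model = mm.Model({'t_0': 2455379.571, 'u_0': 0.523, 't_E': 17.94})

fit = mm.FitData(model=model, dataset=data)
magnification = fit.get_data_magnification()  # one value per epoch
```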
---

**`get_model_fluxes`** — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `15dd0ded6046238c6d9428e585a0a9dc6f548f893790d5d96fb5d3a1d547d702`

```python
def get_model_fluxes(self, bad=False):
    """
    Calculate model in flux space.

    Keywords :
        bad: *bool*
            Default is *False*. If *True* recalculates the data
            magnification for each point to ensure that the values
            for bad datapoints are calculated (otherwise, they are set to
            the magnitude of the blend).

    Returns :
        model_flux: *np.ndarray*
            The model flux evaluated for each datapoint.
    """
    if self.source_fluxes is None:
        raise AttributeError(
            'you need to run FitData.fit_fluxes() first to execute the '
            'linear fit.')

    if bad:
        self._calculate_magnifications(bad=True)

    model_flux = np.ones(self._dataset.n_epochs) * self.blend_flux
    if self._model.n_sources == 1:
        model_flux += self.source_flux * self._data_magnification
    else:
        for i in range(self._model.n_sources):
            model_flux += self.source_fluxes[i] * self._data_magnification[i]

    return model_flux
```
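The model here is the standard microlensing flux equation, F_model(t) = F_blend + Σᵢ F_source,ᵢ · Aᵢ(t). Continuing the earlier sketch, the intended call order is (hedged, since the guard above raises if the linear fit has not yet run):

```python
fit.fit_fluxes()                     # linear fit for source and blend fluxes
model_flux = fit.get_model_fluxes()  # F_blend + sum_i F_source_i * A_i(t)
```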
---

**`get_model_magnitudes`** — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `3464ae98c57670868c6970f15cf27fe9950758269ceb7ce79bf375a3249828d3`

```python
def get_model_magnitudes(self, **kwargs):
    """
    Calculate model in magnitude space

    Arguments :
        ``**kwargs``:
            see :py:func:`get_model_fluxes()`

    Returns :
        model_mag: *np.ndarray*
            The model magnitude evaluated for each datapoint.
    """
    model_flux = self.get_model_fluxes(**kwargs)
    model_mag = Utils.get_mag_from_flux(model_flux)
    return model_mag
```
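The flux-to-magnitude conversion behind `Utils.get_mag_from_flux()` is the standard zero-point relation, mag = zp − 2.5·log₁₀(flux). A self-contained sketch (the 22-mag zero point is an assumption about MulensModel's default, not taken from this record):

```python
import numpy as np

MAG_ZEROPOINT = 22.  # assumed default zero point

def mag_from_flux(flux):
    # mag = zeropoint - 2.5 * log10(flux); larger flux means smaller magnitude
    return MAG_ZEROPOINT - 2.5 * np.log10(flux)

print(mag_from_flux(100.))  # 17.0
```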
---

**`scale_fluxes`** — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `367fe883a5ed827e07862af5c0d43b255ba489c36958065101d9ee9f96103889`

```python
def scale_fluxes(self, source_flux, blend_flux):
    """
    Rescale the data fluxes to an arbitrary flux scale:
        flux = source_flux_0 * (data.flux - blend_flux) / source_flux
        flux += blend_flux_0
        err_flux = source_flux_0 * data.err_flux / source_flux

    Arguments :
        source_flux: *float*, *list*, *np.array*
            Flux of the source in the desired system. If n_sources > 1 and
            source_flux has more than one element, the elements are
            summed to produce the overall scaling flux.

        blend_flux: *float*
            Flux of the blend in the desired system

    Returns :
        flux: *np.ndarray*
            Fluxes from the data rescaled to the desired system.

        err_flux: *np.ndarray*
            Uncertainties of fluxes from the data rescaled to the desired
            system.
    """
    if self.model.n_sources == 1:
        data_source_flux = self.source_flux
    else:
        data_source_flux = np.sum(self.source_fluxes)
        if len(source_flux) > 1:
            source_flux = np.sum(source_flux)

    flux = source_flux * (self._dataset.flux - self.blend_flux)
    flux /= data_source_flux
    flux += blend_flux
    err_flux = source_flux * self._dataset.err_flux / data_source_flux
    return (flux, err_flux)
```
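A numeric spot-check of the rescaling algebra (all values invented for illustration): with fitted data-system fluxes F_s = 2.0 and F_b = 0.5, an observed flux of 4.5 corresponds to magnification A = (4.5 − 0.5)/2.0 = 2.0; rescaling to a target system with F_s0 = 1.0, F_b0 = 0.1 should give F_s0·A + F_b0 = 2.1.

```python
flux = 1.0 * (4.5 - 0.5) / 2.0 + 0.1
assert abs(flux - 2.1) < 1e-12  # magnification is preserved across systems
```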
---

**`get_residuals`** — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `c1fe51b61c158e493410f050b788e0cecc37cfcd2ce2f46bf4087c2b1faa5079`

```python
def get_residuals(self, phot_fmt=None, source_flux=None, blend_flux=None,
                  bad=False, type=None):
    """
    Calculate the residuals for each datapoint relative to the model.

    Keywords :
        phot_fmt: *str*, optional
            specify whether the residuals should be returned in
            magnitudes ('mag') or in flux ('flux'). Default is
            'mag'. If 'scaled', will return the residuals in magnitudes
            scaled to source_flux, blend_flux.

        source_flux, blend_flux: *float*
            reference source and blend fluxes for scaling the residuals

        bad: *bool*
            Default is *False*. If *True* recalculates the data
            magnification for each point to ensure that there are values
            even for bad datapoints.

        type:
            DEPRECATED, see "phot_fmt" above.

    Returns :
        residuals: *np.ndarray*
            the residuals for the corresponding dataset.

        errorbars: *np.ndarray*
            the scaled errorbars for each point. For plotting
            errorbars for the residuals.
    """
    if type is not None:
        if type == 'mag':
            warnings.warn(
                '"mag" returns residuals in the original data flux '
                'system. To scale the residuals, use "scaled".')
        warnings.warn(
            'type keyword will be deprecated. Use "phot_fmt" instead.',
            FutureWarning)
        phot_fmt = type

    if bad:
        self._calculate_magnifications(bad=True)

    if phot_fmt == 'mag':
        residuals = self._dataset.mag - self.get_model_magnitudes()
        errorbars = self._dataset.err_mag
    elif phot_fmt == 'flux':
        residuals = self._dataset.flux - self.get_model_fluxes()
        errorbars = self._dataset.err_flux
    elif phot_fmt == 'scaled':
        if source_flux is None or blend_flux is None:
            raise ValueError(
                'If phot_fmt=scaled, source_flux and blend_flux must '
                'also be specified.')

        magnification = self._data_magnification
        if self._model.n_sources == 1:
            model_flux = source_flux * magnification
        else:
            model_flux = source_flux[0] * magnification[0]
            model_flux += source_flux[1] * magnification[1]

        model_flux += blend_flux
        model_mag = Utils.get_mag_from_flux(model_flux)
        (flux, err_flux) = self.scale_fluxes(source_flux, blend_flux)
        (mag, errorbars) = Utils.get_mag_and_err_from_flux(flux, err_flux)
        residuals = mag - model_mag
    else:
        raise ValueError(
            'phot_fmt must be one of "mag", "flux", or "scaled". Your '
            'value: {0}'.format(phot_fmt))

    return (residuals, errorbars)
```
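A hedged plotting sketch for these residuals (matplotlib usage is standard; `fit` is the object from the earlier sketches):

```python
import matplotlib.pyplot as plt

residuals, errorbars = fit.get_residuals(phot_fmt='mag')
plt.errorbar(fit.dataset.time, residuals, yerr=errorbars, fmt='.')
plt.axhline(0.)
plt.gca().invert_yaxis()  # magnitude residuals: brighter is up
plt.show()
```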
---

**`_check_for_gradient_implementation`** — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `a8e1a4cee9dd6bd5c2a188d008b1f8e1eb1eec1045630de0b5632063739666bf`

```python
def _check_for_gradient_implementation(self, parameters):
    """
    Check that the gradient methods are implemented for the requested
    values.
    """
    if not isinstance(parameters, list):
        parameters = [parameters]

    implemented = {'t_0', 't_E', 'u_0', 't_eff', 'pi_E_N', 'pi_E_E'}
    if len(set(parameters) - implemented) > 0:
        raise NotImplementedError(
            'chi^2 gradient is implemented only for {:}\n'
            'Cannot work with {:}'.format(implemented, parameters))

    gradient = {param: 0 for param in parameters}

    if self.model.n_lenses != 1:
        raise NotImplementedError(
            'chi2_gradient() only implemented for single lens models')

    if ('rho' in parameters) or ('t_star' in parameters):
        as_dict = self.model.parameters.as_dict()
        if ('rho' in as_dict) or ('t_star' in as_dict):
            raise NotImplementedError(
                'Event.chi2_gradient() is not working '
                'for finite source models yet')
```
---

**`get_chi2_gradient`** — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `a46a08d94f802dd8b85fc229a2344b73024f392c9a9b3f861a3173a8897a08f2`

```python
def get_chi2_gradient(self, parameters):
    """
    Fits fluxes and calculates chi^2 gradient (also called Jacobian),
    i.e., :math:`d chi^2/d parameter`.

    Parameters :
        parameters: *str* or *list*, required
            Parameters with respect to which gradient is calculated.
            Currently accepted parameters are: ``t_0``, ``u_0``, ``t_eff``,
            ``t_E``, ``pi_E_N``, and ``pi_E_E``. The parameters for
            which you request gradient must be defined in py:attr:`~model`.

    Returns :
        gradient: *float* or *np.ndarray*
            chi^2 gradient
    """
    self.fit_fluxes()
    self.calculate_chi2_gradient(parameters)
    return self.chi2_gradient
```
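A hedged sketch of feeding this gradient to a gradient-based minimizer (the wrapper function, starting values, and parameter-setting pattern are illustrative assumptions, not this class's documented interface):

```python
import scipy.optimize as op

def chi2_and_grad(theta, fit, names=('t_0', 'u_0', 't_E')):
    for name, value in zip(names, theta):
        setattr(fit.model.parameters, name, value)  # assumed settable
    fit.update()  # assumed to re-fit fluxes and recompute chi2
    return fit.chi2, fit.get_chi2_gradient(list(names))

result = op.minimize(
    chi2_and_grad, x0=[2455379.6, 0.5, 18.], args=(fit,),
    jac=True, method='L-BFGS-B')
```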
---

**`calculate_chi2_gradient`** — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `c93e0e600269061c2eb7f5b9b97213e8147051330bbb49041a25e616cb9f07f9`

```python
def calculate_chi2_gradient(self, parameters):
    """
    Calculates chi^2 gradient (also called Jacobian), i.e.,
    :math:`d chi^2/d parameter` WITHOUT refitting for the fluxes. Saves
    computations if, e.g., you want to retrieve both py:attr:`~chi2` and
    py:attr:`~chi2_gradient`.

    Parameters :
        parameters: *str* or *list*, required
            Parameters with respect to which gradient is calculated.
            Currently accepted parameters are: ``t_0``, ``u_0``, ``t_eff``,
            ``t_E``, ``pi_E_N``, and ``pi_E_E``. The parameters for
            which you request gradient must be defined in py:attr:`~model`.

    Returns :
        gradient: *float* or *np.ndarray*
            chi^2 gradient
    """
    self._check_for_gradient_implementation(parameters)
    if not isinstance(parameters, list):
        parameters = [parameters]

    gradient = {param: 0 for param in parameters}
    as_dict = self.model.parameters.as_dict()

    # Common factor in all derivatives: -2 * (F_obs - F_model) * F_s / sigma_F^2
    factor = self.dataset.flux - self.get_model_fluxes()
    factor *= -2. / self.dataset.err_flux**2
    factor *= self.source_flux

    trajectory = self.model.get_trajectory(self.dataset.time)
    u_2 = trajectory.x**2 + trajectory.y**2
    u_ = np.sqrt(u_2)
    # dA/du for the point-source point-lens magnification:
    d_A_d_u = -8. / (u_2 * (u_2 + 4) * np.sqrt(u_2 + 4))
    factor *= d_A_d_u

    factor_d_x_d_u = (factor * trajectory.x / u_)[self.dataset.good]
    sum_d_x_d_u = np.sum(factor_d_x_d_u)
    factor_d_y_d_u = (factor * trajectory.y / u_)[self.dataset.good]
    sum_d_y_d_u = np.sum(factor_d_y_d_u)

    dt = self.dataset.time[self.dataset.good] - as_dict['t_0']

    # Chain rule for the three (t_0, u_0, t_E, t_eff) parameterizations;
    # `u` below is astropy.units (module-level import), distinct from u_.
    if 't_eff' not in as_dict:
        t_E = as_dict['t_E'].to(u.day).value
        if 't_0' in parameters:
            gradient['t_0'] += -sum_d_x_d_u / t_E
        if 'u_0' in parameters:
            gradient['u_0'] += sum_d_y_d_u
        if 't_E' in parameters:
            gradient['t_E'] += np.sum(factor_d_x_d_u * -dt / t_E**2)
    elif 't_E' not in as_dict:
        t_eff = as_dict['t_eff'].to(u.day).value
        if 't_0' in parameters:
            gradient['t_0'] += -sum_d_x_d_u * as_dict['u_0'] / t_eff
        if 'u_0' in parameters:
            gradient['u_0'] += sum_d_y_d_u + np.sum(
                factor_d_x_d_u * dt / t_eff)
        if 't_eff' in parameters:
            gradient['t_eff'] += np.sum(
                factor_d_x_d_u * -dt * as_dict['u_0'] / t_eff**2)
    elif 'u_0' not in as_dict:
        t_E = as_dict['t_E'].to(u.day).value
        t_eff = as_dict['t_eff'].to(u.day).value
        if 't_0' in parameters:
            gradient['t_0'] += -sum_d_x_d_u / t_E
        if 't_E' in parameters:
            gradient['t_E'] += (
                np.sum(factor_d_x_d_u * dt) - sum_d_y_d_u * t_eff) / t_E**2
        if 't_eff' in parameters:
            gradient['t_eff'] += sum_d_y_d_u / t_E
    else:
        raise KeyError(
            'Something is wrong with ModelParameters in '
            'Event.chi2_gradient():\n{:}'.format(as_dict))

    if ('pi_E_N' in parameters) or ('pi_E_E' in parameters):
        parallax = {
            'earth_orbital': False, 'satellite': False, 'topocentric': False}
        kwargs = {}
        if self.dataset.ephemerides_file is not None:
            kwargs['satellite_skycoord'] = self.dataset.satellite_skycoord

        # Trajectory with parallax turned off, to isolate the parallax shift:
        trajectory_no_piE = Trajectory(
            self.dataset.time, self.model.parameters, parallax,
            self.model.coords, **kwargs)
        dx = (trajectory.x - trajectory_no_piE.x)[self.dataset.good]
        dy = (trajectory.y - trajectory_no_piE.y)[self.dataset.good]
        delta_E = dx * as_dict['pi_E_E'] + dy * as_dict['pi_E_N']
        delta_N = dx * as_dict['pi_E_N'] - dy * as_dict['pi_E_E']
        det = as_dict['pi_E_N']**2 + as_dict['pi_E_E']**2

        if 'pi_E_N' in parameters:
            gradient['pi_E_N'] += np.sum(
                factor_d_x_d_u * delta_N + factor_d_y_d_u * delta_E)
            gradient['pi_E_N'] /= det
        if 'pi_E_E' in parameters:
            gradient['pi_E_E'] += np.sum(
                factor_d_x_d_u * delta_E - factor_d_y_d_u * delta_N)
            gradient['pi_E_E'] /= det

    if len(parameters) == 1:
        out = gradient[parameters[0]]
    else:
        out = np.array([gradient[p] for p in parameters])

    self._chi2_gradient = out
    return self._chi2_gradient
```
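The `d_A_d_u` line is the derivative of the point-lens magnification A(u) = (u² + 2)/(u·√(u² + 4)), namely dA/du = −8/(u²·(u² + 4)^{3/2}). A quick finite-difference spot-check of that formula, independent of the class:

```python
import numpy as np

def pspl_magnification(u):
    return (u**2 + 2) / (u * np.sqrt(u**2 + 4))

u = 0.7
analytic = -8. / (u**2 * (u**2 + 4)**1.5)
numeric = (pspl_magnification(u + 1e-6) - pspl_magnification(u - 1e-6)) / 2e-6
assert np.isclose(analytic, numeric, rtol=1e-5)
```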
---

**`chi2_gradient`** (property) — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `5658b422fa504a5de4c25f8f07e59a78a3217743a6fc915dee056e7a91e24a0d`

```python
@property
def chi2_gradient(self):
    """
    Return previously calculated chi^2 gradient (also called Jacobian),
    i.e., :math:`d chi^2/d parameter`. See :py:func:`~get_chi2_gradient()`
    and :py:func:`~calculate_chi2_gradient()`.

    Returns :
        gradient: *float* or *np.ndarray*
            chi^2 gradient. Will return None if the chi2 gradient was not
            previously calculated using one of the functions mentioned
            above.
    """
    try:
        return self._chi2_gradient
    except AttributeError:
        return None
```
---

**`chi2`** (property) — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `949a73944bac125e0f847028a4178d87457d53eaca714434900133e9b050b091`

```python
@property
def chi2(self):
    """
    Returns :
        chi2: *float*
            the total chi2 for the fitted dataset. Good points only. See
            :py:obj:`~MulensModel.mulensdata.MulensData.good`.

            If None, you need to run :py:func:`~update()` to execute the
            linear fit and calculate the chi2.
    """
    if self.chi2_per_point is None:
        return None
    else:
        return np.sum(self.chi2_per_point[self._dataset.good])
```
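The sum behind this property is the usual χ² = Σᵢ (Fᵢ − F_model,ᵢ)²/σᵢ² over good points; a hedged sketch of the equivalent direct computation in flux space, reusing the `fit` object from earlier sketches:

```python
residuals, errorbars = fit.get_residuals(phot_fmt='flux')
good = fit.dataset.good
chi2 = np.sum((residuals[good] / errorbars[good])**2)
```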
---

**`chi2_per_point`** (property) — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `85382f0366c2df8848e998e05c1a794a06ebbe225f3b7a9bb693f0cb73572f2d`

```python
@property
def chi2_per_point(self):
    """
    Returns :
        chi2_per_point: *np.ndarray*
            Chi^2 contribution from each data point,
            e.g. ``chi2_per_point[k]`` returns the chi2 contribution
            from the *k*-th point of :py:obj:`dataset`. Includes bad
            datapoints.

            If None, you need to run :py:func:`~update()` to execute the
            linear fit and calculate the chi2.
    """
    return self._chi2_per_point
```
---

**`source_flux`** (property) — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `c7b474468c353c138fffa19e1be7263091a8300a0df9f7ab78d36e1cf0e76b81`

```python
@property
def source_flux(self):
    """
    Returns :
        source_flux: *float*
            the fitted source flux. Only defined for models with a single
            source. See also :py:obj:`~source_fluxes`

            If None, you need to run :py:func:`~fit_fluxes()` or
            :py:func:`~update()` to execute the linear fit.
    """
    if self._model.n_sources == 1:
        return self.source_fluxes[0]
    else:
        msg = ('source_flux is defined only for models with ONE source,'
               ' you have {0} sources.'
               ' Try FitData.source_fluxes instead'.format(
                   self._model.n_sources))
        raise NameError(msg)
```
---

**`source_fluxes`** (property) — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `ad62833df35bfd49679c80cd70eb025dfab9e73b9568466b743f38eda6de6f35`

```python
@property
def source_fluxes(self):
    """
    Returns :
        source_fluxes: *np.array*
            the fitted source flux(es).

            If None, you need to run :py:func:`~fit_fluxes()` or
            :py:func:`~update()` to execute the linear fit.
    """
    return self._source_fluxes
```
---

**`blend_flux`** (property) — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `2dbac7dde4c2faff145595ebaa2a92cac9ae39d774a91376e07004c6888a7451`

```python
@property
def blend_flux(self):
    """
    Returns :
        blend_flux: *float*
            the fitted blend flux or the value set by
            fix_blend_flux (see :ref:`keywords`).

            If None, you need to run :py:func:`~fit_fluxes()` or
            :py:func:`~update()` to execute the linear fit.
    """
    return self._blend_flux
```
---

**`source_flux_ratio`** (property) — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `dd1a23afbb6e672f9b9f1e35197931c8b2e95ecd1f67de2c89a22601d05b8240`

```python
@property
def source_flux_ratio(self):
    """
    source_flux_ratio = source_flux_1 / source_flux_0

    Returns :
        source_flux_ratio: *float*
            the ratio of the fitted source fluxes or the value set by
            fix_source_flux_ratio (see :ref:`keywords`).

            If None, you need to run :py:func:`~fit_fluxes()` or
            :py:func:`~update()` to execute the linear fit.
    """
    if self._model.n_sources != 2:
        msg = ('source_flux is defined only for models with TWO sources,'
               ' you have {0} sources.'.format(self._model.n_sources))
        raise NameError(msg)

    if self.fix_source_flux_ratio:
        return self.fix_source_flux_ratio
    else:
        return self.source_fluxes[1] / self.source_fluxes[0]
```
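For a binary-source model the total flux is F_blend + F_s,0·A₀(t) + F_s,1·A₁(t), so this ratio q_f = F_s,1/F_s,0 is the quantity usually reported. A hedged access sketch (the parameter names follow MulensModel's binary-source `t_0_1`/`t_0_2` convention; the values are placeholders):

```python
model_2s = mm.Model({'t_0_1': 2455100., 'u_0_1': 0.1,
                     't_0_2': 2455120., 'u_0_2': 0.3, 't_E': 25.})
fit_2s = mm.FitData(model=model_2s, dataset=data)
fit_2s.fit_fluxes()
q_flux = fit_2s.source_flux_ratio  # F_s,1 / F_s,0
```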
---

**`dataset`** (property) — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `a1c4dacce6a29fef5c3afaa96239ac3a56b26ba2443f727d98136d93e6de1b24`

```python
@property
def dataset(self):
    """
    :py:class:`~MulensModel.mulensdata.MulensData` object

    A single photometric dataset to be fitted.
    """
    return self._dataset
```
---

**`model`** (property) — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `5c1ff6ce49dd4625adc41ed51fcbc0ed6e378ecca6b5737cc7ba8140976acc0b`

```python
@property
def model(self):
    """
    :py:class:`~MulensModel.model.Model` object

    The model to fit to the data.
    """
    return self._model
```
---

**`gamma`** (property) — `source/MulensModel/fitdata.py` (rpoleski/MulensModel, 30 stars, python)
`body_hash`: `6e99bfafe17cbce9d2071ec88ffb50317368edd2d746ea9d8a1bf43fb04444c1`

```python
@property
def gamma(self):
    """
    *float*

    Limb-darkening coefficient for this fit. Set by
    :py:attr:`~dataset.bandpass` and
    :py:func:`~model.get_limb_coeff_gamma()`.

    *** CHECK LINKS IN SPHINX. PROBABLY WON'T WORK. ***
    """
    return self._gamma
```
---

**`apply_state_transition_without_block`** — `eth2/beacon/state_machines/state_transitions.py` (hwwhww/trinity, 2 stars, python)
`body_hash`: `d168801738909e9288d07d71ea59e67e6818b5b0400296fc98a22cb9ab77b649`

```python
@abstractmethod
def apply_state_transition_without_block(self, state: BeaconState,
                                         slot: Slot,
                                         parent_root: Hash32) -> BeaconState:
    """
    Advance the ``state`` to the beginning of the requested ``slot``.
    Return the resulting state at that slot assuming there are no
    intervening blocks. This method provides callers with some lookahead
    into the future state of the chain, useful for generating RANDAO
    reveals or computing future committee assignments.

    NOTE: Inserting blocks in intervening slots will (among other things)
    change the ``parent_root``, invalidating the returned state.
    """
    pass
```
5b2dcefe1e3e32b58af2a5b5bfe30b02091ca1e423b6f8b6f66afe9ca8939b3f | def start_response(self, status, headers, exc_info=None):
'Emulate WSGI start_response.'
self.headers += headers | Emulate WSGI start_response. | tests/test_middleware_cors.py | start_response | checkmate/simpl | 6 | python | def start_response(self, status, headers, exc_info=None):
self.headers += headers | def start_response(self, status, headers, exc_info=None):
self.headers += headers<|docstring|>Emulate WSGI start_response.<|endoftext|> |
74ef2f2467db392ad65f9c870d00532c3b23b30c6da4ab827f420f7e598549f1 | def test_default_bypass(self):
'Default config ignores all calls.'
middleware = cors.CORSMiddleware(debug_app)
env = {'PATH_INFO': '/something/'}
wsgiref.util.setup_testing_defaults(env)
middleware(env.copy(), self.start_response)
self.assertEqual(env['PATH_INFO'], '/something/')
self.assertFalse(any(((h[0] == 'Access-Control-Allow-Origin') for h in self.headers))) | Default config ignores all calls. | tests/test_middleware_cors.py | test_default_bypass | checkmate/simpl | 6 | python | def test_default_bypass(self):
middleware = cors.CORSMiddleware(debug_app)
env = {'PATH_INFO': '/something/'}
wsgiref.util.setup_testing_defaults(env)
middleware(env.copy(), self.start_response)
self.assertEqual(env['PATH_INFO'], '/something/')
self.assertFalse(any(((h[0] == 'Access-Control-Allow-Origin') for h in self.headers))) | def test_default_bypass(self):
middleware = cors.CORSMiddleware(debug_app)
env = {'PATH_INFO': '/something/'}
wsgiref.util.setup_testing_defaults(env)
middleware(env.copy(), self.start_response)
self.assertEqual(env['PATH_INFO'], '/something/')
self.assertFalse(any(((h[0] == 'Access-Control-Allow-Origin') for h in self.headers)))<|docstring|>Default config ignores all calls.<|endoftext|> |
ddba70cadf0db1b1b621bf76a29386ff4599db37721fded4194231415c0460d3 | def test_netloc(self):
'Netloc match adds headers.'
middleware = cors.CORSMiddleware(debug_app, allowed_netlocs=['localhost'])
env = {'REQUEST_METHOD': 'OPTIONS', 'HTTP_ORIGIN': 'http://localhost'}
wsgiref.util.setup_testing_defaults(env)
middleware(env.copy(), self.start_response)
six.assertCountEqual(self, self.headers, [('Access-Control-Allow-Methods', ', '.join(cors.CORSMiddleware.default_methods)), ('Access-Control-Allow-Headers', ', '.join(cors.CORSMiddleware.default_headers)), ('Access-Control-Allow-Credentials', 'true'), ('Access-Control-Allow-Origin', 'http://localhost')]) | Netloc match adds headers. | tests/test_middleware_cors.py | test_netloc | checkmate/simpl | 6 | python | def test_netloc(self):
middleware = cors.CORSMiddleware(debug_app, allowed_netlocs=['localhost'])
env = {'REQUEST_METHOD': 'OPTIONS', 'HTTP_ORIGIN': 'http://localhost'}
wsgiref.util.setup_testing_defaults(env)
middleware(env.copy(), self.start_response)
six.assertCountEqual(self, self.headers, [('Access-Control-Allow-Methods', ', '.join(cors.CORSMiddleware.default_methods)), ('Access-Control-Allow-Headers', ', '.join(cors.CORSMiddleware.default_headers)), ('Access-Control-Allow-Credentials', 'true'), ('Access-Control-Allow-Origin', 'http://localhost')]) | def test_netloc(self):
middleware = cors.CORSMiddleware(debug_app, allowed_netlocs=['localhost'])
env = {'REQUEST_METHOD': 'OPTIONS', 'HTTP_ORIGIN': 'http://localhost'}
wsgiref.util.setup_testing_defaults(env)
middleware(env.copy(), self.start_response)
six.assertCountEqual(self, self.headers, [('Access-Control-Allow-Methods', ', '.join(cors.CORSMiddleware.default_methods)), ('Access-Control-Allow-Headers', ', '.join(cors.CORSMiddleware.default_headers)), ('Access-Control-Allow-Credentials', 'true'), ('Access-Control-Allow-Origin', 'http://localhost')])<|docstring|>Netloc match adds headers.<|endoftext|> |
0f363cf5180aefa0a042416dc2c438d11d37f04066e82e86f35d40cde1b429e7 | def test_netloc_mismatch(self):
'Netloc match checks ports.'
middleware = cors.CORSMiddleware(debug_app, allowed_netlocs=['localhost:9000'])
env = {'REQUEST_METHOD': 'OPTIONS', 'HTTP_ORIGIN': 'http://localhost:8080'}
wsgiref.util.setup_testing_defaults(env)
middleware(env.copy(), self.start_response)
self.assertFalse(any(((h[0] == 'Access-Control-Allow-Origin') for h in self.headers))) | Netloc match checks ports. | tests/test_middleware_cors.py | test_netloc_mismatch | checkmate/simpl | 6 | python | def test_netloc_mismatch(self):
middleware = cors.CORSMiddleware(debug_app, allowed_netlocs=['localhost:9000'])
env = {'REQUEST_METHOD': 'OPTIONS', 'HTTP_ORIGIN': 'http://localhost:8080'}
wsgiref.util.setup_testing_defaults(env)
middleware(env.copy(), self.start_response)
self.assertFalse(any(((h[0] == 'Access-Control-Allow-Origin') for h in self.headers))) | def test_netloc_mismatch(self):
middleware = cors.CORSMiddleware(debug_app, allowed_netlocs=['localhost:9000'])
env = {'REQUEST_METHOD': 'OPTIONS', 'HTTP_ORIGIN': 'http://localhost:8080'}
wsgiref.util.setup_testing_defaults(env)
middleware(env.copy(), self.start_response)
self.assertFalse(any(((h[0] == 'Access-Control-Allow-Origin') for h in self.headers)))<|docstring|>Netloc match checks ports.<|endoftext|> |
0084c372d845968fb059ccc1cce48f9fcc70eb41cac8a797f03584a3ca5168ed | def test_hostname(self):
'Hostname matches any port and adds headers.'
middleware = cors.CORSMiddleware(debug_app, allowed_hostnames=['localhost'])
env = {'REQUEST_METHOD': 'OPTIONS', 'HTTP_ORIGIN': 'http://localhost:8080'}
wsgiref.util.setup_testing_defaults(env)
middleware(env.copy(), self.start_response)
six.assertCountEqual(self, self.headers, [('Access-Control-Allow-Methods', ', '.join(cors.CORSMiddleware.default_methods)), ('Access-Control-Allow-Headers', ', '.join(cors.CORSMiddleware.default_headers)), ('Access-Control-Allow-Credentials', 'true'), ('Access-Control-Allow-Origin', 'http://localhost:8080')]) | Hostname matches any port and adds headers. | tests/test_middleware_cors.py | test_hostname | checkmate/simpl | 6 | python | def test_hostname(self):
middleware = cors.CORSMiddleware(debug_app, allowed_hostnames=['localhost'])
env = {'REQUEST_METHOD': 'OPTIONS', 'HTTP_ORIGIN': 'http://localhost:8080'}
wsgiref.util.setup_testing_defaults(env)
middleware(env.copy(), self.start_response)
six.assertCountEqual(self, self.headers, [('Access-Control-Allow-Methods', ', '.join(cors.CORSMiddleware.default_methods)), ('Access-Control-Allow-Headers', ', '.join(cors.CORSMiddleware.default_headers)), ('Access-Control-Allow-Credentials', 'true'), ('Access-Control-Allow-Origin', 'http://localhost:8080')]) | def test_hostname(self):
middleware = cors.CORSMiddleware(debug_app, allowed_hostnames=['localhost'])
env = {'REQUEST_METHOD': 'OPTIONS', 'HTTP_ORIGIN': 'http://localhost:8080'}
wsgiref.util.setup_testing_defaults(env)
middleware(env.copy(), self.start_response)
six.assertCountEqual(self, self.headers, [('Access-Control-Allow-Methods', ', '.join(cors.CORSMiddleware.default_methods)), ('Access-Control-Allow-Headers', ', '.join(cors.CORSMiddleware.default_headers)), ('Access-Control-Allow-Credentials', 'true'), ('Access-Control-Allow-Origin', 'http://localhost:8080')])<|docstring|>Hostname matches any port and adds headers.<|endoftext|> |
cb9893604dace50648302986d26ad66e9ed6f205ed8e5e578ee683620fd7ec36 | def test_regex(self):
'Regex matches origin and adds headers.'
middleware = cors.CORSMiddleware(debug_app, allowed_regexes=['.*'])
env = {'REQUEST_METHOD': 'OPTIONS', 'HTTP_ORIGIN': 'https://foo'}
wsgiref.util.setup_testing_defaults(env)
middleware(env.copy(), self.start_response)
six.assertCountEqual(self, self.headers, [('Access-Control-Allow-Methods', ', '.join(cors.CORSMiddleware.default_methods)), ('Access-Control-Allow-Headers', ', '.join(cors.CORSMiddleware.default_headers)), ('Access-Control-Allow-Credentials', 'true'), ('Access-Control-Allow-Origin', 'https://foo')]) | Regex matches origin and adds headers. | tests/test_middleware_cors.py | test_regex | checkmate/simpl | 6 | python | def test_regex(self):
middleware = cors.CORSMiddleware(debug_app, allowed_regexes=['.*'])
env = {'REQUEST_METHOD': 'OPTIONS', 'HTTP_ORIGIN': 'https://foo'}
wsgiref.util.setup_testing_defaults(env)
middleware(env.copy(), self.start_response)
six.assertCountEqual(self, self.headers, [('Access-Control-Allow-Methods', ', '.join(cors.CORSMiddleware.default_methods)), ('Access-Control-Allow-Headers', ', '.join(cors.CORSMiddleware.default_headers)), ('Access-Control-Allow-Credentials', 'true'), ('Access-Control-Allow-Origin', 'https://foo')]) | def test_regex(self):
middleware = cors.CORSMiddleware(debug_app, allowed_regexes=['.*'])
env = {'REQUEST_METHOD': 'OPTIONS', 'HTTP_ORIGIN': 'https://foo'}
wsgiref.util.setup_testing_defaults(env)
middleware(env.copy(), self.start_response)
six.assertCountEqual(self, self.headers, [('Access-Control-Allow-Methods', ', '.join(cors.CORSMiddleware.default_methods)), ('Access-Control-Allow-Headers', ', '.join(cors.CORSMiddleware.default_headers)), ('Access-Control-Allow-Credentials', 'true'), ('Access-Control-Allow-Origin', 'https://foo')])<|docstring|>Regex matches origin and adds headers.<|endoftext|> |
e5233e36159568d7381aedff219282cae48f58f000ca2a9cd74a43822b06ba5c | def test_conditional_import(self):
'Fail to init if webob not installed.'
with mock.patch('simpl.middleware.cors.webob', new=None):
app = cors.CORSMiddleware(None)
with self.assertRaises(RuntimeError):
app({}, self.start_response) | Fail to init if webob not installed. | tests/test_middleware_cors.py | test_conditional_import | checkmate/simpl | 6 | python | def test_conditional_import(self):
with mock.patch('simpl.middleware.cors.webob', new=None):
app = cors.CORSMiddleware(None)
with self.assertRaises(RuntimeError):
app({}, self.start_response) | def test_conditional_import(self):
with mock.patch('simpl.middleware.cors.webob', new=None):
app = cors.CORSMiddleware(None)
with self.assertRaises(RuntimeError):
app({}, self.start_response)<|docstring|>Fail to init if webob not installed.<|endoftext|> |
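The six test records above all check the same contract: the middleware inspects HTTP_ORIGIN and conditionally appends an Access-Control-Allow-Origin header before delegating to start_response. A minimal runnable stand-in (not the actual simpl.middleware.cors implementation) that satisfies the netloc-match case:

```python
import wsgiref.util

def debug_app(environ, start_response):
    # Trivial wrapped WSGI app.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']

class TinyCORSMiddleware:
    # Minimal sketch; NOT the real simpl implementation.
    def __init__(self, app, allowed_netlocs=None):
        self.app = app
        self.allowed_netlocs = allowed_netlocs or []

    def __call__(self, environ, start_response):
        origin = environ.get('HTTP_ORIGIN', '')
        netloc = origin.split('://')[-1]

        def _start(status, headers, exc_info=None):
            if netloc in self.allowed_netlocs:
                headers = headers + [('Access-Control-Allow-Origin', origin)]
            return start_response(status, headers, exc_info)

        return self.app(environ, _start)

env = {'REQUEST_METHOD': 'OPTIONS', 'HTTP_ORIGIN': 'http://localhost'}
wsgiref.util.setup_testing_defaults(env)
headers = []
TinyCORSMiddleware(debug_app, allowed_netlocs=['localhost'])(
    env, lambda status, hdrs, exc_info=None: headers.extend(hdrs))
assert ('Access-Control-Allow-Origin', 'http://localhost') in headers
```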
2ac2d979c6654b0861e4730048650f37ac536823e5127049db2332ad64ac805d | @skipIfWindows
@skipIfFreeBSD
@expectedFailureAll(oslist=['linux'], bugnumber='new failure after r256863')
def test_lldbmi_symbol_list_lines_file(self):
"Test that 'lldb-mi --interpreter' works for -symbol-list-lines when file exists."
self.spawnLldbMi(args=None)
self.runCmd(('-file-exec-and-symbols %s' % self.myexe))
self.expect('\\^done')
self.runCmd('-break-insert -f main')
self.expect('\\^done,bkpt={number="1"')
self.runCmd('-exec-run')
self.expect('\\^running')
self.expect('\\*stopped,reason="breakpoint-hit"')
self.runCmd('-data-evaluate-expression main')
self.expect('\\^done,value="0x[0-9a-f]+ \\(a.out`main at main.cpp:[0-9]+\\)"')
addr = int(self.child.after.split('"')[1].split(' ')[0], 16)
line = line_number('main.cpp', '// FUNC_main')
self.runCmd('-symbol-list-lines main.cpp')
self.expect(('\\^done,lines=\\[\\{pc="0x0*%x",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="\\d+"\\})+\\]' % (addr, line)))
sline = line_number('symbol_list_lines_inline_test2.cpp', '// FUNC_gfunc2')
eline = line_number('symbol_list_lines_inline_test2.cpp', '// END_gfunc2')
self.runCmd('-symbol-list-lines symbol_list_lines_inline_test2.cpp')
self.expect(('\\^done,lines=\\[\\{pc="0x[0-9a-f]+",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="3\\d"\\})*,\\{pc="0x[0-9a-f]+",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="3\\d"\\})*\\]' % (sline, eline)))
sline = line_number('symbol_list_lines_inline_test.h', '// FUNC_ifunc')
eline = line_number('symbol_list_lines_inline_test.h', '// FUNC_mfunc')
self.runCmd('-symbol-list-lines symbol_list_lines_inline_test.h')
self.expect(('\\^done,lines=\\[\\{pc="0x[0-9a-f]+",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="\\d"\\})*(,\\{pc="0x[0-9a-f]+",line="1\\d"\\})*,\\{pc="0x[0-9a-f]+",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="2\\d"\\})*\\]' % (sline, eline)))
self.runCmd('-symbol-list-lines unknown_file')
self.expect('\\^error,message="error: No source filenames matched \'unknown_file\'\\. "')
self.runCmd('-symbol-list-lines ./main.cpp')
self.expect('\\^error,message="error: No source filenames matched \'\\./main\\.cpp\'\\. "')
import os
path = os.path.join(os.getcwd(), 'main.cpp')
self.runCmd(('-symbol-list-lines "%s"' % path))
self.expect(('\\^done,lines=\\[\\{pc="0x0*%x",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="\\d+"\\})+\\]' % (addr, line)))
self.runCmd('-symbol-list-lines unknown_dir/main.cpp')
self.expect('\\^error,message="error: No source filenames matched \'unknown_dir/main\\.cpp\'\\. "') | Test that 'lldb-mi --interpreter' works for -symbol-list-lines when file exists. | SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/tools/lldb-mi/symbol/TestMiSymbol.py | test_lldbmi_symbol_list_lines_file | Polidea/SiriusObfuscator | 427 | python | @skipIfWindows
@skipIfFreeBSD
@expectedFailureAll(oslist=['linux'], bugnumber='new failure after r256863')
def test_lldbmi_symbol_list_lines_file(self):
self.spawnLldbMi(args=None)
self.runCmd(('-file-exec-and-symbols %s' % self.myexe))
self.expect('\\^done')
self.runCmd('-break-insert -f main')
self.expect('\\^done,bkpt={number="1"')
self.runCmd('-exec-run')
self.expect('\\^running')
self.expect('\\*stopped,reason="breakpoint-hit"')
self.runCmd('-data-evaluate-expression main')
self.expect('\\^done,value="0x[0-9a-f]+ \\(a.out`main at main.cpp:[0-9]+\\)"')
addr = int(self.child.after.split('"')[1].split(' ')[0], 16)
line = line_number('main.cpp', '// FUNC_main')
self.runCmd('-symbol-list-lines main.cpp')
self.expect(('\\^done,lines=\\[\\{pc="0x0*%x",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="\\d+"\\})+\\]' % (addr, line)))
sline = line_number('symbol_list_lines_inline_test2.cpp', '// FUNC_gfunc2')
eline = line_number('symbol_list_lines_inline_test2.cpp', '// END_gfunc2')
self.runCmd('-symbol-list-lines symbol_list_lines_inline_test2.cpp')
self.expect(('\\^done,lines=\\[\\{pc="0x[0-9a-f]+",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="3\\d"\\})*,\\{pc="0x[0-9a-f]+",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="3\\d"\\})*\\]' % (sline, eline)))
sline = line_number('symbol_list_lines_inline_test.h', '// FUNC_ifunc')
eline = line_number('symbol_list_lines_inline_test.h', '// FUNC_mfunc')
self.runCmd('-symbol-list-lines symbol_list_lines_inline_test.h')
self.expect(('\\^done,lines=\\[\\{pc="0x[0-9a-f]+",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="\\d"\\})*(,\\{pc="0x[0-9a-f]+",line="1\\d"\\})*,\\{pc="0x[0-9a-f]+",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="2\\d"\\})*\\]' % (sline, eline)))
self.runCmd('-symbol-list-lines unknown_file')
self.expect('\\^error,message="error: No source filenames matched \'unknown_file\'\\. "')
self.runCmd('-symbol-list-lines ./main.cpp')
self.expect('\\^error,message="error: No source filenames matched \'\\./main\\.cpp\'\\. "')
import os
path = os.path.join(os.getcwd(), 'main.cpp')
self.runCmd(('-symbol-list-lines "%s"' % path))
self.expect(('\\^done,lines=\\[\\{pc="0x0*%x",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="\\d+"\\})+\\]' % (addr, line)))
self.runCmd('-symbol-list-lines unknown_dir/main.cpp')
self.expect('\\^error,message="error: No source filenames matched \'unknown_dir/main\\.cpp\'\\. "') | @skipIfWindows
@skipIfFreeBSD
@expectedFailureAll(oslist=['linux'], bugnumber='new failure after r256863')
def test_lldbmi_symbol_list_lines_file(self):
self.spawnLldbMi(args=None)
self.runCmd(('-file-exec-and-symbols %s' % self.myexe))
self.expect('\\^done')
self.runCmd('-break-insert -f main')
self.expect('\\^done,bkpt={number="1"')
self.runCmd('-exec-run')
self.expect('\\^running')
self.expect('\\*stopped,reason="breakpoint-hit"')
self.runCmd('-data-evaluate-expression main')
self.expect('\\^done,value="0x[0-9a-f]+ \\(a.out`main at main.cpp:[0-9]+\\)"')
addr = int(self.child.after.split('"')[1].split(' ')[0], 16)
line = line_number('main.cpp', '// FUNC_main')
self.runCmd('-symbol-list-lines main.cpp')
self.expect(('\\^done,lines=\\[\\{pc="0x0*%x",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="\\d+"\\})+\\]' % (addr, line)))
sline = line_number('symbol_list_lines_inline_test2.cpp', '// FUNC_gfunc2')
eline = line_number('symbol_list_lines_inline_test2.cpp', '// END_gfunc2')
self.runCmd('-symbol-list-lines symbol_list_lines_inline_test2.cpp')
self.expect(('\\^done,lines=\\[\\{pc="0x[0-9a-f]+",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="3\\d"\\})*,\\{pc="0x[0-9a-f]+",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="3\\d"\\})*\\]' % (sline, eline)))
sline = line_number('symbol_list_lines_inline_test.h', '// FUNC_ifunc')
eline = line_number('symbol_list_lines_inline_test.h', '// FUNC_mfunc')
self.runCmd('-symbol-list-lines symbol_list_lines_inline_test.h')
self.expect(('\\^done,lines=\\[\\{pc="0x[0-9a-f]+",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="\\d"\\})*(,\\{pc="0x[0-9a-f]+",line="1\\d"\\})*,\\{pc="0x[0-9a-f]+",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="2\\d"\\})*\\]' % (sline, eline)))
self.runCmd('-symbol-list-lines unknown_file')
self.expect('\\^error,message="error: No source filenames matched \'unknown_file\'\\. "')
self.runCmd('-symbol-list-lines ./main.cpp')
self.expect('\\^error,message="error: No source filenames matched \'\\./main\\.cpp\'\\. "')
import os
path = os.path.join(os.getcwd(), 'main.cpp')
self.runCmd(('-symbol-list-lines "%s"' % path))
self.expect(('\\^done,lines=\\[\\{pc="0x0*%x",line="%d"\\}(,\\{pc="0x[0-9a-f]+",line="\\d+"\\})+\\]' % (addr, line)))
self.runCmd('-symbol-list-lines unknown_dir/main.cpp')
self.expect('\\^error,message="error: No source filenames matched \'unknown_dir/main\\.cpp\'\\. "')<|docstring|>Test that 'lldb-mi --interpreter' works for -symbol-list-lines when file exists.<|endoftext|> |
faa7acea4b2b428d30b1969edb5b8dfefb94c33a646f43cdbd0487d573eb6cb0 | def colorWipe(self, color, wait_ms=100):
' wipe color across display one pixel at a time '
for i in range(self.stick.numPixels()):
self.stick.setPixelColor(i, color)
self.stick.show()
time.sleep(1.5)
for i in range(self.stick.numPixels()):
self.stick.setPixelColor(i, Color(0, 0, 0))
self.stick.show()
time.sleep(0.5) | wipe color across display one pixel at a time | python/states/standalone/buttonPress.py | colorWipe | dride/dride-indicators | 0 | python | def colorWipe(self, color, wait_ms=100):
' '
for i in range(self.stick.numPixels()):
self.stick.setPixelColor(i, color)
self.stick.show()
time.sleep(1.5)
for i in range(self.stick.numPixels()):
self.stick.setPixelColor(i, Color(0, 0, 0))
self.stick.show()
time.sleep(0.5) | def colorWipe(self, color, wait_ms=100):
' '
for i in range(self.stick.numPixels()):
self.stick.setPixelColor(i, color)
self.stick.show()
time.sleep(1.5)
for i in range(self.stick.numPixels()):
self.stick.setPixelColor(i, Color(0, 0, 0))
self.stick.show()
time.sleep(0.5)<|docstring|>wipe color across display one pixel at a time<|endoftext|>
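colorWipe only calls numPixels, setPixelColor and show on its strip, so the sweep can be exercised without LED hardware. FakeStrip below is a made-up stand-in, not part of the dride code:

```python
class FakeStrip:
    # Made-up stand-in exposing the three methods colorWipe relies on.
    def __init__(self, n):
        self.n = n
        self.pixels = [0] * n

    def numPixels(self):
        return self.n

    def setPixelColor(self, i, color):
        self.pixels[i] = color

    def show(self):
        pass  # a real strip would push the buffer to the LEDs here

strip = FakeStrip(8)
for i in range(strip.numPixels()):   # same loop shape as the wipe above
    strip.setPixelColor(i, 0xFF0000)
strip.show()
assert strip.pixels == [0xFF0000] * 8
```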
01d82ef235a222d59a202d99ac4aa27f36525041ff4ef6ec6cbad83d371fad2d | def execute_task(task):
' Evaluate a nested task\n\n >>> inc = lambda x: x + 1\n >>> execute_task((inc, 1))\n 2\n >>> execute_task((sum, [1, 2, (inc, 3)]))\n 7\n '
if istask(task):
(func, args) = (task[0], task[1:])
return func(*map(execute_task, args))
elif isinstance(task, list):
return list(map(execute_task, task))
else:
return task | Evaluate a nested task
>>> inc = lambda x: x + 1
>>> execute_task((inc, 1))
2
>>> execute_task((sum, [1, 2, (inc, 3)]))
7 | distributed/worker.py | execute_task | gdementen/distributed | 1 | python | def execute_task(task):
' Evaluate a nested task\n\n >>> inc = lambda x: x + 1\n >>> execute_task((inc, 1))\n 2\n >>> execute_task((sum, [1, 2, (inc, 3)]))\n 7\n '
if istask(task):
(func, args) = (task[0], task[1:])
return func(*map(execute_task, args))
elif isinstance(task, list):
return list(map(execute_task, task))
else:
return task | def execute_task(task):
' Evaluate a nested task\n\n >>> inc = lambda x: x + 1\n >>> execute_task((inc, 1))\n 2\n >>> execute_task((sum, [1, 2, (inc, 3)]))\n 7\n '
if istask(task):
(func, args) = (task[0], task[1:])
return func(*map(execute_task, args))
elif isinstance(task, list):
return list(map(execute_task, task))
else:
return task<|docstring|>Evaluate a nested task
>>> inc = lambda x: x + 1
>>> execute_task((inc, 1))
2
>>> execute_task((sum, [1, 2, (inc, 3)]))
7<|endoftext|> |
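The doctests above run as-is once istask is in scope; here is a self-contained re-run with a simplified istask standing in for dask.core.istask (a task is a tuple whose head is callable):

```python
def istask(x):
    # Simplified stand-in for dask.core.istask.
    return isinstance(x, tuple) and bool(x) and callable(x[0])

def execute_task(task):
    if istask(task):
        func, args = task[0], task[1:]
        return func(*map(execute_task, args))
    elif isinstance(task, list):
        return list(map(execute_task, task))
    return task

inc = lambda x: x + 1
assert execute_task((inc, 1)) == 2
assert execute_task((sum, [1, 2, (inc, 3)])) == 7
```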
bacf204f5b897109a90311cc113fbd692832ea2160232dc8746fcf8ce8ed7236 | def dumps_function(func):
' Dump a function to bytes, cache functions '
if (func not in cache):
b = dumps(func)
cache[func] = b
return cache[func] | Dump a function to bytes, cache functions | distributed/worker.py | dumps_function | gdementen/distributed | 1 | python | def dumps_function(func):
' '
if (func not in cache):
b = dumps(func)
cache[func] = b
return cache[func] | def dumps_function(func):
' '
if (func not in cache):
b = dumps(func)
cache[func] = b
return cache[func]<|docstring|>Dump a function to bytes, cache functions<|endoftext|> |
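The same memoization shape, restated with plain pickle and a local dict so it runs outside distributed (the real record uses distributed's dumps and a module-level cache):

```python
import pickle
from operator import add

cache = {}

def dumps_function(func):
    # Same control flow as the record above; pickle stands in for dumps.
    if func not in cache:
        cache[func] = pickle.dumps(func)
    return cache[func]

assert dumps_function(add) is dumps_function(add)  # second call hits the cache
```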
9e2a7bd28a39a9ef615502707ba8e6cfee53741b0ee8c1e7f7081de411aba499 | def dumps_task(task):
" Serialize a dask task\n\n Returns a dict of bytestrings that can each be loaded with ``loads``\n\n Examples\n --------\n Either returns a task as a function, args, kwargs dict\n\n >>> from operator import add\n >>> dumps_task((add, 1)) # doctest: +SKIP\n {'function': b'\x80\x04\x95\x00\x8c\t_operator\x94\x8c\x03add\x94\x93\x94.'\n 'args': b'\x80\x04\x95\x07\x00\x00\x00K\x01K\x02\x86\x94.'}\n\n Or as a single task blob if it can't easily decompose the result. This\n happens either if the task is highly nested, or if it isn't a task at all\n\n >>> dumps_task(1) # doctest: +SKIP\n {'task': b'\x80\x04\x95\x03\x00\x00\x00\x00\x00\x00\x00K\x01.'}\n "
if istask(task):
if ((task[0] is apply) and (not any(map(_maybe_complex, task[2:])))):
d = {'function': dumps_function(task[1]), 'args': dumps(task[2])}
if (len(task) == 4):
d['kwargs'] = dumps(task[3])
return d
elif (not any(map(_maybe_complex, task[1:]))):
return {'function': dumps_function(task[0]), 'args': dumps(task[1:])}
return {'task': dumps(task)} | Serialize a dask task
Returns a dict of bytestrings that can each be loaded with ``loads``
Examples
--------
Either returns a task as a function, args, kwargs dict
>>> from operator import add
>>> dumps_task((add, 1)) # doctest: +SKIP
{'function': b'\x80\x04\x95\x00\x8c\t_operator\x94\x8c\x03add\x94\x93\x94.'
'args': b'\x80\x04\x95\x07\x00\x00\x00K\x01K\x02\x86\x94.'}
Or as a single task blob if it can't easily decompose the result. This
happens either if the task is highly nested, or if it isn't a task at all
>>> dumps_task(1) # doctest: +SKIP
{'task': b'\x80\x04\x95\x03\x00\x00\x00\x00\x00\x00\x00K\x01.'} | distributed/worker.py | dumps_task | gdementen/distributed | 1 | python | def dumps_task(task):
" Serialize a dask task\n\n Returns a dict of bytestrings that can each be loaded with ``loads``\n\n Examples\n --------\n Either returns a task as a function, args, kwargs dict\n\n >>> from operator import add\n >>> dumps_task((add, 1)) # doctest: +SKIP\n {'function': b'\x80\x04\x95\x00\x8c\t_operator\x94\x8c\x03add\x94\x93\x94.'\n 'args': b'\x80\x04\x95\x07\x00\x00\x00K\x01K\x02\x86\x94.'}\n\n Or as a single task blob if it can't easily decompose the result. This\n happens either if the task is highly nested, or if it isn't a task at all\n\n >>> dumps_task(1) # doctest: +SKIP\n {'task': b'\x80\x04\x95\x03\x00\x00\x00\x00\x00\x00\x00K\x01.'}\n "
if istask(task):
if ((task[0] is apply) and (not any(map(_maybe_complex, task[2:])))):
d = {'function': dumps_function(task[1]), 'args': dumps(task[2])}
if (len(task) == 4):
d['kwargs'] = dumps(task[3])
return d
elif (not any(map(_maybe_complex, task[1:]))):
return {'function': dumps_function(task[0]), 'args': dumps(task[1:])}
return {'task': dumps(task)} | def dumps_task(task):
" Serialize a dask task\n\n Returns a dict of bytestrings that can each be loaded with ``loads``\n\n Examples\n --------\n Either returns a task as a function, args, kwargs dict\n\n >>> from operator import add\n >>> dumps_task((add, 1)) # doctest: +SKIP\n {'function': b'\x80\x04\x95\x00\x8c\t_operator\x94\x8c\x03add\x94\x93\x94.'\n 'args': b'\x80\x04\x95\x07\x00\x00\x00K\x01K\x02\x86\x94.'}\n\n Or as a single task blob if it can't easily decompose the result. This\n happens either if the task is highly nested, or if it isn't a task at all\n\n >>> dumps_task(1) # doctest: +SKIP\n {'task': b'\x80\x04\x95\x03\x00\x00\x00\x00\x00\x00\x00K\x01.'}\n "
if istask(task):
if ((task[0] is apply) and (not any(map(_maybe_complex, task[2:])))):
d = {'function': dumps_function(task[1]), 'args': dumps(task[2])}
if (len(task) == 4):
d['kwargs'] = dumps(task[3])
return d
elif (not any(map(_maybe_complex, task[1:]))):
return {'function': dumps_function(task[0]), 'args': dumps(task[1:])}
return {'task': dumps(task)}<|docstring|>Serialize a dask task
Returns a dict of bytestrings that can each be loaded with ``loads``
Examples
--------
Either returns a task as a function, args, kwargs dict
>>> from operator import add
>>> dumps_task((add, 1)) # doctest: +SKIP
{'function': b'\x80\x04\x95\x00\x8c\t_operator\x94\x8c\x03add\x94\x93\x94.'
'args': b'\x80\x04\x95\x07\x00\x00\x00K\x01K\x02\x86\x94.'}
Or as a single task blob if it can't easily decompose the result. This
happens either if the task is highly nested, or if it isn't a task at all
>>> dumps_task(1) # doctest: +SKIP
{'task': b'\x80\x04\x95\x03\x00\x00\x00\x00\x00\x00\x00K\x01.'}<|endoftext|>
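The 'function'/'args' dict in the (restored) docstring round-trips with a matching loads; plain pickle stands in for distributed's dumps/loads here:

```python
import pickle
from operator import add

# Same dict shape as the docstring above, built with pickle as a stand-in.
msg = {'function': pickle.dumps(add), 'args': pickle.dumps((1, 2))}
func = pickle.loads(msg['function'])
args = pickle.loads(msg['args'])
assert func(*args) == 3
```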
9ca851554f4403b36f02be268292663ba305e46ebd3e94ea442269a5da00a391 | def apply_function(function, args, kwargs):
' Run a function, collect information\n\n Returns\n -------\n msg: dictionary with status, result/error, timings, etc..\n '
start = time()
try:
result = function(*args, **kwargs)
except Exception as e:
msg = error_message(e)
else:
msg = {'status': 'OK', 'result': result, 'nbytes': sizeof(result), 'type': (dumps_function(type(result)) if (result is not None) else None)}
finally:
end = time()
msg['compute_start'] = start
msg['compute_stop'] = end
msg['thread'] = current_thread().ident
return msg | Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc.. | distributed/worker.py | apply_function | gdementen/distributed | 1 | python | def apply_function(function, args, kwargs):
' Run a function, collect information\n\n Returns\n -------\n msg: dictionary with status, result/error, timings, etc..\n '
start = time()
try:
result = function(*args, **kwargs)
except Exception as e:
msg = error_message(e)
else:
msg = {'status': 'OK', 'result': result, 'nbytes': sizeof(result), 'type': (dumps_function(type(result)) if (result is not None) else None)}
finally:
end = time()
msg['compute_start'] = start
msg['compute_stop'] = end
msg['thread'] = current_thread().ident
return msg | def apply_function(function, args, kwargs):
' Run a function, collect information\n\n Returns\n -------\n msg: dictionary with status, result/error, timings, etc..\n '
start = time()
try:
result = function(*args, **kwargs)
except Exception as e:
msg = error_message(e)
else:
msg = {'status': 'OK', 'result': result, 'nbytes': sizeof(result), 'type': (dumps_function(type(result)) if (result is not None) else None)}
finally:
end = time()
msg['compute_start'] = start
msg['compute_stop'] = end
msg['thread'] = current_thread().ident
return msg<|docstring|>Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..<|endoftext|> |
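The success/error split is easy to check in isolation; this sketch keeps only the status, result and timing fields and drops the sizeof/type/thread bookkeeping and the error_message helper:

```python
from time import time

def apply_function_sketch(function, args, kwargs):
    # Stripped-down shape of the record above.
    start = time()
    try:
        msg = {'status': 'OK', 'result': function(*args, **kwargs)}
    except Exception as e:
        msg = {'status': 'error', 'exception': e}  # stand-in for error_message
    msg['compute_start'], msg['compute_stop'] = start, time()
    return msg

assert apply_function_sketch(sum, ([1, 2, 3],), {})['result'] == 6
assert apply_function_sketch(int, ('oops',), {})['status'] == 'error'
```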
293ef1ba41caabe528c8b717ab9976f15ce36ac40f87d4e3e3f33944bc8083e5 | def get_msg_safe_str(msg):
' Make a worker msg, which contains args and kwargs, safe to cast to str:\n allowing for some arguments to raise exceptions during conversion and\n ignoring them.\n '
class Repr(object):
def __init__(self, f, val):
self._f = f
self._val = val
def __repr__(self):
return self._f(self._val)
msg = msg.copy()
if ('args' in msg):
msg['args'] = Repr(convert_args_to_str, msg['args'])
if ('kwargs' in msg):
msg['kwargs'] = Repr(convert_kwargs_to_str, msg['kwargs'])
return msg | Make a worker msg, which contains args and kwargs, safe to cast to str:
allowing for some arguments to raise exceptions during conversion and
ignoring them. | distributed/worker.py | get_msg_safe_str | gdementen/distributed | 1 | python | def get_msg_safe_str(msg):
' Make a worker msg, which contains args and kwargs, safe to cast to str:\n allowing for some arguments to raise exceptions during conversion and\n ignoring them.\n '
class Repr(object):
def __init__(self, f, val):
self._f = f
self._val = val
def __repr__(self):
return self._f(self._val)
msg = msg.copy()
if ('args' in msg):
msg['args'] = Repr(convert_args_to_str, msg['args'])
if ('kwargs' in msg):
msg['kwargs'] = Repr(convert_kwargs_to_str, msg['kwargs'])
return msg | def get_msg_safe_str(msg):
' Make a worker msg, which contains args and kwargs, safe to cast to str:\n allowing for some arguments to raise exceptions during conversion and\n ignoring them.\n '
class Repr(object):
def __init__(self, f, val):
self._f = f
self._val = val
def __repr__(self):
return self._f(self._val)
msg = msg.copy()
if ('args' in msg):
msg['args'] = Repr(convert_args_to_str, msg['args'])
if ('kwargs' in msg):
msg['kwargs'] = Repr(convert_kwargs_to_str, msg['kwargs'])
return msg<|docstring|>Make a worker msg, which contains args and kwargs, safe to cast to str:
allowing for some arguments to raise exceptions during conversion and
ignoring them.<|endoftext|> |
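The nested Repr class is the whole trick: it defers the possibly-failing string conversion until something actually prints the message. Isolated:

```python
class Repr:
    # Same deferred-repr idea as the nested class above.
    def __init__(self, f, val):
        self._f, self._val = f, val

    def __repr__(self):
        return self._f(self._val)

class Unprintable:
    def __repr__(self):
        raise RuntimeError('no repr available')

safe = {'args': Repr(lambda v: '< could not convert arg to str >', Unprintable())}
print(safe)  # prints the placeholder; building the dict never invoked repr()
```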
73d97c716f6fbdd766ed4e72e77a9313de2e29e39cfa5b20da8c221b3725f567 | def convert_args_to_str(args, max_len=None):
' Convert args to a string, allowing for some arguments to raise\n exceptions during conversion and ignoring them.\n '
length = 0
strs = ['' for i in range(len(args))]
for (i, arg) in enumerate(args):
try:
sarg = repr(arg)
except:
sarg = '< could not convert arg to str >'
strs[i] = sarg
length += (len(sarg) + 2)
if ((max_len is not None) and (length > max_len)):
return '({}'.format(', '.join(strs[:(i + 1)]))[:max_len]
else:
return '({})'.format(', '.join(strs)) | Convert args to a string, allowing for some arguments to raise
exceptions during conversion and ignoring them. | distributed/worker.py | convert_args_to_str | gdementen/distributed | 1 | python | def convert_args_to_str(args, max_len=None):
' Convert args to a string, allowing for some arguments to raise\n exceptions during conversion and ignoring them.\n '
length = 0
strs = ['' for i in range(len(args))]
for (i, arg) in enumerate(args):
try:
sarg = repr(arg)
except:
sarg = '< could not convert arg to str >'
strs[i] = sarg
length += (len(sarg) + 2)
if ((max_len is not None) and (length > max_len)):
return '({}'.format(', '.join(strs[:(i + 1)]))[:max_len]
else:
return '({})'.format(', '.join(strs)) | def convert_args_to_str(args, max_len=None):
' Convert args to a string, allowing for some arguments to raise\n exceptions during conversion and ignoring them.\n '
length = 0
strs = ['' for i in range(len(args))]
for (i, arg) in enumerate(args):
try:
sarg = repr(arg)
except:
sarg = '< could not convert arg to str >'
strs[i] = sarg
length += (len(sarg) + 2)
if ((max_len is not None) and (length > max_len)):
return '({}'.format(', '.join(strs[:(i + 1)]))[:max_len]
else:
return '({})'.format(', '.join(strs))<|docstring|>Convert args to a string, allowing for some arguments to raise
exceptions during conversion and ignoring them.<|endoftext|> |
1c3290b3deaf87ccb99d912cc6b98737506c55fa212b178293869a350ebf641e | def convert_kwargs_to_str(kwargs, max_len=None):
' Convert kwargs to a string, allowing for some arguments to raise\n exceptions during conversion and ignoring them.\n '
length = 0
strs = ['' for i in range(len(kwargs))]
for (i, (argname, arg)) in enumerate(kwargs.items()):
try:
sarg = repr(arg)
except:
sarg = '< could not convert arg to str >'
skwarg = ((repr(argname) + ': ') + sarg)
strs[i] = skwarg
length += (len(skwarg) + 2)
if ((max_len is not None) and (length > max_len)):
return '{{{}'.format(', '.join(strs[:(i + 1)]))[:max_len]
else:
return '{{{}}}'.format(', '.join(strs)) | Convert kwargs to a string, allowing for some arguments to raise
exceptions during conversion and ignoring them. | distributed/worker.py | convert_kwargs_to_str | gdementen/distributed | 1 | python | def convert_kwargs_to_str(kwargs, max_len=None):
' Convert kwargs to a string, allowing for some arguments to raise\n exceptions during conversion and ignoring them.\n '
length = 0
strs = ['' for i in range(len(kwargs))]
for (i, (argname, arg)) in enumerate(kwargs.items()):
try:
sarg = repr(arg)
except:
sarg = '< could not convert arg to str >'
skwarg = ((repr(argname) + ': ') + sarg)
strs[i] = skwarg
length += (len(skwarg) + 2)
if ((max_len is not None) and (length > max_len)):
return '{{{}'.format(', '.join(strs[:(i + 1)]))[:max_len]
else:
return '{{{}}}'.format(', '.join(strs)) | def convert_kwargs_to_str(kwargs, max_len=None):
' Convert kwargs to a string, allowing for some arguments to raise\n exceptions during conversion and ignoring them.\n '
length = 0
strs = ['' for i in range(len(kwargs))]
for (i, (argname, arg)) in enumerate(kwargs.items()):
try:
sarg = repr(arg)
except:
sarg = '< could not convert arg to str >'
skwarg = ((repr(argname) + ': ') + sarg)
strs[i] = skwarg
length += (len(skwarg) + 2)
if ((max_len is not None) and (length > max_len)):
return '{{{}'.format(', '.join(strs[:(i + 1)]))[:max_len]
else:
return '{{{}}}'.format(', '.join(strs))<|docstring|>Convert kwargs to a string, allowing for some arguments to raise
exceptions during conversion and ignoring them.<|endoftext|> |
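With the '' list initializer restored in the stripped copies above, both helpers are syntactically valid again; assuming they are defined in scope, they behave like this:

```python
# Assumes convert_args_to_str / convert_kwargs_to_str from the records above
# (with the restored '' initializer) are defined in scope.
assert convert_args_to_str((1, 2)) == '(1, 2)'
assert convert_kwargs_to_str({'x': 9}) == "{'x': 9}"
# With max_len set, the output is cut and the closing bracket is dropped:
assert len(convert_args_to_str(tuple(range(100)), max_len=20)) <= 20
```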
25de69ce27a61e6ae062101a0b69a60847b010b37e7abb6c8a895d6e6d931658 | def deserialize(self, function=None, args=None, kwargs=None, task=None):
' Deserialize task inputs and regularize to func, args, kwargs '
if (task is not None):
task = loads(task)
if (function is not None):
function = loads(function)
if args:
args = loads(args)
if kwargs:
kwargs = loads(kwargs)
if (task is not None):
assert ((not function) and (not args) and (not kwargs))
function = execute_task
args = (task,)
return (function, (args or ()), (kwargs or {})) | Deserialize task inputs and regularize to func, args, kwargs | distributed/worker.py | deserialize | gdementen/distributed | 1 | python | def deserialize(self, function=None, args=None, kwargs=None, task=None):
' '
if (task is not None):
task = loads(task)
if (function is not None):
function = loads(function)
if args:
args = loads(args)
if kwargs:
kwargs = loads(kwargs)
if (task is not None):
assert ((not function) and (not args) and (not kwargs))
function = execute_task
args = (task,)
return (function, (args or ()), (kwargs or {})) | def deserialize(self, function=None, args=None, kwargs=None, task=None):
' '
if (task is not None):
task = loads(task)
if (function is not None):
function = loads(function)
if args:
args = loads(args)
if kwargs:
kwargs = loads(kwargs)
if (task is not None):
assert ((not function) and (not args) and (not kwargs))
function = execute_task
args = (task,)
return (function, (args or ()), (kwargs or {}))<|docstring|>Deserialize task inputs and regularize to func, args, kwargs<|endoftext|> |
5f0f6a54984a68873b0e5ca581c902f9458e1e2c1361deca46f830c56c7250cb | @gen.coroutine
def gather_many(self, msgs):
" Gather the data for many compute messages at once\n\n Returns\n -------\n good: the input messages for which we have data\n bad: a dict of task keys for which we could not find data\n data: The scope in which to run tasks\n len(remote): the number of new keys we've gathered\n "
who_has = merge((msg['who_has'] for msg in msgs if ('who_has' in msg)))
local = {k: self.data[k] for k in who_has if (k in self.data)}
who_has = {k: v for (k, v) in who_has.items() if (k not in local)}
(remote, bad_data) = (yield gather_from_workers(who_has, permissive=True, rpc=self.rpc, close=False))
if remote:
self.data.update(remote)
(yield self.scheduler.add_keys(address=self.address, keys=list(remote)))
data = merge(local, remote)
if bad_data:
missing = {msg['key']: {k for k in msg['who_has'] if (k in bad_data)} for msg in msgs if ('who_has' in msg)}
bad = {k: v for (k, v) in missing.items() if v}
good = [msg for msg in msgs if (not missing.get(msg['key']))]
else:
(good, bad) = (msgs, {})
raise Return([good, bad, data, len(remote)]) | Gather the data for many compute messages at once
Returns
-------
good: the input messages for which we have data
bad: a dict of task keys for which we could not find data
data: The scope in which to run tasks
len(remote): the number of new keys we've gathered | distributed/worker.py | gather_many | gdementen/distributed | 1 | python | @gen.coroutine
def gather_many(self, msgs):
" Gather the data for many compute messages at once\n\n Returns\n -------\n good: the input messages for which we have data\n bad: a dict of task keys for which we could not find data\n data: The scope in which to run tasks\n len(remote): the number of new keys we've gathered\n "
who_has = merge((msg['who_has'] for msg in msgs if ('who_has' in msg)))
local = {k: self.data[k] for k in who_has if (k in self.data)}
who_has = {k: v for (k, v) in who_has.items() if (k not in local)}
(remote, bad_data) = (yield gather_from_workers(who_has, permissive=True, rpc=self.rpc, close=False))
if remote:
self.data.update(remote)
(yield self.scheduler.add_keys(address=self.address, keys=list(remote)))
data = merge(local, remote)
if bad_data:
missing = {msg['key']: {k for k in msg['who_has'] if (k in bad_data)} for msg in msgs if ('who_has' in msg)}
bad = {k: v for (k, v) in missing.items() if v}
good = [msg for msg in msgs if (not missing.get(msg['key']))]
else:
(good, bad) = (msgs, {})
raise Return([good, bad, data, len(remote)]) | @gen.coroutine
def gather_many(self, msgs):
" Gather the data for many compute messages at once\n\n Returns\n -------\n good: the input messages for which we have data\n bad: a dict of task keys for which we could not find data\n data: The scope in which to run tasks\n len(remote): the number of new keys we've gathered\n "
who_has = merge((msg['who_has'] for msg in msgs if ('who_has' in msg)))
local = {k: self.data[k] for k in who_has if (k in self.data)}
who_has = {k: v for (k, v) in who_has.items() if (k not in local)}
(remote, bad_data) = (yield gather_from_workers(who_has, permissive=True, rpc=self.rpc, close=False))
if remote:
self.data.update(remote)
(yield self.scheduler.add_keys(address=self.address, keys=list(remote)))
data = merge(local, remote)
if bad_data:
missing = {msg['key']: {k for k in msg['who_has'] if (k in bad_data)} for msg in msgs if ('who_has' in msg)}
bad = {k: v for (k, v) in missing.items() if v}
good = [msg for msg in msgs if (not missing.get(msg['key']))]
else:
(good, bad) = (msgs, {})
raise Return([good, bad, data, len(remote)])<|docstring|>Gather the data for many compute messages at once
Returns
-------
good: the input messages for which we have data
bad: a dict of task keys for which we could not find data
data: The scope in which to run tasks
len(remote): the number of new keys we've gathered<|endoftext|> |
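The bad-data bookkeeping at the end of gather_many is pure dict/set logic and can be checked standalone; the message shapes below are simplified stand-ins:

```python
# who_has maps dependency key -> candidate workers (values unused here).
msgs = [{'key': 'x', 'who_has': {'a': [], 'b': []}},
        {'key': 'y', 'who_has': {'c': []}}]
bad_data = {'b'}  # dependency that failed to gather

missing = {m['key']: {k for k in m['who_has'] if k in bad_data} for m in msgs}
bad = {k: v for k, v in missing.items() if v}
good = [m for m in msgs if not missing.get(m['key'])]

assert bad == {'x': {'b'}}
assert [m['key'] for m in good] == ['y']
```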
a9562a4e75f87cb8be4581e4a744f8a7250aa380e3d6728a481cbb0bb8598e66 | @gen.coroutine
def executor_submit(self, key, function, *args, **kwargs):
" Safely run function in thread pool executor\n\n We've run into issues running concurrent.future futures within\n tornado. Apparently it's advantageous to use timeouts and periodic\n callbacks to ensure things run smoothly. This can get tricky, so we\n pull it off into an separate method.\n "
job_counter[0] += 1
future = self.executor.submit(function, *args, **kwargs)
pc = PeriodicCallback((lambda : logger.debug('future state: %s - %s', key, future._state)), 1000, io_loop=self.loop)
pc.start()
try:
(yield future)
finally:
pc.stop()
pass
result = future.result()
raise gen.Return(result) | Safely run function in thread pool executor
We've run into issues running concurrent.future futures within
tornado. Apparently it's advantageous to use timeouts and periodic
callbacks to ensure things run smoothly. This can get tricky, so we
pull it off into a separate method. | distributed/worker.py | executor_submit | gdementen/distributed | 1 | python | @gen.coroutine
def executor_submit(self, key, function, *args, **kwargs):
" Safely run function in thread pool executor\n\n We've run into issues running concurrent.future futures within\n tornado. Apparently it's advantageous to use timeouts and periodic\n callbacks to ensure things run smoothly. This can get tricky, so we\n pull it off into an separate method.\n "
job_counter[0] += 1
future = self.executor.submit(function, *args, **kwargs)
pc = PeriodicCallback((lambda : logger.debug('future state: %s - %s', key, future._state)), 1000, io_loop=self.loop)
pc.start()
try:
(yield future)
finally:
pc.stop()
pass
result = future.result()
raise gen.Return(result) | @gen.coroutine
def executor_submit(self, key, function, *args, **kwargs):
" Safely run function in thread pool executor\n\n We've run into issues running concurrent.future futures within\n tornado. Apparently it's advantageous to use timeouts and periodic\n callbacks to ensure things run smoothly. This can get tricky, so we\n pull it off into an separate method.\n "
job_counter[0] += 1
future = self.executor.submit(function, *args, **kwargs)
pc = PeriodicCallback((lambda : logger.debug('future state: %s - %s', key, future._state)), 1000, io_loop=self.loop)
pc.start()
try:
(yield future)
finally:
pc.stop()
pass
result = future.result()
raise gen.Return(result)<|docstring|>Safely run function in thread pool executor
We've run into issues running concurrent.future futures within
tornado. Apparently it's advantageous to use timeouts and periodic
callbacks to ensure things run smoothly. This can get tricky, so we
pull it off into a separate method.<|endoftext|>
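Under the tornado plumbing, the core move is "submit blocking work to a pool, wait on the future"; without the PeriodicCallback hardening it reduces to:

```python
from concurrent.futures import ThreadPoolExecutor

# The un-hardened core of executor_submit: no tornado, no PeriodicCallback.
with ThreadPoolExecutor(max_workers=2) as executor:
    future = executor.submit(sum, range(10))
    print(future.result())  # 45
```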
0b5a5baf33c2fe4a102c354e8ed864e81a1c018d481a6639fa5b4ecdba75c7fc | @gen.coroutine
def compute(self, stream=None, function=None, key=None, args=(), kwargs={}, task=None, who_has=None, report=True):
' Execute function '
self.active.add(key)
msg = (yield self._ready_task(function=function, key=key, args=args, kwargs=kwargs, task=task, who_has=who_has))
if (msg['status'] != 'OK'):
try:
self.active.remove(key)
except KeyError:
pass
raise Return(msg)
else:
function = msg['function']
args = msg['args']
kwargs = msg['kwargs']
result = (yield self.executor_submit(key, apply_function, function, args, kwargs))
result['key'] = key
result.update(msg['diagnostics'])
if (result['status'] == 'OK'):
self.data[key] = result.pop('result')
if report:
response = (yield self.scheduler.add_keys(address=(self.ip, self.port), keys=[key]))
if (not (response == 'OK')):
logger.warn('Could not report results to scheduler: %s', str(response))
else:
logger.warn(' Compute Failed\nFunction: %s\nargs: %s\nkwargs: %s\n', str(funcname(function))[:1000], convert_args_to_str(args, max_len=1000), convert_kwargs_to_str(kwargs, max_len=1000), exc_info=True)
logger.debug('Send compute response to scheduler: %s, %s', key, get_msg_safe_str(msg))
try:
self.active.remove(key)
except KeyError:
pass
raise Return(result) | Execute function | distributed/worker.py | compute | gdementen/distributed | 1 | python | @gen.coroutine
def compute(self, stream=None, function=None, key=None, args=(), kwargs={}, task=None, who_has=None, report=True):
' '
self.active.add(key)
msg = (yield self._ready_task(function=function, key=key, args=args, kwargs=kwargs, task=task, who_has=who_has))
if (msg['status'] != 'OK'):
try:
self.active.remove(key)
except KeyError:
pass
raise Return(msg)
else:
function = msg['function']
args = msg['args']
kwargs = msg['kwargs']
result = (yield self.executor_submit(key, apply_function, function, args, kwargs))
result['key'] = key
result.update(msg['diagnostics'])
if (result['status'] == 'OK'):
self.data[key] = result.pop('result')
if report:
response = (yield self.scheduler.add_keys(address=(self.ip, self.port), keys=[key]))
if (not (response == 'OK')):
logger.warn('Could not report results to scheduler: %s', str(response))
else:
logger.warn(' Compute Failed\nFunction: %s\nargs: %s\nkwargs: %s\n', str(funcname(function))[:1000], convert_args_to_str(args, max_len=1000), convert_kwargs_to_str(kwargs, max_len=1000), exc_info=True)
logger.debug('Send compute response to scheduler: %s, %s', key, get_msg_safe_str(msg))
try:
self.active.remove(key)
except KeyError:
pass
raise Return(result) | @gen.coroutine
def compute(self, stream=None, function=None, key=None, args=(), kwargs={}, task=None, who_has=None, report=True):
' '
self.active.add(key)
msg = (yield self._ready_task(function=function, key=key, args=args, kwargs=kwargs, task=task, who_has=who_has))
if (msg['status'] != 'OK'):
try:
self.active.remove(key)
except KeyError:
pass
raise Return(msg)
else:
function = msg['function']
args = msg['args']
kwargs = msg['kwargs']
result = (yield self.executor_submit(key, apply_function, function, args, kwargs))
result['key'] = key
result.update(msg['diagnostics'])
if (result['status'] == 'OK'):
self.data[key] = result.pop('result')
if report:
response = (yield self.scheduler.add_keys(address=(self.ip, self.port), keys=[key]))
if (not (response == 'OK')):
logger.warn('Could not report results to scheduler: %s', str(response))
else:
logger.warn(' Compute Failed\nFunction: %s\nargs: %s\nkwargs: %s\n', str(funcname(function))[:1000], convert_args_to_str(args, max_len=1000), convert_kwargs_to_str(kwargs, max_len=1000), exc_info=True)
logger.debug('Send compute response to scheduler: %s, %s', key, get_msg_safe_str(msg))
try:
self.active.remove(key)
except KeyError:
pass
raise Return(result)<|docstring|>Execute function<|endoftext|> |
c5479042cdad4efeb9f00b6762a5471b13a8527feb0d9f0a5433b7ac9fc0255e | def start_ipython(self, stream):
'Start an IPython kernel\n\n Returns Jupyter connection info dictionary.\n '
if (self._ipython_kernel is None):
self._ipython_kernel = self._start_ipython()
return self._ipython_kernel.get_connection_info() | Start an IPython kernel
Returns Jupyter connection info dictionary. | distributed/worker.py | start_ipython | gdementen/distributed | 1 | python | def start_ipython(self, stream):
'Start an IPython kernel\n\n Returns Jupyter connection info dictionary.\n '
if (self._ipython_kernel is None):
self._ipython_kernel = self._start_ipython()
return self._ipython_kernel.get_connection_info() | def start_ipython(self, stream):
'Start an IPython kernel\n\n Returns Jupyter connection info dictionary.\n '
if (self._ipython_kernel is None):
self._ipython_kernel = self._start_ipython()
return self._ipython_kernel.get_connection_info()<|docstring|>Start an IPython kernel
Returns Jupyter connection info dictionary.<|endoftext|> |
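start_ipython is a lazy singleton: the kernel is created on the first call and reused afterwards. The shape, stripped of IPython specifics:

```python
class KernelHolder:
    # Create-once-then-reuse shape; object() stands in for the kernel.
    _kernel = None

    def start_ipython(self):
        if self._kernel is None:
            self._kernel = object()
        return self._kernel

holder = KernelHolder()
assert holder.start_ipython() is holder.start_ipython()
```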
ad31f832da57a638aa5900cb4cef47d28f80a314093640cbac4b541c77f61d83 | def host_health(self, stream=None):
' Information about worker '
d = {'time': time()}
try:
import psutil
mem = psutil.virtual_memory()
d.update({'cpu': psutil.cpu_percent(), 'memory': mem.total, 'memory-percent': mem.percent})
net_io = psutil.net_io_counters()
if self._last_net_io:
d['network-send'] = (net_io.bytes_sent - self._last_net_io.bytes_sent)
d['network-recv'] = (net_io.bytes_recv - self._last_net_io.bytes_recv)
else:
d['network-send'] = 0
d['network-recv'] = 0
self._last_net_io = net_io
try:
disk_io = psutil.disk_io_counters()
except RuntimeError:
pass
else:
if self._last_disk_io:
d['disk-read'] = (disk_io.read_bytes - self._last_disk_io.read_bytes)
d['disk-write'] = (disk_io.write_bytes - self._last_disk_io.write_bytes)
else:
d['disk-read'] = 0
d['disk-write'] = 0
self._last_disk_io = disk_io
except ImportError:
pass
return d | Information about worker | distributed/worker.py | host_health | gdementen/distributed | 1 | python | def host_health(self, stream=None):
' '
d = {'time': time()}
try:
import psutil
mem = psutil.virtual_memory()
d.update({'cpu': psutil.cpu_percent(), 'memory': mem.total, 'memory-percent': mem.percent})
net_io = psutil.net_io_counters()
if self._last_net_io:
d['network-send'] = (net_io.bytes_sent - self._last_net_io.bytes_sent)
d['network-recv'] = (net_io.bytes_recv - self._last_net_io.bytes_recv)
else:
d['network-send'] = 0
d['network-recv'] = 0
self._last_net_io = net_io
try:
disk_io = psutil.disk_io_counters()
except RuntimeError:
pass
else:
if self._last_disk_io:
d['disk-read'] = (disk_io.read_bytes - self._last_disk_io.read_bytes)
d['disk-write'] = (disk_io.write_bytes - self._last_disk_io.write_bytes)
else:
d['disk-read'] = 0
d['disk-write'] = 0
self._last_disk_io = disk_io
except ImportError:
pass
return d | def host_health(self, stream=None):
' '
d = {'time': time()}
try:
import psutil
mem = psutil.virtual_memory()
d.update({'cpu': psutil.cpu_percent(), 'memory': mem.total, 'memory-percent': mem.percent})
net_io = psutil.net_io_counters()
if self._last_net_io:
d['network-send'] = (net_io.bytes_sent - self._last_net_io.bytes_sent)
d['network-recv'] = (net_io.bytes_recv - self._last_net_io.bytes_recv)
else:
d['network-send'] = 0
d['network-recv'] = 0
self._last_net_io = net_io
try:
disk_io = psutil.disk_io_counters()
except RuntimeError:
pass
else:
if self._last_disk_io:
d['disk-read'] = (disk_io.read_bytes - self._last_disk_io.read_bytes)
d['disk-write'] = (disk_io.write_bytes - self._last_disk_io.write_bytes)
else:
d['disk-read'] = 0
d['disk-write'] = 0
self._last_disk_io = disk_io
except ImportError:
pass
return d<|docstring|>Information about worker<|endoftext|> |
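psutil's network and disk counters are cumulative, so the record derives per-interval traffic as current sample minus previous sample; the pattern in isolation (requires psutil, mirroring the guarded import above):

```python
import time
import psutil  # the record above guards this import with try/except ImportError

last = psutil.net_io_counters()
time.sleep(1)
now = psutil.net_io_counters()
print('sent:', now.bytes_sent - last.bytes_sent,
      'recv:', now.bytes_recv - last.bytes_recv)
```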
e0ff678c32645f0a358463f544f97396a8ca3c8231af43392e85982d1fde9a5f | def parse(db: database.Database, logfile: pathlib.Path) -> None:
'\n Parse as much information as possible from the given log file into a DB.\n\n :param db:\n Target database to populate.\n :param logfile:\n Source jam log file containing debug output.\n\n '
for parser_cls in [DCParser, DDParser, DMParser]:
print('Running {}'.format(parser_cls.__name__))
with open(logfile) as logs:
parser_cls(db).parse(logs) | Parse as much information as possible from the given log file into a DB.
:param db:
Target database to populate.
:param logfile:
Source jam log file containing debug output. | jamjar/parsers/__init__.py | parse | ensoft/jamjar | 0 | python | def parse(db: database.Database, logfile: pathlib.Path) -> None:
'\n Parse as much information as possible from the given log file into a DB.\n\n :param db:\n Target database to populate.\n :param logfile:\n Source jam log file containing debug output.\n\n '
for parser_cls in [DCParser, DDParser, DMParser]:
print('Running {}'.format(parser_cls.__name__))
with open(logfile) as logs:
parser_cls(db).parse(logs) | def parse(db: database.Database, logfile: pathlib.Path) -> None:
'\n Parse as much information as possible from the given log file into a DB.\n\n :param db:\n Target database to populate.\n :param logfile:\n Source jam log file containing debug output.\n\n '
for parser_cls in [DCParser, DDParser, DMParser]:
print('Running {}'.format(parser_cls.__name__))
with open(logfile) as logs:
parser_cls(db).parse(logs)<|docstring|>Parse as much information as possible from the given log file into a DB.
:param db:
Target database to populate.
:param logfile:
Source jam log file containing debug output.<|endoftext|> |
7ebaa446d674e0440922983cee7b1b60d33374f3b656002918742cd1c60cfe7d | def __init__(self, parent, metadata=None):
'Groupbox for pynwb.base.TimeSeries fields filling form.'
super().__init__()
self.setTitle('TimeSeries')
self.parent = parent
self.group_type = 'TimeSeries'
self.lbl_name = QLabel((('name<span style="color:' + required_asterisk_color) + ';">*</span>:'))
self.form_name = QLineEdit('TimeSeries')
self.form_name.setToolTip('The unique name of this TimeSeries dataset')
self.lbl_unit = QLabel('unit:')
self.form_unit = QLineEdit('')
self.form_unit.setPlaceholderText('unit')
self.form_unit.setToolTip('The base unit of measurement (should be SI unit)')
self.lbl_conversion = QLabel('conversion:')
self.form_conversion = QLineEdit('')
self.form_conversion.setPlaceholderText('1.0')
self.form_conversion.setToolTip('Scalar to multiply each element in data to convert it to the specified unit')
self.lbl_resolution = QLabel('resolution:')
self.form_resolution = QLineEdit('')
self.form_resolution.setPlaceholderText('1.0')
self.form_resolution.setToolTip('The smallest meaningful difference (in specified unit) between values in data')
self.lbl_timestamps = QLabel('timestamps:')
self.chk_timestamps = QCheckBox('Get from source file')
self.chk_timestamps.setChecked(False)
self.chk_timestamps.setToolTip('Timestamps for samples stored in data.\nCheck box if this data will be retrieved from source file.\nUncheck box to ignore it.')
self.lbl_starting_time = QLabel('starting_time:')
self.form_starting_time = QLineEdit('')
self.form_starting_time.setPlaceholderText('0.0')
self.form_starting_time.setToolTip('The timestamp of the first sample')
self.lbl_rate = QLabel('rate:')
self.form_rate = QLineEdit('')
self.form_rate.setPlaceholderText('0.0')
self.form_rate.setToolTip('Sampling rate in Hz')
self.lbl_comments = QLabel('comments:')
self.form_comments = QLineEdit('')
self.form_comments.setPlaceholderText('comments')
self.form_comments.setToolTip('Human-readable comments about this TimeSeries dataset')
self.lbl_description = QLabel('description:')
self.form_description = QLineEdit('')
self.form_description.setPlaceholderText('description')
self.form_description.setToolTip(' Description of this TimeSeries dataset')
self.grid = QGridLayout()
self.grid.setColumnStretch(2, 1)
self.grid.addWidget(self.lbl_name, 0, 0, 1, 2)
self.grid.addWidget(self.form_name, 0, 2, 1, 4)
self.grid.addWidget(self.lbl_unit, 2, 0, 1, 2)
self.grid.addWidget(self.form_unit, 2, 2, 1, 4)
self.grid.addWidget(self.lbl_conversion, 3, 0, 1, 2)
self.grid.addWidget(self.form_conversion, 3, 2, 1, 4)
self.grid.addWidget(self.lbl_resolution, 4, 0, 1, 2)
self.grid.addWidget(self.form_resolution, 4, 2, 1, 4)
self.grid.addWidget(self.lbl_timestamps, 5, 0, 1, 2)
self.grid.addWidget(self.chk_timestamps, 5, 2, 1, 2)
self.grid.addWidget(self.lbl_starting_time, 6, 0, 1, 2)
self.grid.addWidget(self.form_starting_time, 6, 2, 1, 4)
self.grid.addWidget(self.lbl_rate, 7, 0, 1, 2)
self.grid.addWidget(self.form_rate, 7, 2, 1, 4)
self.grid.addWidget(self.lbl_comments, 8, 0, 1, 2)
self.grid.addWidget(self.form_comments, 8, 2, 1, 4)
self.grid.addWidget(self.lbl_description, 9, 0, 1, 2)
self.grid.addWidget(self.form_description, 9, 2, 1, 4)
self.setLayout(self.grid) | Groupbox for pynwb.base.TimeSeries fields filling form. | singer_lab_to_nwb/gui/classes/forms_base.py | __init__ | stephprince/singer-lab-to-nwb-hackathon | 0 | python | def __init__(self, parent, metadata=None):
super().__init__()
self.setTitle('TimeSeries')
self.parent = parent
self.group_type = 'TimeSeries'
self.lbl_name = QLabel((('name<span style="color:' + required_asterisk_color) + ';">*</span>:'))
self.form_name = QLineEdit('TimeSeries')
self.form_name.setToolTip('The unique name of this TimeSeries dataset')
self.lbl_unit = QLabel('unit:')
self.form_unit = QLineEdit()
self.form_unit.setPlaceholderText('unit')
self.form_unit.setToolTip('The base unit of measurement (should be SI unit)')
self.lbl_conversion = QLabel('conversion:')
self.form_conversion = QLineEdit()
self.form_conversion.setPlaceholderText('1.0')
self.form_conversion.setToolTip('Scalar to multiply each element in data to convert it to the specified unit')
self.lbl_resolution = QLabel('resolution:')
self.form_resolution = QLineEdit()
self.form_resolution.setPlaceholderText('1.0')
self.form_resolution.setToolTip('The smallest meaningful difference (in specified unit) between values in data')
self.lbl_timestamps = QLabel('timestamps:')
self.chk_timestamps = QCheckBox('Get from source file')
self.chk_timestamps.setChecked(False)
self.chk_timestamps.setToolTip('Timestamps for samples stored in data.\nCheck box if this data will be retrieved from source file.\nUncheck box to ignore it.')
self.lbl_starting_time = QLabel('starting_time:')
self.form_starting_time = QLineEdit()
self.form_starting_time.setPlaceholderText('0.0')
self.form_starting_time.setToolTip('The timestamp of the first sample')
self.lbl_rate = QLabel('rate:')
self.form_rate = QLineEdit()
self.form_rate.setPlaceholderText('0.0')
self.form_rate.setToolTip('Sampling rate in Hz')
self.lbl_comments = QLabel('comments:')
self.form_comments = QLineEdit()
self.form_comments.setPlaceholderText('comments')
self.form_comments.setToolTip('Human-readable comments about this TimeSeries dataset')
self.lbl_description = QLabel('description:')
self.form_description = QLineEdit()
self.form_description.setPlaceholderText('description')
self.form_description.setToolTip('Description of this TimeSeries dataset')
self.grid = QGridLayout()
self.grid.setColumnStretch(2, 1)
self.grid.addWidget(self.lbl_name, 0, 0, 1, 2)
self.grid.addWidget(self.form_name, 0, 2, 1, 4)
self.grid.addWidget(self.lbl_unit, 2, 0, 1, 2)
self.grid.addWidget(self.form_unit, 2, 2, 1, 4)
self.grid.addWidget(self.lbl_conversion, 3, 0, 1, 2)
self.grid.addWidget(self.form_conversion, 3, 2, 1, 4)
self.grid.addWidget(self.lbl_resolution, 4, 0, 1, 2)
self.grid.addWidget(self.form_resolution, 4, 2, 1, 4)
self.grid.addWidget(self.lbl_timestamps, 5, 0, 1, 2)
self.grid.addWidget(self.chk_timestamps, 5, 2, 1, 2)
self.grid.addWidget(self.lbl_starting_time, 6, 0, 1, 2)
self.grid.addWidget(self.form_starting_time, 6, 2, 1, 4)
self.grid.addWidget(self.lbl_rate, 7, 0, 1, 2)
self.grid.addWidget(self.form_rate, 7, 2, 1, 4)
self.grid.addWidget(self.lbl_comments, 8, 0, 1, 2)
self.grid.addWidget(self.form_comments, 8, 2, 1, 4)
self.grid.addWidget(self.lbl_description, 9, 0, 1, 2)
self.grid.addWidget(self.form_description, 9, 2, 1, 4)
self.setLayout(self.grid) | def __init__(self, parent, metadata=None):
super().__init__()
self.setTitle('TimeSeries')
self.parent = parent
self.group_type = 'TimeSeries'
self.lbl_name = QLabel((('name<span style="color:' + required_asterisk_color) + ';">*</span>:'))
self.form_name = QLineEdit('TimeSeries')
self.form_name.setToolTip('The unique name of this TimeSeries dataset')
self.lbl_unit = QLabel('unit:')
self.form_unit = QLineEdit()
self.form_unit.setPlaceholderText('unit')
self.form_unit.setToolTip('The base unit of measurement (should be SI unit)')
self.lbl_conversion = QLabel('conversion:')
self.form_conversion = QLineEdit()
self.form_conversion.setPlaceholderText('1.0')
self.form_conversion.setToolTip('Scalar to multiply each element in data to convert it to the specified unit')
self.lbl_resolution = QLabel('resolution:')
self.form_resolution = QLineEdit()
self.form_resolution.setPlaceholderText('1.0')
self.form_resolution.setToolTip('The smallest meaningful difference (in specified unit) between values in data')
self.lbl_timestamps = QLabel('timestamps:')
self.chk_timestamps = QCheckBox('Get from source file')
self.chk_timestamps.setChecked(False)
self.chk_timestamps.setToolTip('Timestamps for samples stored in data.\nCheck box if this data will be retrieved from source file.\nUncheck box to ignore it.')
self.lbl_starting_time = QLabel('starting_time:')
self.form_starting_time = QLineEdit()
self.form_starting_time.setPlaceholderText('0.0')
self.form_starting_time.setToolTip('The timestamp of the first sample')
self.lbl_rate = QLabel('rate:')
self.form_rate = QLineEdit()
self.form_rate.setPlaceholderText('0.0')
self.form_rate.setToolTip('Sampling rate in Hz')
self.lbl_comments = QLabel('comments:')
self.form_comments = QLineEdit()
self.form_comments.setPlaceholderText('comments')
self.form_comments.setToolTip('Human-readable comments about this TimeSeries dataset')
self.lbl_description = QLabel('description:')
self.form_description = QLineEdit()
self.form_description.setPlaceholderText('description')
self.form_description.setToolTip('Description of this TimeSeries dataset')
self.grid = QGridLayout()
self.grid.setColumnStretch(2, 1)
self.grid.addWidget(self.lbl_name, 0, 0, 1, 2)
self.grid.addWidget(self.form_name, 0, 2, 1, 4)
self.grid.addWidget(self.lbl_unit, 2, 0, 1, 2)
self.grid.addWidget(self.form_unit, 2, 2, 1, 4)
self.grid.addWidget(self.lbl_conversion, 3, 0, 1, 2)
self.grid.addWidget(self.form_conversion, 3, 2, 1, 4)
self.grid.addWidget(self.lbl_resolution, 4, 0, 1, 2)
self.grid.addWidget(self.form_resolution, 4, 2, 1, 4)
self.grid.addWidget(self.lbl_timestamps, 5, 0, 1, 2)
self.grid.addWidget(self.chk_timestamps, 5, 2, 1, 2)
self.grid.addWidget(self.lbl_starting_time, 6, 0, 1, 2)
self.grid.addWidget(self.form_starting_time, 6, 2, 1, 4)
self.grid.addWidget(self.lbl_rate, 7, 0, 1, 2)
self.grid.addWidget(self.form_rate, 7, 2, 1, 4)
self.grid.addWidget(self.lbl_comments, 8, 0, 1, 2)
self.grid.addWidget(self.form_comments, 8, 2, 1, 4)
self.grid.addWidget(self.lbl_description, 9, 0, 1, 2)
self.grid.addWidget(self.form_description, 9, 2, 1, 4)
self.setLayout(self.grid)<|docstring|>Groupbox form for filling in pynwb.base.TimeSeries fields.<|endoftext|>
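The groupbox above only lays out Qt widgets; the dictionary that its companion read_fields (below) returns is shaped so it can feed pynwb directly. A minimal hand-off sketch, assuming pynwb is installed; the variable names and data values here are illustrative, not from the source:

from pynwb import TimeSeries

# 'fields' stands in for the dict a GroupTimeSeries.read_fields() call would return
fields = {'name': 'TimeSeries', 'starting_time': 0.0, 'rate': 30000.0,
          'comments': 'no comment', 'description': 'raw voltage trace'}
ts = TimeSeries(data=[0.1, 0.2, 0.3], unit='volts', **fields)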
afd321f4ba46d761b320e5cd077ca43c81987c9598d0001f577ccd7edfaccf8f | def refresh_objects_references(self, metadata=None):
'Refreshes references with existing objects in parent group.'
pass | Refreshes references with existing objects in parent group. | singer_lab_to_nwb/gui/classes/forms_base.py | refresh_objects_references | stephprince/singer-lab-to-nwb-hackathon | 0 | python | def refresh_objects_references(self, metadata=None):
pass | def refresh_objects_references(self, metadata=None):
pass<|docstring|>Refreshes references with existing objects in parent group.<|endoftext|> |
2cc3a12a29da1c6d9c034fa27c162c3c18116cacadc7e53b0f3f5297e65cf1dd | def read_fields(self):
'Reads fields and returns them structured in a dictionary.'
data = {}
data['name'] = self.form_name.text()
try:
data['conversion'] = float(self.form_conversion.text())
except ValueError:
pass
try:
data['resolution'] = float(self.form_resolution.text())
except ValueError as error:
print(error)
if self.chk_timestamps.isChecked():
data['timestamps'] = True
try:
data['starting_time'] = float(self.form_starting_time.text())
except ValueError as error:
print(error)
try:
data['rate'] = float(self.form_rate.text())
except ValueError as error:
print(error)
data['comments'] = self.form_comments.text()
data['description'] = self.form_description.text()
return data | Reads fields and returns them structured in a dictionary. | singer_lab_to_nwb/gui/classes/forms_base.py | read_fields | stephprince/singer-lab-to-nwb-hackathon | 0 | python | def read_fields(self):
data = {}
data['name'] = self.form_name.text()
try:
data['conversion'] = float(self.form_conversion.text())
except ValueError:
pass
try:
data['resolution'] = float(self.form_resolution.text())
except ValueError as error:
print(error)
if self.chk_timestamps.isChecked():
data['timestamps'] = True
try:
data['starting_time'] = float(self.form_starting_time.text())
except ValueError as error:
print(error)
try:
data['rate'] = float(self.form_rate.text())
except ValueError as error:
print(error)
data['comments'] = self.form_comments.text()
data['description'] = self.form_description.text()
return data | def read_fields(self):
data = {}
data['name'] = self.form_name.text()
try:
data['conversion'] = float(self.form_conversion.text())
except ValueError:
pass
try:
data['resolution'] = float(self.form_resolution.text())
except ValueError as error:
print(error)
if self.chk_timestamps.isChecked():
data['timestamps'] = True
try:
data['starting_time'] = float(self.form_starting_time.text())
except ValueError as error:
print(error)
try:
data['rate'] = float(self.form_rate.text())
except ValueError as error:
print(error)
data['comments'] = self.form_comments.text()
data['description'] = self.form_description.text()
return data<|docstring|>Reads fields and returns them structured in a dictionary.<|endoftext|> |
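One quirk worth flagging: read_fields never reads the unit widget, even though __init__ creates form_unit and write_fields below populates it, so the returned dict carries no 'unit' key. A consumer that needs it would have to add one line alongside the 'name' read, a hypothetical one-line patch that is not in the source:

data['unit'] = self.form_unit.text()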
1e89c0b322b87c5f116fba1af1250c964ef07d92eea49ca9c71d74a01017d078 | def write_fields(self, metadata={}):
'Reads a structured dictionary and writes it into the form fields.'
self.form_name.setText(metadata['name'])
if ('unit' in metadata):
self.form_unit.setText(metadata['unit'])
if ('conversion' in metadata):
self.form_conversion.setText(str(metadata['conversion']))
if ('resolution' in metadata):
self.form_resolution.setText(str(metadata['resolution']))
if ('timestamps' in metadata):
self.chk_timestamps.setChecked(True)
if ('starting_time' in metadata):
self.form_starting_time.setText(str(metadata['starting_time']))
if ('rate' in metadata):
self.form_rate.setText(str(metadata['rate']))
if ('comments' in metadata):
self.form_comments.setText(metadata['comments'])
if ('description' in metadata):
self.form_description.setText(metadata['description']) | Reads a structured dictionary and writes it into the form fields. | singer_lab_to_nwb/gui/classes/forms_base.py | write_fields | stephprince/singer-lab-to-nwb-hackathon | 0 | python | def write_fields(self, metadata={}):
self.form_name.setText(metadata['name'])
if ('unit' in metadata):
self.form_unit.setText(metadata['unit'])
if ('conversion' in metadata):
self.form_conversion.setText(str(metadata['conversion']))
if ('resolution' in metadata):
self.form_resolution.setText(str(metadata['resolution']))
if ('timestamps' in metadata):
self.chk_timestamps.setChecked(True)
if ('starting_time' in metadata):
self.form_starting_time.setText(str(metadata['starting_time']))
if ('rate' in metadata):
self.form_rate.setText(str(metadata['rate']))
if ('comments' in metadata):
self.form_comments.setText(metadata['comments'])
if ('description' in metadata):
self.form_description.setText(metadata['description']) | def write_fields(self, metadata={}):
self.form_name.setText(metadata['name'])
if ('unit' in metadata):
self.form_unit.setText(metadata['unit'])
if ('conversion' in metadata):
self.form_conversion.setText(str(metadata['conversion']))
if ('resolution' in metadata):
self.form_resolution.setText(str(metadata['resolution']))
if ('timestamps' in metadata):
self.chk_timestamps.setChecked(True)
if ('starting_time' in metadata):
self.form_starting_time.setText(str(metadata['starting_time']))
if ('rate' in metadata):
self.form_rate.setText(str(metadata['rate']))
if ('comments' in metadata):
self.form_comments.setText(metadata['comments'])
if ('description' in metadata):
self.form_description.setText(metadata['description'])<|docstring|>Reads a structured dictionary and writes it into the form fields.<|endoftext|>
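write_fields and read_fields together give a near round trip for form state (modulo the missing 'unit' read noted above). A hedged usage sketch, with 'group' standing in for an already-constructed GroupTimeSeries instance:

meta_in = {'name': 'raw_ecephys', 'rate': 30000.0, 'comments': 'session 1'}
group.write_fields(metadata=meta_in)
meta_out = group.read_fields()
assert meta_out['name'] == meta_in['name']
assert meta_out['rate'] == meta_in['rate']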
9ce913db1e840090b38492891625219fbc2a23670eb20edc2bb79357330a35fc | def __init__(self, parent):
'Groupbox form for filling in pynwb.base.Image fields.'
super().__init__()
self.setTitle('Image')
self.parent = parent
self.group_type = 'Image'
self.lbl_name = QLabel((('name<span style="color:' + required_asterisk_color) + ';">*</span>:'))
self.form_name = QLineEdit('Image')
self.form_name.setToolTip('The unique name of this Image dataset')
nInstances = 0
for grp in self.parent.groups_list:
if isinstance(grp, GroupImage):
nInstances += 1
if (nInstances > 0):
self.form_name.setText(('Image' + str(nInstances)))
self.lbl_resolution = QLabel('resolution:')
self.form_resolution = QLineEdit('')
self.form_resolution.setPlaceholderText('1.0')
self.form_resolution.setToolTip('Pixels / cm')
self.lbl_description = QLabel('description:')
self.form_description = QLineEdit('')
self.form_description.setPlaceholderText('description')
self.form_description.setToolTip('Description of this Image dataset')
self.lbl_help = QLabel('help:')
self.form_help = QLineEdit('')
self.form_help.setPlaceholderText('help')
self.form_help.setToolTip('Helpful hint for user')
self.grid = QGridLayout()
self.grid.setColumnStretch(2, 1)
self.grid.addWidget(self.lbl_name, 0, 0, 1, 2)
self.grid.addWidget(self.form_name, 0, 2, 1, 4)
self.grid.addWidget(self.lbl_resolution, 2, 0, 1, 2)
self.grid.addWidget(self.form_resolution, 2, 2, 1, 4)
self.grid.addWidget(self.lbl_description, 3, 0, 1, 2)
self.grid.addWidget(self.form_description, 3, 2, 1, 4)
self.grid.addWidget(self.lbl_help, 4, 0, 1, 2)
self.grid.addWidget(self.form_help, 4, 2, 1, 4)
self.setLayout(self.grid) | Groupbox form for filling in pynwb.base.Image fields. | singer_lab_to_nwb/gui/classes/forms_base.py | __init__ | stephprince/singer-lab-to-nwb-hackathon | 0 | python | def __init__(self, parent):
super().__init__()
self.setTitle('Image')
self.parent = parent
self.group_type = 'Image'
self.lbl_name = QLabel((('name<span style="color:' + required_asterisk_color) + ';">*</span>:'))
self.form_name = QLineEdit('Image')
self.form_name.setToolTip('The unique name of this Image dataset')
nInstances = 0
for grp in self.parent.groups_list:
if isinstance(grp, GroupImage):
nInstances += 1
if (nInstances > 0):
self.form_name.setText(('Image' + str(nInstances)))
self.lbl_resolution = QLabel('resolution:')
self.form_resolution = QLineEdit()
self.form_resolution.setPlaceholderText('1.0')
self.form_resolution.setToolTip('Pixels / cm')
self.lbl_description = QLabel('description:')
self.form_description = QLineEdit()
self.form_description.setPlaceholderText('description')
self.form_description.setToolTip('Description of this Image dataset')
self.lbl_help = QLabel('help:')
self.form_help = QLineEdit()
self.form_help.setPlaceholderText('help')
self.form_help.setToolTip('Helpful hint for user')
self.grid = QGridLayout()
self.grid.setColumnStretch(2, 1)
self.grid.addWidget(self.lbl_name, 0, 0, 1, 2)
self.grid.addWidget(self.form_name, 0, 2, 1, 4)
self.grid.addWidget(self.lbl_resolution, 2, 0, 1, 2)
self.grid.addWidget(self.form_resolution, 2, 2, 1, 4)
self.grid.addWidget(self.lbl_description, 3, 0, 1, 2)
self.grid.addWidget(self.form_description, 3, 2, 1, 4)
self.grid.addWidget(self.lbl_help, 4, 0, 1, 2)
self.grid.addWidget(self.form_help, 4, 2, 1, 4)
self.setLayout(self.grid) | def __init__(self, parent):
super().__init__()
self.setTitle('Image')
self.parent = parent
self.group_type = 'Image'
self.lbl_name = QLabel((('name<span style="color:' + required_asterisk_color) + ';">*</span>:'))
self.form_name = QLineEdit('Image')
self.form_name.setToolTip('The unique name of this Image dataset')
nInstances = 0
for grp in self.parent.groups_list:
if isinstance(grp, GroupImage):
nInstances += 1
if (nInstances > 0):
self.form_name.setText(('Image' + str(nInstances)))
self.lbl_resolution = QLabel('resolution:')
self.form_resolution = QLineEdit()
self.form_resolution.setPlaceholderText('1.0')
self.form_resolution.setToolTip('Pixels / cm')
self.lbl_description = QLabel('description:')
self.form_description = QLineEdit()
self.form_description.setPlaceholderText('description')
self.form_description.setToolTip('Description of this Image dataset')
self.lbl_help = QLabel('help:')
self.form_help = QLineEdit()
self.form_help.setPlaceholderText('help')
self.form_help.setToolTip('Helpful hint for user')
self.grid = QGridLayout()
self.grid.setColumnStretch(2, 1)
self.grid.addWidget(self.lbl_name, 0, 0, 1, 2)
self.grid.addWidget(self.form_name, 0, 2, 1, 4)
self.grid.addWidget(self.lbl_resolution, 2, 0, 1, 2)
self.grid.addWidget(self.form_resolution, 2, 2, 1, 4)
self.grid.addWidget(self.lbl_description, 3, 0, 1, 2)
self.grid.addWidget(self.form_description, 3, 2, 1, 4)
self.grid.addWidget(self.lbl_help, 4, 0, 1, 2)
self.grid.addWidget(self.form_help, 4, 2, 1, 4)
self.setLayout(self.grid)<|docstring|>Groupbox form for filling in pynwb.base.Image fields.<|endoftext|>
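For reference, the pynwb object this form mirrors takes the same fields at construction. A minimal sketch under two assumptions of mine: that Image is importable from pynwb.base as in recent pynwb releases, and that the legacy 'help' field (which the form still exposes) is no longer accepted, so it is omitted:

import numpy as np
from pynwb.base import Image

img = Image(name='Image', data=np.zeros((16, 16)),
            resolution=1.0, description='a blank placeholder frame')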
afd321f4ba46d761b320e5cd077ca43c81987c9598d0001f577ccd7edfaccf8f | def refresh_objects_references(self, metadata=None):
'Refreshes references with existing objects in parent group.'
pass | Refreshes references with existing objects in parent group. | singer_lab_to_nwb/gui/classes/forms_base.py | refresh_objects_references | stephprince/singer-lab-to-nwb-hackathon | 0 | python | def refresh_objects_references(self, metadata=None):
pass | def refresh_objects_references(self, metadata=None):
pass<|docstring|>Refreshes references with existing objects in parent group.<|endoftext|> |
90ee65f4bc364ec14eb1f73e62ea062f6de23202584518de1afcad474530160e | def read_fields(self):
'Reads fields and returns them structured in a dictionary.'
data = {}
data['name'] = self.form_name.text()
try:
data['resolution'] = float(self.form_resolution.text())
except ValueError as error:
print(error)
data['description'] = self.form_description.text()
data['help'] = self.form_help.text()
return data | Reads fields and returns them structured in a dictionary. | singer_lab_to_nwb/gui/classes/forms_base.py | read_fields | stephprince/singer-lab-to-nwb-hackathon | 0 | python | def read_fields(self):
data = {}
data['name'] = self.form_name.text()
try:
data['resolution'] = float(self.form_resolution.text())
except ValueError as error:
print(error)
data['description'] = self.form_description.text()
data['help'] = self.form_help.text()
return data | def read_fields(self):
data = {}
data['name'] = self.form_name.text()
try:
data['resolution'] = float(self.form_resolution.text())
except ValueError as error:
print(error)
data['description'] = self.form_description.text()
data['help'] = self.form_help.text()
return data<|docstring|>Reads fields and returns them structured in a dictionary.<|endoftext|> |
d07e0aae40a6b00c82ba33ea9ed7e5bdd240a7de5f468f15d883febabda9dabc | def write_fields(self, metadata={}):
'Reads a structured dictionary and writes it into the form fields.'
self.form_name.setText(metadata['name'])
if ('resolution' in metadata):
self.form_resolution.setText(str(metadata['resolution']))
if ('description' in metadata):
self.form_description.setText(metadata['description'])
if ('help' in metadata):
self.form_help.setText(metadata['help']) | Reads a structured dictionary and writes it into the form fields. | singer_lab_to_nwb/gui/classes/forms_base.py | write_fields | stephprince/singer-lab-to-nwb-hackathon | 0 | python | def write_fields(self, metadata={}):
self.form_name.setText(metadata['name'])
if ('resolution' in metadata):
self.form_resolution.setText(str(metadata['resolution']))
if ('description' in metadata):
self.form_description.setText(metadata['description'])
if ('help' in metadata):
self.form_help.setText(metadata['help']) | def write_fields(self, metadata={}):
self.form_name.setText(metadata['name'])
if ('resolution' in metadata):
self.form_resolution.setText(str(metadata['resolution']))
if ('description' in metadata):
self.form_description.setText(metadata['description'])
if ('help' in metadata):
self.form_help.setText(metadata['help'])<|docstring|>Reads a structured dictionary and writes it into the form fields.<|endoftext|>
e8371eb4e9753c63938c825f8fa14e3165fa2934f8068e73d7d88c75d64bc10a | def __init__(self, parent):
'Groupbox form for filling in pynwb.base.Images fields.'
super().__init__()
self.setTitle('Images')
self.parent = parent
self.group_type = 'Images'
self.lbl_name = QLabel((('name<span style="color:' + required_asterisk_color) + ';">*</span>:'))
self.form_name = QLineEdit('Images')
self.form_name.setToolTip('The name of this set of images')
nInstances = 0
for grp in self.parent.groups_list:
if isinstance(grp, GroupImages):
nInstances += 1
if (nInstances > 0):
self.form_name.setText(('Images' + str(nInstances)))
self.lbl_images = QLabel('images:')
self.combo_images = CustomComboBox()
self.combo_images.setToolTip('Image objects')
self.lbl_description = QLabel('description:')
self.form_description = QLineEdit('')
self.form_description.setPlaceholderText('description')
self.form_description.setToolTip('Description of images')
self.grid = QGridLayout()
self.grid.setColumnStretch(2, 1)
self.grid.addWidget(self.lbl_name, 0, 0, 1, 2)
self.grid.addWidget(self.form_name, 0, 2, 1, 4)
self.grid.addWidget(self.lbl_images, 1, 0, 1, 2)
self.grid.addWidget(self.combo_images, 1, 2, 1, 4)
self.grid.addWidget(self.lbl_description, 2, 0, 1, 2)
self.grid.addWidget(self.form_description, 2, 2, 1, 4)
self.setLayout(self.grid) | Groupbox form for filling in pynwb.base.Images fields. | singer_lab_to_nwb/gui/classes/forms_base.py | __init__ | stephprince/singer-lab-to-nwb-hackathon | 0 | python | def __init__(self, parent):
super().__init__()
self.setTitle('Images')
self.parent = parent
self.group_type = 'Images'
self.lbl_name = QLabel((('name<span style="color:' + required_asterisk_color) + ';">*</span>:'))
self.form_name = QLineEdit('Images')
self.form_name.setToolTip('The name of this set of images')
nInstances = 0
for grp in self.parent.groups_list:
if isinstance(grp, GroupImages):
nInstances += 1
if (nInstances > 0):
self.form_name.setText(('Images' + str(nInstances)))
self.lbl_images = QLabel('images:')
self.combo_images = CustomComboBox()
self.combo_images.setToolTip('Image objects')
self.lbl_description = QLabel('description:')
self.form_description = QLineEdit()
self.form_description.setPlaceholderText('description')
self.form_description.setToolTip('Description of images')
self.grid = QGridLayout()
self.grid.setColumnStretch(2, 1)
self.grid.addWidget(self.lbl_name, 0, 0, 1, 2)
self.grid.addWidget(self.form_name, 0, 2, 1, 4)
self.grid.addWidget(self.lbl_images, 1, 0, 1, 2)
self.grid.addWidget(self.combo_images, 1, 2, 1, 4)
self.grid.addWidget(self.lbl_description, 2, 0, 1, 2)
self.grid.addWidget(self.form_description, 2, 2, 1, 4)
self.setLayout(self.grid) | def __init__(self, parent):
super().__init__()
self.setTitle('Images')
self.parent = parent
self.group_type = 'Images'
self.lbl_name = QLabel((('name<span style="color:' + required_asterisk_color) + ';">*</span>:'))
self.form_name = QLineEdit('Images')
self.form_name.setToolTip('The name of this set of images')
nInstances = 0
for grp in self.parent.groups_list:
if isinstance(grp, GroupImages):
nInstances += 1
if (nInstances > 0):
self.form_name.setText(('Images' + str(nInstances)))
self.lbl_images = QLabel('images:')
self.combo_images = CustomComboBox()
self.combo_images.setToolTip('Image objects')
self.lbl_description = QLabel('description:')
self.form_description = QLineEdit()
self.form_description.setPlaceholderText('description')
self.form_description.setToolTip('Description of images')
self.grid = QGridLayout()
self.grid.setColumnStretch(2, 1)
self.grid.addWidget(self.lbl_name, 0, 0, 1, 2)
self.grid.addWidget(self.form_name, 0, 2, 1, 4)
self.grid.addWidget(self.lbl_images, 1, 0, 1, 2)
self.grid.addWidget(self.combo_images, 1, 2, 1, 4)
self.grid.addWidget(self.lbl_description, 2, 0, 1, 2)
self.grid.addWidget(self.form_description, 2, 2, 1, 4)
self.setLayout(self.grid)<|docstring|>Groupbox form for filling in pynwb.base.Images fields.<|endoftext|>
b0b10945dd0c65a68e42fcb7832b2078f2e85629659915a5d7c72a5cfbd5527a | def refresh_objects_references(self, metadata=None):
'Refreshes references with existing objects in parent group.'
self.combo_images.clear()
for grp in self.parent.groups_list:
if isinstance(grp, GroupImage):
self.combo_images.addItem(grp.form_name.text()) | Refreshes references with existing objects in parent group. | singer_lab_to_nwb/gui/classes/forms_base.py | refresh_objects_references | stephprince/singer-lab-to-nwb-hackathon | 0 | python | def refresh_objects_references(self, metadata=None):
self.combo_images.clear()
for grp in self.parent.groups_list:
if isinstance(grp, GroupImage):
self.combo_images.addItem(grp.form_name.text()) | def refresh_objects_references(self, metadata=None):
self.combo_images.clear()
for grp in self.parent.groups_list:
if isinstance(grp, GroupImage):
self.combo_images.addItem(grp.form_name.text())<|docstring|>Refreshes references with existing objects in parent group.<|endoftext|> |
87271cf42eb3c7fd9eff2ed0f31a665f7d5e2c3ee0ff4269533206f5a767de54 | def read_fields(self):
'Reads fields and returns them structured in a dictionary.'
data = {}
data['name'] = self.form_name.text()
data['images'] = self.combo_images.currentText()
data['description'] = self.form_description.text()
return data | Reads fields and returns them structured in a dictionary. | singer_lab_to_nwb/gui/classes/forms_base.py | read_fields | stephprince/singer-lab-to-nwb-hackathon | 0 | python | def read_fields(self):
data = {}
data['name'] = self.form_name.text()
data['images'] = self.combo_images.currentText()
data['description'] = self.form_description.text()
return data | def read_fields(self):
data = {}
data['name'] = self.form_name.text()
data['images'] = self.combo_images.currentText()
data['description'] = self.form_description.text()
return data<|docstring|>Reads fields and returns them structured in a dictionary.<|endoftext|> |
6434b8a1ce67e2c22b965a16f45fca86963cf405664b3c645b05cd5598a49a91 | def write_fields(self, metadata={}):
'Reads a structured dictionary and writes it into the form fields.'
self.form_name.setText(metadata['name'])
self.combo_images.clear()
self.combo_images.addItem(metadata['images'])
if ('description' in metadata):
self.form_description.setText(metadata['description']) | Reads a structured dictionary and writes it into the form fields. | singer_lab_to_nwb/gui/classes/forms_base.py | write_fields | stephprince/singer-lab-to-nwb-hackathon | 0 | python | def write_fields(self, metadata={}):
self.form_name.setText(metadata['name'])
self.combo_images.clear()
self.combo_images.addItem(metadata['images'])
if ('description' in metadata):
self.form_description.setText(metadata['description']) | def write_fields(self, metadata={}):
self.form_name.setText(metadata['name'])
self.combo_images.clear()
self.combo_images.addItem(metadata['images'])
if ('description' in metadata):
self.form_description.setText(metadata['description'])<|docstring|>Reads a structured dictionary and writes it into the form fields.<|endoftext|>
48c69b1d3a976691833e8c071a9be85106d9f8b6c596f647a9ffe355374eb823 | def __init__(self):
'Class created to ignore mouse wheel events on combobox.'
super().__init__() | Class created to ignore mouse wheel events on combobox. | singer_lab_to_nwb/gui/classes/forms_base.py | __init__ | stephprince/singer-lab-to-nwb-hackathon | 0 | python | def __init__(self):
super().__init__() | def __init__(self):
super().__init__()<|docstring|>Class created to ignore mouse wheel events on combobox.<|endoftext|> |
27a8c49a697a955a78e7264e2abb02e3dc525fb81d854b752a07bdbd3f8e29e6 | def write_json_data(data, filename):
'\n write data to a json file\n '
with open(filename, 'w+', encoding='utf-8') as f:
json.dump(data, f, indent=4, ensure_ascii=False)
f.close() | write data to a json file | dataset/new_dataset/get_single.py | write_json_data | ShubhamAnandJain/MWP-CS229 | 71 | python | def write_json_data(data, filename):
'\n \n '
with open(filename, 'w+', encoding='utf-8') as f:
json.dump(data, f, indent=4, ensure_ascii=False)
f.close() | def write_json_data(data, filename):
'\n \n '
with open(filename, 'w+', encoding='utf-8') as f:
json.dump(data, f, indent=4, ensure_ascii=False)
f.close()<|docstring|>write data to a json file<|endoftext|> |
a63faa279aa0472bd10138b0904055baf41cef4946867756a216fa7e1e76c918 | def read_json_data(filename):
'\n load data from a json file\n '
f = open(filename, 'r', encoding='utf-8')
return json.load(f) | load data from a json file | dataset/new_dataset/get_single.py | read_json_data | ShubhamAnandJain/MWP-CS229 | 71 | python | def read_json_data(filename):
'\n \n '
f = open(filename, 'r', encoding='utf-8')
return json.load(f) | def read_json_data(filename):
'\n \n '
f = open(filename, 'r', encoding='utf-8')
return json.load(f)<|docstring|>load data from a json file<|endoftext|> |
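The two helpers above pair into a simple round trip; note that read_json_data leaves its file handle to the garbage collector, so a with-block (as in write_json_data) would be safer in long-running code. Usage with an illustrative filename:

payload = {'question': 'What is 2 + 3?', 'answer': 5}
write_json_data(payload, 'sample.json')
assert read_json_data('sample.json') == payload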
94060bf9780aa6c8037b2d50a14036cb4e58a0f35851f1f702d0f42552131548 | def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
'Create and send a transaction with a random fee.\n\n The transaction pays to a trivial P2SH script, and assumes that its inputs\n are of the same form.\n The function takes a list of confirmed outputs and unconfirmed outputs\n and attempts to use the confirmed list first for its inputs.\n It adds the newly created outputs to the unconfirmed list.\n Returns (raw transaction, fee).'
rand_fee = (float(fee_increment) * (1.1892 ** random.randint(0, 28)))
fee = ((min_fee - fee_increment) + satoshi_round(rand_fee))
tx = CTransaction()
total_in = Decimal('0.00000000')
while ((total_in <= (amount + fee)) and (len(conflist) > 0)):
t = conflist.pop(0)
total_in += t['amount']
tx.vin.append(CTxIn(COutPoint(int(t['txid'], 16), t['vout']), REDEEM_SCRIPT))
while ((total_in <= (amount + fee)) and (len(unconflist) > 0)):
t = unconflist.pop(0)
total_in += t['amount']
tx.vin.append(CTxIn(COutPoint(int(t['txid'], 16), t['vout']), REDEEM_SCRIPT))
if (total_in <= (amount + fee)):
raise RuntimeError(f'Insufficient funds: need {(amount + fee)}, have {total_in}')
tx.vout.append(CTxOut(int((((total_in - amount) - fee) * COIN)), P2SH))
tx.vout.append(CTxOut(int((amount * COIN)), P2SH))
txid = from_node.sendrawtransaction(hexstring=tx.serialize().hex(), maxfeerate=0)
unconflist.append({'txid': txid, 'vout': 0, 'amount': ((total_in - amount) - fee)})
unconflist.append({'txid': txid, 'vout': 1, 'amount': amount})
return (tx.serialize().hex(), fee) | Create and send a transaction with a random fee.
The transaction pays to a trivial P2SH script, and assumes that its inputs
are of the same form.
The function takes a list of confirmed outputs and unconfirmed outputs
and attempts to use the confirmed list first for its inputs.
It adds the newly created outputs to the unconfirmed list.
Returns (raw transaction, fee). | test/functional/feature_fee_estimation.py | small_txpuzzle_randfee | Jlemix/bitcoin | 32 | python | def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
'Create and send a transaction with a random fee.\n\n The transaction pays to a trivial P2SH script, and assumes that its inputs\n are of the same form.\n The function takes a list of confirmed outputs and unconfirmed outputs\n and attempts to use the confirmed list first for its inputs.\n It adds the newly created outputs to the unconfirmed list.\n Returns (raw transaction, fee).'
rand_fee = (float(fee_increment) * (1.1892 ** random.randint(0, 28)))
fee = ((min_fee - fee_increment) + satoshi_round(rand_fee))
tx = CTransaction()
total_in = Decimal('0.00000000')
while ((total_in <= (amount + fee)) and (len(conflist) > 0)):
t = conflist.pop(0)
total_in += t['amount']
tx.vin.append(CTxIn(COutPoint(int(t['txid'], 16), t['vout']), REDEEM_SCRIPT))
while ((total_in <= (amount + fee)) and (len(unconflist) > 0)):
t = unconflist.pop(0)
total_in += t['amount']
tx.vin.append(CTxIn(COutPoint(int(t['txid'], 16), t['vout']), REDEEM_SCRIPT))
if (total_in <= (amount + fee)):
raise RuntimeError(f'Insufficient funds: need {(amount + fee)}, have {total_in}')
tx.vout.append(CTxOut(int((((total_in - amount) - fee) * COIN)), P2SH))
tx.vout.append(CTxOut(int((amount * COIN)), P2SH))
txid = from_node.sendrawtransaction(hexstring=tx.serialize().hex(), maxfeerate=0)
unconflist.append({'txid': txid, 'vout': 0, 'amount': ((total_in - amount) - fee)})
unconflist.append({'txid': txid, 'vout': 1, 'amount': amount})
return (tx.serialize().hex(), fee) | def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
'Create and send a transaction with a random fee.\n\n The transaction pays to a trivial P2SH script, and assumes that its inputs\n are of the same form.\n The function takes a list of confirmed outputs and unconfirmed outputs\n and attempts to use the confirmed list first for its inputs.\n It adds the newly created outputs to the unconfirmed list.\n Returns (raw transaction, fee).'
rand_fee = (float(fee_increment) * (1.1892 ** random.randint(0, 28)))
fee = ((min_fee - fee_increment) + satoshi_round(rand_fee))
tx = CTransaction()
total_in = Decimal('0.00000000')
while ((total_in <= (amount + fee)) and (len(conflist) > 0)):
t = conflist.pop(0)
total_in += t['amount']
tx.vin.append(CTxIn(COutPoint(int(t['txid'], 16), t['vout']), REDEEM_SCRIPT))
while ((total_in <= (amount + fee)) and (len(unconflist) > 0)):
t = unconflist.pop(0)
total_in += t['amount']
tx.vin.append(CTxIn(COutPoint(int(t['txid'], 16), t['vout']), REDEEM_SCRIPT))
if (total_in <= (amount + fee)):
raise RuntimeError(f'Insufficient funds: need {(amount + fee)}, have {total_in}')
tx.vout.append(CTxOut(int((((total_in - amount) - fee) * COIN)), P2SH))
tx.vout.append(CTxOut(int((amount * COIN)), P2SH))
txid = from_node.sendrawtransaction(hexstring=tx.serialize().hex(), maxfeerate=0)
unconflist.append({'txid': txid, 'vout': 0, 'amount': ((total_in - amount) - fee)})
unconflist.append({'txid': txid, 'vout': 1, 'amount': amount})
return (tx.serialize().hex(), fee)<|docstring|>Create and send a transaction with a random fee.
The transaction pays to a trivial P2SH script, and assumes that its inputs
are of the same form.
The function takes a list of confirmed outputs and unconfirmed outputs
and attempts to use the confirmed list first for its inputs.
It adds the newly created outputs to the unconfirmed list.
Returns (raw transaction, fee).<|endoftext|> |
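The magic constant in the fee randomization is easier to read once you notice that 1.1892 is 2 ** 0.25 to four decimal places, so 1.1892 ** randint(0, 28) is log-uniform over roughly [1, 2 ** 7]: random fees span about a 128x range above min_fee. A quick check of that arithmetic:

assert abs(1.1892 - 2 ** 0.25) < 1e-4
print(1.1892 ** 28)  # ~128, i.e. about 2 ** 7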
4f22de9e2142f5dad50056475b8f997a53a60ded3c88075c93b6beabf7373309 | def check_raw_estimates(node, fees_seen):
'Call estimaterawfee and verify that the estimates meet certain invariants.'
delta = 1e-06
for i in range(1, 26):
for (_, e) in node.estimaterawfee(i).items():
feerate = float(e['feerate'])
assert_greater_than(feerate, 0)
if (((feerate + delta) < min(fees_seen)) or ((feerate - delta) > max(fees_seen))):
raise AssertionError(f'Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})') | Call estimaterawfee and verify that the estimates meet certain invariants. | test/functional/feature_fee_estimation.py | check_raw_estimates | Jlemix/bitcoin | 32 | python | def check_raw_estimates(node, fees_seen):
delta = 1e-06
for i in range(1, 26):
for (_, e) in node.estimaterawfee(i).items():
feerate = float(e['feerate'])
assert_greater_than(feerate, 0)
if (((feerate + delta) < min(fees_seen)) or ((feerate - delta) > max(fees_seen))):
raise AssertionError(f'Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})') | def check_raw_estimates(node, fees_seen):
delta = 1e-06
for i in range(1, 26):
for (_, e) in node.estimaterawfee(i).items():
feerate = float(e['feerate'])
assert_greater_than(feerate, 0)
if (((feerate + delta) < min(fees_seen)) or ((feerate - delta) > max(fees_seen))):
raise AssertionError(f'Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})')<|docstring|>Call estimaterawfee and verify that the estimates meet certain invariants.<|endoftext|> |
17f1efb2dfea8081f4d5aa5439816016027880b099455ce3dac31dd520e8236d | def check_smart_estimates(node, fees_seen):
'Call estimatesmartfee and verify that the estimates meet certain invariants.'
delta = 1e-06
last_feerate = float(max(fees_seen))
all_smart_estimates = [node.estimatesmartfee(i) for i in range(1, 26)]
mempoolMinFee = node.getmempoolinfo()['mempoolminfee']
minRelaytxFee = node.getmempoolinfo()['minrelaytxfee']
for (i, e) in enumerate(all_smart_estimates):
feerate = float(e['feerate'])
assert_greater_than(feerate, 0)
assert_greater_than_or_equal(feerate, float(mempoolMinFee))
assert_greater_than_or_equal(feerate, float(minRelaytxFee))
if (((feerate + delta) < min(fees_seen)) or ((feerate - delta) > max(fees_seen))):
raise AssertionError(f'Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})')
if ((feerate - delta) > last_feerate):
raise AssertionError(f'Estimated fee ({feerate}) larger than last fee ({last_feerate}) for lower number of confirms')
last_feerate = feerate
if (i == 0):
assert_equal(e['blocks'], 2)
else:
assert_greater_than_or_equal((i + 1), e['blocks']) | Call estimatesmartfee and verify that the estimates meet certain invariants. | test/functional/feature_fee_estimation.py | check_smart_estimates | Jlemix/bitcoin | 32 | python | def check_smart_estimates(node, fees_seen):
delta = 1e-06
last_feerate = float(max(fees_seen))
all_smart_estimates = [node.estimatesmartfee(i) for i in range(1, 26)]
mempoolMinFee = node.getmempoolinfo()['mempoolminfee']
minRelaytxFee = node.getmempoolinfo()['minrelaytxfee']
for (i, e) in enumerate(all_smart_estimates):
feerate = float(e['feerate'])
assert_greater_than(feerate, 0)
assert_greater_than_or_equal(feerate, float(mempoolMinFee))
assert_greater_than_or_equal(feerate, float(minRelaytxFee))
if (((feerate + delta) < min(fees_seen)) or ((feerate - delta) > max(fees_seen))):
raise AssertionError(f'Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})')
if ((feerate - delta) > last_feerate):
raise AssertionError(f'Estimated fee ({feerate}) larger than last fee ({last_feerate}) for lower number of confirms')
last_feerate = feerate
if (i == 0):
assert_equal(e['blocks'], 2)
else:
assert_greater_than_or_equal((i + 1), e['blocks']) | def check_smart_estimates(node, fees_seen):
delta = 1e-06
last_feerate = float(max(fees_seen))
all_smart_estimates = [node.estimatesmartfee(i) for i in range(1, 26)]
mempoolMinFee = node.getmempoolinfo()['mempoolminfee']
minRelaytxFee = node.getmempoolinfo()['minrelaytxfee']
for (i, e) in enumerate(all_smart_estimates):
feerate = float(e['feerate'])
assert_greater_than(feerate, 0)
assert_greater_than_or_equal(feerate, float(mempoolMinFee))
assert_greater_than_or_equal(feerate, float(minRelaytxFee))
if (((feerate + delta) < min(fees_seen)) or ((feerate - delta) > max(fees_seen))):
raise AssertionError(f'Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})')
if ((feerate - delta) > last_feerate):
raise AssertionError(f'Estimated fee ({feerate}) larger than last fee ({last_feerate}) for lower number of confirms')
last_feerate = feerate
if (i == 0):
assert_equal(e['blocks'], 2)
else:
assert_greater_than_or_equal((i + 1), e['blocks'])<|docstring|>Call estimatesmartfee and verify that the estimates meet certain invariants.<|endoftext|> |
34462176d7b2f9af0faf022c1ed658fd5009db11fe527f791858381f59cb242b | def send_tx(node, utxo, feerate):
'Broadcast a 1in-1out transaction with a specific input and feerate (sat/vb).'
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout']), REDEEM_SCRIPT)]
tx.vout = [CTxOut(int((utxo['amount'] * COIN)), P2SH)]
fee = (tx.get_vsize() * feerate)
tx.vout[0].nValue -= fee
return node.sendrawtransaction(tx.serialize().hex()) | Broadcast a 1in-1out transaction with a specific input and feerate (sat/vb). | test/functional/feature_fee_estimation.py | send_tx | Jlemix/bitcoin | 32 | python | def send_tx(node, utxo, feerate):
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout']), REDEEM_SCRIPT)]
tx.vout = [CTxOut(int((utxo['amount'] * COIN)), P2SH)]
fee = (tx.get_vsize() * feerate)
tx.vout[0].nValue -= fee
return node.sendrawtransaction(tx.serialize().hex()) | def send_tx(node, utxo, feerate):
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout']), REDEEM_SCRIPT)]
tx.vout = [CTxOut(int((utxo['amount'] * COIN)), P2SH)]
fee = (tx.get_vsize() * feerate)
tx.vout[0].nValue -= fee
return node.sendrawtransaction(tx.serialize().hex())<|docstring|>Broadcast a 1in-1out transaction with a specific input and feerate (sat/vb).<|endoftext|> |
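send_tx computes the absolute fee as vsize times the sat/vB feerate and deducts it from the single output. Worked with made-up numbers (the vsize is illustrative):

vsize = 110            # rough vsize of a 1-in/1-out P2SH spend
feerate = 10           # sat/vB
fee = vsize * feerate  # 1100 sats come straight out of the output value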
0590715800733dbf71b975c570b41ddb13df662c07049c0e8400a63f41158528 | def setup_network(self):
"\n We'll setup the network to have 3 nodes that all mine with different parameters.\n But first we need to use one node to create a lot of outputs\n which we will use to generate our transactions.\n "
self.add_nodes(3, extra_args=self.extra_args)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
self.stop_nodes() | We'll set up the network to have 3 nodes that all mine with different parameters.
But first we need to use one node to create a lot of outputs
which we will use to generate our transactions. | test/functional/feature_fee_estimation.py | setup_network | Jlemix/bitcoin | 32 | python | def setup_network(self):
"\n We'll setup the network to have 3 nodes that all mine with different parameters.\n But first we need to use one node to create a lot of outputs\n which we will use to generate our transactions.\n "
self.add_nodes(3, extra_args=self.extra_args)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
self.stop_nodes() | def setup_network(self):
"\n We'll setup the network to have 3 nodes that all mine with different parameters.\n But first we need to use one node to create a lot of outputs\n which we will use to generate our transactions.\n "
self.add_nodes(3, extra_args=self.extra_args)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
self.stop_nodes()<|docstring|>We'll set up the network to have 3 nodes that all mine with different parameters.
But first we need to use one node to create a lot of outputs
which we will use to generate our transactions.<|endoftext|> |
c16deb631103ead917563e2555c7b8a95bf702a47cd09421dc97266636f0c4b1 | def initial_split(self, node):
'Split two coinbase UTxOs into many small coins'
utxo_count = 2048
self.confutxo = []
splitted_amount = Decimal('0.04')
fee = Decimal('0.1')
change = ((Decimal('100') - (splitted_amount * utxo_count)) - fee)
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(cb['txid'], 16), cb['vout'])) for cb in node.listunspent()[:2]]
tx.vout = [CTxOut(int((splitted_amount * COIN)), P2SH) for _ in range(utxo_count)]
tx.vout.append(CTxOut(int((change * COIN)), P2SH))
txhex = node.signrawtransactionwithwallet(tx.serialize().hex())['hex']
txid = node.sendrawtransaction(txhex)
self.confutxo = [{'txid': txid, 'vout': i, 'amount': splitted_amount} for i in range(utxo_count)]
while (len(node.getrawmempool()) > 0):
self.generate(node, 1, sync_fun=self.no_op) | Split two coinbase UTxOs into many small coins | test/functional/feature_fee_estimation.py | initial_split | Jlemix/bitcoin | 32 | python | def initial_split(self, node):
utxo_count = 2048
self.confutxo = []
splitted_amount = Decimal('0.04')
fee = Decimal('0.1')
change = ((Decimal('100') - (splitted_amount * utxo_count)) - fee)
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(cb['txid'], 16), cb['vout'])) for cb in node.listunspent()[:2]]
tx.vout = [CTxOut(int((splitted_amount * COIN)), P2SH) for _ in range(utxo_count)]
tx.vout.append(CTxOut(int((change * COIN)), P2SH))
txhex = node.signrawtransactionwithwallet(tx.serialize().hex())['hex']
txid = node.sendrawtransaction(txhex)
self.confutxo = [{'txid': txid, 'vout': i, 'amount': splitted_amount} for i in range(utxo_count)]
while (len(node.getrawmempool()) > 0):
self.generate(node, 1, sync_fun=self.no_op) | def initial_split(self, node):
utxo_count = 2048
self.confutxo = []
splitted_amount = Decimal('0.04')
fee = Decimal('0.1')
change = ((Decimal('100') - (splitted_amount * utxo_count)) - fee)
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(cb['txid'], 16), cb['vout'])) for cb in node.listunspent()[:2]]
tx.vout = [CTxOut(int((splitted_amount * COIN)), P2SH) for _ in range(utxo_count)]
tx.vout.append(CTxOut(int((change * COIN)), P2SH))
txhex = node.signrawtransactionwithwallet(tx.serialize().hex())['hex']
txid = node.sendrawtransaction(txhex)
self.confutxo = [{'txid': txid, 'vout': i, 'amount': splitted_amount} for i in range(utxo_count)]
while (len(node.getrawmempool()) > 0):
self.generate(node, 1, sync_fun=self.no_op)<|docstring|>Split two coinbase UTxOs into many small coins<|endoftext|> |
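The constants in initial_split balance exactly against the two 50-BTC regtest coinbase outputs it spends; a quick verification of the change arithmetic:

from decimal import Decimal

change = Decimal('100') - Decimal('0.04') * 2048 - Decimal('0.1')
assert change == Decimal('17.98')  # 100 - 81.92 - 0.1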
840683e11bd02962c99a15e8c68ae83cba256c4f1abebbc854b5b133adbc0f58 | def sanity_check_estimates_range(self):
'Populate estimation buckets, assert estimates are in a sane range and\n are strictly increasing as the target decreases.'
self.fees_per_kb = []
self.memutxo = []
self.log.info('Will output estimates for 1/2/3/6/15/25 blocks')
for _ in range(2):
self.log.info("Creating transactions and mining them with a block size that can't keep up")
self.transact_and_mine(10, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb)
self.log.info('Creating transactions and mining them at a block size that is just big enough')
self.transact_and_mine(10, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb)
while (len(self.nodes[1].getrawmempool()) > 0):
self.generate(self.nodes[1], 1)
self.log.info('Final estimates after emptying mempools')
check_estimates(self.nodes[1], self.fees_per_kb) | Populate estimation buckets, assert estimates are in a sane range and
are strictly increasing as the target decreases. | test/functional/feature_fee_estimation.py | sanity_check_estimates_range | Jlemix/bitcoin | 32 | python | def sanity_check_estimates_range(self):
'Populate estimation buckets, assert estimates are in a sane range and\n are strictly increasing as the target decreases.'
self.fees_per_kb = []
self.memutxo = []
self.log.info('Will output estimates for 1/2/3/6/15/25 blocks')
for _ in range(2):
self.log.info("Creating transactions and mining them with a block size that can't keep up")
self.transact_and_mine(10, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb)
self.log.info('Creating transactions and mining them at a block size that is just big enough')
self.transact_and_mine(10, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb)
while (len(self.nodes[1].getrawmempool()) > 0):
self.generate(self.nodes[1], 1)
self.log.info('Final estimates after emptying mempools')
check_estimates(self.nodes[1], self.fees_per_kb) | def sanity_check_estimates_range(self):
'Populate estimation buckets, assert estimates are in a sane range and\n are strictly increasing as the target decreases.'
self.fees_per_kb = []
self.memutxo = []
self.log.info('Will output estimates for 1/2/3/6/15/25 blocks')
for _ in range(2):
self.log.info("Creating transactions and mining them with a block size that can't keep up")
self.transact_and_mine(10, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb)
self.log.info('Creating transactions and mining them at a block size that is just big enough')
self.transact_and_mine(10, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb)
while (len(self.nodes[1].getrawmempool()) > 0):
self.generate(self.nodes[1], 1)
self.log.info('Final estimates after emptying mempools')
check_estimates(self.nodes[1], self.fees_per_kb)<|docstring|>Populate estimation buckets, assert estimates are in a sane range and
are strictly increasing as the target decreases.<|endoftext|> |
9f3ebfcb1c1c224aee7da0a4d363c74141bee64488af9dae8b78166bd7e98967 | def sanity_check_rbf_estimates(self, utxos):
"During 5 blocks, broadcast low fee transactions. Only 10% of them get\n confirmed and the remaining ones get RBF'd with a high fee transaction at\n the next block.\n The block policy estimator should return the high feerate.\n "
node = self.nodes[0]
miner = self.nodes[1]
low_feerate = 1
high_feerate = 10
utxos_to_respend = []
txids_to_replace = []
assert_greater_than_or_equal(len(utxos), 250)
for _ in range(5):
for _ in range(45):
u = utxos.pop(0)
txid = send_tx(node, u, low_feerate)
utxos_to_respend.append(u)
txids_to_replace.append(txid)
for _ in range(5):
send_tx(node, utxos.pop(0), low_feerate)
self.sync_mempools(wait=0.1, nodes=[node, miner])
for txid in txids_to_replace:
miner.prioritisetransaction(txid=txid, fee_delta=(- COIN))
self.generate(miner, 1)
while (len(utxos_to_respend) > 0):
u = utxos_to_respend.pop(0)
send_tx(node, u, high_feerate)
self.sync_mempools(wait=0.1, nodes=[node, miner])
self.generate(miner, 1)
high_feerate_kvb = ((Decimal(high_feerate) / COIN) * (10 ** 3))
est_feerate = node.estimatesmartfee(2)['feerate']
assert_equal(est_feerate, high_feerate_kvb) | During 5 blocks, broadcast low fee transactions. Only 10% of them get
confirmed and the remaining ones get RBF'd with a high fee transaction at
the next block.
The block policy estimator should return the high feerate. | test/functional/feature_fee_estimation.py | sanity_check_rbf_estimates | Jlemix/bitcoin | 32 | python | def sanity_check_rbf_estimates(self, utxos):
"During 5 blocks, broadcast low fee transactions. Only 10% of them get\n confirmed and the remaining ones get RBF'd with a high fee transaction at\n the next block.\n The block policy estimator should return the high feerate.\n "
node = self.nodes[0]
miner = self.nodes[1]
low_feerate = 1
high_feerate = 10
utxos_to_respend = []
txids_to_replace = []
assert_greater_than_or_equal(len(utxos), 250)
for _ in range(5):
for _ in range(45):
u = utxos.pop(0)
txid = send_tx(node, u, low_feerate)
utxos_to_respend.append(u)
txids_to_replace.append(txid)
for _ in range(5):
send_tx(node, utxos.pop(0), low_feerate)
self.sync_mempools(wait=0.1, nodes=[node, miner])
for txid in txids_to_replace:
miner.prioritisetransaction(txid=txid, fee_delta=(- COIN))
self.generate(miner, 1)
while (len(utxos_to_respend) > 0):
u = utxos_to_respend.pop(0)
send_tx(node, u, high_feerate)
self.sync_mempools(wait=0.1, nodes=[node, miner])
self.generate(miner, 1)
high_feerate_kvb = ((Decimal(high_feerate) / COIN) * (10 ** 3))
est_feerate = node.estimatesmartfee(2)['feerate']
assert_equal(est_feerate, high_feerate_kvb) | def sanity_check_rbf_estimates(self, utxos):
"During 5 blocks, broadcast low fee transactions. Only 10% of them get\n confirmed and the remaining ones get RBF'd with a high fee transaction at\n the next block.\n The block policy estimator should return the high feerate.\n "
node = self.nodes[0]
miner = self.nodes[1]
low_feerate = 1
high_feerate = 10
utxos_to_respend = []
txids_to_replace = []
assert_greater_than_or_equal(len(utxos), 250)
for _ in range(5):
for _ in range(45):
u = utxos.pop(0)
txid = send_tx(node, u, low_feerate)
utxos_to_respend.append(u)
txids_to_replace.append(txid)
for _ in range(5):
send_tx(node, utxos.pop(0), low_feerate)
self.sync_mempools(wait=0.1, nodes=[node, miner])
for txid in txids_to_replace:
miner.prioritisetransaction(txid=txid, fee_delta=(- COIN))
self.generate(miner, 1)
while (len(utxos_to_respend) > 0):
u = utxos_to_respend.pop(0)
send_tx(node, u, high_feerate)
self.sync_mempools(wait=0.1, nodes=[node, miner])
self.generate(miner, 1)
high_feerate_kvb = ((Decimal(high_feerate) / COIN) * (10 ** 3))
est_feerate = node.estimatesmartfee(2)['feerate']
assert_equal(est_feerate, high_feerate_kvb)<|docstring|>During 5 blocks, broadcast low fee transactions. Only 10% of them get
confirmed and the remaining ones get RBF'd with a high fee transaction at
the next block.
The block policy estimator should return the high feerate.<|endoftext|> |
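The 10% figure in the docstring follows directly from the loop constants: each of the 5 iterations broadcasts 45 low-fee transactions that get de-prioritized and later replaced, plus 5 that confirm as-is. Spelled out:

replaced, kept = 45, 5
assert kept / (replaced + kept) == 0.10  # only 10% ever confirm at the low feerate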
65fded06436430a9b2c22de01effb4295b29b7ccac264bcd2c11c8de18b00d29 | def __init__(self):
'Constructor of the command'
Commande.__init__(self, 'score', 'score')
self.schema = ''
self.aide_courte = 'affiche votre feuille de personnage'
self.aide_longue = 'Cette commande vous donne des informations générales sur votre personnage. Considérez ces informations comme une fiche de personnage, vous donnant des informations sur vos stats actuelles, vos états, votre race...' | Constructor of the command | src/primaires/perso/commandes/score/__init__.py | __init__ | vlegoff/tsunami | 14 | python | def __init__(self):
Commande.__init__(self, 'score', 'score')
self.schema = ''
self.aide_courte = 'affiche votre feuille de personnage'
self.aide_longue = 'Cette commande vous donne des informations générales sur votre personnage. Considérez ces informations comme une fiche de personnage, vous donnant des informations sur vos stats actuelles, vos états, votre race...' | def __init__(self):
Commande.__init__(self, 'score', 'score')
self.schema = ''
self.aide_courte = 'affiche votre feuille de personnage'
self.aide_longue = 'Cette commande vous donne des informations générales sur votre personnage. Considérez ces informations comme une fiche de personnage, vous donnant des informations sur vos stats actuelles, vos états, votre race...'<|docstring|>Constructor of the command<|endoftext|>
dfdcab9a4014813dbe1eb9e4cdb41a4529f18e1000c708c36531826b46673aaf | def interpreter(self, personnage, dic_masques):
'Interpretation of the command'
(personnage << MontrerScore.montrer(personnage)) | Interpretation of the command | src/primaires/perso/commandes/score/__init__.py | interpreter | vlegoff/tsunami | 14 | python | def interpreter(self, personnage, dic_masques):
(personnage << MontrerScore.montrer(personnage)) | def interpreter(self, personnage, dic_masques):
(personnage << MontrerScore.montrer(personnage))<|docstring|>Interpretation of the command<|endoftext|>
76faaad4d21a476321e7c26596048ea5921b3a9085993cabdafc33ca6602c095 | def forward(self, src_seq, tgt_seq, **kwargs):
' Forward process of NMT model\n\n Given source and target side, return the probabilities of the samples.\n '
raise NotImplementedError | Forward process of NMT model
Given source and target side, return the probabilities of the samples. | src/models/base.py | forward | 15091444119/NJUNMT-pytorch | 86 | python | def forward(self, src_seq, tgt_seq, **kwargs):
' Forward process of NMT model\n\n Given source and target side, return the probabilities of the samples.\n '
raise NotImplementedError | def forward(self, src_seq, tgt_seq, **kwargs):
' Forward process of NMT model\n\n Given source and target side, return the probabilities of the samples.\n '
raise NotImplementedError<|docstring|>Forward process of NMT model
Given source and target side, return the probabilities of the samples.<|endoftext|> |
f978ade2c3dca4d992435b21dece9c70f543d566fc42595a5177095d15b33fd0 | def init_decoder(self, enc_outputs, expand_size=1):
'\n Prepare for decoding\n Args:\n enc_outputs (dict): Output dictionary from the return value of ```encode```\n expand_size: (int): Repeat several times along the first dimension. This is useful for\n beam search\n\n Returns (dict):\n A dict object storing the states in decoding phase.\n '
raise NotImplementedError | Prepare for decoding
Args:
enc_outputs (dict): Output dictionary from the return value of ```encode```
expand_size: (int): Repeat several times along the first dimension. This is useful for
beam search
Returns (dict):
A dict object storing the states in decoding phase. | src/models/base.py | init_decoder | 15091444119/NJUNMT-pytorch | 86 | python | def init_decoder(self, enc_outputs, expand_size=1):
'\n Prepare for decoding\n Args:\n enc_outputs (dict): Output dictionary from the return value of ```encode```\n expand_size: (int): Repeat several times along the first dimension. This is useful for\n beam search\n\n Returns (dict):\n A dict object storing the states in decoding phase.\n '
raise NotImplementedError | def init_decoder(self, enc_outputs, expand_size=1):
'\n Prepare for decoding\n Args:\n enc_outputs (dict): Output dictionary from the return value of ```encode```\n expand_size: (int): Repeat several times along the first dimension. This is useful for\n beam search\n\n Returns (dict):\n A dict object storing the states in decoding phase.\n '
raise NotImplementedError<|docstring|>Prepare for decoding
Args:
enc_outputs (dict): Output dictionary from the return value of ```encode```
expand_size: (int): Repeat several times along the first dimension. This is useful for
beam search
Returns (dict):
A dict object storing the states in decoding phase.<|endoftext|>
d6510fbcabeef044dbb712bda20ebaa12ce94815c513b26b6f0134aca8f72d36 | def encode(self, src_seq):
'\n Encode the source side\n '
raise NotImplementedError | Encode the source side | src/models/base.py | encode | 15091444119/NJUNMT-pytorch | 86 | python | def encode(self, src_seq):
'\n \n '
raise NotImplementedError | def encode(self, src_seq):
'\n \n '
raise NotImplementedError<|docstring|>Encode the source side<|endoftext|> |
e907f69d142b77d76d18b7fc6c4395d580bb2195a810214a6753cb15b7a05eb4 | def decode(self, tgt_seq, dec_states, log_probs=True):
'\n Decoding for one step\n Args:\n tgt_seq (torch.Tensor): All the generated tokens before.\n dec_states (dict): Decoding states.\n log_probs (bool): Return logarithm probabilities or probabilities. Default is True.\n\n Returns:\n Scores of next tokens and decoding states.\n '
raise NotImplementedError | Decoding for one step
Args:
tgt_seq (torch.Tensor): All the generated tokens before.
dec_states (dict): Decoding states.
log_probs (bool): Return logarithm probabilities or probabilities. Default is True.
Returns:
Scores of next tokens and decoding states. | src/models/base.py | decode | 15091444119/NJUNMT-pytorch | 86 | python | def decode(self, tgt_seq, dec_states, log_probs=True):
'\n Decoding for one step\n Args:\n tgt_seq (torch.Tensor): All the generated tokens before.\n dec_states (dict): Decoding states.\n log_probs (bool): Return logarithm probabilities or probabilities. Default is True.\n\n Returns:\n Scores of next tokens and decoding states.\n '
raise NotImplementedError | def decode(self, tgt_seq, dec_states, log_probs=True):
'\n Decoding for one step\n Args:\n tgt_seq (torch.Tensor): All the generated tokens before.\n dec_states (dict): Decoding states.\n log_probs (bool): Return logarithm probabilities or probabilities. Default is True.\n\n Returns:\n Scores of next tokens and decoding states.\n '
raise NotImplementedError<|docstring|>Decoding for one step
Args:
tgt_seq (torch.Tensor): All the generated tokens before.
dec_states (dict): Decoding states.
log_probs (bool): Return logarithm probabilities or probabilities. Default is True.
Returns:
Scores of next tokens and decoding states.<|endoftext|> |
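Taken together, `encode`, `init_decoder`, and `decode` define a step-wise generation loop. Below is a hedged greedy-search sketch against that interface; it assumes `decode` returns `(scores, dec_states)` with `scores` of shape `[batch, vocab]` for the next token, and the BOS/EOS ids are placeholders, not values from the repository.

import torch

def greedy_decode(model, src_seq, bos_id=1, eos_id=2, max_len=50):
    enc_outputs = model.encode(src_seq)
    dec_states = model.init_decoder(enc_outputs, expand_size=1)
    tgt = torch.full((src_seq.size(0), 1), bos_id, dtype=torch.long)
    for _ in range(max_len):
        # Score the next token given everything generated so far.
        scores, dec_states = model.decode(tgt, dec_states, log_probs=True)
        next_tok = scores.argmax(dim=-1, keepdim=True)
        tgt = torch.cat([tgt, next_tok], dim=-1)
        if (next_tok == eos_id).all():
            break
    return tgt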
302e495356c22c48781d52068fda4ade9178c78a47a907229e5fa5e831470ba4 | def reorder_dec_states(self, dec_states, new_beam_indices, beam_size):
'\n Re-ordering decoding states according to newly selected beam indices\n\n Args:\n dec_states (dict):\n new_beam_indices (torch.Tensor):\n beam_size (int):\n\n Returns:\n Re-ordered dec_states\n '
raise NotImplementedError | Re-ordering decoding states according to newly selected beam indices
Args:
dec_states (dict):
new_beam_indices (torch.Tensor):
beam_size (int):
Returns:
Re-ordered dec_states | src/models/base.py | reorder_dec_states | 15091444119/NJUNMT-pytorch | 86 | python | def reorder_dec_states(self, dec_states, new_beam_indices, beam_size):
'\n Re-ordering decoding states according to newly selected beam indices\n\n Args:\n dec_states (dict):\n new_beam_indices (torch.Tensor):\n beam_size (int):\n\n Returns:\n Re-ordered dec_states\n '
raise NotImplementedError | def reorder_dec_states(self, dec_states, new_beam_indices, beam_size):
'\n Re-ordering decoding states according to newly selected beam indices\n\n Args:\n dec_states (dict):\n new_beam_indices (torch.Tensor):\n beam_size (int):\n\n Returns:\n Re-ordered dec_states\n '
raise NotImplementedError<|docstring|>Re-ordering decoding states according to newly selected beam indices
Args:
dec_states (dict):
new_beam_indices (torch.Tensor):
beam_size (int):
Returns:
Re-ordered dec_states<|endoftext|> |
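After each beam-search step the surviving hypotheses are a selection (with repetition) of the previous ones, so cached decoder states must be gathered to match. A minimal sketch, assuming every state tensor has the flattened `batch * beam` size as its first dimension:

import torch

def reorder_states(dec_states, new_beam_indices):
    # index_select along dim 0 keeps exactly the states of the
    # hypotheses that survived this beam-search step.
    return {k: v.index_select(0, new_beam_indices) if torch.is_tensor(v) else v
            for k, v in dec_states.items()}

states = {"hidden": torch.arange(6.0).view(6, 1)}  # batch 2 x beam 3
survivors = torch.tensor([0, 0, 2, 4, 5, 5])       # chosen rows per beam slot
states = reorder_states(states, survivors)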
f2f36be5ce3e3694f7b8f689a6f5a0bad13ac9e8ce1d6557b42c0a1ea04eb991 | def atMostNGivenDigitSet(self, D, N):
'\n :type D: List[str]\n :type N: int\n :rtype: int\n '
S = str(N)
K = len(S)
dp = (([0] * K) + [1])
for i in xrange((K - 1), (- 1), (- 1)):
for d in D:
if (d < S[i]):
dp[i] += (len(D) ** ((K - i) - 1))
elif (d == S[i]):
dp[i] += dp[(i + 1)]
return (dp[0] + sum(((len(D) ** i) for i in xrange(1, K)))) | :type D: List[str]
:type N: int
:rtype: int | cs15211/NumbersAtMostNGivenDigitSet.py | atMostNGivenDigitSet | JulyKikuAkita/PythonPrac | 1 | python | def atMostNGivenDigitSet(self, D, N):
'\n :type D: List[str]\n :type N: int\n :rtype: int\n '
S = str(N)
K = len(S)
dp = (([0] * K) + [1])
for i in xrange((K - 1), (- 1), (- 1)):
for d in D:
if (d < S[i]):
dp[i] += (len(D) ** ((K - i) - 1))
elif (d == S[i]):
dp[i] += dp[(i + 1)]
return (dp[0] + sum(((len(D) ** i) for i in xrange(1, K)))) | def atMostNGivenDigitSet(self, D, N):
'\n :type D: List[str]\n :type N: int\n :rtype: int\n '
S = str(N)
K = len(S)
dp = (([0] * K) + [1])
for i in xrange((K - 1), (- 1), (- 1)):
for d in D:
if (d < S[i]):
dp[i] += (len(D) ** ((K - i) - 1))
elif (d == S[i]):
dp[i] += dp[(i + 1)]
return (dp[0] + sum(((len(D) ** i) for i in xrange(1, K))))<|docstring|>:type D: List[str]
:type N: int
:rtype: int<|endoftext|> |
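The DP above scans N's decimal string S from right to left: dp[i] counts the K-digit candidates that copy the prefix S[:i] exactly and still stay <= N, and the final sum adds all shorter numbers. A Python 3 restatement with the standard worked example (the original body is Python 2, hence xrange):

def count_at_most(D, N):
    S = str(N)
    K = len(S)
    dp = [0] * K + [1]  # dp[K] = 1: matching all of S is itself valid
    for i in range(K - 1, -1, -1):
        for d in D:
            if d < S[i]:
                dp[i] += len(D) ** (K - i - 1)  # suffix is a free choice
            elif d == S[i]:
                dp[i] += dp[i + 1]              # stay tight, recurse right
    return dp[0] + sum(len(D) ** i for i in range(1, K))

# 4 one-digit numbers, 4 * 4 two-digit numbers, no valid 3-digit number <= 100.
assert count_at_most(["1", "3", "5", "7"], 100) == 20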
826e9c2a524a2d46b349d3e4f1569df5ff0ad8e6380fa9f5893049b48ec5c536 | def atMostNGivenDigitSet(self, D, N):
'\n :type D: List[str]\n :type N: int\n :rtype: int\n '
B = len(D)
S = str(N)
K = len(S)
A = []
for c in S:
if (c in D):
A.append((D.index(c) + 1))
else:
i = bisect.bisect(D, c)
A.append(i)
if (i == 0):
for j in xrange((len(A) - 1), 0, (- 1)):
if A[j]:
break
A[j] += B
A[(j - 1)] -= 1
A.extend(([B] * (K - len(A))))
break
ans = 0
for x in A:
ans = ((ans * B) + x)
return ans | :type D: List[str]
:type N: int
:rtype: int | cs15211/NumbersAtMostNGivenDigitSet.py | atMostNGivenDigitSet | JulyKikuAkita/PythonPrac | 1 | python | def atMostNGivenDigitSet(self, D, N):
'\n :type D: List[str]\n :type N: int\n :rtype: int\n '
B = len(D)
S = str(N)
K = len(S)
A = []
for c in S:
if (c in D):
A.append((D.index(c) + 1))
else:
i = bisect.bisect(D, c)
A.append(i)
if (i == 0):
for j in xrange((len(A) - 1), 0, (- 1)):
if A[j]:
break
A[j] += B
A[(j - 1)] -= 1
A.extend(([B] * (K - len(A))))
break
ans = 0
for x in A:
ans = ((ans * B) + x)
return ans | def atMostNGivenDigitSet(self, D, N):
'\n :type D: List[str]\n :type N: int\n :rtype: int\n '
B = len(D)
S = str(N)
K = len(S)
A = []
for c in S:
if (c in D):
A.append((D.index(c) + 1))
else:
i = bisect.bisect(D, c)
A.append(i)
if (i == 0):
for j in xrange((len(A) - 1), 0, (- 1)):
if A[j]:
break
A[j] += B
A[(j - 1)] -= 1
A.extend(([B] * (K - len(A))))
break
ans = 0
for x in A:
ans = ((ans * B) + x)
return ans<|docstring|>:type D: List[str]
:type N: int
:rtype: int<|endoftext|> |
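The second solution counts differently: with B = len(D) allowed digits, the valid numbers of each length are in bijection with base-B strings, so the answer is a base-B numeral built from per-digit indices (with a borrow step when a digit of N is below every allowed digit). A small hedged check of that bijection:

D = ["1", "3", "5", "7"]
B = len(D)
# "77" is the largest valid number <= 77; its per-digit indices are 4 and 4,
# and evaluating them as a base-4 numeral counts all 4 + 16 = 20 valid numbers.
value = 0
for ch in "77":
    value = value * B + (D.index(ch) + 1)
assert value == 20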
1c002154908ea8c3d629df823f64ce71818a91cdb639b011b2b6d5140045ba4c | def register(linter):
'required method to auto register this checker'
linter.register_checker(ForbiddenImportChecker(linter)) | required method to auto register this checker | venv/lib/python3.6/site-packages/python_ta/checkers/forbidden_import_checker.py | register | PenPenMark06/Stonehenge-MiniMax | 1 | python | def register(linter):
linter.register_checker(ForbiddenImportChecker(linter)) | def register(linter):
linter.register_checker(ForbiddenImportChecker(linter))<|docstring|>required method to auto register this checker<|endoftext|> |
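`register(linter)` is the standard Pylint plugin hook: Pylint imports every module named in `--load-plugins` and calls its `register` function, which is how python_ta's checker gets loaded. A minimal hedged skeleton of a custom checker registered the same way (the checker name and message id are invented; base-class details vary across Pylint versions):

from pylint.checkers import BaseChecker

class ExampleChecker(BaseChecker):
    name = "example-checker"  # shown in pylint output
    msgs = {"C9001": ("example message", "example-symbol", "Demo description.")}

def register(linter):
    # Called once per --load-plugins module, e.g.:
    #   pylint --load-plugins=example_checker target.py
    linter.register_checker(ExampleChecker(linter))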
637712bfe844baad251991b247e350358a7b8e1ff9eea11f7f092544217796f7 | @check_messages('forbidden-import')
def visit_import(self, node):
'visit an Import node'
temp = [name for name in node.names if ((name[0] not in self.config.allowed_import_modules) and (name[0] not in self.config.extra_imports))]
if (temp != []):
self.add_message('forbidden-import', node=node, args=(', '.join(map((lambda x: x[0]), temp)), node.lineno)) | visit an Import node | venv/lib/python3.6/site-packages/python_ta/checkers/forbidden_import_checker.py | visit_import | PenPenMark06/Stonehenge-MiniMax | 1 | python | @check_messages('forbidden-import')
def visit_import(self, node):
temp = [name for name in node.names if ((name[0] not in self.config.allowed_import_modules) and (name[0] not in self.config.extra_imports))]
if (temp != []):
self.add_message('forbidden-import', node=node, args=(', '.join(map((lambda x: x[0]), temp)), node.lineno)) | @check_messages('forbidden-import')
def visit_import(self, node):
temp = [name for name in node.names if ((name[0] not in self.config.allowed_import_modules) and (name[0] not in self.config.extra_imports))]
if (temp != []):
self.add_message('forbidden-import', node=node, args=(', '.join(map((lambda x: x[0]), temp)), node.lineno))<|docstring|>visit an Import node<|endoftext|> |
81f3d36581399b120f086c52f0f6d984f2abb03216f5e94861148f88a37b4a89 | @check_messages('forbidden-import')
def visit_importfrom(self, node):
'visit an ImportFrom node'
if ((node.modname not in self.config.allowed_import_modules) and (node.modname not in self.config.extra_imports)):
self.add_message('forbidden-import', node=node, args=(node.modname, node.lineno)) | visit an ImportFrom node | venv/lib/python3.6/site-packages/python_ta/checkers/forbidden_import_checker.py | visit_importfrom | PenPenMark06/Stonehenge-MiniMax | 1 | python | @check_messages('forbidden-import')
def visit_importfrom(self, node):
if ((node.modname not in self.config.allowed_import_modules) and (node.modname not in self.config.extra_imports)):
self.add_message('forbidden-import', node=node, args=(node.modname, node.lineno)) | @check_messages('forbidden-import')
def visit_importfrom(self, node):
if ((node.modname not in self.config.allowed_import_modules) and (node.modname not in self.config.extra_imports)):
self.add_message('forbidden-import', node=node, args=(node.modname, node.lineno))<|docstring|>visit an ImportFrom node<|endoftext|> |
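To see what the two visitors above react to, consider a hedged example target file: with `allowed_import_modules = ['math']` and no extra imports configured, `visit_import` would report `os` and `sys` (joined into one message with the line number) and `visit_importfrom` would report `json`.

import math            # in allowed_import_modules: no message
import os, sys         # visit_import: "forbidden-import" naming os, sys
from json import load  # visit_importfrom: "forbidden-import" naming json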
d1edce13055bc0ace5779f0b9bf128fc8f273228c5f59754bf0838c13bdb0706 | def __init__(self, **kwargs):
'\n Initializes a new LogAnalyticsWarning object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param agent_id:\n The value to assign to the agent_id property of this LogAnalyticsWarning.\n :type agent_id: str\n\n :param host_name:\n The value to assign to the host_name property of this LogAnalyticsWarning.\n :type host_name: str\n\n :param rule_display_name:\n The value to assign to the rule_display_name property of this LogAnalyticsWarning.\n :type rule_display_name: str\n\n :param source_name:\n The value to assign to the source_name property of this LogAnalyticsWarning.\n :type source_name: str\n\n :param compartment_id:\n The value to assign to the compartment_id property of this LogAnalyticsWarning.\n :type compartment_id: str\n\n :param source_display_name:\n The value to assign to the source_display_name property of this LogAnalyticsWarning.\n :type source_display_name: str\n\n :param entity_name:\n The value to assign to the entity_name property of this LogAnalyticsWarning.\n :type entity_name: str\n\n :param time_collected:\n The value to assign to the time_collected property of this LogAnalyticsWarning.\n :type time_collected: datetime\n\n :param warning_id:\n The value to assign to the warning_id property of this LogAnalyticsWarning.\n :type warning_id: str\n\n :param time_of_initial_warning:\n The value to assign to the time_of_initial_warning property of this LogAnalyticsWarning.\n :type time_of_initial_warning: datetime\n\n :param is_active:\n The value to assign to the is_active property of this LogAnalyticsWarning.\n :type is_active: bool\n\n :param is_suppressed:\n The value to assign to the is_suppressed property of this LogAnalyticsWarning.\n :type is_suppressed: bool\n\n :param time_of_latest_warning:\n The value to assign to the time_of_latest_warning property of this LogAnalyticsWarning.\n :type time_of_latest_warning: datetime\n\n :param warning_level:\n The value to assign to the warning_level property of this LogAnalyticsWarning.\n :type warning_level: str\n\n :param warning_message:\n The value to assign to the warning_message property of this LogAnalyticsWarning.\n :type warning_message: str\n\n :param pattern_id:\n The value to assign to the pattern_id property of this LogAnalyticsWarning.\n :type pattern_id: str\n\n :param pattern_text:\n The value to assign to the pattern_text property of this LogAnalyticsWarning.\n :type pattern_text: str\n\n :param rule_id:\n The value to assign to the rule_id property of this LogAnalyticsWarning.\n :type rule_id: str\n\n :param source_id:\n The value to assign to the source_id property of this LogAnalyticsWarning.\n :type source_id: str\n\n :param suppressed_by:\n The value to assign to the suppressed_by property of this LogAnalyticsWarning.\n :type suppressed_by: str\n\n :param entity_id:\n The value to assign to the entity_id property of this LogAnalyticsWarning.\n :type entity_id: str\n\n :param entity_type:\n The value to assign to the entity_type property of this LogAnalyticsWarning.\n :type entity_type: str\n\n :param entity_type_display_name:\n The value to assign to the entity_type_display_name property of this LogAnalyticsWarning.\n :type entity_type_display_name: str\n\n :param type_display_name:\n The value to assign to the type_display_name property of this LogAnalyticsWarning.\n :type type_display_name: str\n\n :param type_name:\n The value to assign to the type_name property of this LogAnalyticsWarning.\n :type type_name: str\n\n :param severity:\n The value to assign to the severity property of this LogAnalyticsWarning.\n :type severity: int\n\n '
self.swagger_types = {'agent_id': 'str', 'host_name': 'str', 'rule_display_name': 'str', 'source_name': 'str', 'compartment_id': 'str', 'source_display_name': 'str', 'entity_name': 'str', 'time_collected': 'datetime', 'warning_id': 'str', 'time_of_initial_warning': 'datetime', 'is_active': 'bool', 'is_suppressed': 'bool', 'time_of_latest_warning': 'datetime', 'warning_level': 'str', 'warning_message': 'str', 'pattern_id': 'str', 'pattern_text': 'str', 'rule_id': 'str', 'source_id': 'str', 'suppressed_by': 'str', 'entity_id': 'str', 'entity_type': 'str', 'entity_type_display_name': 'str', 'type_display_name': 'str', 'type_name': 'str', 'severity': 'int'}
self.attribute_map = {'agent_id': 'agentId', 'host_name': 'hostName', 'rule_display_name': 'ruleDisplayName', 'source_name': 'sourceName', 'compartment_id': 'compartmentId', 'source_display_name': 'sourceDisplayName', 'entity_name': 'entityName', 'time_collected': 'timeCollected', 'warning_id': 'warningId', 'time_of_initial_warning': 'timeOfInitialWarning', 'is_active': 'isActive', 'is_suppressed': 'isSuppressed', 'time_of_latest_warning': 'timeOfLatestWarning', 'warning_level': 'warningLevel', 'warning_message': 'warningMessage', 'pattern_id': 'patternId', 'pattern_text': 'patternText', 'rule_id': 'ruleId', 'source_id': 'sourceId', 'suppressed_by': 'suppressedBy', 'entity_id': 'entityId', 'entity_type': 'entityType', 'entity_type_display_name': 'entityTypeDisplayName', 'type_display_name': 'typeDisplayName', 'type_name': 'typeName', 'severity': 'severity'}
self._agent_id = None
self._host_name = None
self._rule_display_name = None
self._source_name = None
self._compartment_id = None
self._source_display_name = None
self._entity_name = None
self._time_collected = None
self._warning_id = None
self._time_of_initial_warning = None
self._is_active = None
self._is_suppressed = None
self._time_of_latest_warning = None
self._warning_level = None
self._warning_message = None
self._pattern_id = None
self._pattern_text = None
self._rule_id = None
self._source_id = None
self._suppressed_by = None
self._entity_id = None
self._entity_type = None
self._entity_type_display_name = None
self._type_display_name = None
self._type_name = None
self._severity = None | Initializes a new LogAnalyticsWarning object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param agent_id:
The value to assign to the agent_id property of this LogAnalyticsWarning.
:type agent_id: str
:param host_name:
The value to assign to the host_name property of this LogAnalyticsWarning.
:type host_name: str
:param rule_display_name:
The value to assign to the rule_display_name property of this LogAnalyticsWarning.
:type rule_display_name: str
:param source_name:
The value to assign to the source_name property of this LogAnalyticsWarning.
:type source_name: str
:param compartment_id:
The value to assign to the compartment_id property of this LogAnalyticsWarning.
:type compartment_id: str
:param source_display_name:
The value to assign to the source_display_name property of this LogAnalyticsWarning.
:type source_display_name: str
:param entity_name:
The value to assign to the entity_name property of this LogAnalyticsWarning.
:type entity_name: str
:param time_collected:
The value to assign to the time_collected property of this LogAnalyticsWarning.
:type time_collected: datetime
:param warning_id:
The value to assign to the warning_id property of this LogAnalyticsWarning.
:type warning_id: str
:param time_of_initial_warning:
The value to assign to the time_of_initial_warning property of this LogAnalyticsWarning.
:type time_of_initial_warning: datetime
:param is_active:
The value to assign to the is_active property of this LogAnalyticsWarning.
:type is_active: bool
:param is_suppressed:
The value to assign to the is_suppressed property of this LogAnalyticsWarning.
:type is_suppressed: bool
:param time_of_latest_warning:
The value to assign to the time_of_latest_warning property of this LogAnalyticsWarning.
:type time_of_latest_warning: datetime
:param warning_level:
The value to assign to the warning_level property of this LogAnalyticsWarning.
:type warning_level: str
:param warning_message:
The value to assign to the warning_message property of this LogAnalyticsWarning.
:type warning_message: str
:param pattern_id:
The value to assign to the pattern_id property of this LogAnalyticsWarning.
:type pattern_id: str
:param pattern_text:
The value to assign to the pattern_text property of this LogAnalyticsWarning.
:type pattern_text: str
:param rule_id:
The value to assign to the rule_id property of this LogAnalyticsWarning.
:type rule_id: str
:param source_id:
The value to assign to the source_id property of this LogAnalyticsWarning.
:type source_id: str
:param suppressed_by:
The value to assign to the suppressed_by property of this LogAnalyticsWarning.
:type suppressed_by: str
:param entity_id:
The value to assign to the entity_id property of this LogAnalyticsWarning.
:type entity_id: str
:param entity_type:
The value to assign to the entity_type property of this LogAnalyticsWarning.
:type entity_type: str
:param entity_type_display_name:
The value to assign to the entity_type_display_name property of this LogAnalyticsWarning.
:type entity_type_display_name: str
:param type_display_name:
The value to assign to the type_display_name property of this LogAnalyticsWarning.
:type type_display_name: str
:param type_name:
The value to assign to the type_name property of this LogAnalyticsWarning.
:type type_name: str
:param severity:
The value to assign to the severity property of this LogAnalyticsWarning.
:type severity: int | src/oci/log_analytics/models/log_analytics_warning.py | __init__ | ezequielramos/oci-python-sdk | 249 | python | def __init__(self, **kwargs):
'\n Initializes a new LogAnalyticsWarning object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param agent_id:\n The value to assign to the agent_id property of this LogAnalyticsWarning.\n :type agent_id: str\n\n :param host_name:\n The value to assign to the host_name property of this LogAnalyticsWarning.\n :type host_name: str\n\n :param rule_display_name:\n The value to assign to the rule_display_name property of this LogAnalyticsWarning.\n :type rule_display_name: str\n\n :param source_name:\n The value to assign to the source_name property of this LogAnalyticsWarning.\n :type source_name: str\n\n :param compartment_id:\n The value to assign to the compartment_id property of this LogAnalyticsWarning.\n :type compartment_id: str\n\n :param source_display_name:\n The value to assign to the source_display_name property of this LogAnalyticsWarning.\n :type source_display_name: str\n\n :param entity_name:\n The value to assign to the entity_name property of this LogAnalyticsWarning.\n :type entity_name: str\n\n :param time_collected:\n The value to assign to the time_collected property of this LogAnalyticsWarning.\n :type time_collected: datetime\n\n :param warning_id:\n The value to assign to the warning_id property of this LogAnalyticsWarning.\n :type warning_id: str\n\n :param time_of_initial_warning:\n The value to assign to the time_of_initial_warning property of this LogAnalyticsWarning.\n :type time_of_initial_warning: datetime\n\n :param is_active:\n The value to assign to the is_active property of this LogAnalyticsWarning.\n :type is_active: bool\n\n :param is_suppressed:\n The value to assign to the is_suppressed property of this LogAnalyticsWarning.\n :type is_suppressed: bool\n\n :param time_of_latest_warning:\n The value to assign to the time_of_latest_warning property of this LogAnalyticsWarning.\n :type time_of_latest_warning: datetime\n\n :param warning_level:\n The value to assign to the warning_level property of this LogAnalyticsWarning.\n :type warning_level: str\n\n :param warning_message:\n The value to assign to the warning_message property of this LogAnalyticsWarning.\n :type warning_message: str\n\n :param pattern_id:\n The value to assign to the pattern_id property of this LogAnalyticsWarning.\n :type pattern_id: str\n\n :param pattern_text:\n The value to assign to the pattern_text property of this LogAnalyticsWarning.\n :type pattern_text: str\n\n :param rule_id:\n The value to assign to the rule_id property of this LogAnalyticsWarning.\n :type rule_id: str\n\n :param source_id:\n The value to assign to the source_id property of this LogAnalyticsWarning.\n :type source_id: str\n\n :param suppressed_by:\n The value to assign to the suppressed_by property of this LogAnalyticsWarning.\n :type suppressed_by: str\n\n :param entity_id:\n The value to assign to the entity_id property of this LogAnalyticsWarning.\n :type entity_id: str\n\n :param entity_type:\n The value to assign to the entity_type property of this LogAnalyticsWarning.\n :type entity_type: str\n\n :param entity_type_display_name:\n The value to assign to the entity_type_display_name property of this LogAnalyticsWarning.\n :type entity_type_display_name: str\n\n :param type_display_name:\n The value to assign to the type_display_name property of this LogAnalyticsWarning.\n :type type_display_name: str\n\n :param type_name:\n The value to assign to the type_name property of this LogAnalyticsWarning.\n :type type_name: str\n\n :param severity:\n The value to assign to the severity property of this LogAnalyticsWarning.\n :type severity: int\n\n '
self.swagger_types = {'agent_id': 'str', 'host_name': 'str', 'rule_display_name': 'str', 'source_name': 'str', 'compartment_id': 'str', 'source_display_name': 'str', 'entity_name': 'str', 'time_collected': 'datetime', 'warning_id': 'str', 'time_of_initial_warning': 'datetime', 'is_active': 'bool', 'is_suppressed': 'bool', 'time_of_latest_warning': 'datetime', 'warning_level': 'str', 'warning_message': 'str', 'pattern_id': 'str', 'pattern_text': 'str', 'rule_id': 'str', 'source_id': 'str', 'suppressed_by': 'str', 'entity_id': 'str', 'entity_type': 'str', 'entity_type_display_name': 'str', 'type_display_name': 'str', 'type_name': 'str', 'severity': 'int'}
self.attribute_map = {'agent_id': 'agentId', 'host_name': 'hostName', 'rule_display_name': 'ruleDisplayName', 'source_name': 'sourceName', 'compartment_id': 'compartmentId', 'source_display_name': 'sourceDisplayName', 'entity_name': 'entityName', 'time_collected': 'timeCollected', 'warning_id': 'warningId', 'time_of_initial_warning': 'timeOfInitialWarning', 'is_active': 'isActive', 'is_suppressed': 'isSuppressed', 'time_of_latest_warning': 'timeOfLatestWarning', 'warning_level': 'warningLevel', 'warning_message': 'warningMessage', 'pattern_id': 'patternId', 'pattern_text': 'patternText', 'rule_id': 'ruleId', 'source_id': 'sourceId', 'suppressed_by': 'suppressedBy', 'entity_id': 'entityId', 'entity_type': 'entityType', 'entity_type_display_name': 'entityTypeDisplayName', 'type_display_name': 'typeDisplayName', 'type_name': 'typeName', 'severity': 'severity'}
self._agent_id = None
self._host_name = None
self._rule_display_name = None
self._source_name = None
self._compartment_id = None
self._source_display_name = None
self._entity_name = None
self._time_collected = None
self._warning_id = None
self._time_of_initial_warning = None
self._is_active = None
self._is_suppressed = None
self._time_of_latest_warning = None
self._warning_level = None
self._warning_message = None
self._pattern_id = None
self._pattern_text = None
self._rule_id = None
self._source_id = None
self._suppressed_by = None
self._entity_id = None
self._entity_type = None
self._entity_type_display_name = None
self._type_display_name = None
self._type_name = None
self._severity = None | def __init__(self, **kwargs):
'\n Initializes a new LogAnalyticsWarning object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param agent_id:\n The value to assign to the agent_id property of this LogAnalyticsWarning.\n :type agent_id: str\n\n :param host_name:\n The value to assign to the host_name property of this LogAnalyticsWarning.\n :type host_name: str\n\n :param rule_display_name:\n The value to assign to the rule_display_name property of this LogAnalyticsWarning.\n :type rule_display_name: str\n\n :param source_name:\n The value to assign to the source_name property of this LogAnalyticsWarning.\n :type source_name: str\n\n :param compartment_id:\n The value to assign to the compartment_id property of this LogAnalyticsWarning.\n :type compartment_id: str\n\n :param source_display_name:\n The value to assign to the source_display_name property of this LogAnalyticsWarning.\n :type source_display_name: str\n\n :param entity_name:\n The value to assign to the entity_name property of this LogAnalyticsWarning.\n :type entity_name: str\n\n :param time_collected:\n The value to assign to the time_collected property of this LogAnalyticsWarning.\n :type time_collected: datetime\n\n :param warning_id:\n The value to assign to the warning_id property of this LogAnalyticsWarning.\n :type warning_id: str\n\n :param time_of_initial_warning:\n The value to assign to the time_of_initial_warning property of this LogAnalyticsWarning.\n :type time_of_initial_warning: datetime\n\n :param is_active:\n The value to assign to the is_active property of this LogAnalyticsWarning.\n :type is_active: bool\n\n :param is_suppressed:\n The value to assign to the is_suppressed property of this LogAnalyticsWarning.\n :type is_suppressed: bool\n\n :param time_of_latest_warning:\n The value to assign to the time_of_latest_warning property of this LogAnalyticsWarning.\n :type time_of_latest_warning: datetime\n\n :param warning_level:\n The value to assign to the warning_level property of this LogAnalyticsWarning.\n :type warning_level: str\n\n :param warning_message:\n The value to assign to the warning_message property of this LogAnalyticsWarning.\n :type warning_message: str\n\n :param pattern_id:\n The value to assign to the pattern_id property of this LogAnalyticsWarning.\n :type pattern_id: str\n\n :param pattern_text:\n The value to assign to the pattern_text property of this LogAnalyticsWarning.\n :type pattern_text: str\n\n :param rule_id:\n The value to assign to the rule_id property of this LogAnalyticsWarning.\n :type rule_id: str\n\n :param source_id:\n The value to assign to the source_id property of this LogAnalyticsWarning.\n :type source_id: str\n\n :param suppressed_by:\n The value to assign to the suppressed_by property of this LogAnalyticsWarning.\n :type suppressed_by: str\n\n :param entity_id:\n The value to assign to the entity_id property of this LogAnalyticsWarning.\n :type entity_id: str\n\n :param entity_type:\n The value to assign to the entity_type property of this LogAnalyticsWarning.\n :type entity_type: str\n\n :param entity_type_display_name:\n The value to assign to the entity_type_display_name property of this LogAnalyticsWarning.\n :type entity_type_display_name: str\n\n :param type_display_name:\n The value to assign to the type_display_name property of this LogAnalyticsWarning.\n :type type_display_name: str\n\n :param type_name:\n The value to assign to the type_name property of this LogAnalyticsWarning.\n :type type_name: str\n\n :param severity:\n The value to assign to the severity property of this LogAnalyticsWarning.\n :type severity: int\n\n '
self.swagger_types = {'agent_id': 'str', 'host_name': 'str', 'rule_display_name': 'str', 'source_name': 'str', 'compartment_id': 'str', 'source_display_name': 'str', 'entity_name': 'str', 'time_collected': 'datetime', 'warning_id': 'str', 'time_of_initial_warning': 'datetime', 'is_active': 'bool', 'is_suppressed': 'bool', 'time_of_latest_warning': 'datetime', 'warning_level': 'str', 'warning_message': 'str', 'pattern_id': 'str', 'pattern_text': 'str', 'rule_id': 'str', 'source_id': 'str', 'suppressed_by': 'str', 'entity_id': 'str', 'entity_type': 'str', 'entity_type_display_name': 'str', 'type_display_name': 'str', 'type_name': 'str', 'severity': 'int'}
self.attribute_map = {'agent_id': 'agentId', 'host_name': 'hostName', 'rule_display_name': 'ruleDisplayName', 'source_name': 'sourceName', 'compartment_id': 'compartmentId', 'source_display_name': 'sourceDisplayName', 'entity_name': 'entityName', 'time_collected': 'timeCollected', 'warning_id': 'warningId', 'time_of_initial_warning': 'timeOfInitialWarning', 'is_active': 'isActive', 'is_suppressed': 'isSuppressed', 'time_of_latest_warning': 'timeOfLatestWarning', 'warning_level': 'warningLevel', 'warning_message': 'warningMessage', 'pattern_id': 'patternId', 'pattern_text': 'patternText', 'rule_id': 'ruleId', 'source_id': 'sourceId', 'suppressed_by': 'suppressedBy', 'entity_id': 'entityId', 'entity_type': 'entityType', 'entity_type_display_name': 'entityTypeDisplayName', 'type_display_name': 'typeDisplayName', 'type_name': 'typeName', 'severity': 'severity'}
self._agent_id = None
self._host_name = None
self._rule_display_name = None
self._source_name = None
self._compartment_id = None
self._source_display_name = None
self._entity_name = None
self._time_collected = None
self._warning_id = None
self._time_of_initial_warning = None
self._is_active = None
self._is_suppressed = None
self._time_of_latest_warning = None
self._warning_level = None
self._warning_message = None
self._pattern_id = None
self._pattern_text = None
self._rule_id = None
self._source_id = None
self._suppressed_by = None
self._entity_id = None
self._entity_type = None
self._entity_type_display_name = None
self._type_display_name = None
self._type_name = None
self._severity = None<|docstring|>Initializes a new LogAnalyticsWarning object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param agent_id:
The value to assign to the agent_id property of this LogAnalyticsWarning.
:type agent_id: str
:param host_name:
The value to assign to the host_name property of this LogAnalyticsWarning.
:type host_name: str
:param rule_display_name:
The value to assign to the rule_display_name property of this LogAnalyticsWarning.
:type rule_display_name: str
:param source_name:
The value to assign to the source_name property of this LogAnalyticsWarning.
:type source_name: str
:param compartment_id:
The value to assign to the compartment_id property of this LogAnalyticsWarning.
:type compartment_id: str
:param source_display_name:
The value to assign to the source_display_name property of this LogAnalyticsWarning.
:type source_display_name: str
:param entity_name:
The value to assign to the entity_name property of this LogAnalyticsWarning.
:type entity_name: str
:param time_collected:
The value to assign to the time_collected property of this LogAnalyticsWarning.
:type time_collected: datetime
:param warning_id:
The value to assign to the warning_id property of this LogAnalyticsWarning.
:type warning_id: str
:param time_of_initial_warning:
The value to assign to the time_of_initial_warning property of this LogAnalyticsWarning.
:type time_of_initial_warning: datetime
:param is_active:
The value to assign to the is_active property of this LogAnalyticsWarning.
:type is_active: bool
:param is_suppressed:
The value to assign to the is_suppressed property of this LogAnalyticsWarning.
:type is_suppressed: bool
:param time_of_latest_warning:
The value to assign to the time_of_latest_warning property of this LogAnalyticsWarning.
:type time_of_latest_warning: datetime
:param warning_level:
The value to assign to the warning_level property of this LogAnalyticsWarning.
:type warning_level: str
:param warning_message:
The value to assign to the warning_message property of this LogAnalyticsWarning.
:type warning_message: str
:param pattern_id:
The value to assign to the pattern_id property of this LogAnalyticsWarning.
:type pattern_id: str
:param pattern_text:
The value to assign to the pattern_text property of this LogAnalyticsWarning.
:type pattern_text: str
:param rule_id:
The value to assign to the rule_id property of this LogAnalyticsWarning.
:type rule_id: str
:param source_id:
The value to assign to the source_id property of this LogAnalyticsWarning.
:type source_id: str
:param suppressed_by:
The value to assign to the suppressed_by property of this LogAnalyticsWarning.
:type suppressed_by: str
:param entity_id:
The value to assign to the entity_id property of this LogAnalyticsWarning.
:type entity_id: str
:param entity_type:
The value to assign to the entity_type property of this LogAnalyticsWarning.
:type entity_type: str
:param entity_type_display_name:
The value to assign to the entity_type_display_name property of this LogAnalyticsWarning.
:type entity_type_display_name: str
:param type_display_name:
The value to assign to the type_display_name property of this LogAnalyticsWarning.
:type type_display_name: str
:param type_name:
The value to assign to the type_name property of this LogAnalyticsWarning.
:type type_name: str
:param severity:
The value to assign to the severity property of this LogAnalyticsWarning.
:type severity: int<|endoftext|> |
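Note that this generated `__init__` never applies the documented kwargs itself: it only records `swagger_types`/`attribute_map` and sets every backing field to `None`; values are assigned afterwards through the properties (typically by the SDK's deserializer). A hedged usage sketch, with a placeholder OCID:

from oci.log_analytics.models import LogAnalyticsWarning

warning = LogAnalyticsWarning()
warning.agent_id = "ocid1.managementagent.oc1..exampleuniqueid"  # placeholder OCID
warning.is_active = True
print(warning.agent_id, warning.is_active)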
7f56a2034f64e49bc580df92a1e34f41461cefb5f1def1ed28b9c3e4ad9222b0 | @property
def agent_id(self):
'\n Gets the agent_id of this LogAnalyticsWarning.\n The unique identifier of the agent associated with the warning\n\n\n :return: The agent_id of this LogAnalyticsWarning.\n :rtype: str\n '
return self._agent_id | Gets the agent_id of this LogAnalyticsWarning.
The unique identifier of the agent associated with the warning
:return: The agent_id of this LogAnalyticsWarning.
:rtype: str | src/oci/log_analytics/models/log_analytics_warning.py | agent_id | ezequielramos/oci-python-sdk | 249 | python | @property
def agent_id(self):
'\n Gets the agent_id of this LogAnalyticsWarning.\n The unique identifier of the agent associated with the warning\n\n\n :return: The agent_id of this LogAnalyticsWarning.\n :rtype: str\n '
return self._agent_id | @property
def agent_id(self):
'\n Gets the agent_id of this LogAnalyticsWarning.\n The unique identifier of the agent associated with the warning\n\n\n :return: The agent_id of this LogAnalyticsWarning.\n :rtype: str\n '
return self._agent_id<|docstring|>Gets the agent_id of this LogAnalyticsWarning.
The unique identifier of the agent associated with the warning
:return: The agent_id of this LogAnalyticsWarning.
:rtype: str<|endoftext|> |
a07bcec9155941119f5aab22543f026e706db385dab35860df8b8fa7fb3c564c | @agent_id.setter
def agent_id(self, agent_id):
'\n Sets the agent_id of this LogAnalyticsWarning.\n The unique identifier of the agent associated with the warning\n\n\n :param agent_id: The agent_id of this LogAnalyticsWarning.\n :type: str\n '
self._agent_id = agent_id | Sets the agent_id of this LogAnalyticsWarning.
The unique identifier of the agent associated with the warning
:param agent_id: The agent_id of this LogAnalyticsWarning.
:type: str | src/oci/log_analytics/models/log_analytics_warning.py | agent_id | ezequielramos/oci-python-sdk | 249 | python | @agent_id.setter
def agent_id(self, agent_id):
'\n Sets the agent_id of this LogAnalyticsWarning.\n The unique identifier of the agent associated with the warning\n\n\n :param agent_id: The agent_id of this LogAnalyticsWarning.\n :type: str\n '
self._agent_id = agent_id | @agent_id.setter
def agent_id(self, agent_id):
'\n Sets the agent_id of this LogAnalyticsWarning.\n The unique identifier of the agent associated with the warning\n\n\n :param agent_id: The agent_id of this LogAnalyticsWarning.\n :type: str\n '
self._agent_id = agent_id<|docstring|>Sets the agent_id of this LogAnalyticsWarning.
The unique identifier of the agent associated with the warning
:param agent_id: The agent_id of this LogAnalyticsWarning.
:type: str<|endoftext|> |
85ee059a8f2dea1574d28f8afe732f2cd08a5818ccfb40b3917029d35e8d6b7a | @property
def host_name(self):
'\n Gets the host_name of this LogAnalyticsWarning.\n The host containing the agent associated with the warning\n\n\n :return: The host_name of this LogAnalyticsWarning.\n :rtype: str\n '
return self._host_name | Gets the host_name of this LogAnalyticsWarning.
The host containing the agent associated with the warning
:return: The host_name of this LogAnalyticsWarning.
:rtype: str | src/oci/log_analytics/models/log_analytics_warning.py | host_name | ezequielramos/oci-python-sdk | 249 | python | @property
def host_name(self):
'\n Gets the host_name of this LogAnalyticsWarning.\n The host containing the agent associated with the warning\n\n\n :return: The host_name of this LogAnalyticsWarning.\n :rtype: str\n '
return self._host_name | @property
def host_name(self):
'\n Gets the host_name of this LogAnalyticsWarning.\n The host containing the agent associated with the warning\n\n\n :return: The host_name of this LogAnalyticsWarning.\n :rtype: str\n '
return self._host_name<|docstring|>Gets the host_name of this LogAnalyticsWarning.
The host containing the agent associated with the warning
:return: The host_name of this LogAnalyticsWarning.
:rtype: str<|endoftext|> |
fea73020c4b56fe12302fe70b9b6ccf22651105db0f1bc49e53477527eb5957b | @host_name.setter
def host_name(self, host_name):
'\n Sets the host_name of this LogAnalyticsWarning.\n The host containing the agent associated with the warning\n\n\n :param host_name: The host_name of this LogAnalyticsWarning.\n :type: str\n '
self._host_name = host_name | Sets the host_name of this LogAnalyticsWarning.
The host containing the agent associated with the warning
:param host_name: The host_name of this LogAnalyticsWarning.
:type: str | src/oci/log_analytics/models/log_analytics_warning.py | host_name | ezequielramos/oci-python-sdk | 249 | python | @host_name.setter
def host_name(self, host_name):
'\n Sets the host_name of this LogAnalyticsWarning.\n The host containing the agent associated with the warning\n\n\n :param host_name: The host_name of this LogAnalyticsWarning.\n :type: str\n '
self._host_name = host_name | @host_name.setter
def host_name(self, host_name):
'\n Sets the host_name of this LogAnalyticsWarning.\n The host containing the agent associated with the warning\n\n\n :param host_name: The host_name of this LogAnalyticsWarning.\n :type: str\n '
self._host_name = host_name<|docstring|>Sets the host_name of this LogAnalyticsWarning.
The host containing the agent associated with the warning
:param host_name: The host_name of this LogAnalyticsWarning.
:type: str<|endoftext|> |
1ce8a89bf49bab4f5aeb6ee0613a2e924f8655ad0185bd062eda4c9c9d3d6f4a | @property
def rule_display_name(self):
'\n Gets the rule_display_name of this LogAnalyticsWarning.\n The display name of the rule which triggered the warning\n\n\n :return: The rule_display_name of this LogAnalyticsWarning.\n :rtype: str\n '
return self._rule_display_name | Gets the rule_display_name of this LogAnalyticsWarning.
The display name of the rule which triggered the warning
:return: The rule_display_name of this LogAnalyticsWarning.
:rtype: str | src/oci/log_analytics/models/log_analytics_warning.py | rule_display_name | ezequielramos/oci-python-sdk | 249 | python | @property
def rule_display_name(self):
'\n Gets the rule_display_name of this LogAnalyticsWarning.\n The display name of the rule which triggered the warning\n\n\n :return: The rule_display_name of this LogAnalyticsWarning.\n :rtype: str\n '
return self._rule_display_name | @property
def rule_display_name(self):
'\n Gets the rule_display_name of this LogAnalyticsWarning.\n The display name of the rule which triggered the warning\n\n\n :return: The rule_display_name of this LogAnalyticsWarning.\n :rtype: str\n '
return self._rule_display_name<|docstring|>Gets the rule_display_name of this LogAnalyticsWarning.
The display name of the rule which triggered the warning
:return: The rule_display_name of this LogAnalyticsWarning.
:rtype: str<|endoftext|> |
a3c24d2d30b73ed4525d128e35484b1bd477b324ef15647603825d7d2ac19eea | @rule_display_name.setter
def rule_display_name(self, rule_display_name):
'\n Sets the rule_display_name of this LogAnalyticsWarning.\n The display name of the rule which triggered the warning\n\n\n :param rule_display_name: The rule_display_name of this LogAnalyticsWarning.\n :type: str\n '
self._rule_display_name = rule_display_name | Sets the rule_display_name of this LogAnalyticsWarning.
The display name of the rule which triggered the warning
:param rule_display_name: The rule_display_name of this LogAnalyticsWarning.
:type: str | src/oci/log_analytics/models/log_analytics_warning.py | rule_display_name | ezequielramos/oci-python-sdk | 249 | python | @rule_display_name.setter
def rule_display_name(self, rule_display_name):
'\n Sets the rule_display_name of this LogAnalyticsWarning.\n The display name of the rule which triggered the warning\n\n\n :param rule_display_name: The rule_display_name of this LogAnalyticsWarning.\n :type: str\n '
self._rule_display_name = rule_display_name | @rule_display_name.setter
def rule_display_name(self, rule_display_name):
'\n Sets the rule_display_name of this LogAnalyticsWarning.\n The display name of the rule which triggered the warning\n\n\n :param rule_display_name: The rule_display_name of this LogAnalyticsWarning.\n :type: str\n '
self._rule_display_name = rule_display_name<|docstring|>Sets the rule_display_name of this LogAnalyticsWarning.
The display name of the rule which triggered the warning
:param rule_display_name: The rule_display_name of this LogAnalyticsWarning.
:type: str<|endoftext|> |
24f36e2d57735ad412b625f5fb445ff7b2e54567c15e3fa7e3ae567156733326 | @property
def source_name(self):
'\n Gets the source_name of this LogAnalyticsWarning.\n The name of the source associated with the warning\n\n\n :return: The source_name of this LogAnalyticsWarning.\n :rtype: str\n '
return self._source_name | Gets the source_name of this LogAnalyticsWarning.
The name of the source associated with the warning
:return: The source_name of this LogAnalyticsWarning.
:rtype: str | src/oci/log_analytics/models/log_analytics_warning.py | source_name | ezequielramos/oci-python-sdk | 249 | python | @property
def source_name(self):
'\n Gets the source_name of this LogAnalyticsWarning.\n The name of the source associated with the warning\n\n\n :return: The source_name of this LogAnalyticsWarning.\n :rtype: str\n '
return self._source_name | @property
def source_name(self):
'\n Gets the source_name of this LogAnalyticsWarning.\n The name of the source associated with the warning\n\n\n :return: The source_name of this LogAnalyticsWarning.\n :rtype: str\n '
return self._source_name<|docstring|>Gets the source_name of this LogAnalyticsWarning.
The name of the source associated with the warning
:return: The source_name of this LogAnalyticsWarning.
:rtype: str<|endoftext|> |
c9d57edd851d16810fe1e5775a397253886287bec6da601d12f188900ac28bdc | @source_name.setter
def source_name(self, source_name):
'\n Sets the source_name of this LogAnalyticsWarning.\n The name of the source associated with the warning\n\n\n :param source_name: The source_name of this LogAnalyticsWarning.\n :type: str\n '
self._source_name = source_name | Sets the source_name of this LogAnalyticsWarning.
The name of the source associated with the warning
:param source_name: The source_name of this LogAnalyticsWarning.
:type: str | src/oci/log_analytics/models/log_analytics_warning.py | source_name | ezequielramos/oci-python-sdk | 249 | python | @source_name.setter
def source_name(self, source_name):
'\n Sets the source_name of this LogAnalyticsWarning.\n The name of the source associated with the warning\n\n\n :param source_name: The source_name of this LogAnalyticsWarning.\n :type: str\n '
self._source_name = source_name | @source_name.setter
def source_name(self, source_name):
'\n Sets the source_name of this LogAnalyticsWarning.\n The name of the source associated with the warning\n\n\n :param source_name: The source_name of this LogAnalyticsWarning.\n :type: str\n '
self._source_name = source_name<|docstring|>Sets the source_name of this LogAnalyticsWarning.
The name of the source associated with the warning
:param source_name: The source_name of this LogAnalyticsWarning.
:type: str<|endoftext|> |
6fcc49f2216f07f4bd2b2e52b321a13eb35f7235ffa95c195b980b9bbd0f849c | @property
def compartment_id(self):
'\n Gets the compartment_id of this LogAnalyticsWarning.\n The entity compartment ID.\n\n\n :return: The compartment_id of this LogAnalyticsWarning.\n :rtype: str\n '
return self._compartment_id | Gets the compartment_id of this LogAnalyticsWarning.
The entity compartment ID.
:return: The compartment_id of this LogAnalyticsWarning.
:rtype: str | src/oci/log_analytics/models/log_analytics_warning.py | compartment_id | ezequielramos/oci-python-sdk | 249 | python | @property
def compartment_id(self):
'\n Gets the compartment_id of this LogAnalyticsWarning.\n The entity compartment ID.\n\n\n :return: The compartment_id of this LogAnalyticsWarning.\n :rtype: str\n '
return self._compartment_id | @property
def compartment_id(self):
'\n Gets the compartment_id of this LogAnalyticsWarning.\n The entity compartment ID.\n\n\n :return: The compartment_id of this LogAnalyticsWarning.\n :rtype: str\n '
return self._compartment_id<|docstring|>Gets the compartment_id of this LogAnalyticsWarning.
The entity compartment ID.
:return: The compartment_id of this LogAnalyticsWarning.
:rtype: str<|endoftext|> |
4d8ce0ec808d2ae1778ac1df3c787107c05d98bc11e2eb6abccddc3fe90d4d84 | @compartment_id.setter
def compartment_id(self, compartment_id):
'\n Sets the compartment_id of this LogAnalyticsWarning.\n The entity compartment ID.\n\n\n :param compartment_id: The compartment_id of this LogAnalyticsWarning.\n :type: str\n '
self._compartment_id = compartment_id | Sets the compartment_id of this LogAnalyticsWarning.
The entity compartment ID.
:param compartment_id: The compartment_id of this LogAnalyticsWarning.
:type: str | src/oci/log_analytics/models/log_analytics_warning.py | compartment_id | ezequielramos/oci-python-sdk | 249 | python | @compartment_id.setter
def compartment_id(self, compartment_id):
'\n Sets the compartment_id of this LogAnalyticsWarning.\n The entity compartment ID.\n\n\n :param compartment_id: The compartment_id of this LogAnalyticsWarning.\n :type: str\n '
self._compartment_id = compartment_id | @compartment_id.setter
def compartment_id(self, compartment_id):
'\n Sets the compartment_id of this LogAnalyticsWarning.\n The entity compartment ID.\n\n\n :param compartment_id: The compartment_id of this LogAnalyticsWarning.\n :type: str\n '
self._compartment_id = compartment_id<|docstring|>Sets the compartment_id of this LogAnalyticsWarning.
The entity compartment ID.
:param compartment_id: The compartment_id of this LogAnalyticsWarning.
:type: str<|endoftext|> |
7f9b18d293cc681ddc121c806732aa79b59edb1acaf7dbd29fe4fd5c05601b16 | @property
def source_display_name(self):
'\n Gets the source_display_name of this LogAnalyticsWarning.\n The display name of the source associated with the warning\n\n\n :return: The source_display_name of this LogAnalyticsWarning.\n :rtype: str\n '
return self._source_display_name | Gets the source_display_name of this LogAnalyticsWarning.
The display name of the source associated with the warning
:return: The source_display_name of this LogAnalyticsWarning.
:rtype: str | src/oci/log_analytics/models/log_analytics_warning.py | source_display_name | ezequielramos/oci-python-sdk | 249 | python | @property
def source_display_name(self):
'\n Gets the source_display_name of this LogAnalyticsWarning.\n The display name of the source associated with the warning\n\n\n :return: The source_display_name of this LogAnalyticsWarning.\n :rtype: str\n '
return self._source_display_name | @property
def source_display_name(self):
'\n Gets the source_display_name of this LogAnalyticsWarning.\n The display name of the source associated with the warning\n\n\n :return: The source_display_name of this LogAnalyticsWarning.\n :rtype: str\n '
return self._source_display_name<|docstring|>Gets the source_display_name of this LogAnalyticsWarning.
The display name of the source associated with the warning
:return: The source_display_name of this LogAnalyticsWarning.
:rtype: str<|endoftext|> |
0e3c2b176e26f7f3a699e0dac44d97a5ce2a01e9df115686e15f89feb616b085 | @source_display_name.setter
def source_display_name(self, source_display_name):
'\n Sets the source_display_name of this LogAnalyticsWarning.\n The display name of the source associated with the warning\n\n\n :param source_display_name: The source_display_name of this LogAnalyticsWarning.\n :type: str\n '
self._source_display_name = source_display_name | Sets the source_display_name of this LogAnalyticsWarning.
The display name of the source associated with the warning
:param source_display_name: The source_display_name of this LogAnalyticsWarning.
:type: str | src/oci/log_analytics/models/log_analytics_warning.py | source_display_name | ezequielramos/oci-python-sdk | 249 | python | @source_display_name.setter
def source_display_name(self, source_display_name):
'\n Sets the source_display_name of this LogAnalyticsWarning.\n The display name of the source associated with the warning\n\n\n :param source_display_name: The source_display_name of this LogAnalyticsWarning.\n :type: str\n '
self._source_display_name = source_display_name | @source_display_name.setter
def source_display_name(self, source_display_name):
'\n Sets the source_display_name of this LogAnalyticsWarning.\n The display name of the source associated with the warning\n\n\n :param source_display_name: The source_display_name of this LogAnalyticsWarning.\n :type: str\n '
self._source_display_name = source_display_name<|docstring|>Sets the source_display_name of this LogAnalyticsWarning.
The display name of the source associated with the warning
:param source_display_name: The source_display_name of this LogAnalyticsWarning.
:type: str<|endoftext|> |
36ddbf553299aa1ee42704cef1e8ec8a6e7986499b67b6866c1b8c768daa27dc | @property
def entity_name(self):
'\n Gets the entity_name of this LogAnalyticsWarning.\n The name of the entity associated with the warning\n\n\n :return: The entity_name of this LogAnalyticsWarning.\n :rtype: str\n '
return self._entity_name | Gets the entity_name of this LogAnalyticsWarning.
The name of the entity associated with the warning
:return: The entity_name of this LogAnalyticsWarning.
:rtype: str | src/oci/log_analytics/models/log_analytics_warning.py | entity_name | ezequielramos/oci-python-sdk | 249 | python | @property
def entity_name(self):
'\n Gets the entity_name of this LogAnalyticsWarning.\n The name of the entity associated with the warning\n\n\n :return: The entity_name of this LogAnalyticsWarning.\n :rtype: str\n '
return self._entity_name | @property
def entity_name(self):
'\n Gets the entity_name of this LogAnalyticsWarning.\n The name of the entity associated with the warning\n\n\n :return: The entity_name of this LogAnalyticsWarning.\n :rtype: str\n '
return self._entity_name<|docstring|>Gets the entity_name of this LogAnalyticsWarning.
The name of the entity associated with the warning
:return: The entity_name of this LogAnalyticsWarning.
:rtype: str<|endoftext|> |
ddec4acf198e1e367563a1289d148760e17e52a2336519b5a4834d1a094b7dbb | @entity_name.setter
def entity_name(self, entity_name):
'\n Sets the entity_name of this LogAnalyticsWarning.\n The name of the entity associated with the warning\n\n\n :param entity_name: The entity_name of this LogAnalyticsWarning.\n :type: str\n '
self._entity_name = entity_name | Sets the entity_name of this LogAnalyticsWarning.
The name of the entity associated with the warning
:param entity_name: The entity_name of this LogAnalyticsWarning.
:type: str | src/oci/log_analytics/models/log_analytics_warning.py | entity_name | ezequielramos/oci-python-sdk | 249 | python | @entity_name.setter
def entity_name(self, entity_name):
'\n Sets the entity_name of this LogAnalyticsWarning.\n The name of the entity associated with the warning\n\n\n :param entity_name: The entity_name of this LogAnalyticsWarning.\n :type: str\n '
self._entity_name = entity_name | @entity_name.setter
def entity_name(self, entity_name):
'\n Sets the entity_name of this LogAnalyticsWarning.\n The name of the entity associated with the warning\n\n\n :param entity_name: The entity_name of this LogAnalyticsWarning.\n :type: str\n '
self._entity_name = entity_name<|docstring|>Sets the entity_name of this LogAnalyticsWarning.
The name of the entity associated with the warning
:param entity_name: The entity_name of this LogAnalyticsWarning.
:type: str<|endoftext|> |
b5cf321249c7a8ad1a50d75208abe96a082bd8867102fe4663f9d4749be50a8f | @property
def time_collected(self):
'\n Gets the time_collected of this LogAnalyticsWarning.\n The time at which the warning was most recently collected\n\n\n :return: The time_collected of this LogAnalyticsWarning.\n :rtype: datetime\n '
return self._time_collected | Gets the time_collected of this LogAnalyticsWarning.
The time at which the warning was most recently collected
:return: The time_collected of this LogAnalyticsWarning.
:rtype: datetime | src/oci/log_analytics/models/log_analytics_warning.py | time_collected | ezequielramos/oci-python-sdk | 249 | python | @property
def time_collected(self):
'\n Gets the time_collected of this LogAnalyticsWarning.\n The time at which the warning was most recently collected\n\n\n :return: The time_collected of this LogAnalyticsWarning.\n :rtype: datetime\n '
return self._time_collected | @property
def time_collected(self):
'\n Gets the time_collected of this LogAnalyticsWarning.\n The time at which the warning was most recently collected\n\n\n :return: The time_collected of this LogAnalyticsWarning.\n :rtype: datetime\n '
return self._time_collected<|docstring|>Gets the time_collected of this LogAnalyticsWarning.
The time at which the warning was most recently collected
:return: The time_collected of this LogAnalyticsWarning.
:rtype: datetime<|endoftext|> |
64aab265aa2f44563a2a05dd94840144a096352ee04046910ac67950ab9036e5 | @time_collected.setter
def time_collected(self, time_collected):
'\n Sets the time_collected of this LogAnalyticsWarning.\n The time at which the warning was most recently collected\n\n\n :param time_collected: The time_collected of this LogAnalyticsWarning.\n :type: datetime\n '
self._time_collected = time_collected | Sets the time_collected of this LogAnalyticsWarning.
The time at which the warning was most recently collected
:param time_collected: The time_collected of this LogAnalyticsWarning.
:type: datetime | src/oci/log_analytics/models/log_analytics_warning.py | time_collected | ezequielramos/oci-python-sdk | 249 | python | @time_collected.setter
def time_collected(self, time_collected):
'\n Sets the time_collected of this LogAnalyticsWarning.\n The time at which the warning was most recently collected\n\n\n :param time_collected: The time_collected of this LogAnalyticsWarning.\n :type: datetime\n '
self._time_collected = time_collected | @time_collected.setter
def time_collected(self, time_collected):
'\n Sets the time_collected of this LogAnalyticsWarning.\n The time at which the warning was most recently collected\n\n\n :param time_collected: The time_collected of this LogAnalyticsWarning.\n :type: datetime\n '
self._time_collected = time_collected<|docstring|>Sets the time_collected of this LogAnalyticsWarning.
The time at which the warning was most recently collected
:param time_collected: The time_collected of this LogAnalyticsWarning.
:type: datetime<|endoftext|> |
62af919a3c6f26c332fab27dadb896987e98aaf4f7753a31eb69a9c7e5fdeb92 | @property
def warning_id(self):
'\n Gets the warning_id of this LogAnalyticsWarning.\n The unique identifier of the warning\n\n\n :return: The warning_id of this LogAnalyticsWarning.\n :rtype: str\n '
return self._warning_id | Gets the warning_id of this LogAnalyticsWarning.
The unique identifier of the warning
:return: The warning_id of this LogAnalyticsWarning.
:rtype: str | src/oci/log_analytics/models/log_analytics_warning.py | warning_id | ezequielramos/oci-python-sdk | 249 | python | @property
def warning_id(self):
'\n Gets the warning_id of this LogAnalyticsWarning.\n The unique identifier of the warning\n\n\n :return: The warning_id of this LogAnalyticsWarning.\n :rtype: str\n '
return self._warning_id | @property
def warning_id(self):
'\n Gets the warning_id of this LogAnalyticsWarning.\n The unique identifier of the warning\n\n\n :return: The warning_id of this LogAnalyticsWarning.\n :rtype: str\n '
return self._warning_id<|docstring|>Gets the warning_id of this LogAnalyticsWarning.
The unique identifier of the warning
:return: The warning_id of this LogAnalyticsWarning.
:rtype: str<|endoftext|> |
cd50470fe7b710265fe72721d99d23207c441fc257a3c51a92f146fb050272eb | @warning_id.setter
def warning_id(self, warning_id):
'\n Sets the warning_id of this LogAnalyticsWarning.\n The unique identifier of the warning\n\n\n :param warning_id: The warning_id of this LogAnalyticsWarning.\n :type: str\n '
self._warning_id = warning_id | Sets the warning_id of this LogAnalyticsWarning.
The unique identifier of the warning
:param warning_id: The warning_id of this LogAnalyticsWarning.
:type: str | src/oci/log_analytics/models/log_analytics_warning.py | warning_id | ezequielramos/oci-python-sdk | 249 | python | @warning_id.setter
def warning_id(self, warning_id):
'\n Sets the warning_id of this LogAnalyticsWarning.\n The unique identifier of the warning\n\n\n :param warning_id: The warning_id of this LogAnalyticsWarning.\n :type: str\n '
self._warning_id = warning_id | @warning_id.setter
def warning_id(self, warning_id):
'\n Sets the warning_id of this LogAnalyticsWarning.\n The unique identifier of the warning\n\n\n :param warning_id: The warning_id of this LogAnalyticsWarning.\n :type: str\n '
self._warning_id = warning_id<|docstring|>Sets the warning_id of this LogAnalyticsWarning.
The unique identifier of the warning
:param warning_id: The warning_id of this LogAnalyticsWarning.
:type: str<|endoftext|> |
17a3e61d86323ae3176e914b1748a38aa4072dcf06adbdbc0429187e02ae4735 | @property
def time_of_initial_warning(self):
'\n Gets the time_of_initial_warning of this LogAnalyticsWarning.\n The date at which the warning was initially triggered\n\n\n :return: The time_of_initial_warning of this LogAnalyticsWarning.\n :rtype: datetime\n '
return self._time_of_initial_warning | Gets the time_of_initial_warning of this LogAnalyticsWarning.
The date at which the warning was initially triggered
:return: The time_of_initial_warning of this LogAnalyticsWarning.
:rtype: datetime | src/oci/log_analytics/models/log_analytics_warning.py | time_of_initial_warning | ezequielramos/oci-python-sdk | 249 | python | @property
def time_of_initial_warning(self):
'\n Gets the time_of_initial_warning of this LogAnalyticsWarning.\n The date at which the warning was initially triggered\n\n\n :return: The time_of_initial_warning of this LogAnalyticsWarning.\n :rtype: datetime\n '
return self._time_of_initial_warning | @property
def time_of_initial_warning(self):
'\n Gets the time_of_initial_warning of this LogAnalyticsWarning.\n The date at which the warning was initially triggered\n\n\n :return: The time_of_initial_warning of this LogAnalyticsWarning.\n :rtype: datetime\n '
return self._time_of_initial_warning<|docstring|>Gets the time_of_initial_warning of this LogAnalyticsWarning.
The date at which the warning was initially triggered
:return: The time_of_initial_warning of this LogAnalyticsWarning.
:rtype: datetime<|endoftext|> |
3a7e905730e699e6b3dd116ec7c40bfb6f87289b83ea1ce097cd54833124e83b | @time_of_initial_warning.setter
def time_of_initial_warning(self, time_of_initial_warning):
'\n Sets the time_of_initial_warning of this LogAnalyticsWarning.\n The date at which the warning was initially triggered\n\n\n :param time_of_initial_warning: The time_of_initial_warning of this LogAnalyticsWarning.\n :type: datetime\n '
self._time_of_initial_warning = time_of_initial_warning | Sets the time_of_initial_warning of this LogAnalyticsWarning.
The date at which the warning was initially triggered
:param time_of_initial_warning: The time_of_initial_warning of this LogAnalyticsWarning.
:type: datetime | src/oci/log_analytics/models/log_analytics_warning.py | time_of_initial_warning | ezequielramos/oci-python-sdk | 249 | python | @time_of_initial_warning.setter
def time_of_initial_warning(self, time_of_initial_warning):
'\n Sets the time_of_initial_warning of this LogAnalyticsWarning.\n The date at which the warning was initially triggered\n\n\n :param time_of_initial_warning: The time_of_initial_warning of this LogAnalyticsWarning.\n :type: datetime\n '
self._time_of_initial_warning = time_of_initial_warning | @time_of_initial_warning.setter
def time_of_initial_warning(self, time_of_initial_warning):
'\n Sets the time_of_initial_warning of this LogAnalyticsWarning.\n The date at which the warning was initially triggered\n\n\n :param time_of_initial_warning: The time_of_initial_warning of this LogAnalyticsWarning.\n :type: datetime\n '
self._time_of_initial_warning = time_of_initial_warning<|docstring|>Sets the time_of_initial_warning of this LogAnalyticsWarning.
The date at which the warning was initially triggered
:param time_of_initial_warning: The time_of_initial_warning of this LogAnalyticsWarning.
:type: datetime<|endoftext|> |
60debcd17f33225e0968966802e80de0ecba95d28cfc25abbf0f9293e1e7c616 | @property
def is_active(self):
'\n Gets the is_active of this LogAnalyticsWarning.\n A flag indicating if the warning is currently active\n\n\n :return: The is_active of this LogAnalyticsWarning.\n :rtype: bool\n '
return self._is_active | Gets the is_active of this LogAnalyticsWarning.
A flag indicating if the warning is currently active
:return: The is_active of this LogAnalyticsWarning.
:rtype: bool | src/oci/log_analytics/models/log_analytics_warning.py | is_active | ezequielramos/oci-python-sdk | 249 | python | @property
def is_active(self):
'\n Gets the is_active of this LogAnalyticsWarning.\n A flag indicating if the warning is currently active\n\n\n :return: The is_active of this LogAnalyticsWarning.\n :rtype: bool\n '
return self._is_active | @property
def is_active(self):
'\n Gets the is_active of this LogAnalyticsWarning.\n A flag indicating if the warning is currently active\n\n\n :return: The is_active of this LogAnalyticsWarning.\n :rtype: bool\n '
return self._is_active<|docstring|>Gets the is_active of this LogAnalyticsWarning.
A flag indicating if the warning is currently active
:return: The is_active of this LogAnalyticsWarning.
:rtype: bool<|endoftext|> |
20b389b17279999fd2a53b8f59af6c7113017ef4c1ed538c0ba6c858ce7636b1 | @is_active.setter
def is_active(self, is_active):
'\n Sets the is_active of this LogAnalyticsWarning.\n A flag indicating if the warning is currently active\n\n\n :param is_active: The is_active of this LogAnalyticsWarning.\n :type: bool\n '
self._is_active = is_active | Sets the is_active of this LogAnalyticsWarning.
A flag indicating if the warning is currently active
:param is_active: The is_active of this LogAnalyticsWarning.
:type: bool | src/oci/log_analytics/models/log_analytics_warning.py | is_active | ezequielramos/oci-python-sdk | 249 | python | @is_active.setter
def is_active(self, is_active):
'\n Sets the is_active of this LogAnalyticsWarning.\n A flag indicating if the warning is currently active\n\n\n :param is_active: The is_active of this LogAnalyticsWarning.\n :type: bool\n '
self._is_active = is_active | @is_active.setter
def is_active(self, is_active):
'\n Sets the is_active of this LogAnalyticsWarning.\n A flag indicating if the warning is currently active\n\n\n :param is_active: The is_active of this LogAnalyticsWarning.\n :type: bool\n '
self._is_active = is_active<|docstring|>Sets the is_active of this LogAnalyticsWarning.
A flag indicating if the warning is currently active
:param is_active: The is_active of this LogAnalyticsWarning.
:type: bool<|endoftext|> |
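The four getter/setter pairs captured above are generated accessors on the LogAnalyticsWarning model: each setter stores its argument on a private backing attribute, and each getter returns that attribute unchanged. A minimal usage sketch, assuming the model is re-exported from oci.log_analytics.models as in the standard oci package layout; the identifier and timestamps below are hypothetical sample values:

from datetime import datetime, timezone

from oci.log_analytics.models import LogAnalyticsWarning

# Generated OCI models accept no-argument construction; fields default to None.
warning = LogAnalyticsWarning()

# Each setter documented above simply assigns to its backing attribute ...
warning.warning_id = 'example-warning-id'  # hypothetical identifier
warning.time_of_initial_warning = datetime(2021, 1, 1, tzinfo=timezone.utc)
warning.time_collected = datetime.now(timezone.utc)
warning.is_active = True

# ... and each getter returns the stored value unchanged.
if warning.is_active:
    print(warning.warning_id, warning.time_collected)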