Dataset columns and observed value ranges:

body_hash: string, length 64 | body: string, length 23-109k | docstring: string, length 1-57k | path: string, length 4-198 | name: string, length 1-115 | repository_name: string, length 7-111 | repository_stars: float64, 0-191k | lang: 1 string class ("python") | body_without_docstring: string, length 14-108k | unified: string, length 45-133k

Each record below lists its body_hash, its source metadata, and the function body with its docstring.

body_hash: 0b56f1dacb9bc27a0500ff79d49cb31c6342dcef1f74507b7b994f47ef1431a0
path: bci_lib/Stages/LoadData/LoadData.py | name: set_params | repository: SahandSadeghpour/bci_lib | stars: 0 | lang: python

def set_params(self, epochsdata: mne.Epochs):
    """
    DESCRIPTION
    -----------
    Load Epochs data

    Parameter
    -----------
    epochsdata: Instance of mne.Epochs

    Example
    -----------

    -----------
    """
    self._params = {'data': epochsdata}
    return self._params

body_hash: b7801fca452cf3e823dd0fbc10c3a10a51aaa33cc96918a1f72f7b86a150e652
path: bci_lib/Stages/LoadData/LoadData.py | name: do_task | repository: SahandSadeghpour/bci_lib | stars: 0 | lang: python

def do_task(self):
    """
    DESCRIPTION
    -----------
    Import the epochs data from user and save it on database
    -----------
    """
    epochs = self._params.pop('data')
    output = EpochsData(self._outputs[0], epochs)
    self._set_output(output, self._outputs[0])

body_hash: 769f088229dcd811ebd578b3c3b269d4aed8f630bd1c6737a47f530526e87bf8
path: neutronpy/data/analysis.py | name: detailed_balance_factor | repository: neutronpy/neutronpy | stars: 14 | lang: python

@property
def detailed_balance_factor(self):
    """Returns the detailed balance factor (sometimes called the Bose
    factor)

    Parameters
    ----------
    None

    Returns
    -------
    dbf : ndarray
        The detailed balance factor (temperature correction)

    """
    return 1.0 - np.exp(-self.Q[:, 3] / BOLTZMANN_IN_MEV_K / self.temp)
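
The property implements the Bose-factor correction 1 - exp(-E / (k_B T)), with the energy transfer E read from the fourth column of Q. A minimal standalone sketch of the same arithmetic; the constant value and the input numbers are assumptions for illustration:

```python
import numpy as np

BOLTZMANN_IN_MEV_K = 0.08617    # k_B in meV/K; assumed value of the module constant
e = np.array([1.0, 5.0, 10.0])  # hypothetical energy transfers (meV), i.e. Q[:, 3]
temp = 300.0                    # hypothetical sample temperature (K)

dbf = 1.0 - np.exp(-e / BOLTZMANN_IN_MEV_K / temp)  # detailed balance factor per point
```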

body_hash: 28582406b270b2581438a80fe67ddf4dce7e43c55b8fd5eb7f2d37263baee53b
path: neutronpy/data/analysis.py | name: integrate | repository: neutronpy/neutronpy | stars: 14 | lang: python

def integrate(self, bounds=None, background=None, hkle=True):
    """Returns the integrated intensity within given bounds

    Parameters
    ----------
    bounds : bool, optional
        A boolean expression representing the bounds inside which the
        calculation will be performed

    background : float or dict, optional
        Default: None

    hkle : bool, optional
        If True, integrates only over h, k, l, e dimensions, otherwise
        integrates over all dimensions in :py:attr:`.Data.data`

    Returns
    -------
    result : float
        The integrated intensity either over all data, or within
        specified boundaries

    """
    result = 0
    for key in self.get_keys(hkle):
        result += np.trapz(self.intensity[self.get_bounds(bounds)] - self.estimate_background(background),
                           np.squeeze(self.data[key][self.get_bounds(bounds)]))
    return result
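
Each term is a trapezoid-rule integral of the background-subtracted intensity along one axis. A hedged usage sketch, where `data` is a hypothetical neutronpy `Data` instance whose `e` array and `intensity` attribute are taken from the surrounding methods:

```python
# `data` is a hypothetical neutronpy Data instance.
bounds = (data.e > -1.0) & (data.e < 1.0)   # boolean mask selecting the elastic line
counts = data.integrate(bounds=bounds,
                        background={'type': 'percent', 'value': 10})
```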

body_hash: 35752de05d9eccf4b7958942398d20f8273417b931550f275a87d865602abb5e
path: neutronpy/data/analysis.py | name: position | repository: neutronpy/neutronpy | stars: 14 | lang: python

def position(self, bounds=None, background=None, hkle=True):
    """Returns the position of a peak within the given bounds

    Parameters
    ----------
    bounds : bool, optional
        A boolean expression representing the bounds inside which the
        calculation will be performed

    background : float or dict, optional
        Default: None

    hkle : bool, optional
        If True, integrates only over h, k, l, e dimensions, otherwise
        integrates over all dimensions in :py:attr:`.Data.data`

    Returns
    -------
    result : tup
        The result is a tuple with position in each dimension of Q,
        (h, k, l, e)

    """
    result = ()
    for key in self.get_keys(hkle):
        _result = 0
        for key_integrate in self.get_keys(hkle):
            _result += np.trapz(self.data[key][self.get_bounds(bounds)] *
                                (self.intensity[self.get_bounds(bounds)] -
                                 self.estimate_background(background)),
                                self.data[key_integrate][self.get_bounds(bounds)]) / self.integrate(bounds, background)
        result += (np.squeeze(_result),)
    if hkle:
        return result
    else:
        return dict((key, value) for key, value in zip(self.get_keys(hkle), result))
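
The position is the background-subtracted first moment (center of mass), normalized by `integrate`. A hedged sketch of both return forms, reusing the hypothetical `data` and `bounds` above; the dict keys depend on the instance's `Q_keys` mapping:

```python
h0, k0, l0, e0 = data.position(bounds=bounds)      # hkle=True: tuple over (h, k, l, e)
center = data.position(bounds=bounds, hkle=False)  # dict keyed by column name
```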

body_hash: 344c62c489c52ae591313dc1a846c34ae3712bc953d4290b42fe032a89a9f856
path: neutronpy/data/analysis.py | name: width | repository: neutronpy/neutronpy | stars: 14 | lang: python

def width(self, bounds=None, background=None, fwhm=False, hkle=True):
    """Returns the mean-squared width of a peak within the given bounds

    Parameters
    ----------
    bounds : bool, optional
        A boolean expression representing the bounds inside which the
        calculation will be performed

    background : float or dict, optional
        Default: None

    fwhm : bool, optional
        If True, returns width in fwhm, otherwise in mean-squared width.
        Default: False

    hkle : bool, optional
        If True, integrates only over h, k, l, e dimensions, otherwise
        integrates over all dimensions in :py:attr:`.Data.data`

    Returns
    -------
    result : tup
        The result is a tuple with the width in each dimension of Q,
        (h, k, l, e)

    """
    result = ()
    for key in self.get_keys(hkle):
        _result = 0
        for key_integrate in self.get_keys(hkle):
            _result += np.trapz((self.data[key][self.get_bounds(bounds)] -
                                 self.position(bounds, background, hkle=False)[key]) ** 2 *
                                (self.intensity[self.get_bounds(bounds)] -
                                 self.estimate_background(background)),
                                self.data[key_integrate][self.get_bounds(bounds)]) / self.integrate(bounds, background)
        if fwhm:
            result += (np.sqrt(np.squeeze(_result)) * 2.0 * np.sqrt(2.0 * np.log(2.0)),)
        else:
            result += (np.squeeze(_result),)
    if hkle:
        return result
    else:
        return dict((key, value) for key, value in zip(self.get_keys(hkle), result))
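
The inner loop computes the background-subtracted second central moment, i.e. a variance; with `fwhm=True` that variance is converted assuming a Gaussian line shape, FWHM = 2 * sqrt(2 * ln 2) * sigma (about 2.3548 * sigma). A minimal sketch of just that conversion with an assumed input:

```python
import numpy as np

sigma_sq = 0.04  # hypothetical mean-squared width (variance)
fwhm = np.sqrt(sigma_sq) * 2.0 * np.sqrt(2.0 * np.log(2.0))  # ~0.4710
```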

body_hash: a4a48eefb3031e1b32f906b3409bb8f402b107e3b7ac13800fd786b19822de4a
path: neutronpy/data/analysis.py | name: scattering_function | repository: neutronpy/neutronpy | stars: 14 | lang: python

def scattering_function(self, material, ei):
    """Returns the neutron scattering function, i.e. the detector counts
    scaled by :math:`4 \pi / \sigma_{\mathrm{tot}} * k_i/k_f`.

    Parameters
    ----------
    material : object
        Definition of the material given by the :py:class:`.Material`
        class

    ei : float
        Incident energy in meV

    Returns
    -------
    counts : ndarray
        The detector counts scaled by the total scattering cross section
        and ki/kf
    """
    ki = Energy(energy=ei).wavevector
    kf = Energy(energy=ei - self.e).wavevector
    return 4 * np.pi / material.total_scattering_cross_section * ki / kf * self.detector
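
The ki/kf ratio undoes the kinematic weighting of the measured counts. A hedged sketch of the scaling with made-up numbers; the `Energy` import path and the cross-section value are assumptions:

```python
import numpy as np
from neutronpy import Energy  # assumed import path for the Energy helper

ei, e = 14.7, 2.0                  # hypothetical incident energy and energy transfer (meV)
ki = Energy(energy=ei).wavevector
kf = Energy(energy=ei - e).wavevector
scale = 4 * np.pi / 5.0 * ki / kf  # 5.0: hypothetical total scattering cross section
```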

body_hash: a3bb695cc4cb06531f2dd17a9a188ef52c4d0fe301fa595ebb752d9c312446dc
path: neutronpy/data/analysis.py | name: dynamic_susceptibility | repository: neutronpy/neutronpy | stars: 14 | lang: python

def dynamic_susceptibility(self, material, ei):
    """Returns the dynamic susceptibility
    :math:`\chi^{\prime\prime}(\mathbf{Q},\hbar\omega)`

    Parameters
    ----------
    material : object
        Definition of the material given by the :py:class:`.Material`
        class

    ei : float
        Incident energy in meV

    Returns
    -------
    counts : ndarray
        The detector counts turned into the scattering function multiplied
        by the detailed balance factor
    """
    return self.scattering_function(material, ei) * self.detailed_balance_factor

body_hash: 7a63b39b7aeaa1c55894cf98f0b7bd260b1330257c5418588d7a7a9b1372d4a3
path: neutronpy/data/analysis.py | name: estimate_background | repository: neutronpy/neutronpy | stars: 14 | lang: python

def estimate_background(self, bg_params):
    """Estimate the background according to ``type`` specified.

    Parameters
    ----------
    bg_params : dict
        Input dictionary has keys 'type' and 'value'. Types are
            * 'constant' : background is the constant given by 'value'
            * 'percent' : background is estimated by the bottom x%, where x
              is value
            * 'minimum' : background is estimated as the detector counts

    Returns
    -------
    background : float or ndarray
        Value determined to be the background. Will return ndarray only if
        `'type'` is `'constant'` and `'value'` is an ndarray

    """
    if bg_params is None:
        return 0
    elif isinstance(bg_params, numbers.Number):
        return bg_params
    elif bg_params['type'] == 'constant':
        return bg_params['value']
    elif bg_params['type'] == 'percent':
        inten = self.intensity[self.intensity >= 0.0]
        Npts = int(inten.size * (bg_params['value'] / 100.0))
        min_vals = inten[np.argsort(inten)[:Npts]]
        background = np.average(min_vals)
        return background
    elif bg_params['type'] == 'minimum':
        return min(self.intensity)
    else:
        return 0
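
Each accepted `bg_params` form maps onto one branch above; a hedged sketch, with `data` again a hypothetical instance:

```python
data.estimate_background(None)                                # -> 0
data.estimate_background(3.5)                                 # bare number is returned as-is
data.estimate_background({'type': 'constant', 'value': 3.5})  # -> 3.5
data.estimate_background({'type': 'percent', 'value': 5})     # mean of the lowest 5% of counts
data.estimate_background({'type': 'minimum'})                 # smallest intensity value
```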

body_hash: 530dfc779b72717ad9d7329cd9f91c5a2cc24c5ab611fa01ce177d8d49b17f39
path: neutronpy/data/analysis.py | name: get_bounds | repository: neutronpy/neutronpy | stars: 14 | lang: python

def get_bounds(self, bounds):
    """Generates a to_fit tuple if bounds is present in kwargs

    Parameters
    ----------
    bounds : dict

    Returns
    -------
    to_fit : tuple
        Tuple of indices

    """
    if bounds is not None:
        return np.where(bounds)
    else:
        return np.where(self.Q[:, 0])

body_hash: 80086f05e86ac7c546f2700f708132dfd95125d6e22c387f7c78764f9a56f3c5
path: neutronpy/data/analysis.py | name: get_keys | repository: neutronpy/neutronpy | stars: 14 | lang: python

def get_keys(self, hkle):
    """Returns all of the Dictionary key names

    Parameters
    ----------
    hkle : bool
        If True only returns keys for h,k,l,e, otherwise returns all keys

    Returns
    -------
    keys : list
        :py:attr:`.Data.data` dictionary keys

    """
    if hkle:
        return [key for key in self.data if key in self.Q_keys.values()]
    else:
        return [key for key in self.data if key not in self.data_keys.values()]

body_hash: 1c91d615351ece963da9b9e9af70c2f5458b52e22898cb7087f0a70f0d5ecc7b
path: airmail/services/deploy_file.py | name: get_service | repository: nymag/leviosa | stars: 0 | lang: python

def get_service(self):
    """Get the service name from the config file"""
    prop = '{env}.service'.format(env=self.env)
    env_service_declaration = get(self.deploy_json, prop, False)
    return env_service_declaration

body_hash: e6e2120e5d43f756f83bd9eb6ef0e0c61ecdc2e53544b49808406583d437524f
path: airmail/services/deploy_file.py | name: get_with_prefix | repository: nymag/leviosa | stars: 0 | lang: python

def get_with_prefix(self, prop, delim='-'):
    """Retrieve a value with <ORG>-<ENV>- prefix. Can pass in custom delimiter"""
    if prop not in self.deploy_json:
        return None
    top_level = self.get_top_level_prop(prop)
    value = top_level if top_level is not None else self.get_prop(prop)
    return self.get_org() + delim + self.env + delim + value
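
With the default delimiter this composes `<org>-<env>-<value>`. A hedged sketch in which the org, environment, and config values are hypothetical:

```python
# Assuming get_org() -> 'nymag', self.env == 'prod',
# and the config resolves 'name' to 'web'.
deploy_file.get_with_prefix('name')             # -> 'nymag-prod-web'
deploy_file.get_with_prefix('name', delim='_')  # -> 'nymag_prod_web'
```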

body_hash: abfea78b8fb06f055fd23695117cc073b64ab2ddaf8af01e5036fb5e3ea4596a
path: airmail/services/deploy_file.py | name: inject_cluster_and_family | repository: nymag/leviosa | stars: 0 | lang: python

def inject_cluster_and_family(self):
    """Assign the cluster and family for the service/task using the `name` and `cluster` fields if set"""
    cluster_val = self.get_with_prefix('cluster')
    family_val = self.get_with_prefix('name')
    if cluster_val is None:
        cluster_val = self.get_with_prefix('name')
    set_(self.deploy_json, '.cluster', cluster_val)
    set_(self.deploy_json, '.family', family_val)

body_hash: 44c859bd58ddfbff6dca51039fbf932730feff1e582ff026a247891e1842dae6
path: airmail/services/deploy_file.py | name: get_top_level_prop | repository: nymag/leviosa | stars: 0 | lang: python

def get_top_level_prop(self, prop, default=None):
    """Retrieve a property's value from the top level of the config"""
    return get(self.deploy_json, prop, default)

body_hash: 245f2972e199b629268973729f0378ec8d9fe6416f5c8c90da6986893eb7973e
path: airmail/services/deploy_file.py | name: get_prop | repository: nymag/leviosa | stars: 0 | lang: python

def get_prop(self, prop, default=None):
    """Get a property's value that is nested in the env object"""
    prop = self.env + '.' + prop
    return get(self.deploy_json, prop, default)

body_hash: d6137f47c3b0f16821f3791ed1cb3b30ddf8984fec8546b1ea49c67f406da05a
path: server/openapi_server/models/page_of_notes_all_of.py | name: __init__ | repository: data2health/2014-i2b2-deid-db | stars: 3 | lang: python

def __init__(self, notes=None):
    """PageOfNotesAllOf - a model defined in OpenAPI

    :param notes: The notes of this PageOfNotesAllOf.  # noqa: E501
    :type notes: List[Note]
    """
    self.openapi_types = {'notes': List[Note]}
    self.attribute_map = {'notes': 'notes'}
    self._notes = notes

body_hash: bc9bcc1140a63419fee05af19335a7d50ece3da57e1ebbe9a998d5d92fee376f
path: server/openapi_server/models/page_of_notes_all_of.py | name: from_dict | repository: data2health/2014-i2b2-deid-db | stars: 3 | lang: python

@classmethod
def from_dict(cls, dikt) -> 'PageOfNotesAllOf':
    """Returns the dict as a model

    :param dikt: A dict.
    :type: dict
    :return: The PageOfNotes_allOf of this PageOfNotesAllOf.  # noqa: E501
    :rtype: PageOfNotesAllOf
    """
    return util.deserialize_model(dikt, cls)

body_hash: bbeeb5283724f5840f7222f96ef2585311053b2fdd9b5fd846d9a3be845ece15
path: server/openapi_server/models/page_of_notes_all_of.py | name: notes | repository: data2health/2014-i2b2-deid-db | stars: 3 | lang: python

@property
def notes(self):
    """Gets the notes of this PageOfNotesAllOf.

    An array of notes  # noqa: E501

    :return: The notes of this PageOfNotesAllOf.
    :rtype: List[Note]
    """
    return self._notes

body_hash: 047e0aad3ff271a325db2632c0d0dd1462f67ba713f57cd7e2dcb5b049956813
path: server/openapi_server/models/page_of_notes_all_of.py | name: notes | repository: data2health/2014-i2b2-deid-db | stars: 3 | lang: python

@notes.setter
def notes(self, notes):
    """Sets the notes of this PageOfNotesAllOf.

    An array of notes  # noqa: E501

    :param notes: The notes of this PageOfNotesAllOf.
    :type notes: List[Note]
    """
    self._notes = notes

body_hash: 9a9d54f664335d4d4cbadbdb3461ba2a21711c7415b234186700c41d0c12f7c6
path: modules/pascalvoc.py | name: __init__ | repository: dkloving/object-annotation-convert | stars: 0 | lang: python

def __init__(self, label_folder, image_folder_override=None):
    """TODO: docstring"""
    if not Path(label_folder).is_dir():
        raise ValueError('Label folder {} not a valid directory'.format(label_folder))
    if image_folder_override is not None:
        if not Path(image_folder_override).is_dir():
            raise ValueError('Image folder {} not a valid directory'.format(image_folder_override))
    self._image_folder_override = image_folder_override
    self._label_folder = label_folder
    self._dataframe = pd.DataFrame()
    self._class_ids = dict()

body_hash: ab402e4172f681c82b7b6acb77c423bccf9d7f7d17523abaf749cb2ce1f69812
path: modules/pascalvoc.py | name: fit | repository: dkloving/object-annotation-convert | stars: 0 | lang: python

def fit(self, deep_validate_images=False):
    """TODO: docstring"""
    if deep_validate_images:
        warn('Deep validation of images can be very slow on large datasets.')
    label_files = Path(self._label_folder).glob('*.xml')
    objects_df = pd.DataFrame()
    for label_file in label_files:
        file = label_file.read_text()
        xml = etree.fromstring(file)
        new_df = self.__xml_to_dataframe(xml, deep_validate_images)
        objects_df = pd.concat([objects_df, new_df])
    self._dataframe = objects_df
    return self

body_hash: 1090af748d450cfa4c999d04c0c237b23d2da60814bc7fafc0b4206b6c0e8d0b
path: modules/pascalvoc.py | name: __xml_to_dataframe | repository: dkloving/object-annotation-convert | stars: 0 | lang: python

def __xml_to_dataframe(self, xml, deep_validate_images):
    """TODO: docstring"""
    if self._image_folder_override is None:
        image_id = str(Path(xml.find('path').text))
    else:
        image_id = str(Path(self._image_folder_override).joinpath(Path(xml.find('filename').text)))
    image_width = xml.find('size').find('width').text
    image_height = xml.find('size').find('height').text
    image_depth = xml.find('size').find('depth').text
    image_valid = validate_image(image_id, deep_validate_images, image_width, image_height, image_depth)
    objects = []
    for item in xml:
        if item.tag == 'object':
            class_name = item.find('name').text
            if class_name not in self._class_ids:
                self._class_ids[class_name] = len(self._class_ids)
            class_id = self._class_ids[class_name]
            objects.append({'class_name': class_name,
                            'class_id': class_id,
                            'x_min': item.find('bndbox').find('xmin').text,
                            'x_max': item.find('bndbox').find('xmax').text,
                            'y_min': item.find('bndbox').find('ymin').text,
                            'y_max': item.find('bndbox').find('ymax').text})
    objects = pd.DataFrame(objects)
    for item in ['image_id', 'image_width', 'image_height', 'image_depth', 'image_valid']:
        objects[item] = eval(item)
    return objects
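
A hedged end-to-end sketch of the loader: the class name `PascalVOC` is an assumption from the module path, the folder paths are placeholders, and `_dataframe` is read directly because no public accessor appears in these records:

```python
loader = PascalVOC('labels/', image_folder_override='images/')  # class name assumed
df = loader.fit(deep_validate_images=False)._dataframe          # fit() returns self
print(df[['image_id', 'class_name', 'x_min', 'y_min', 'x_max', 'y_max']].head())
```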

body_hash: a8717cd1eb4899a0b06ec6d7591467e8aeb8a8ca6ffbeb9144e8619a3923291d
path: perfkitbenchmarker/linux_packages/node_js.py | name: _Install | repository: kwinstonix/PerfKitBenchmarker | stars: 3 | lang: python

def _Install(vm):
    """Installs the node.js package on the VM."""
    vm.Install('build_tools')
    vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, NODE_DIR))
    vm.RemoteCommand('cd {0} && git checkout {1}'.format(NODE_DIR, GIT_TAG))
    vm.RemoteCommand('cd {0} && ./configure --prefix=/usr'.format(NODE_DIR))
    vm.RemoteCommand('cd {0} && make && sudo make install'.format(NODE_DIR))

body_hash: 067a6e082b366fb986dd6892fb22dac253ee71a16d2fa533acd10607632018ac
path: perfkitbenchmarker/linux_packages/node_js.py | name: YumInstall | repository: kwinstonix/PerfKitBenchmarker | stars: 3 | lang: python

def YumInstall(vm):
    """Installs the node.js package on the VM."""
    _Install(vm)

body_hash: aa4a5e14f25b15cebef4ec04a19977caec8bb207be4ceee4c1dd75d39bb525b3
path: perfkitbenchmarker/linux_packages/node_js.py | name: AptInstall | repository: kwinstonix/PerfKitBenchmarker | stars: 3 | lang: python

def AptInstall(vm):
    """Installs the node.js package on the VM."""
    _Install(vm)

body_hash: ca0c3d94f42cd84cda0601ce6c30e576236135b62824666a023c98fa2653236f
path: perfkitbenchmarker/linux_packages/node_js.py | name: _Uninstall | repository: kwinstonix/PerfKitBenchmarker | stars: 3 | lang: python

def _Uninstall(vm):
    """Uninstalls the node.js package on the VM."""
    vm.RemoteCommand('cd {0} && sudo make uninstall'.format(NODE_DIR))

body_hash: fcd0e8a78d9e42474a729dfe29bc0fd60ce4a4771b44a14b45bbb9d5e966bebb
path: perfkitbenchmarker/linux_packages/node_js.py | name: YumUninstall | repository: kwinstonix/PerfKitBenchmarker | stars: 3 | lang: python

def YumUninstall(vm):
    """Uninstalls the node.js package on the VM."""
    _Uninstall(vm)

body_hash: 4892b14e10f2a28798ed248985a6ca39224e963b305d8f5d70c21804bbdc8194
path: perfkitbenchmarker/linux_packages/node_js.py | name: AptUninstall | repository: kwinstonix/PerfKitBenchmarker | stars: 3 | lang: python

def AptUninstall(vm):
    """Uninstalls the node.js package on the VM."""
    _Uninstall(vm)

body_hash: 6fc20aafa811e854bca2795cee57b67e76ca0bf3fd8276797fb5884fa1d8b9dc
path: xknx/remote_value/remote_value_scene_number.py | name: __init__ | repository: FredericMa/xknx | stars: 1 | lang: python

def __init__(self, xknx, group_address=None, device_name=None, after_update_cb=None):
    """Initialize remote value of KNX DPT 17.001 (DPT_Scene_Number)."""
    super().__init__(xknx, group_address, None, device_name=device_name, after_update_cb=after_update_cb)

body_hash: 7366615a5a49e1b54d4fe13f7b6e6ce5736e88ee5b93a8c80313678373b5faf9
path: xknx/remote_value/remote_value_scene_number.py | name: payload_valid | repository: FredericMa/xknx | stars: 1 | lang: python

def payload_valid(self, payload):
    """Test if telegram payload may be parsed."""
    return isinstance(payload, DPTArray) and len(payload.value) == 1

body_hash: 1847407c9c2dfc1d4d0c3b1553c3e25691cb98f3a59bb1907f093b1f3ff32a47
path: xknx/remote_value/remote_value_scene_number.py | name: to_knx | repository: FredericMa/xknx | stars: 1 | lang: python

def to_knx(self, value):
    """Convert value to payload."""
    return DPTArray(DPTSceneNumber.to_knx(value))

body_hash: 462f9b44d0c20d2c8d3904b357d94451be5abd6977a968288cf93837c7d6a466
path: xknx/remote_value/remote_value_scene_number.py | name: from_knx | repository: FredericMa/xknx | stars: 1 | lang: python

def from_knx(self, payload):
    """Convert current payload to value."""
    return DPTSceneNumber.from_knx(payload.value)
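
Since DPT 17.001 carries a scene number in a single byte, `to_knx` and `from_knx` should invert each other. A hedged round-trip sketch, where `rv` is a hypothetical `RemoteValueSceneNumber` instance:

```python
payload = rv.to_knx(5)            # DPTArray wrapping the encoded scene byte
assert rv.payload_valid(payload)  # single-element DPTArray passes the check
assert rv.from_knx(payload) == 5  # decoding inverts the encoding
```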

body_hash: c5f7b873f7c02867db2dae34cf1ebe8bc5a39f0f64e69cacab9034bba46c5e25
path: src/globus_cli/commands/timer/show.py | name: show_command | repository: globusonline/globus-cli | stars: 0 | lang: python

@command('show', short_help='Display a Timer job')
@click.argument('JOB_ID')
@LoginManager.requires_login(LoginManager.TIMER_RS)
def show_command(login_manager: LoginManager, job_id: str):
    """
    Display information about a particular job.
    """
    timer_client = login_manager.get_timer_client()
    response = timer_client.get_job(job_id)
    formatted_print(response, text_format=FORMAT_TEXT_RECORD, fields=JOB_FORMAT_FIELDS)

body_hash: 1c032bd95c739a72eab42a58fad01c83a9308f8d7bbe290a4964c6b610d0fac5
path: intake/tests/services/test_submissions.py | name: make_mock_confirmation_notification | repository: dane-king/intake | stars: 51 | lang: python

def make_mock_confirmation_notification(self, successes, **contact_info):
    """contact_info and successes"""
    notification = Mock()
    notification.contact_info = contact_info
    notification.successes = successes
    return notification
b6f848031cbf40d138413632b03d115f199fe4397873d35dcc1b3bb3e6dc2060 | async def fetch_self(self) -> entity.ObjectiveEntity:
'Perform an HTTP request fetching this objective entity definition.\n\n Returns\n -------\n `aiobungie.crate.ObjectiveEntity`\n An objective entity definition.\n '
return (await self.net.request.fetch_objective_entity(self.hash)) | Perform an HTTP request fetching this objective entity definition.
Returns
-------
`aiobungie.crate.ObjectiveEntity`
An objective entity definition. | aiobungie/crate/records.py | fetch_self | nxtlo/aiobungie | 36 | python | async def fetch_self(self) -> entity.ObjectiveEntity:
'Perform an HTTP request fetching this objective entity definition.\n\n Returns\n -------\n `aiobungie.crate.ObjectiveEntity`\n An objective entity definition.\n '
return (await self.net.request.fetch_objective_entity(self.hash)) | async def fetch_self(self) -> entity.ObjectiveEntity:
'Perform an HTTP request fetching this objective entity definition.\n\n Returns\n -------\n `aiobungie.crate.ObjectiveEntity`\n An objective entity definition.\n '
return (await self.net.request.fetch_objective_entity(self.hash))<|docstring|>Perform an HTTP request fetching this objective entity definition.
Returns
-------
`aiobungie.crate.ObjectiveEntity`
An objective entity definition.<|endoftext|> |
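Because fetch_self is a coroutine it must be awaited; a minimal sketch, assuming a record object obtained from an aiobungie client:

import asyncio

async def show_objective(record):
    # 'record' is assumed to expose fetch_self() as in the record above.
    objective = await record.fetch_self()
    print(objective)

# asyncio.run(show_objective(record))  # 'record' must come from an aiobungie client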
e77fe5fffebe0228266df778062ef7dbd81c0ba4e83154aede0370821194bd40 | def messages_from_raw(r):
'Extract data messages from raw recorded SignalR data.\n\n This function can be used to extract message data from raw SignalR data\n which was saved using :class:`SignalRClient` in debug mode.\n\n Args:\n r (iterable) : Iterable containing raw SignalR responses.\n '
ret = list()
errorcount = 0
for data in r:
data = data.replace("'", '"').replace('True', 'true').replace('False', 'false')
try:
data = json.loads(data)
except json.JSONDecodeError:
errorcount += 1
continue
messages = (data['M'] if (('M' in data) and (len(data['M']) > 0)) else {})
for inner_data in messages:
hub = (inner_data['H'] if ('H' in inner_data) else '')
if (hub.lower() == 'streaming'):
message = inner_data['A']
ret.append(message)
return (ret, errorcount) | Extract data messages from raw recorded SignalR data.
This function can be used to extract message data from raw SignalR data
which was saved using :class:`SignalRClient` in debug mode.
Args:
r (iterable) : Iterable containing raw SignalR responses. | fastf1/livetiming/client.py | messages_from_raw | althype/Fast-F1 | 690 | python | def messages_from_raw(r):
'Extract data messages from raw recorded SignalR data.\n\n This function can be used to extract message data from raw SignalR data\n which was saved using :class:`SignalRClient` in debug mode.\n\n Args:\n r (iterable) : Iterable containing raw SignalR responses.\n '
ret = list()
errorcount = 0
for data in r:
data = data.replace("'", '"').replace('True', 'true').replace('False', 'false')
try:
data = json.loads(data)
except json.JSONDecodeError:
errorcount += 1
continue
messages = (data['M'] if (('M' in data) and (len(data['M']) > 0)) else {})
for inner_data in messages:
hub = (inner_data['H'] if ('H' in inner_data) else '')
if (hub.lower() == 'streaming'):
message = inner_data['A']
ret.append(message)
return (ret, errorcount) | def messages_from_raw(r):
'Extract data messages from raw recorded SignalR data.\n\n This function can be used to extract message data from raw SignalR data\n which was saved using :class:`SignalRClient` in debug mode.\n\n Args:\n r (iterable) : Iterable containing raw SignalR responses.\n '
ret = list()
errorcount = 0
for data in r:
data = data.replace("'", '"').replace('True', 'true').replace('False', 'false')
try:
data = json.loads(data)
except json.JSONDecodeError:
errorcount += 1
continue
messages = (data['M'] if (('M' in data) and (len(data['M']) > 0)) else {})
for inner_data in messages:
hub = (inner_data['H'] if ('H' in inner_data) else '')
if (hub.lower() == 'streaming'):
message = inner_data['A']
ret.append(message)
return (ret, errorcount)<|docstring|>Extract data messages from raw recorded SignalR data.
This function can be used to extract message data from raw SignalR data
which was saved using :class:`SignalRClient` in debug mode.
Args:
r (iterable) : Iterable containing raw SignalR responses.<|endoftext|> |
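A minimal sketch of decoding a debug-mode recording with messages_from_raw; the file name is an assumption, and any iterable of raw response strings works:

from fastf1.livetiming.client import messages_from_raw

with open('saved_data.txt') as f:   # hypothetical file written by SignalRClient in debug mode
    messages, errorcount = messages_from_raw(f)

print(f'{len(messages)} messages decoded, {errorcount} undecodable lines skipped')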
1926e9a433013c47f3937e5a9a4033c3d62d552437c8541b439694fddeb90776 | def start(self):
'Connect to the data stream and start writing the data to a file.'
try:
asyncio.run(self._async_start())
except KeyboardInterrupt:
self.logger.warning('Keyboard interrupt - exiting...')
return | Connect to the data stream and start writing the data to a file. | fastf1/livetiming/client.py | start | althype/Fast-F1 | 690 | python | def start(self):
try:
asyncio.run(self._async_start())
except KeyboardInterrupt:
self.logger.warning('Keyboard interrupt - exiting...')
return | def start(self):
try:
asyncio.run(self._async_start())
except KeyboardInterrupt:
self.logger.warning('Keyboard interrupt - exiting...')
return<|docstring|>Connect to the data stream and start writing the data to a file.<|endoftext|> |
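A minimal sketch of running the client above; the constructor argument (the output file name) is assumed from the surrounding fastf1 module:

from fastf1.livetiming.client import SignalRClient

client = SignalRClient('saved_data.txt')  # file the live stream is written to (assumed signature)
client.start()                            # blocks until the stream ends or Ctrl+C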
323d91ac780f1a2e7059a7de2cf0d71d28252ac814da78b96f88b0bc900a19ba | def ClearContext(self):
'Clear any previous context.'
self._context = None | Clear any previous context. | transitfeed/problems.py | ClearContext | robinjanke/transitfeed | 647 | python | def ClearContext(self):
self._context = None | def ClearContext(self):
self._context = None<|docstring|>Clear any previous context.<|endoftext|> |
326ab3d0ee644429e0660673fc3995f10c8cf6ec50151341d148ac612f1604c8 | def SetFileContext(self, file_name, row_num, row, headers):
"Save the current context to be output with any errors.\n\n Args:\n file_name: string\n row_num: int\n row: list of strings\n headers: list of column headers, its order corresponding to row's\n "
self._context = (file_name, row_num, row, headers) | Save the current context to be output with any errors.
Args:
file_name: string
row_num: int
row: list of strings
headers: list of column headers, its order corresponding to row's | transitfeed/problems.py | SetFileContext | robinjanke/transitfeed | 647 | python | def SetFileContext(self, file_name, row_num, row, headers):
"Save the current context to be output with any errors.\n\n Args:\n file_name: string\n row_num: int\n row: list of strings\n headers: list of column headers, its order corresponding to row's\n "
self._context = (file_name, row_num, row, headers) | def SetFileContext(self, file_name, row_num, row, headers):
"Save the current context to be output with any errors.\n\n Args:\n file_name: string\n row_num: int\n row: list of strings\n headers: list of column headers, its order corresponding to row's\n "
self._context = (file_name, row_num, row, headers)<|docstring|>Save the current context to be output with any errors.
Args:
file_name: string
row_num: int
row: list of strings
headers: list of column headers, its order corresponding to row's<|endoftext|> |
094505d2c87cea707b25690c6bf74861df89bbf84efc8b413a27dd33e3d1a89c | def AddToAccumulator(self, e):
'Report an exception to the Problem Accumulator'
self.accumulator._Report(e) | Report an exception to the Problem Accumulator | transitfeed/problems.py | AddToAccumulator | robinjanke/transitfeed | 647 | python | def AddToAccumulator(self, e):
self.accumulator._Report(e) | def AddToAccumulator(self, e):
self.accumulator._Report(e)<|docstring|>Report an exception to the Problem Accumulator<|endoftext|> |
3b4bf9d05a6e6571a960615885aea76990d660fcc9f7036c970d7ab2411d0121 | def InvalidLineEnd(self, bad_line_end, context=None, type=TYPE_WARNING):
'bad_line_end is a human readable string.'
e = InvalidLineEnd(bad_line_end=bad_line_end, context=context, context2=self._context, type=type)
self.AddToAccumulator(e) | bad_line_end is a human readable string. | transitfeed/problems.py | InvalidLineEnd | robinjanke/transitfeed | 647 | python | def InvalidLineEnd(self, bad_line_end, context=None, type=TYPE_WARNING):
e = InvalidLineEnd(bad_line_end=bad_line_end, context=context, context2=self._context, type=type)
self.AddToAccumulator(e) | def InvalidLineEnd(self, bad_line_end, context=None, type=TYPE_WARNING):
e = InvalidLineEnd(bad_line_end=bad_line_end, context=context, context2=self._context, type=type)
self.AddToAccumulator(e)<|docstring|>bad_line_end is a human readable string.<|endoftext|> |
f86af560676aaad951626c54d11d7135fb0ba181fab19524d5c977628a91907d | @staticmethod
def _LineWrap(text, width):
'\n A word-wrap function that preserves existing line breaks\n and most spaces in the text. Expects that existing line\n breaks are posix newlines (\n).\n\n Taken from:\n http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/148061\n '
return reduce((lambda line, word, width=width: ('%s%s%s' % (line, ' \n'[((((len(line) - line.rfind('\n')) - 1) + len(word.split('\n', 1)[0])) >= width)], word))), text.split(' ')) | A word-wrap function that preserves existing line breaks
and most spaces in the text. Expects that existing line
breaks are posix newlines (
).
Taken from:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/148061 | transitfeed/problems.py | _LineWrap | robinjanke/transitfeed | 647 | python | @staticmethod
def _LineWrap(text, width):
'\n A word-wrap function that preserves existing line breaks\n and most spaces in the text. Expects that existing line\n breaks are posix newlines (\n).\n\n Taken from:\n http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/148061\n '
return reduce((lambda line, word, width=width: ('%s%s%s' % (line, ' \n'[((((len(line) - line.rfind('\n')) - 1) + len(word.split('\n', 1)[0])) >= width)], word))), text.split(' ')) | @staticmethod
def _LineWrap(text, width):
'\n A word-wrap function that preserves existing line breaks\n and most spaces in the text. Expects that existing line\n breaks are posix newlines (\n).\n\n Taken from:\n http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/148061\n '
return reduce((lambda line, word, width=width: ('%s%s%s' % (line, ' \n'[((((len(line) - line.rfind('\n')) - 1) + len(word.split('\n', 1)[0])) >= width)], word))), text.split(' '))<|docstring|>A word-wrap function that preserves existing line breaks
and most spaces in the text. Expects that existing line
breaks are posix newlines (
).
Taken from:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/148061<|endoftext|> |
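A small demonstration of the wrapper; the owning class is assumed to be SimpleProblemAccumulator as elsewhere in transitfeed, and Python 3 needs reduce imported from functools:

from functools import reduce  # required on Python 3; Python 2 had reduce built in

wrapped = SimpleProblemAccumulator._LineWrap(
    'a problem description that is long enough to need wrapping', 24)
print(wrapped)  # spaces become newlines once a line reaches the requested width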
863d1c353c4e844f7c715a00f7b9ddefb430f55cf8320d6467de3e3400113638 | def __init__(self, context=None, context2=None, **kwargs):
'Initialize an exception object, saving all keyword arguments in self.\n context and context2, if present, must be a tuple of (file_name, row_num,\n row, headers). context2 comes from ProblemReporter.SetFileContext. context\n was passed in with the keyword arguments. context2 is ignored if context\n is present.'
Exception.__init__(self)
if context:
self.__dict__.update(self.ContextTupleToDict(context))
elif context2:
self.__dict__.update(self.ContextTupleToDict(context2))
self.__dict__.update(kwargs)
if (('type' in kwargs) and (kwargs['type'] in ALL_TYPES)):
self._type = kwargs['type']
else:
self._type = TYPE_ERROR | Initialize an exception object, saving all keyword arguments in self.
context and context2, if present, must be a tuple of (file_name, row_num,
row, headers). context2 comes from ProblemReporter.SetFileContext. context
was passed in with the keyword arguments. context2 is ignored if context
is present. | transitfeed/problems.py | __init__ | robinjanke/transitfeed | 647 | python | def __init__(self, context=None, context2=None, **kwargs):
'Initialize an exception object, saving all keyword arguments in self.\n context and context2, if present, must be a tuple of (file_name, row_num,\n row, headers). context2 comes from ProblemReporter.SetFileContext. context\n was passed in with the keyword arguments. context2 is ignored if context\n is present.'
Exception.__init__(self)
if context:
self.__dict__.update(self.ContextTupleToDict(context))
elif context2:
self.__dict__.update(self.ContextTupleToDict(context2))
self.__dict__.update(kwargs)
if (('type' in kwargs) and (kwargs['type'] in ALL_TYPES)):
self._type = kwargs['type']
else:
self._type = TYPE_ERROR | def __init__(self, context=None, context2=None, **kwargs):
'Initialize an exception object, saving all keyword arguments in self.\n context and context2, if present, must be a tuple of (file_name, row_num,\n row, headers). context2 comes from ProblemReporter.SetFileContext. context\n was passed in with the keyword arguments. context2 is ignored if context\n is present.'
Exception.__init__(self)
if context:
self.__dict__.update(self.ContextTupleToDict(context))
elif context2:
self.__dict__.update(self.ContextTupleToDict(context2))
self.__dict__.update(kwargs)
if (('type' in kwargs) and (kwargs['type'] in ALL_TYPES)):
self._type = kwargs['type']
else:
self._type = TYPE_ERROR<|docstring|>Initialize an exception object, saving all keyword arguments in self.
context and context2, if present, must be a tuple of (file_name, row_num,
row, headers). context2 comes from ProblemReporter.SetFileContext. context
was passed in with the keyword arguments. context2 is ignored if context
is present.<|endoftext|> |
e19a0f30c7ec7f9ba2d1acf1c503c2ed8c71827df9cad533e8dce7d99450d8da | @staticmethod
def ContextTupleToDict(context):
'Convert a tuple representing a context into a dict of (key, value) pairs\n '
d = {}
if (not context):
return d
for (k, v) in zip(ExceptionWithContext.CONTEXT_PARTS, context):
if ((v != '') and (v != None)):
d[k] = v
return d | Convert a tuple representing a context into a dict of (key, value) pairs | transitfeed/problems.py | ContextTupleToDict | robinjanke/transitfeed | 647 | python | @staticmethod
def ContextTupleToDict(context):
'\n '
d = {}
if (not context):
return d
for (k, v) in zip(ExceptionWithContext.CONTEXT_PARTS, context):
if ((v != '') and (v != None)):
d[k] = v
return d | @staticmethod
def ContextTupleToDict(context):
'\n '
d = {}
if (not context):
return d
for (k, v) in zip(ExceptionWithContext.CONTEXT_PARTS, context):
if ((v != '') and (v != None)):
d[k] = v
return d<|docstring|>Convert a tuple representing a context into a dict of (key, value) pairs<|endoftext|> |
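A sketch of the tuple-to-dict mapping, assuming CONTEXT_PARTS orders the fields as ('file_name', 'row_num', 'row', 'headers'), consistent with SetFileContext above:

context = ('stops.txt', 12, ['S1', 'Main St'], ['stop_id', 'stop_name'])
d = ExceptionWithContext.ContextTupleToDict(context)
# Empty-string and None entries are dropped; everything else is keyed
# positionally by CONTEXT_PARTS, e.g. {'file_name': 'stops.txt', 'row_num': 12, ...}
print(d)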
e8ba02d6dd5cffd2fc462ecd4e1fba19de775a462981757ed47e625feb642043 | def GetDictToFormat(self):
'Return a copy of self as a dict, suitable for passing to FormatProblem'
d = {}
for (k, v) in self.__dict__.items():
d[k] = util.EncodeUnicode(v)
return d | Return a copy of self as a dict, suitable for passing to FormatProblem | transitfeed/problems.py | GetDictToFormat | robinjanke/transitfeed | 647 | python | def GetDictToFormat(self):
d = {}
for (k, v) in self.__dict__.items():
d[k] = util.EncodeUnicode(v)
return d | def GetDictToFormat(self):
d = {}
for (k, v) in self.__dict__.items():
d[k] = util.EncodeUnicode(v)
return d<|docstring|>Return a copy of self as a dict, suitable for passing to FormatProblem<|endoftext|> |
f2225740e71c0098f47e8b2956ca5f6396de4fdc9167d1def1a6490393685852 | def FormatProblem(self, d=None):
'Return a text string describing the problem.\n\n Args:\n d: map returned by GetDictToFormat with formatting added\n '
if (not d):
d = self.GetDictToFormat()
output_error_text = (self.__class__.ERROR_TEXT % d)
if (('reason' in d) and d['reason']):
return ('%s\n%s' % (output_error_text, d['reason']))
else:
return output_error_text | Return a text string describing the problem.
Args:
d: map returned by GetDictToFormat with formatting added | transitfeed/problems.py | FormatProblem | robinjanke/transitfeed | 647 | python | def FormatProblem(self, d=None):
'Return a text string describing the problem.\n\n Args:\n d: map returned by GetDictToFormat with formatting added\n '
if (not d):
d = self.GetDictToFormat()
output_error_text = (self.__class__.ERROR_TEXT % d)
if (('reason' in d) and d['reason']):
return ('%s\n%s' % (output_error_text, d['reason']))
else:
return output_error_text | def FormatProblem(self, d=None):
'Return a text string describing the problem.\n\n Args:\n d: map returned by GetDictToFormat with formatting added\n '
if (not d):
d = self.GetDictToFormat()
output_error_text = (self.__class__.ERROR_TEXT % d)
if (('reason' in d) and d['reason']):
return ('%s\n%s' % (output_error_text, d['reason']))
else:
return output_error_text<|docstring|>Return a text string describing the problem.
Args:
d: map returned by GetDictToFormat with formatting added<|endoftext|>
110baf963463b61d1bbe1b747aef6544cc1975580f2597161d46c9b43a585f65 | def FormatContext(self):
'Return a text string describing the context'
text = ''
if hasattr(self, 'feed_name'):
text += ("In feed '%s': " % self.feed_name)
if hasattr(self, 'file_name'):
text += self.file_name
if hasattr(self, 'row_num'):
text += (':%i' % self.row_num)
if hasattr(self, 'column_name'):
text += (' column %s' % self.column_name)
return text | Return a text string describing the context | transitfeed/problems.py | FormatContext | robinjanke/transitfeed | 647 | python | def FormatContext(self):
text = ''
if hasattr(self, 'feed_name'):
text += ("In feed '%s': " % self.feed_name)
if hasattr(self, 'file_name'):
text += self.file_name
if hasattr(self, 'row_num'):
text += (':%i' % self.row_num)
if hasattr(self, 'column_name'):
text += (' column %s' % self.column_name)
return text | def FormatContext(self):
text = ''
if hasattr(self, 'feed_name'):
text += ("In feed '%s': " % self.feed_name)
if hasattr(self, 'file_name'):
text += self.file_name
if hasattr(self, 'row_num'):
text += (':%i' % self.row_num)
if hasattr(self, 'column_name'):
text += (' column %s' % self.column_name)
return text<|docstring|>Return a text string describing the context<|endoftext|> |
2556dc2c457a084b66684b8dbcec84a3fb8d83b5b62d364ca066ff5da93ae127 | def __cmp__(self, y):
"Return an int <0/0/>0 when self is more/same/less significant than y.\n\n Subclasses should define this if exceptions should be listed in something\n other than the order they are reported.\n\n Args:\n y: object to compare to self\n\n Returns:\n An int which is negative if self is more significant than y, 0 if they\n are similar significance and positive if self is less significant than\n y. Returning a float won't work.\n\n Raises:\n TypeError by default, meaning objects of the type can not be compared.\n "
raise TypeError('__cmp__ not defined') | Return an int <0/0/>0 when self is more/same/less significant than y.
Subclasses should define this if exceptions should be listed in something
other than the order they are reported.
Args:
y: object to compare to self
Returns:
An int which is negative if self is more significant than y, 0 if they
are similar significance and positive if self is less significant than
y. Returning a float won't work.
Raises:
TypeError by default, meaning objects of the type can not be compared. | transitfeed/problems.py | __cmp__ | robinjanke/transitfeed | 647 | python | def __cmp__(self, y):
"Return an int <0/0/>0 when self is more/same/less significant than y.\n\n Subclasses should define this if exceptions should be listed in something\n other than the order they are reported.\n\n Args:\n y: object to compare to self\n\n Returns:\n An int which is negative if self is more significant than y, 0 if they\n are similar significance and positive if self is less significant than\n y. Returning a float won't work.\n\n Raises:\n TypeError by default, meaning objects of the type can not be compared.\n "
raise TypeError('__cmp__ not defined') | def __cmp__(self, y):
"Return an int <0/0/>0 when self is more/same/less significant than y.\n\n Subclasses should define this if exceptions should be listed in something\n other than the order they are reported.\n\n Args:\n y: object to compare to self\n\n Returns:\n An int which is negative if self is more significant than y, 0 if they\n are similar significance and positive if self is less significant than\n y. Returning a float won't work.\n\n Raises:\n TypeError by default, meaning objects of the type can not be compared.\n "
raise TypeError('__cmp__ not defined')<|docstring|>Return an int <0/0/>0 when self is more/same/less significant than y.
Subclasses should define this if exceptions should be listed in something
other than the order they are reported.
Args:
y: object to compare to self
Returns:
An int which is negative if self is more significant than y, 0 if they
are similar significance and positive if self is less significant than
y. Returning a float won't work.
Raises:
TypeError by default, meaning objects of the type can not be compared.<|endoftext|> |
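__cmp__ is Python 2 only; under Python 3 a subclass would express the same ordering with rich comparisons — a hedged sketch, not part of transitfeed:

import functools

@functools.total_ordering
class OrderedProblem(ExceptionWithContext):
    def __eq__(self, other):
        return self.GetOrderKey() == other.GetOrderKey()

    def __lt__(self, other):
        # A smaller key sorts first, mirroring a negative __cmp__ result.
        return self.GetOrderKey() < other.GetOrderKey()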
621ad1803b1e6676b8779510e90f6cb3d9634262a3343e74bcc1e0a658d04efc | def GetOrderKey(self):
'Return a tuple that can be used to sort problems into a consistent order.\n\n Returns:\n A list of values.\n '
context_attributes = ['_type']
context_attributes.extend(ExceptionWithContext.CONTEXT_PARTS)
context_attributes.extend(self._GetExtraOrderAttributes())
tokens = []
for context_attribute in context_attributes:
tokens.append(getattr(self, context_attribute, None))
return tokens | Return a tuple that can be used to sort problems into a consistent order.
Returns:
A list of values. | transitfeed/problems.py | GetOrderKey | robinjanke/transitfeed | 647 | python | def GetOrderKey(self):
'Return a tuple that can be used to sort problems into a consistent order.\n\n Returns:\n A list of values.\n '
context_attributes = ['_type']
context_attributes.extend(ExceptionWithContext.CONTEXT_PARTS)
context_attributes.extend(self._GetExtraOrderAttributes())
tokens = []
for context_attribute in context_attributes:
tokens.append(getattr(self, context_attribute, None))
return tokens | def GetOrderKey(self):
'Return a tuple that can be used to sort problems into a consistent order.\n\n Returns:\n A list of values.\n '
context_attributes = ['_type']
context_attributes.extend(ExceptionWithContext.CONTEXT_PARTS)
context_attributes.extend(self._GetExtraOrderAttributes())
tokens = []
for context_attribute in context_attributes:
tokens.append(getattr(self, context_attribute, None))
return tokens<|docstring|>Return a tuple that can be used to sort problems into a consistent order.
Returns:
A list of values.<|endoftext|> |
940356f2739a15094f81246ab70d8a13fdaf69a54e6aa0e485ef70bbd5209f76 | def _GetExtraOrderAttributes(self):
'Return a list of extra attributes that should be used by GetOrderKey().\n\n The GetOrderKey method uses the list of class attributes defined in\n CONTEXT_PARTS to generate a list value that can be used as a comparison\n key for sorting problems in a consistent order. Some specific problem\n types may wish to define additional attributes that should be used\n when generating the order key. They can override this method to do so.\n\n Returns:\n A list of class attribute names.\n '
return [] | Return a list of extra attributes that should be used by GetOrderKey().
The GetOrderKey method uses the list of class attributes defined in
CONTEXT_PARTS to generate a list value that can be used as a comparison
key for sorting problems in a consistent order. Some specific problem
types may wish to define additional attributes that should be used
when generating the order key. They can override this method to do so.
Returns:
A list of class attribute names. | transitfeed/problems.py | _GetExtraOrderAttributes | robinjanke/transitfeed | 647 | python | def _GetExtraOrderAttributes(self):
'Return a list of extra attributes that should be used by GetOrderKey().\n\n The GetOrderKey method uses the list of class attributes defined in\n CONTEXT_PARTS to generate a list value that can be used as a comparison\n key for sorting problems in a consistent order. Some specific problem\n types may wish to define additional attributes that should be used\n when generating the order key. They can override this method to do so.\n\n Returns:\n A list of class attribute names.\n '
return [] | def _GetExtraOrderAttributes(self):
'Return a list of extra attributes that should be used by GetOrderKey().\n\n The GetOrderKey method uses the list of class attributes defined in\n CONTEXT_PARTS to generate a list value that can be used as a comparison\n key for sorting problems in a consistent order. Some specific problem\n types may wish to define additional attributes that should be used\n when generating the order key. They can override this method to do so.\n\n Returns:\n A list of class attribute names.\n '
return []<|docstring|>Return a list of extra attributes that should be used by GetOrderKey().
The GetOrderKey method uses the list of class attributes defined in
CONTEXT_PARTS to generate a list value that can be used as a comparison
key for sorting problems in a consistent order. Some specific problem
types may wish to define additional attributes that should be used
when generating the order key. They can override this method to do so.
Returns:
A list of class attribute names.<|endoftext|> |
aceb05fcb921c2130224dd1be1d8cba91a5c951e652a9aee0b31fd511545f39a | def __init__(self, raise_warnings=False):
'Initialise.\n\n Args:\n raise_warnings: If this is True then warnings are also raised as\n exceptions.\n If it is false, warnings are printed to the console using\n SimpleProblemAccumulator.\n '
self.raise_warnings = raise_warnings
self.accumulator = SimpleProblemAccumulator() | Initialise.
Args:
raise_warnings: If this is True then warnings are also raised as
exceptions.
If it is false, warnings are printed to the console using
SimpleProblemAccumulator. | transitfeed/problems.py | __init__ | robinjanke/transitfeed | 647 | python | def __init__(self, raise_warnings=False):
'Initialise.\n\n Args:\n raise_warnings: If this is True then warnings are also raised as\n exceptions.\n If it is false, warnings are printed to the console using\n SimpleProblemAccumulator.\n '
self.raise_warnings = raise_warnings
self.accumulator = SimpleProblemAccumulator() | def __init__(self, raise_warnings=False):
'Initialise.\n\n Args:\n raise_warnings: If this is True then warnings are also raised as\n exceptions.\n If it is false, warnings are printed to the console using\n SimpleProblemAccumulator.\n '
self.raise_warnings = raise_warnings
self.accumulator = SimpleProblemAccumulator()<|docstring|>Initialise.
Args:
raise_warnings: If this is True then warnings are also raised as
exceptions.
If it is false, warnings are printed to the console using
SimpleProblemAccumulator.<|endoftext|> |
5c70d2239aeb91e311f1a35e390445109b0dd38ad3f185b23e9a838e1b542e1c | def spatial_pyramid_pooling(input, level):
'\n\tinput: 4 channel input (bt,ch,r,c)\n\tlevel: no of levels of pooling\n\treturns : does spatial pyramidal pooling and returns the output\n\t'
assert (input.dim() == 4)
output = []
"\n\t\tNOTE: Sumit's implementation\n\t"
for i in range(1, (level + 1)):
kernel_size = (int(np.ceil((input.size(2) / (1.0 * i)))), int(np.ceil((input.size(3) / (1.0 * i)))))
stride_size = (int(np.floor((input.size(2) / (1.0 * i)))), int(np.floor((input.size(3) / (1.0 * i)))))
level_out = F.max_pool2d(input, kernel_size=kernel_size, stride=stride_size)
output.append(level_out.view(input.size()[0], (- 1)))
final_out = torch.cat(output, 1)
return final_out | input: 4 channel input (bt,ch,r,c)
level: no of levels of pooling
returns : does spatial pyramidal pooling and returns the output | lib/Utility/FeatureOperations.py | spatial_pyramid_pooling | SAGNIKMJR/MetaQNN_ImageClassification_PyTorch | 12 | python | def spatial_pyramid_pooling(input, level):
'\n\tinput: 4 channel input (bt,ch,r,c)\n\tlevel: no of levels of pooling\n\treturns : does spatial pyramidal pooling and returns the output\n\t'
assert (input.dim() == 4)
output = []
"\n\t\tNOTE: Sumit's implementation\n\t"
for i in range(1, (level + 1)):
kernel_size = (int(np.ceil((input.size(2) / (1.0 * i)))), int(np.ceil((input.size(3) / (1.0 * i)))))
stride_size = (int(np.floor((input.size(2) / (1.0 * i)))), int(np.floor((input.size(3) / (1.0 * i)))))
level_out = F.max_pool2d(input, kernel_size=kernel_size, stride=stride_size)
output.append(level_out.view(input.size()[0], (- 1)))
final_out = torch.cat(output, 1)
return final_out | def spatial_pyramid_pooling(input, level):
'\n\tinput: 4 channel input (bt,ch,r,c)\n\tlevel: no of levels of pooling\n\treturns : does spatial pyramidal pooling and returns the output\n\t'
assert (input.dim() == 4)
output = []
"\n\t\tNOTE: Sumit's implementation\n\t"
for i in range(1, (level + 1)):
kernel_size = (int(np.ceil((input.size(2) / (1.0 * i)))), int(np.ceil((input.size(3) / (1.0 * i)))))
stride_size = (int(np.floor((input.size(2) / (1.0 * i)))), int(np.floor((input.size(3) / (1.0 * i)))))
level_out = F.max_pool2d(input, kernel_size=kernel_size, stride=stride_size)
output.append(level_out.view(input.size()[0], (- 1)))
final_out = torch.cat(output, 1)
return final_out<|docstring|>input: 4 channel input (bt,ch,r,c)
level: no of levels of pooling
returns : does spatial pyramidal pooling and returns the output<|endoftext|>
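A quick shape check for the pooling above (the module's torch import is assumed); with level pyramid levels the output has ch * (1*1 + ... + level*level) features per sample:

import torch

x = torch.randn(8, 64, 13, 13)             # (batch, channels, rows, cols)
out = spatial_pyramid_pooling(x, level=3)
# 64 * (1*1 + 2*2 + 3*3) = 896 features per sample, independent of input size
print(out.shape)                            # torch.Size([8, 896])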
67e60bc4ebb60e6cbcd81481f596780975effa0ff5ff0b107c3b21a14b1dd245 | def full_average_pooling(input):
'\n\tinput: 4 channel input (bt,ch,r,c)\n\treturns : bt*ch*1*1 feature maps produced by average pooling with kernel size == input size\n\t'
assert (input.dim() == 4)
return F.avg_pool2d(input, kernel_size=(input.size(2), input.size(3))) | input: 4 channel input (bt,ch,r,c)
returns : bt*ch*1*1 feature maps produced by average pooling with kernel size == input size | lib/Utility/FeatureOperations.py | full_average_pooling | SAGNIKMJR/MetaQNN_ImageClassification_PyTorch | 12 | python | def full_average_pooling(input):
'\n\tinput: 4 channel input (bt,ch,r,c)\n\treturns : bt*ch*1*1 feature maps produced by average pooling with kernel size == input size\n\t'
assert (input.dim() == 4)
return F.avg_pool2d(input, kernel_size=(input.size(2), input.size(3))) | def full_average_pooling(input):
'\n\tinput: 4 channel input (bt,ch,r,c)\n\treturns : bt*ch*1*1 feature maps produced by average pooling with kernel size == input size\n\t'
assert (input.dim() == 4)
return F.avg_pool2d(input, kernel_size=(input.size(2), input.size(3)))<|docstring|>input: 4 channel input (bt,ch,r,c)
returns : bt*ch*1*1 feature maps produced by average pooling with kernel size == input size<|endoftext|>
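The same result is available from adaptive pooling, which is the idiomatic modern spelling — a short sketch:

import torch
import torch.nn.functional as F

x = torch.randn(4, 32, 10, 17)
a = full_average_pooling(x)                    # shape (4, 32, 1, 1)
b = F.adaptive_avg_pool2d(x, output_size=1)    # the built-in global average pool
print(torch.allclose(a, b))                    # True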
7d6662849587a8c00c437085efda1f2697320bd7d3247de39a91b50b3b6ff53d | def CenterCrop(cropTarget, cropVar):
'\n\tcropTarget: target image (the shape is deduced from this image)\n\tcropVar: image to be cropped\n\treturns : crops CropVar to the size of cropTarget by performing center crop\n\t'
cropSize = cropTarget.size()
tw = (cropSize[2] // 2)
th = (cropSize[3] // 2)
varSize = cropVar.size()
c1 = (varSize[2] // 2)
c2 = (varSize[3] // 2)
subW = 0
subH = 0
if (((cropSize[2] % 2) == 0) and ((varSize[2] % 2) == 0)):
subW = 1
if (((cropSize[3] % 2) == 0) and ((varSize[3] % 2) == 0)):
subH = 1
cropOp = cropVar[(:, :, (c1 - tw):(((c1 + tw) + 1) - subW), (c2 - th):(((c2 + th) + 1) - subH))].clone()
return cropOp | cropTarget: target image (the shape is deduced from this image)
cropVar: image to be cropped
returns : crops CropVar to the size of cropTarget by performing center crop | lib/Utility/FeatureOperations.py | CenterCrop | SAGNIKMJR/MetaQNN_ImageClassification_PyTorch | 12 | python | def CenterCrop(cropTarget, cropVar):
'\n\tcropTarget: target image (the shape is deduced from this image)\n\tcropVar: image to be cropped\n\treturns : crops CropVar to the size of cropTarget by performing center crop\n\t'
cropSize = cropTarget.size()
tw = (cropSize[2] // 2)
th = (cropSize[3] // 2)
varSize = cropVar.size()
c1 = (varSize[2] // 2)
c2 = (varSize[3] // 2)
subW = 0
subH = 0
if (((cropSize[2] % 2) == 0) and ((varSize[2] % 2) == 0)):
subW = 1
if (((cropSize[3] % 2) == 0) and ((varSize[3] % 2) == 0)):
subH = 1
cropOp = cropVar[(:, :, (c1 - tw):(((c1 + tw) + 1) - subW), (c2 - th):(((c2 + th) + 1) - subH))].clone()
return cropOp | def CenterCrop(cropTarget, cropVar):
'\n\tcropTarget: target image (the shape is deduced from this image)\n\tcropVar: image to be cropped\n\treturns : crops CropVar to the size of cropTarget by performing center crop\n\t'
cropSize = cropTarget.size()
tw = (cropSize[2] // 2)
th = (cropSize[3] // 2)
varSize = cropVar.size()
c1 = (varSize[2] // 2)
c2 = (varSize[3] // 2)
subW = 0
subH = 0
if (((cropSize[2] % 2) == 0) and ((varSize[2] % 2) == 0)):
subW = 1
if (((cropSize[3] % 2) == 0) and ((varSize[3] % 2) == 0)):
subH = 1
cropOp = cropVar[(:, :, (c1 - tw):(((c1 + tw) + 1) - subW), (c2 - th):(((c2 + th) + 1) - subH))].clone()
return cropOp<|docstring|>cropTarget: target image (the shape is deduced from this image)
cropVar: image to be cropped
returns : crops CropVar to the size of cropTarget by performing center crop<|endoftext|> |
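A shape-only sketch of the crop helper; both arguments are 4-D tensors and only cropVar's spatial extent changes:

import torch

target = torch.zeros(1, 3, 24, 24)
bigger = torch.randn(1, 3, 32, 32)
print(CenterCrop(target, bigger).shape)  # torch.Size([1, 3, 24, 24]) — the center 24x24 window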
a8b6f9791d41ab8100de8e64c48ba166e8512fc114368b31e612fa0ef111c6fb | def PeriodicShuffle(x, factor):
'\n\tx: input feature map\n\tfactor: upsampling factor\n\treturns : upsampled image with the mentioned factor\n\t'
(btSize, ch, rows, cols) = x.size()
ch_target = (ch // (factor * factor))
ch_factor = (ch // ch_target)
shape_1 = [btSize, (ch_factor // factor), (ch_factor // factor), rows, cols]
shape_2 = [btSize, 1, (rows * factor), (cols * factor)]
out = []
for i in range(ch_target):
temp = x[(:, (i * ch_factor):((i + 1) * ch_factor), :, :)]
temp = temp.view(shape_1)
temp = temp.permute(0, 1, 3, 2, 4)
temp = temp.contiguous()
temp = temp.view(shape_2)
out.append(temp)
out = torch.cat(out, 1)
return out | x: input feature map
factor: upsampling factor
returns : upsampled image with the mentioned factor | lib/Utility/FeatureOperations.py | PeriodicShuffle | SAGNIKMJR/MetaQNN_ImageClassification_PyTorch | 12 | python | def PeriodicShuffle(x, factor):
'\n\tx: input feature map\n\tfactor: upsampling factor\n\treturns : upsampled image with the mentioned factor\n\t'
(btSize, ch, rows, cols) = x.size()
ch_target = (ch // (factor * factor))
ch_factor = (ch // ch_target)
shape_1 = [btSize, (ch_factor // factor), (ch_factor // factor), rows, cols]
shape_2 = [btSize, 1, (rows * factor), (cols * factor)]
out = []
for i in range(ch_target):
temp = x[(:, (i * ch_factor):((i + 1) * ch_factor), :, :)]
temp = temp.view(shape_1)
temp = temp.permute(0, 1, 3, 2, 4)
temp = temp.contiguous()
temp = temp.view(shape_2)
out.append(temp)
out = torch.cat(out, 1)
return out | def PeriodicShuffle(x, factor):
'\n\tx: input feature map\n\tfactor: upsampling factor\n\treturns : upsampled image with the mentioned factor\n\t'
(btSize, ch, rows, cols) = x.size()
ch_target = (ch // (factor * factor))
ch_factor = (ch // ch_target)
shape_1 = [btSize, (ch_factor // factor), (ch_factor // factor), rows, cols]
shape_2 = [btSize, 1, (rows * factor), (cols * factor)]
out = []
for i in range(ch_target):
temp = x[(:, (i * ch_factor):((i + 1) * ch_factor), :, :)]
temp = temp.view(shape_1)
temp = temp.permute(0, 1, 3, 2, 4)
temp = temp.contiguous()
temp = temp.view(shape_2)
out.append(temp)
out = torch.cat(out, 1)
return out<|docstring|>x: input feature map
factor: upsampling factor
returns : upsampled image with the mentioned factor<|endoftext|> |
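A shape sketch for the upsampler, relying on the integer divisions fixed above (true division would make range() fail on Python 3); each group of factor*factor channels becomes one factor-times-larger map:

import torch

x = torch.randn(2, 16, 8, 8)
y = PeriodicShuffle(x, factor=2)
print(y.shape)  # torch.Size([2, 4, 16, 16]): 16/(2*2) output channels, 2x larger maps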
949e2b0a84119a40bbe60b3b91b95fbaa32a386dd4908ba185623114aae8f87c | def KL_multivariate(means, logstds, means_0, stds_0):
'\n KL in the special case where the target distribution \n is factorized and the prior is any multivariate gaussian\n '
dets = ((2.0 * tf.reduce_sum((tf.log(stds_0) - logstds))) - tf.cast(means.shape[0], tf.float32))
norm_trace = tf.reduce_sum(((((means - means_0) ** 2) + (tf.exp(logstds) ** 2)) / (stds_0 ** 2)))
KL = (0.5 * (dets + norm_trace))
return KL | KL in the special case where the target distribution
is factorized and the prior is any multivariate gaussian | models/models_organic_bandit.py | KL_multivariate | criteo-research/blob | 14 | python | def KL_multivariate(means, logstds, means_0, stds_0):
'\n KL in the special case where the target distribution \n is factorized and the prior is any multivariate gaussian\n '
dets = ((2.0 * tf.reduce_sum((tf.log(stds_0) - logstds))) - tf.cast(means.shape[0], tf.float32))
norm_trace = tf.reduce_sum(((((means - means_0) ** 2) + (tf.exp(logstds) ** 2)) / (stds_0 ** 2)))
KL = (0.5 * (dets + norm_trace))
return KL | def KL_multivariate(means, logstds, means_0, stds_0):
'\n KL in the special case where the target distribution \n is factorized and the prior is any multivariate gaussian\n '
dets = ((2.0 * tf.reduce_sum((tf.log(stds_0) - logstds))) - tf.cast(means.shape[0], tf.float32))
norm_trace = tf.reduce_sum(((((means - means_0) ** 2) + (tf.exp(logstds) ** 2)) / (stds_0 ** 2)))
KL = (0.5 * (dets + norm_trace))
return KL<|docstring|>KL in the special case where the target distribution
is factorized and the prior is any multivariate gaussian<|endoftext|> |
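A sanity check, assuming TensorFlow 1.x (the record uses tf.log and graph sessions): the KL is zero when the variational posterior equals the prior.

import numpy as np
import tensorflow as tf  # TF 1.x assumed

means = tf.constant(np.zeros(3, dtype=np.float32))
logstds = tf.zeros(3)                  # std = 1, so log std = 0
means_0 = tf.constant(np.zeros(3, dtype=np.float32))
stds_0 = tf.constant(np.ones(3, dtype=np.float32))

kl = KL_multivariate(means, logstds, means_0, stds_0)
with tf.Session() as sess:
    print(sess.run(kl))                # ~0.0 when posterior equals prior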
c86ce5be41573430c3204eccd528d45752776281f500033719eca7665a41fd8b | def check_complete(task, out_queue):
'\n Checks if task is complete, puts the result to out_queue.\n '
logger.debug('Checking if %s is complete', task)
try:
is_complete = task.complete()
except BaseException:
is_complete = TracebackWrapper(traceback.format_exc())
out_queue.put((task, is_complete)) | Checks if task is complete, puts the result to out_queue. | luigi/worker.py | check_complete | GlobalFishingWatch/luigi | 2 | python | def check_complete(task, out_queue):
'\n \n '
logger.debug('Checking if %s is complete', task)
try:
is_complete = task.complete()
except BaseException:
is_complete = TracebackWrapper(traceback.format_exc())
out_queue.put((task, is_complete)) | def check_complete(task, out_queue):
'\n \n '
logger.debug('Checking if %s is complete', task)
try:
is_complete = task.complete()
except BaseException:
is_complete = TracebackWrapper(traceback.format_exc())
out_queue.put((task, is_complete))<|docstring|>Checks if task is complete, puts the result to out_queue.<|endoftext|> |
6e082224e627c3bf1f3b6284aa79ed468bd5a5d0f4a451a7881239be81ee2428 | def terminate(self):
'Terminate this process and its subprocesses.'
try:
return self._recursive_terminate()
except ImportError:
return super(TaskProcess, self).terminate() | Terminate this process and its subprocesses. | luigi/worker.py | terminate | GlobalFishingWatch/luigi | 2 | python | def terminate(self):
try:
return self._recursive_terminate()
except ImportError:
return super(TaskProcess, self).terminate() | def terminate(self):
try:
return self._recursive_terminate()
except ImportError:
return super(TaskProcess, self).terminate()<|docstring|>Terminate this process and its subprocesses.<|endoftext|> |
2d05553e9e56ea42f7c782e719e027402a97c06c3c05df8eefb9d67356892845 | def _add_task(self, *args, **kwargs):
'\n Call ``self._scheduler.add_task``, but store the values too so we can\n implement :py:func:`luigi.execution_summary.summary`.\n '
task = self._scheduled_tasks.get(kwargs['task_id'])
if task:
msg = (task, kwargs['status'], kwargs['runnable'])
self._add_task_history.append(msg)
self._scheduler.add_task(*args, **kwargs) | Call ``self._scheduler.add_task``, but store the values too so we can
implement :py:func:`luigi.execution_summary.summary`. | luigi/worker.py | _add_task | GlobalFishingWatch/luigi | 2 | python | def _add_task(self, *args, **kwargs):
'\n Call ``self._scheduler.add_task``, but store the values too so we can\n implement :py:func:`luigi.execution_summary.summary`.\n '
task = self._scheduled_tasks.get(kwargs['task_id'])
if task:
msg = (task, kwargs['status'], kwargs['runnable'])
self._add_task_history.append(msg)
self._scheduler.add_task(*args, **kwargs) | def _add_task(self, *args, **kwargs):
'\n Call ``self._scheduler.add_task``, but store the values too so we can\n implement :py:func:`luigi.execution_summary.summary`.\n '
task = self._scheduled_tasks.get(kwargs['task_id'])
if task:
msg = (task, kwargs['status'], kwargs['runnable'])
self._add_task_history.append(msg)
self._scheduler.add_task(*args, **kwargs)<|docstring|>Call ``self._scheduler.add_task``, but store the values too so we can
implement :py:func:`luigi.execution_summary.summary`.<|endoftext|> |
f376057ed6012832466ef0a588c647f2937bb764f694d7d7b925981dacb09b6c | def stop(self):
'\n Stop the KeepAliveThread associated with this Worker.\n\n This should be called whenever you are done with a worker instance to clean up.\n\n Warning: this should _only_ be performed if you are sure this worker\n is not performing any work or will perform any work after this has been called\n\n TODO: also kill all currently running tasks\n\n TODO (maybe): Worker should be/have a context manager to enforce calling this\n whenever you stop using a Worker instance\n '
self._keep_alive_thread.stop()
self._keep_alive_thread.join() | Stop the KeepAliveThread associated with this Worker.
This should be called whenever you are done with a worker instance to clean up.
Warning: this should _only_ be performed if you are sure this worker
is not performing any work or will perform any work after this has been called
TODO: also kill all currently running tasks
TODO (maybe): Worker should be/have a context manager to enforce calling this
whenever you stop using a Worker instance | luigi/worker.py | stop | GlobalFishingWatch/luigi | 2 | python | def stop(self):
'\n Stop the KeepAliveThread associated with this Worker.\n\n This should be called whenever you are done with a worker instance to clean up.\n\n Warning: this should _only_ be performed if you are sure this worker\n is not performing any work or will perform any work after this has been called\n\n TODO: also kill all currently running tasks\n\n TODO (maybe): Worker should be/have a context manager to enforce calling this\n whenever you stop using a Worker instance\n '
self._keep_alive_thread.stop()
self._keep_alive_thread.join() | def stop(self):
'\n Stop the KeepAliveThread associated with this Worker.\n\n This should be called whenever you are done with a worker instance to clean up.\n\n Warning: this should _only_ be performed if you are sure this worker\n is not performing any work or will perform any work after this has been called\n\n TODO: also kill all currently running tasks\n\n TODO (maybe): Worker should be/have a context manager to enforce calling this\n whenever you stop using a Worker instance\n '
self._keep_alive_thread.stop()
self._keep_alive_thread.join()<|docstring|>Stop the KeepAliveThread associated with this Worker.
This should be called whenever you are done with a worker instance to clean up.
Warning: this should _only_ be performed if you are sure this worker
is not performing any work or will perform any work after this has been called
TODO: also kill all currently running tasks
TODO (maybe): Worker should be/have a context manager to enforce calling this
whenever you stop using a Worker instance<|endoftext|> |
f1a8464776a8a1a6462c99f2bcbd1a944ed0efbbf8c0be0a75dbad84fd0822d7 | def add(self, task, multiprocess=False):
'\n Add a Task for the worker to check and possibly schedule and run.\n\n Returns True if task and its dependencies were successfully scheduled or completed before.\n '
if ((self._first_task is None) and hasattr(task, 'task_id')):
self._first_task = task.task_id
self.add_succeeded = True
if multiprocess:
queue = multiprocessing.Manager().Queue()
pool = multiprocessing.Pool()
else:
queue = DequeQueue()
pool = SingleProcessPool()
self._validate_task(task)
pool.apply_async(check_complete, [task, queue])
queue_size = 1
try:
seen = set([task.task_id])
while queue_size:
current = queue.get()
queue_size -= 1
(item, is_complete) = current
for next in self._add(item, is_complete):
if (next.task_id not in seen):
self._validate_task(next)
seen.add(next.task_id)
pool.apply_async(check_complete, [next, queue])
queue_size += 1
except (KeyboardInterrupt, TaskException):
raise
except Exception as ex:
self.add_succeeded = False
formatted_traceback = traceback.format_exc()
self._log_unexpected_error(task)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_unexpected_error(task, formatted_traceback)
finally:
pool.close()
pool.join()
return self.add_succeeded | Add a Task for the worker to check and possibly schedule and run.
Returns True if task and its dependencies were successfully scheduled or completed before. | luigi/worker.py | add | GlobalFishingWatch/luigi | 2 | python | def add(self, task, multiprocess=False):
'\n Add a Task for the worker to check and possibly schedule and run.\n\n Returns True if task and its dependencies were successfully scheduled or completed before.\n '
if ((self._first_task is None) and hasattr(task, 'task_id')):
self._first_task = task.task_id
self.add_succeeded = True
if multiprocess:
queue = multiprocessing.Manager().Queue()
pool = multiprocessing.Pool()
else:
queue = DequeQueue()
pool = SingleProcessPool()
self._validate_task(task)
pool.apply_async(check_complete, [task, queue])
queue_size = 1
try:
seen = set([task.task_id])
while queue_size:
current = queue.get()
queue_size -= 1
(item, is_complete) = current
for next in self._add(item, is_complete):
if (next.task_id not in seen):
self._validate_task(next)
seen.add(next.task_id)
pool.apply_async(check_complete, [next, queue])
queue_size += 1
except (KeyboardInterrupt, TaskException):
raise
except Exception as ex:
self.add_succeeded = False
formatted_traceback = traceback.format_exc()
self._log_unexpected_error(task)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_unexpected_error(task, formatted_traceback)
finally:
pool.close()
pool.join()
return self.add_succeeded | def add(self, task, multiprocess=False):
'\n Add a Task for the worker to check and possibly schedule and run.\n\n Returns True if task and its dependencies were successfully scheduled or completed before.\n '
if ((self._first_task is None) and hasattr(task, 'task_id')):
self._first_task = task.task_id
self.add_succeeded = True
if multiprocess:
queue = multiprocessing.Manager().Queue()
pool = multiprocessing.Pool()
else:
queue = DequeQueue()
pool = SingleProcessPool()
self._validate_task(task)
pool.apply_async(check_complete, [task, queue])
queue_size = 1
try:
seen = set([task.task_id])
while queue_size:
current = queue.get()
queue_size -= 1
(item, is_complete) = current
for next in self._add(item, is_complete):
if (next.task_id not in seen):
self._validate_task(next)
seen.add(next.task_id)
pool.apply_async(check_complete, [next, queue])
queue_size += 1
except (KeyboardInterrupt, TaskException):
raise
except Exception as ex:
self.add_succeeded = False
formatted_traceback = traceback.format_exc()
self._log_unexpected_error(task)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_unexpected_error(task, formatted_traceback)
finally:
pool.close()
pool.join()
return self.add_succeeded<|docstring|>Add a Task for the worker to check and possibly schedule and run.
Returns True if task and its dependencies were successfully scheduled or completed before.<|endoftext|> |
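In normal use this plumbing is driven through luigi's top-level entry point rather than called directly; a hedged sketch:

import luigi

class MyTask(luigi.Task):
    def output(self):
        return luigi.LocalTarget('out.txt')

    def run(self):
        with self.output().open('w') as f:
            f.write('done\n')

# build() constructs a Worker, add()s each task, then calls run().
luigi.build([MyTask()], local_scheduler=True)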
313d7849a6c1f50aff7dc684e566283a84e3613c4287a9c481e2f7c0c684c31a | def _purge_children(self):
'\n Find dead children and put a response on the result queue.\n\n :return:\n '
for (task_id, p) in six.iteritems(self._running_tasks):
if ((not p.is_alive()) and p.exitcode):
error_msg = ('Worker task %s died unexpectedly with exit code %s' % (task_id, p.exitcode))
elif ((p.timeout_time is not None) and (time.time() > float(p.timeout_time)) and p.is_alive()):
p.terminate()
error_msg = ('Worker task %s timed out and was terminated.' % task_id)
else:
continue
logger.info(error_msg)
self._task_result_queue.put((task_id, FAILED, error_msg, [], [])) | Find dead children and put a response on the result queue.
:return: | luigi/worker.py | _purge_children | GlobalFishingWatch/luigi | 2 | python | def _purge_children(self):
'\n Find dead children and put a response on the result queue.\n\n :return:\n '
for (task_id, p) in six.iteritems(self._running_tasks):
if ((not p.is_alive()) and p.exitcode):
error_msg = ('Worker task %s died unexpectedly with exit code %s' % (task_id, p.exitcode))
elif ((p.timeout_time is not None) and (time.time() > float(p.timeout_time)) and p.is_alive()):
p.terminate()
error_msg = ('Worker task %s timed out and was terminated.' % task_id)
else:
continue
logger.info(error_msg)
self._task_result_queue.put((task_id, FAILED, error_msg, [], [])) | def _purge_children(self):
'\n Find dead children and put a response on the result queue.\n\n :return:\n '
for (task_id, p) in six.iteritems(self._running_tasks):
if ((not p.is_alive()) and p.exitcode):
error_msg = ('Worker task %s died unexpectedly with exit code %s' % (task_id, p.exitcode))
elif ((p.timeout_time is not None) and (time.time() > float(p.timeout_time)) and p.is_alive()):
p.terminate()
error_msg = ('Worker task %s timed out and was terminated.' % task_id)
else:
continue
logger.info(error_msg)
self._task_result_queue.put((task_id, FAILED, error_msg, [], []))<|docstring|>Find dead children and put a response on the result queue.
:return:<|endoftext|> |
0922b556e0b814b85164c2807e929eb879fa4b74b5e97e2f7b0b6d707f92cf2d | def _handle_next_task(self):
'\n We have to catch three ways a task can be "done":\n\n 1. normal execution: the task runs/fails and puts a result back on the queue,\n 2. new dependencies: the task yielded new deps that were not complete and\n will be rescheduled and dependencies added,\n 3. child process dies: we need to catch this separately.\n '
while True:
self._purge_children()
try:
(task_id, status, expl, missing, new_requirements) = self._task_result_queue.get(timeout=self._config.wait_interval)
except Queue.Empty:
return
task = self._scheduled_tasks[task_id]
if ((not task) or (task_id not in self._running_tasks)):
continue
new_deps = []
if new_requirements:
new_req = [load_task(module, name, params) for (module, name, params) in new_requirements]
for t in new_req:
self.add(t)
new_deps = [t.task_id for t in new_req]
self._add_task(worker=self._id, task_id=task_id, status=status, expl=expl, resources=task.process_resources(), runnable=None, params=task.to_str_params(), family=task.task_family, module=task.task_module, new_deps=new_deps, assistant=self._assistant)
if (status == RUNNING):
continue
self._running_tasks.pop(task_id)
if missing:
reschedule = True
for task_id in missing:
self.unfulfilled_counts[task_id] += 1
if (self.unfulfilled_counts[task_id] > self._config.max_reschedules):
reschedule = False
if reschedule:
self.add(task)
self.run_succeeded &= (status in (DONE, SUSPENDED))
return | We have to catch three ways a task can be "done":
1. normal execution: the task runs/fails and puts a result back on the queue,
2. new dependencies: the task yielded new deps that were not complete and
will be rescheduled and dependencies added,
3. child process dies: we need to catch this separately. | luigi/worker.py | _handle_next_task | GlobalFishingWatch/luigi | 2 | python | def _handle_next_task(self):
'\n We have to catch three ways a task can be "done":\n\n 1. normal execution: the task runs/fails and puts a result back on the queue,\n 2. new dependencies: the task yielded new deps that were not complete and\n will be rescheduled and dependencies added,\n 3. child process dies: we need to catch this separately.\n '
while True:
self._purge_children()
try:
(task_id, status, expl, missing, new_requirements) = self._task_result_queue.get(timeout=self._config.wait_interval)
except Queue.Empty:
return
task = self._scheduled_tasks[task_id]
if ((not task) or (task_id not in self._running_tasks)):
continue
new_deps = []
if new_requirements:
new_req = [load_task(module, name, params) for (module, name, params) in new_requirements]
for t in new_req:
self.add(t)
new_deps = [t.task_id for t in new_req]
self._add_task(worker=self._id, task_id=task_id, status=status, expl=expl, resources=task.process_resources(), runnable=None, params=task.to_str_params(), family=task.task_family, module=task.task_module, new_deps=new_deps, assistant=self._assistant)
if (status == RUNNING):
continue
self._running_tasks.pop(task_id)
if missing:
reschedule = True
for task_id in missing:
self.unfulfilled_counts[task_id] += 1
if (self.unfulfilled_counts[task_id] > self._config.max_reschedules):
reschedule = False
if reschedule:
self.add(task)
self.run_succeeded &= (status in (DONE, SUSPENDED))
return | def _handle_next_task(self):
'\n We have to catch three ways a task can be "done":\n\n 1. normal execution: the task runs/fails and puts a result back on the queue,\n 2. new dependencies: the task yielded new deps that were not complete and\n will be rescheduled and dependencies added,\n 3. child process dies: we need to catch this separately.\n '
while True:
self._purge_children()
try:
(task_id, status, expl, missing, new_requirements) = self._task_result_queue.get(timeout=self._config.wait_interval)
except Queue.Empty:
return
task = self._scheduled_tasks[task_id]
if ((not task) or (task_id not in self._running_tasks)):
continue
new_deps = []
if new_requirements:
new_req = [load_task(module, name, params) for (module, name, params) in new_requirements]
for t in new_req:
self.add(t)
new_deps = [t.task_id for t in new_req]
self._add_task(worker=self._id, task_id=task_id, status=status, expl=expl, resources=task.process_resources(), runnable=None, params=task.to_str_params(), family=task.task_family, module=task.task_module, new_deps=new_deps, assistant=self._assistant)
if (status == RUNNING):
continue
self._running_tasks.pop(task_id)
if missing:
reschedule = True
for task_id in missing:
self.unfulfilled_counts[task_id] += 1
if (self.unfulfilled_counts[task_id] > self._config.max_reschedules):
reschedule = False
if reschedule:
self.add(task)
self.run_succeeded &= (status in (DONE, SUSPENDED))
return<|docstring|>We have to catch three ways a task can be "done":
1. normal execution: the task runs/fails and puts a result back on the queue,
2. new dependencies: the task yielded new deps that were not complete and
will be rescheduled and dependencies added,
3. child process dies: we need to catch this separately.<|endoftext|> |
a01773dd35a79109418b36875ac9111e60a7b31b0900bb5266fdb92315fd236c | def _keep_alive(self, n_pending_tasks, n_unique_pending):
'\n Returns true if a worker should stay alive given its pending tasks.\n\n If worker-keep-alive is not set, this will always return false.\n For an assistant, it will always return the value of worker-keep-alive.\n Otherwise, it will return true for nonzero n_pending_tasks.\n\n If worker-count-uniques is true, it will also\n require that one of the tasks is unique to this worker.\n '
if (not self._config.keep_alive):
return False
elif self._assistant:
return True
else:
return (n_pending_tasks and (n_unique_pending or (not self._config.count_uniques))) | Returns true if a worker should stay alive given its pending tasks.
If worker-keep-alive is not set, this will always return false.
For an assistant, it will always return the value of worker-keep-alive.
Otherwise, it will return true for nonzero n_pending_tasks.
If worker-count-uniques is true, it will also
require that one of the tasks is unique to this worker. | luigi/worker.py | _keep_alive | GlobalFishingWatch/luigi | 2 | python | def _keep_alive(self, n_pending_tasks, n_unique_pending):
'\n Returns true if a worker should stay alive given its pending tasks.\n\n If worker-keep-alive is not set, this will always return false.\n For an assistant, it will always return the value of worker-keep-alive.\n Otherwise, it will return true for nonzero n_pending_tasks.\n\n If worker-count-uniques is true, it will also\n require that one of the tasks is unique to this worker.\n '
if (not self._config.keep_alive):
return False
elif self._assistant:
return True
else:
return (n_pending_tasks and (n_unique_pending or (not self._config.count_uniques))) | def _keep_alive(self, n_pending_tasks, n_unique_pending):
'\n Returns true if a worker should stay alive given its pending tasks.\n\n If worker-keep-alive is not set, this will always return false.\n For an assistant, it will always return the value of worker-keep-alive.\n Otherwise, it will return true for nonzero n_pending_tasks.\n\n If worker-count-uniques is true, it will also\n require that one of the tasks is unique to this worker.\n '
if (not self._config.keep_alive):
return False
elif self._assistant:
return True
else:
return (n_pending_tasks and (n_unique_pending or (not self._config.count_uniques)))<|docstring|>Returns true if a worker should stay alive given its pending tasks.
If worker-keep-alive is not set, this will always return false.
For an assistant, it will always return the value of worker-keep-alive.
Otherwise, it will return true for nonzero n_pending_tasks.
If worker-count-uniques is true, it will also
require that one of the tasks is unique to this worker.<|endoftext|> |
0a5ddea33ec6fae4d3f1c0806295d1de3683b6fa893a19700e740380cbd89b3f | def handle_interrupt(self, signum, _):
'\n Stops the assistant from asking for more work on SIGUSR1\n '
if (signum == signal.SIGUSR1):
self._config.keep_alive = False
self._stop_requesting_work = True | Stops the assistant from asking for more work on SIGUSR1 | luigi/worker.py | handle_interrupt | GlobalFishingWatch/luigi | 2 | python | def handle_interrupt(self, signum, _):
'\n \n '
if (signum == signal.SIGUSR1):
self._config.keep_alive = False
self._stop_requesting_work = True | def handle_interrupt(self, signum, _):
'\n \n '
if (signum == signal.SIGUSR1):
self._config.keep_alive = False
self._stop_requesting_work = True<|docstring|>Stops the assistant from asking for more work on SIGUSR1<|endoftext|> |
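The signal itself is sent from outside the process; a sketch with a hypothetical pid:

import os
import signal

worker_pid = 12345                     # hypothetical pid of a running luigi assistant
os.kill(worker_pid, signal.SIGUSR1)    # finish current tasks, stop asking for more work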
e19e3cc8d905f1523e1c90ae5f062b5f92ff0e9a25e32d1bc0caa976011eda81 | def run(self):
'\n Returns True if all scheduled tasks were executed successfully.\n '
logger.info('Running Worker with %d processes', self.worker_processes)
sleeper = self._sleeper()
self.run_succeeded = True
self._add_worker()
while True:
while (len(self._running_tasks) >= self.worker_processes):
logger.debug('%d running tasks, waiting for next task to finish', len(self._running_tasks))
self._handle_next_task()
(task_id, running_tasks, n_pending_tasks, n_unique_pending) = self._get_work()
if (task_id is None):
if (not self._stop_requesting_work):
self._log_remote_tasks(running_tasks, n_pending_tasks, n_unique_pending)
if (len(self._running_tasks) == 0):
if self._keep_alive(n_pending_tasks, n_unique_pending):
six.next(sleeper)
continue
else:
break
else:
self._handle_next_task()
continue
logger.debug('Pending tasks: %s', n_pending_tasks)
self._run_task(task_id)
while len(self._running_tasks):
logger.debug('Shut down Worker, %d more tasks to go', len(self._running_tasks))
self._handle_next_task()
return self.run_succeeded | Returns True if all scheduled tasks were executed successfully. | luigi/worker.py | run | GlobalFishingWatch/luigi | 2 | python | def run(self):
'\n \n '
logger.info('Running Worker with %d processes', self.worker_processes)
sleeper = self._sleeper()
self.run_succeeded = True
self._add_worker()
while True:
while (len(self._running_tasks) >= self.worker_processes):
logger.debug('%d running tasks, waiting for next task to finish', len(self._running_tasks))
self._handle_next_task()
(task_id, running_tasks, n_pending_tasks, n_unique_pending) = self._get_work()
if (task_id is None):
if (not self._stop_requesting_work):
self._log_remote_tasks(running_tasks, n_pending_tasks, n_unique_pending)
if (len(self._running_tasks) == 0):
if self._keep_alive(n_pending_tasks, n_unique_pending):
six.next(sleeper)
continue
else:
break
else:
self._handle_next_task()
continue
logger.debug('Pending tasks: %s', n_pending_tasks)
self._run_task(task_id)
while len(self._running_tasks):
logger.debug('Shut down Worker, %d more tasks to go', len(self._running_tasks))
self._handle_next_task()
return self.run_succeeded | def run(self):
'\n \n '
logger.info('Running Worker with %d processes', self.worker_processes)
sleeper = self._sleeper()
self.run_succeeded = True
self._add_worker()
while True:
while (len(self._running_tasks) >= self.worker_processes):
logger.debug('%d running tasks, waiting for next task to finish', len(self._running_tasks))
self._handle_next_task()
(task_id, running_tasks, n_pending_tasks, n_unique_pending) = self._get_work()
if (task_id is None):
if (not self._stop_requesting_work):
self._log_remote_tasks(running_tasks, n_pending_tasks, n_unique_pending)
if (len(self._running_tasks) == 0):
if self._keep_alive(n_pending_tasks, n_unique_pending):
six.next(sleeper)
continue
else:
break
else:
self._handle_next_task()
continue
logger.debug('Pending tasks: %s', n_pending_tasks)
self._run_task(task_id)
while len(self._running_tasks):
logger.debug('Shut down Worker, %d more tasks to go', len(self._running_tasks))
self._handle_next_task()
return self.run_succeeded<|docstring|>Returns True if all scheduled tasks were executed successfully.<|endoftext|> |
c9dcf846ca211f60f4348bd404a5dd2c9bc051cbc8e4940b1fd3e1c040040166 | def _nested_dim(space):
'\n Return the total number of dimensions in the entire (nested) space.\n '
if isinstance(space, Dict):
return [*itertools.chain.from_iterable([_nested_dim_helper(s) for s in space.spaces.values()])]
elif isinstance(space, Tuple):
return [*itertools.chain.from_iterable([_nested_dim_helper(s) for s in space.spaces])]
else:
return _nested_dim_helper(space) | Return the total number of dimensions in the entire (nested) space. | abmarl/sim/wrappers/ravel_discrete_wrapper.py | _nested_dim | Leonardo767/Abmarl | 7 | python | def _nested_dim(space):
'\n \n '
if isinstance(space, Dict):
return [*itertools.chain.from_iterable([_nested_dim_helper(s) for s in space.spaces.values()])]
elif isinstance(space, Tuple):
return [*itertools.chain.from_iterable([_nested_dim_helper(s) for s in space.spaces])]
else:
return _nested_dim_helper(space) | def _nested_dim(space):
'\n \n '
if isinstance(space, Dict):
return [*itertools.chain.from_iterable([_nested_dim_helper(s) for s in space.spaces.values()])]
elif isinstance(space, Tuple):
return [*itertools.chain.from_iterable([_nested_dim_helper(s) for s in space.spaces])]
else:
return _nested_dim_helper(space)<|docstring|>Return the total number of dimensions in the entire (nested) space.<|endoftext|> |
d039a82564a2432c0afd5bda97f2be3da645199f5327512b2d619b0f94e71737 | def ravel(space, point):
'\n Ravel point in space to a single discrete value.\n '
return _ravel_helper(space, point)[0] | Ravel point in space to a single discrete value. | abmarl/sim/wrappers/ravel_discrete_wrapper.py | ravel | Leonardo767/Abmarl | 7 | python | def ravel(space, point):
'\n \n '
return _ravel_helper(space, point)[0] | def ravel(space, point):
'\n \n '
return _ravel_helper(space, point)[0]<|docstring|>Ravel point in space to a single discrete value.<|endoftext|> |
8b7c243d4ffa132035b5818f764113a3362e924b368c1abbebbdcb08387efcea | def unravel(space, point):
'\n Unravel a single discrete point to a value in the space.\n '
if isinstance(space, Discrete):
return point
if isinstance(space, MultiDiscrete):
return [*np.unravel_index(point, space.nvec)]
if isinstance(space, MultiBinary):
return [*np.unravel_index(point, ([2] * space.n))]
if isinstance(space, Box):
space_helper = ((space.high + 1) - space.low).flatten()
return (np.reshape(np.unravel_index(point, space_helper), space.shape) + space.low)
elif isinstance(space, Dict):
dims = _nested_dim(space)
unravelled_point = unravel(MultiDiscrete(dims), point)
output = {}
for (i, (key, value)) in enumerate(space.spaces.items()):
output[key] = unravel(value, unravelled_point[i])
return output
elif isinstance(space, Tuple):
dims = _nested_dim(space)
unravelled_point = unravel(MultiDiscrete(dims), point)
output = []
for (i, value) in enumerate(space.spaces):
output.append(unravel(value, unravelled_point[i]))
return tuple(output) | Unravel a single discrete point to a value in the space. | abmarl/sim/wrappers/ravel_discrete_wrapper.py | unravel | Leonardo767/Abmarl | 7 | python | def unravel(space, point):
'\n \n '
if isinstance(space, Discrete):
return point
if isinstance(space, MultiDiscrete):
return [*np.unravel_index(point, space.nvec)]
if isinstance(space, MultiBinary):
return [*np.unravel_index(point, ([2] * space.n))]
if isinstance(space, Box):
space_helper = ((space.high + 1) - space.low).flatten()
return (np.reshape(np.unravel_index(point, space_helper), space.shape) + space.low)
elif isinstance(space, Dict):
dims = _nested_dim(space)
unravelled_point = unravel(MultiDiscrete(dims), point)
output = {}
for (i, (key, value)) in enumerate(space.spaces.items()):
output[key] = unravel(value, unravelled_point[i])
return output
elif isinstance(space, Tuple):
dims = _nested_dim(space)
unravelled_point = unravel(MultiDiscrete(dims), point)
output = []
for (i, value) in enumerate(space.spaces):
output.append(unravel(value, unravelled_point[i]))
return tuple(output) | def unravel(space, point):
'\n \n '
if isinstance(space, Discrete):
return point
if isinstance(space, MultiDiscrete):
return [*np.unravel_index(point, space.nvec)]
if isinstance(space, MultiBinary):
return [*np.unravel_index(point, ([2] * space.n))]
if isinstance(space, Box):
space_helper = ((space.high + 1) - space.low).flatten()
return (np.reshape(np.unravel_index(point, space_helper), space.shape) + space.low)
elif isinstance(space, Dict):
dims = _nested_dim(space)
unravelled_point = unravel(MultiDiscrete(dims), point)
output = {}
for (i, (key, value)) in enumerate(space.spaces.items()):
output[key] = unravel(value, unravelled_point[i])
return output
elif isinstance(space, Tuple):
dims = _nested_dim(space)
unravelled_point = unravel(MultiDiscrete(dims), point)
output = []
for (i, value) in enumerate(space.spaces):
output.append(unravel(value, unravelled_point[i]))
return tuple(output)<|docstring|>Unravel a single discrete point to a value in the space.<|endoftext|> |
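The ravel/unravel pair above rests on NumPy's mixed-radix index conversion; a self-contained round-trip for a MultiDiscrete-style space with sizes (3, 4, 5):
import numpy as np

nvec = (3, 4, 5)                           # per-dimension sizes
point = (2, 1, 3)
flat = np.ravel_multi_index(point, nvec)   # -> 48, a single Discrete value
back = np.unravel_index(flat, nvec)        # -> (2, 1, 3)
assert tuple(back) == point
print(flat, back)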
40b381557de949e5010d27e7e18967b28b363ed448b9cec86cf907017a044788 | def ravel_space(space):
'\n Convert the space into a Discrete space.\n '
dims = _nested_dim_helper(space)
return Discrete(dims[0]) | Convert the space into a Discrete space. | abmarl/sim/wrappers/ravel_discrete_wrapper.py | ravel_space | Leonardo767/Abmarl | 7 | python | def ravel_space(space):
'\n \n '
dims = _nested_dim_helper(space)
return Discrete(dims[0]) | def ravel_space(space):
'\n \n '
dims = _nested_dim_helper(space)
return Discrete(dims[0])<|docstring|>Convert the space into a Discrete space.<|endoftext|> |
2b8040b3fe673d377af90427740eb780f94b2ba33f045255052599eed446c8c4 | def _isbounded(space):
"\n Gym Box converts np.inf to min and max values for integer types. As a result,\n Box.is_bounded doesn't work because it checks for inf, not for min/max values\n of that dtype. This function checks for min/max values of the dtype.\n "
return (space.is_bounded() and (not (space.low == np.iinfo(space.dtype).min).any()) and (not (space.low == np.iinfo(space.dtype).max).any()) and (not (space.high == np.iinfo(space.dtype).min).any()) and (not (space.high == np.iinfo(space.dtype).max).any())) | Gym Box converts np.inf to min and max values for integer types. As a result,
Box.is_bounded doesn't work because it checks for inf, not for min/max values
of that dtype. This function checks for min/max values of the dtype. | abmarl/sim/wrappers/ravel_discrete_wrapper.py | _isbounded | Leonardo767/Abmarl | 7 | python | def _isbounded(space):
"\n Gym Box converts np.inf to min and max values for integer types. As a result,\n Box.is_bounded doesn't work because it checks for inf, not for min/max values\n of that dtype. This function checks for min/max values of the dtype.\n "
return (space.is_bounded() and (not (space.low == np.iinfo(space.dtype).min).any()) and (not (space.low == np.iinfo(space.dtype).max).any()) and (not (space.high == np.iinfo(space.dtype).min).any()) and (not (space.high == np.iinfo(space.dtype).max).any())) | def _isbounded(space):
"\n Gym Box converts np.inf to min and max values for integer types. As a result,\n Box.is_bounded doesn't work because it checks for inf, not for min/max values\n of that dtype. This function checks for min/max values of the dtype.\n "
return (space.is_bounded() and (not (space.low == np.iinfo(space.dtype).min).any()) and (not (space.low == np.iinfo(space.dtype).max).any()) and (not (space.high == np.iinfo(space.dtype).min).any()) and (not (space.high == np.iinfo(space.dtype).max).any()))<|docstring|>Gym Box converts np.inf to min and max values for integer types. As a result,
Box.is_bounded doesn't work because it checks for inf, not for min/max values
of that dtype. This function checks for min/max values of the dtype.<|endoftext|> |
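A short illustration of the failure mode the docstring describes, using NumPy only (gym is not imported here): an integer bound that began as -inf ends up stored as the dtype's minimum, so an inf-based check passes while the explicit min/max comparison catches it.
import numpy as np

low = np.array([0, np.iinfo(np.int64).min], dtype=np.int64)
print(np.isinf(low).any())                    # False: nothing is literally infinite
print((low == np.iinfo(np.int64).min).any())  # True: the explicit check flags it as unbounded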
e2c2bd4a044da411045a9d306beb974d4416e8281c1294749d0275517f194de4 | def check_space(space):
'\n Ensure that the space is of type that can be ravelled to discrete value.\n '
if (isinstance(space, Discrete) or isinstance(space, MultiDiscrete) or isinstance(space, MultiBinary)):
return True
elif (isinstance(space, Box) and np.issubdtype(space, np.integer) and _isbounded(space)):
return True
elif isinstance(space, Dict):
return all([check_space(sub_space) for sub_space in space.spaces.values()])
elif isinstance(space, Tuple):
return all([check_space(sub_space) for sub_space in space.spaces])
else:
return False | Ensure that the space is of type that can be ravelled to discrete value. | abmarl/sim/wrappers/ravel_discrete_wrapper.py | check_space | Leonardo767/Abmarl | 7 | python | def check_space(space):
'\n \n '
if (isinstance(space, Discrete) or isinstance(space, MultiDiscrete) or isinstance(space, MultiBinary)):
return True
elif (isinstance(space, Box) and np.issubdtype(space, np.integer) and _isbounded(space)):
return True
elif isinstance(space, Dict):
return all([check_space(sub_space) for sub_space in space.spaces.values()])
elif isinstance(space, Tuple):
return all([check_space(sub_space) for sub_space in space.spaces])
else:
return False | def check_space(space):
'\n \n '
if (isinstance(space, Discrete) or isinstance(space, MultiDiscrete) or isinstance(space, MultiBinary)):
return True
elif (isinstance(space, Box) and np.issubdtype(space, np.integer) and _isbounded(space)):
return True
elif isinstance(space, Dict):
return all([check_space(sub_space) for sub_space in space.spaces.values()])
elif isinstance(space, Tuple):
return all([check_space(sub_space) for sub_space in space.spaces])
else:
return False<|docstring|>Ensure that the space is of type that can be ravelled to discrete value.<|endoftext|> |
886540c6c6d41f80df4c26d2e594121e4795c82f782127c79ec5640c86c830b6 | @property
def users(self):
' Returns users in a list '
return list(self.new_users.keys()) | Returns users in a list | htpasswd/basic.py | users | willjp/htpasswd | 19 | python | @property
def users(self):
' '
return list(self.new_users.keys()) | @property
def users(self):
' '
return list(self.new_users.keys())<|docstring|>Returns users in a list<|endoftext|>
4185993a993bc18a3e4ddcd0116a946e3ee8e2821de133e990287f1f7f295251 | def add(self, user, password):
' Adds a user with password '
if self.__contains__(user):
raise UserExists
self.new_users[user] = (self._encrypt_password(password) + '\n') | Adds a user with password | htpasswd/basic.py | add | willjp/htpasswd | 19 | python | def add(self, user, password):
' '
if self.__contains__(user):
raise UserExists
self.new_users[user] = (self._encrypt_password(password) + '\n') | def add(self, user, password):
' '
if self.__contains__(user):
raise UserExists
self.new_users[user] = (self._encrypt_password(password) + '\n')<|docstring|>Adds a user with password<|endoftext|> |
d862879aabc83f097293e91be48b761c39f24e63cc2033d1c0eb07517d0b9ba8 | def pop(self, user):
' Deletes a user '
if (not self.__contains__(user)):
raise UserNotExists
self.new_users.pop(user) | Deletes a user | htpasswd/basic.py | pop | willjp/htpasswd | 19 | python | def pop(self, user):
' '
if (not self.__contains__(user)):
raise UserNotExists
self.new_users.pop(user) | def pop(self, user):
' '
if (not self.__contains__(user)):
raise UserNotExists
self.new_users.pop(user)<|docstring|>Deletes a user<|endoftext|> |
8ff887171f038fe76ffa40d21f77506e8bb45e16fb4a98d68b7da2dde13c1eb7 | def change_password(self, user, password):
' Changes user password '
if (not self.__contains__(user)):
raise UserNotExists
self.new_users[user] = (self._encrypt_password(password) + '\n') | Changes user password | htpasswd/basic.py | change_password | willjp/htpasswd | 19 | python | def change_password(self, user, password):
' '
if (not self.__contains__(user)):
raise UserNotExists
self.new_users[user] = (self._encrypt_password(password) + '\n') | def change_password(self, user, password):
' '
if (not self.__contains__(user)):
raise UserNotExists
self.new_users[user] = (self._encrypt_password(password) + '\n')<|docstring|>Changes user password<|endoftext|> |
d7917cf0e39891f3fb5aa4f2d49286b8f3e203bea3f9a6821c023e4a424269fb | def _encrypt_password(self, password):
'Encrypts the password for the given mode '
if (self.encryption_mode.lower() == 'crypt'):
return self._crypt_password(password)
elif (self.encryption_mode.lower() == 'md5'):
return self._md5_password(password)
elif (self.encryption_mode.lower() == 'md5-base'):
return self._md5_base_password(password)
else:
raise UnknownEncryptionMode(self.encryption_mode) | Encrypts the password for the given mode | htpasswd/basic.py | _encrypt_password | willjp/htpasswd | 19 | python | def _encrypt_password(self, password):
' '
if (self.encryption_mode.lower() == 'crypt'):
return self._crypt_password(password)
elif (self.encryption_mode.lower() == 'md5'):
return self._md5_password(password)
elif (self.encryption_mode.lower() == 'md5-base'):
return self._md5_base_password(password)
else:
raise UnknownEncryptionMode(self.encryption_mode) | def _encrypt_password(self, password):
' '
if (self.encryption_mode.lower() == 'crypt'):
return self._crypt_password(password)
elif (self.encryption_mode.lower() == 'md5'):
return self._md5_password(password)
elif (self.encryption_mode.lower() == 'md5-base'):
return self._md5_base_password(password)
else:
raise UnknownEncryptionMode(self.encryption_mode)<|docstring|>Encrypts the password for the given mode<|endoftext|>
cdaa177bbcc0b2bdfb5b6b6e4c61d9eee89ae4e769a759a07f8f86c10f0c2b2a | def _crypt_password(self, password):
' Crypts password '
def salt():
' Generates some salt '
symbols = (ascii_letters + digits)
return (choice(symbols) + choice(symbols))
return crypt(password, salt()) | Crypts password | htpasswd/basic.py | _crypt_password | willjp/htpasswd | 19 | python | def _crypt_password(self, password):
' '
def salt():
' Generates some salt '
symbols = (ascii_letters + digits)
return (choice(symbols) + choice(symbols))
return crypt(password, salt()) | def _crypt_password(self, password):
' '
def salt():
' Generates some salt '
symbols = (ascii_letters + digits)
return (choice(symbols) + choice(symbols))
return crypt(password, salt())<|docstring|>Crypts password<|endoftext|> |
7ba2646bfc16dd3d215f52df7b00c687a1563677265245dcd16ba2ac73bf3f1b | def _md5_password(self, password):
" Crypts password using openssl binary and MD5 (apache variant,\n 'apr1') encryption "
return subprocess.check_output(['openssl', 'passwd', '-apr1', password]).decode('utf-8').strip() | Crypts password using openssl binary and MD5 (apache variant,
'apr1') encryption | htpasswd/basic.py | _md5_password | willjp/htpasswd | 19 | python | def _md5_password(self, password):
" Crypts password using openssl binary and MD5 (apache variant,\n 'apr1') encryption "
return subprocess.check_output(['openssl', 'passwd', '-apr1', password]).decode('utf-8').strip() | def _md5_password(self, password):
" Crypts password using openssl binary and MD5 (apache variant,\n 'apr1') encryption "
return subprocess.check_output(['openssl', 'passwd', '-apr1', password]).decode('utf-8').strip()<|docstring|>Crypts password using openssl binary and MD5 (apache variant,
'apr1') encryption<|endoftext|> |
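Assuming the openssl binary is on PATH, the scheme above can be checked by hand; adding a fixed -salt (a flag the class itself does not pass) makes the apr1 output deterministic:
import subprocess

h = subprocess.check_output(
    ['openssl', 'passwd', '-apr1', '-salt', 'abcdefgh', 'secret']
).decode('utf-8').strip()
print(h)  # $apr1$abcdefgh$ followed by the hash body
assert h.startswith('$apr1$abcdefgh$')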
2357e192545659d0e04ed8f3c28f74a49aeb9b17b11c3242844189a4b873cff8 | def _md5_base_password(self, password):
' Crypts password using openssl binary and MD5 based encryption '
return subprocess.check_output(['openssl', 'passwd', '-1', password]).decode('utf-8').strip() | Crypts password using openssl binary and MD5 based encryption | htpasswd/basic.py | _md5_base_password | willjp/htpasswd | 19 | python | def _md5_base_password(self, password):
' '
return subprocess.check_output(['openssl', 'passwd', '-1', password]).decode('utf-8').strip() | def _md5_base_password(self, password):
' '
return subprocess.check_output(['openssl', 'passwd', '-1', password]).decode('utf-8').strip()<|docstring|>Crypts password using openssl binary and MD5 based encryption<|endoftext|> |
6b7e98b35ef0f0933aff8fd75cd7bcfdf2e3722f115e160395a43db8ce24f3d9 | def salt():
' Generates some salt '
symbols = (ascii_letters + digits)
return (choice(symbols) + choice(symbols)) | Generates some salt | htpasswd/basic.py | salt | willjp/htpasswd | 19 | python | def salt():
' '
symbols = (ascii_letters + digits)
return (choice(symbols) + choice(symbols)) | def salt():
' '
symbols = (ascii_letters + digits)
return (choice(symbols) + choice(symbols))<|docstring|>Generates some salt<|endoftext|> |
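For anything security-adjacent, the stdlib secrets module is a better source of randomness than random.choice; a sketch of the same two-character salt:
import secrets
import string

symbols = string.ascii_letters + string.digits
print(secrets.choice(symbols) + secrets.choice(symbols))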
7dafbe2ed60b5d04aaca9f2aa4baa2ec41e2555639255751f726197fbca7d6b6 | def refreshAll(self):
"\n Set the text of lineEdit once it's valid\n "
self.Videocapture_ = '0' | Set the text of lineEdit once it's valid | mainwindow.py | refreshAll | vitalik-ez/Diplom | 0 | python | def refreshAll(self):
"\n \n "
self.Videocapture_ = '0' | def refreshAll(self):
"\n \n "
self.Videocapture_ = '0'<|docstring|>Set the text of lineEdit once it's valid<|endoftext|> |
b021fccdf18d63e4f1aa5ea187d71053a308a3d891a6b18bc68c890216d85350 | @pyqtSlot()
def runSlot(self):
'\n Called when the user presses the Run button\n '
print('Clicked Run')
self.refreshAll()
print(self.Videocapture_)
ui.hide()
self.outputWindow_() | Called when the user presses the Run button | mainwindow.py | runSlot | vitalik-ez/Diplom | 0 | python | @pyqtSlot()
def runSlot(self):
'\n \n '
print('Clicked Run')
self.refreshAll()
print(self.Videocapture_)
ui.hide()
self.outputWindow_() | @pyqtSlot()
def runSlot(self):
'\n \n '
print('Clicked Run')
self.refreshAll()
print(self.Videocapture_)
ui.hide()
self.outputWindow_()<|docstring|>Called when the user presses the Run button<|endoftext|> |
9ae81bcabf195317d124f4e5cb3749a2b19b91d12e0a651e0f805b4227081761 | def outputWindow_(self):
'\n Creates a new window for visual output of the video in the GUI\n '
self._new_window = Ui_OutputDialog()
self._new_window.show()
self._new_window.startVideo(self.Videocapture_)
print('Video Played') | Creates a new window for visual output of the video in the GUI | mainwindow.py | outputWindow_ | vitalik-ez/Diplom | 0 | python | def outputWindow_(self):
'\n \n '
self._new_window = Ui_OutputDialog()
self._new_window.show()
self._new_window.startVideo(self.Videocapture_)
print('Video Played') | def outputWindow_(self):
'\n \n '
self._new_window = Ui_OutputDialog()
self._new_window.show()
self._new_window.startVideo(self.Videocapture_)
print('Video Played')<|docstring|>Creates a new window for visual output of the video in the GUI<|endoftext|>
bce2b62feabce5b0cbd622dfa5065e78fee60096623a3061ed14637c5c561581 | def SampleRandomPair(m):
'SampleRandomPair: Samples a pair [i,j] with 0<=i,j<=m-1 and i!=j, uniformly from the set of all such pairs\n It returns a list.\n \n >> SampleRandomPair(10)\n '
assert ((type(m) is int) and (m >= 2)), "'m' has to be an integer, which is >=2."
sample = list()
i = np.floor(np.random.uniform(0, m))
j = np.floor(np.random.uniform(0, (m - 1)))
if (j == i):
j = (j + 1)
sample.append(int(i))
sample.append(int(j))
return sample | SampleRandomPair: Samples a pair [i,j] with 0<=i,j<=m-1 and i!=j, uniformly from the set of all such pairs
It returns a list.
>> SampleRandomPair(10) | TestEnvironment.py | SampleRandomPair | bjoernhad/CondorcetWinnerTestification | 1 | python | def SampleRandomPair(m):
'SampleRandomPair: Samples a pair [i,j] with 0<=i,j<=m-1 and i!=j, uniformly from the set of all such pairs\n It returns a list.\n \n >> SampleRandomPair(10)\n '
assert ((type(m) is int) and (m >= 2)), "'m' has to be an integer, which is >=2."
sample = list()
i = np.floor(np.random.uniform(0, m))
j = np.floor(np.random.uniform(0, (m - 1)))
if (j == i):
j = (j + 1)
sample.append(int(i))
sample.append(int(j))
return sample | def SampleRandomPair(m):
'SampleRandomPair: Samples a pair [i,j] with 0<=i,j<=m-1 and i!=j, uniformly from the set of all such pairs\n It returns a list.\n \n >> SampleRandomPair(10)\n '
assert ((type(m) is int) and (m >= 2)), "'m' has to be an integer, which is >=2."
sample = list()
i = np.floor(np.random.uniform(0, m))
j = np.floor(np.random.uniform(0, (m - 1)))
if (j == i):
j = (j + 1)
sample.append(int(i))
sample.append(int(j))
return sample<|docstring|>SampleRandomPair: Samples a pair [i,j] with 0<=i,j<=m-1 and i!=j, uniformly from the set of all such pairs
It returns a list.
>> SampleRandomPair(10)<|endoftext|> |
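An equivalent draw of an ordered pair i != j, uniform over all such pairs, using the stdlib; this is a sketch of an alternative, not the module's actual helper:
import random

def sample_random_pair(m):
    i, j = random.sample(range(m), 2)  # two distinct indices, selection order uniform
    return [i, j]

print(sample_random_pair(10))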
80b705a89076fa9a37770bcc37c3344ea41cfa103cfb58ed8356eddf4ceb35e1 | def __init__(self, P, N=False, R=False):
'TestEnvironment: Models the Dueling Bandits Setting. The ground-truth probabilities of alternative i winning against j is represented by a Reciprocal Relation P. \n Moreover, it tracks the information N[i,j] how often alternatives i and j are compared \n as well as the number R[i,j] how often alternative i won against j.\n The `time` is the total number of comparisons currently made, i.e. time = \\sum_{i,j} R[i,j]"\n P: Reciprocal Relation, in which the (i,j)-entry P.getEntry([i,j]) denotes the probability that i is preferred to j \n N: Either `False` or a NumPy array of size (m,m) with N[i,i]=0 and N[i,j]=N[j,i] for all i,j.\n R: Either `False` or a NumPy array of size (m,m) with R[i,j]+R[j,i]=N[i,j] for all i,j\n \n >> P=rr.sampleWST(3)\n >> N=np.array([[0,2,1],[2,0,1],[1,1,0]])\n >> R=np.array([[0,1,1],[1,0,0],[0,1,0]])\n >> TE = TestEnvironment(P,N,R)\n '
assert (type(P) is rr.ReciprocalRelation), '`P` has to be a Reciprocal Relation'
assert (((type(N) == bool) and (N == False)) or ((type(N) is np.ndarray) and (N.shape == (P.m, P.m)))), 'N either has to be `False` or a NumPy array of size (m,m) with N[i,i] = 0 and N[i,j]=N[j,i] for all distinct i,j. (Here, m is the number of alternatives, i.e. m=P.m)'
assert (((type(N) == bool) and (N == False)) or (all(((N[(i, i)] == 0) for i in range(0, P.m))) and ((N - np.matrix.transpose(N)) == np.zeros(N.shape)).all())), '`N` has to fulfill N[i,i] = 0 and N[i,j]=N[j,i] for all distinct i,j.'
assert (((type(R) == bool) and (R == False)) or ((type(R) is np.ndarray) and (type(N) is np.ndarray) and (R.shape == (P.m, P.m)) and ((R + np.matrix.transpose(R)) == N).all())), '`R` has to be `False` or a NumPy array of size (m,m) with R[i,j]+R[j,i]=N[i,j] for all i,j. ((Here, m is the number of alternatives, i.e. m=P.m)'
self.P = P
if (type(N) == bool):
self.N = np.array((np.zeros((P.m, P.m)) + 1), dtype=int)
for i in range(0, P.m):
self.N[(i, i)] = 0
else:
self.N = N.astype(int)
if (type(R) == bool):
self.R = np.random.binomial(self.N, self.P.Q)
for i in range(0, self.P.m):
self.R[(i, i)]
for j in range((i + 1), self.P.m):
self.R[(j, i)] = (self.N[(i, j)] - self.R[(i, j)])
else:
self.R = R.astype(int)
self.time = int(np.sum(self.R)) | TestEnvironment: Models the Dueling Bandits Setting. The ground-truth probabilities of alternative i winning against j is represented by a Reciprocal Relation P.
Moreover, it tracks the information N[i,j] how often alternatives i and j are compared
as well as the number R[i,j] how often alternative i won against j.
The `time` is the total number of comparisons currently made, i.e. time = \sum_{i,j} R[i,j]"
P: Reciprocal Relation, in which the (i,j)-entry P.getEntry([i,j]) denotes the probability that i is preferred to j
N: Either `False` or a NumPy array of size (m,m) with N[i,i]=0 and N[i,j]=N[j,i] for all i,j.
R: Either `False` or a NumPy array of size (m,m) with R[i,j]+R[j,i]=N[i,j] for all i,j
>> P=rr.sampleWST(3)
>> N=np.array([[0,2,1],[2,0,1],[1,1,0]])
>> R=np.array([[0,1,1],[1,0,0],[0,1,0]])
>> TE = TestEnvironment(P,N,R) | TestEnvironment.py | __init__ | bjoernhad/CondorcetWinnerTestification | 1 | python | def __init__(self, P, N=False, R=False):
'TestEnvironment: Models the Dueling Bandits Setting. The ground-truth probabilities of alternative i winning against j is represented by a Reciprocal Relation P. \n Moreover, it tracks the information N[i,j] how often alternatives i and j are compared \n as well as the number R[i,j] how often alternative i won against j.\n The `time` is the total number of comparisons currently made, i.e. time = \\sum_{i,j} R[i,j]"\n P: Reciprocal Relation, in which the (i,j)-entry P.getEntry([i,j]) denotes the probability that i is preferred to j \n N: Either `False` or a NumPy array of size (m,m) with N[i,i]=0 and N[i,j]=N[j,i] for all i,j.\n R: Either `False` or a NumPy array of size (m,m) with R[i,j]+R[j,i]=N[i,j] for all i,j\n \n >> P=rr.sampleWST(3)\n >> N=np.array([[0,2,1],[2,0,1],[1,1,0]])\n >> R=np.array([[0,1,1],[1,0,0],[0,1,0]])\n >> TE = TestEnvironment(P,N,R)\n '
assert (type(P) is rr.ReciprocalRelation), '`P` has to be a Reciprocal Relation'
assert (((type(N) == bool) and (N == False)) or ((type(N) is np.ndarray) and (N.shape == (P.m, P.m)))), 'N either has to be `False` or a NumPy array of size (m,m) with N[i,i] = 0 and N[i,j]=N[j,i] for all distinct i,j. (Here, m is the number of alternatives, i.e. m=P.m)'
assert (((type(N) == bool) and (N == False)) or (all(((N[(i, i)] == 0) for i in range(0, P.m))) and ((N - np.matrix.transpose(N)) == np.zeros(N.shape)).all())), '`N` has to fulfill N[i,i] = 0 and N[i,j]=N[j,i] for all distinct i,j.'
assert (((type(R) == bool) and (R == False)) or ((type(R) is np.ndarray) and (type(N) is np.ndarray) and (R.shape == (P.m, P.m)) and ((R + np.matrix.transpose(R)) == N).all())), '`R` has to be `False` or a NumPy array of size (m,m) with R[i,j]+R[j,i]=N[i,j] for all i,j. ((Here, m is the number of alternatives, i.e. m=P.m)'
self.P = P
if (type(N) == bool):
self.N = np.array((np.zeros((P.m, P.m)) + 1), dtype=int)
for i in range(0, P.m):
self.N[(i, i)] = 0
else:
self.N = N.astype(int)
if (type(R) == bool):
self.R = np.random.binomial(self.N, self.P.Q)
for i in range(0, self.P.m):
self.R[(i, i)]
for j in range((i + 1), self.P.m):
self.R[(j, i)] = (self.N[(i, j)] - self.R[(i, j)])
else:
self.R = R.astype(int)
self.time = int(np.sum(self.R)) | def __init__(self, P, N=False, R=False):
'TestEnvironment: Models the Dueling Bandits Setting. The ground-truth probabilities of alternative i winning against j is represented by a Reciprocal Relation P. \n Moreover, it tracks the information N[i,j] how often alternatives i and j are compared \n as well as the number R[i,j] how often alternative i won against j.\n The `time` is the total number of comparisons currently made, i.e. time = \\sum_{i,j} R[i,j]"\n P: Reciprocal Relation, in which the (i,j)-entry P.getEntry([i,j]) denotes the probability that i is preferred to j \n N: Either `False` or a NumPy array of size (m,m) with N[i,i]=0 and N[i,j]=N[j,i] for all i,j.\n R: Either `False` or a NumPy array of size (m,m) with R[i,j]+R[j,i]=N[i,j] for all i,j\n \n >> P=rr.sampleWST(3)\n >> N=np.array([[0,2,1],[2,0,1],[1,1,0]])\n >> R=np.array([[0,1,1],[1,0,0],[0,1,0]])\n >> TE = TestEnvironment(P,N,R)\n '
assert (type(P) is rr.ReciprocalRelation), '`P` has to be a Reciprocal Relation'
assert (((type(N) == bool) and (N == False)) or ((type(N) is np.ndarray) and (N.shape == (P.m, P.m)))), 'N either has to be `False` or a NumPy array of size (m,m) with N[i,i] = 0 and N[i,j]=N[j,i] for all distinct i,j. (Here, m is the number of alternatives, i.e. m=P.m)'
assert (((type(N) == bool) and (N == False)) or (all(((N[(i, i)] == 0) for i in range(0, P.m))) and ((N - np.matrix.transpose(N)) == np.zeros(N.shape)).all())), '`N` has to fulfill N[i,i] = 0 and N[i,j]=N[j,i] for all distinct i,j.'
assert (((type(R) == bool) and (R == False)) or ((type(R) is np.ndarray) and (type(N) is np.ndarray) and (R.shape == (P.m, P.m)) and ((R + np.matrix.transpose(R)) == N).all())), '`R` has to be `False` or a NumPy array of size (m,m) with R[i,j]+R[j,i]=N[i,j] for all i,j. ((Here, m is the number of alternatives, i.e. m=P.m)'
self.P = P
if (type(N) == bool):
self.N = np.array((np.zeros((P.m, P.m)) + 1), dtype=int)
for i in range(0, P.m):
self.N[(i, i)] = 0
else:
self.N = N.astype(int)
if (type(R) == bool):
self.R = np.random.binomial(self.N, self.P.Q)
for i in range(0, self.P.m):
self.R[(i, i)]
for j in range((i + 1), self.P.m):
self.R[(j, i)] = (self.N[(i, j)] - self.R[(i, j)])
else:
self.R = R.astype(int)
self.time = int(np.sum(self.R))<|docstring|>TestEnvironment: Models the Dueling Bandits Setting. The ground-truth probabilities of alternative i winning against j is represented by a Reciprocal Relation P.
Moreover, it tracks the information N[i,j] how often alternatives i and j are compared
as well as the number R[i,j] how often alternative i won against j.
The `time` is the total number of comparisons currently made, i.e. time = \sum_{i,j} R[i,j]"
P: Reciprocal Relation, in which the (i,j)-entry P.getEntry([i,j]) denotes the probability that i is preferred to j
N: Either `False` or a NumPy array of size (m,m) with N[i,i]=0 and N[i,j]=N[j,i] for all i,j.
R: Either `False` or a NumPy array of size (m,m) with R[i,j]+R[j,i]=N[i,j] for all i,j
>> P=rr.sampleWST(3)
>> N=np.array([[0,2,1],[2,0,1],[1,1,0]])
>> R=np.array([[0,1,1],[1,0,0],[0,1,0]])
>> TE = TestEnvironment(P,N,R)<|endoftext|> |
2ff07b30ab129c1974da3ab244160b8c90cdfbefa81e1ee9f16d40f6112c8d7f | def show(self):
'show: Method to show the internal statistics P,N,R and time. For Debugging.\n \n >> TE=TestEnvironment(rr.sampleWST(4))\n >> TE.show()\n '
print('The (current) values of P,N,R and time are:\n', self.P.getRel(), ',\n', self.N, ',\n', self.R, ',\n', self.time) | show: Method to show the internal statistics P,N,R and time. For Debugging.
>> TE=TestEnvironment(rr.sampleWST(4))
>> TE.show() | TestEnvironment.py | show | bjoernhad/CondorcetWinnerTestification | 1 | python | def show(self):
'show: Method to show the internal statistics P,N,R and time. For Debugging.\n \n >> TE=TestEnvironment(rr.sampleWST(4))\n >> TE.show()\n '
print('The (current) values of P,N,R and time are:\n', self.P.getRel(), ',\n', self.N, ',\n', self.R, ',\n', self.time) | def show(self):
'show: Method to show the internal statistics P,N,R and time. For Debugging.\n \n >> TE=TestEnvironment(rr.sampleWST(4))\n >> TE.show()\n '
print('The (current) values of P,N,R and time are:\n', self.P.getRel(), ',\n', self.N, ',\n', self.R, ',\n', self.time)<|docstring|>show: Method to show the internal statistics P,N,R and time. For Debugging.
>> TE=TestEnvironment(rr.sampleWST(4))
>> TE.show()<|endoftext|> |
12ead27095125137f5f3157801177d96120b1ff986be492202460393e520d393 | def pullArmPair(self, i, j):
'pullArmPair: Models one comparison between alternative i and alternative j\n i: integer in 0,...,m-1 (m: number of alternatives)\n j: integer in 0,...,m-1 (m: number of alternatives)\n Returns "1" if i is the winner and "0" if j is the winner of the duel.\n \n >> TE=TestEnvironment(rr.sampleWST(4))\n >> TE.pullArmPair(1,2)\n '
assert ((type(i) == int) and (0 <= i) and (i < self.P.m)), '`i` has to be an integer in 0,...,m-1 (m: number of alternatives)'
assert ((type(j) == int) and (0 <= j) and (j < self.P.m)), '`j` has to be an integer in 0,...,m-1 (m: number of alternatives)'
assert (i != j), 'i and j have to be two DISTINCT arms.'
self.N[(i, j)] += 1
self.N[(j, i)] += 1
winner = np.random.binomial(1, self.P.Q[(i, j)])
self.R[(i, j)] += winner
self.R[(j, i)] += (1 - winner)
self.time += 1
return winner | pullArmPair: Models one comparison between alternative i and alternative j
i: integer in 0,...,m-1 (m: number of alternatives)
j: integer in 0,...,m-1 (m: number of alternatives)
Returns "1" if i is the winner and "0" if j is the winner of the duel.
>> TE=TestEnvironment(rr.sampleWST(4))
>> TE.pullArmPair(1,2) | TestEnvironment.py | pullArmPair | bjoernhad/CondorcetWinnerTestification | 1 | python | def pullArmPair(self, i, j):
'pullArmPair: Models one comparison between alternative i and alternative j\n i: integer in 0,...,m-1 (m: number of alternatives)\n j: integer in 0,...,m-1 (m: number of alternatives)\n Returns "1" if i is the winner and "0" if j is the winner of the duel.\n \n >> TE=TestEnvironment(rr.sampleWST(4))\n >> TE.pullArmPair(1,2)\n '
assert ((type(i) == int) and (0 <= i) and (i < self.P.m)), '`i` has to be an integer in 0,...,m-1 (m: number of alternatives)'
assert ((type(j) == int) and (0 <= j) and (j < self.P.m)), '`j` has to be an integer in 0,...,m-1 (m: number of alternatives)'
assert (i != j), 'i and j have to be two DISTINCT arms.'
self.N[(i, j)] += 1
self.N[(j, i)] += 1
winner = np.random.binomial(1, self.P.Q[(i, j)])
self.R[(i, j)] += winner
self.R[(j, i)] += (1 - winner)
self.time += 1
return winner | def pullArmPair(self, i, j):
'pullArmPair: Models one comparison between alternative i and alternative j\n i: integer in 0,...,m-1 (m: number of alternatives)\n j: integer in 0,...,m-1 (m: number of alternatives)\n Returns "1" if i is the winner and "0" if j is the winner of the duel.\n \n >> TE=TestEnvironment(rr.sampleWST(4))\n >> TE.pullArmPair(1,2)\n '
assert ((type(i) == int) and (0 <= i) and (i < self.P.m)), '`i` has to be an integer in 0,...,m-1 (m: number of alternatives)'
assert ((type(j) == int) and (0 <= j) and (j < self.P.m)), '`j` has to be an integer in 0,...,m-1 (m: number of alternatives)'
assert (i != j), 'i and j have to be two DISTINCT arms.'
self.N[(i, j)] += 1
self.N[(j, i)] += 1
winner = np.random.binomial(1, self.P.Q[(i, j)])
self.R[(i, j)] += winner
self.R[(j, i)] += (1 - winner)
self.time += 1
return winner<|docstring|>pullArmPair: Models one comparison between alternative i and alternative j
i: integer in 0,...,m-1 (m: number of alternatives)
j: integer in 0,...,m-1 (m: number of alternatives)
Returns "1" if i is the winner and "0" if j is the winner of the duel.
>> TE=TestEnvironment(rr.sampleWST(4))
>> TE.pullArmPair(1,2)<|endoftext|> |
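A stripped-down sketch of the bookkeeping above, outside the class: N counts comparisons, R counts wins, and one duel is a single Bernoulli draw (the matrix here is made up for illustration):
import numpy as np

m = 3
P = np.array([[0.5, 0.7, 0.6],
              [0.3, 0.5, 0.4],
              [0.4, 0.6, 0.5]])           # P[i, j] = Pr(i beats j), P[j, i] = 1 - P[i, j]
N = np.zeros((m, m), dtype=int)
R = np.zeros((m, m), dtype=int)

i, j = 0, 2
winner = np.random.binomial(1, P[i, j])   # 1 -> i wins, 0 -> j wins
N[i, j] += 1; N[j, i] += 1
R[i, j] += winner; R[j, i] += 1 - winner
print(winner, N[i, j], R[i, j] + R[j, i])  # winner, 1, 1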
f13620064318099595b81e439625e1182f1d48487fc8039ca0fa3b16593d38cc | def pullRandomArmPair(self):
'pullRandomArmPair: Samples a pair of arms uniformly at random from the set of all possible pairs and pulls it. Returns the result in the form of a list "[pair,winner] = [[pair[0],pair[1]],winner]".\n \n >> TE=TestEnvironment(rr.sampleWST(4))\n >> TE.pullRandomArmPair()\n '
pair = SampleRandomPair(self.P.m)
winner = self.pullArmPair(pair[0], pair[1])
return [pair, winner] | pullRandomArmPair: Samples a pair of arms uniformly at random from the set of all possible pairs and pulls it. Returns the result in the form of a list "[pair,winner] = [[pair[0],pair[1]],winner]".
>> TE=TestEnvironment(rr.sampleWST(4))
>> TE.pullRandomArmPair() | TestEnvironment.py | pullRandomArmPair | bjoernhad/CondorcetWinnerTestification | 1 | python | def pullRandomArmPair(self):
'pullRandomArmPair: Samples a pair of arms uniformly at random from the set of all possible pairs and pulls it. Returns the result in the form of a list "[pair,winner] = [[pair[0],pair[1]],winner]".\n \n >> TE=TestEnvironment(rr.sampleWST(4))\n >> TE.pullRandomArmPair()\n '
pair = SampleRandomPair(self.P.m)
winner = self.pullArmPair(pair[0], pair[1])
return [pair, winner] | def pullRandomArmPair(self):
'pullRandomArmPair: Samples a pair of arms uniformly at random from the set of all possible pairs and pulls it. Returns the result in the form of a list "[pair,winner] = [[pair[0],pair[1]],winner]".\n \n >> TE=TestEnvironment(rr.sampleWST(4))\n >> TE.pullRandomArmPair()\n '
pair = SampleRandomPair(self.P.m)
winner = self.pullArmPair(pair[0], pair[1])
return [pair, winner]<|docstring|>pullRandomArmPair: Samples a pair of arms uniformly at random from the set of all possible pairs and pulls it. Returns the result in the form of a list "[pair,winner] = [[pair[0],pair[1]],winner]".
>> TE=TestEnvironment(rr.sampleWST(4))
>> TE.pullRandomArmPair()<|endoftext|> |
ff037ee55eca10c34dd8c8fc730a78df6b5af13697d527b92e76a3b52182bd10 | def pullAllArmPairs(self, number_of_times=1):
'pullAllArmPairs: Pulls each pair of arms `number_of_times` times\n number_of_times: positive integer \n \n >> TE=TestEnvironment(rr.sampleWST(4))\n >> TE.pullAllArmPairs(3)\n '
bufN = (number_of_times * np.ones((self.P.m, self.P.m)))
for i in range(0, self.P.m):
bufN[(i, i)] = 0
bufR = np.random.binomial(bufN.astype(int), self.P.Q)
for i in range(0, self.P.m):
bufR[(i, i)] = 0
for j in range((i + 1), self.P.m):
bufR[(j, i)] = (number_of_times - bufR[(i, j)])
self.__init__(self.P, (self.N + bufN).astype(int), (self.R + bufR).astype(int)) | pullAllArmPairs: Pulls each pair of arms `number_of_times` times
number_of_times: positive integer
>> TE=TestEnvironment(rr.sampleWST(4))
>> TE.pullAllArmPairs(3) | TestEnvironment.py | pullAllArmPairs | bjoernhad/CondorcetWinnerTestification | 1 | python | def pullAllArmPairs(self, number_of_times=1):
'pullAllArmPairs: Pulls each pair of arms `number_of_times` times\n number_of_times: positive integer \n \n >> TE=TestEnvironment(rr.sampleWST(4))\n >> TE.pullAllArmPairs(3)\n '
bufN = (number_of_times * np.ones((self.P.m, self.P.m)))
for i in range(0, self.P.m):
bufN[(i, i)] = 0
bufR = np.random.binomial(bufN.astype(int), self.P.Q)
for i in range(0, self.P.m):
bufR[(i, i)] = 0
for j in range((i + 1), self.P.m):
bufR[(j, i)] = (number_of_times - bufR[(i, j)])
self.__init__(self.P, (self.N + bufN).astype(int), (self.R + bufR).astype(int)) | def pullAllArmPairs(self, number_of_times=1):
'pullAllArmPairs: Pulls each pair of arms `number_of_times` times\n number_of_times: positive integer \n \n >> TE=TestEnvironment(rr.sampleWST(4))\n >> TE.pullAllArmPairs(3)\n '
bufN = (number_of_times * np.ones((self.P.m, self.P.m)))
for i in range(0, self.P.m):
bufN[(i, i)] = 0
bufR = np.random.binomial(bufN.astype(int), self.P.Q)
for i in range(0, self.P.m):
bufR[(i, i)] = 0
for j in range((i + 1), self.P.m):
bufR[(j, i)] = (number_of_times - bufR[(i, j)])
self.__init__(self.P, (self.N + bufN).astype(int), (self.R + bufR).astype(int))<|docstring|>pullAllArmPairs: Pulls each pair of arms `number_of_times` times
number_of_times: positive integer
>> TE=TestEnvironment(rr.sampleWST(4))
>> TE.pullAllArmPairs(3)<|endoftext|> |
452842a4ae60d64f84f724def4309f04b26b0a4f2a969758a86fd75f6f71ce3e | def open_all_files_with_ext(dirs, ext: str) -> list:
'\n Retrieve all filenames ending with ext\n '
fnames = []
for dir_ in tqdm(dirs):
for (root, subs, files) in os.walk(dir_):
for fn in files:
if fn.lower().endswith(ext):
fnames.append(os.path.join(root, fn))
return fnames | Retrieve all filenames ending with ext | mu7ron/utils.py | open_all_files_with_ext | eM7RON/mu7RON | 0 | python | def open_all_files_with_ext(dirs, ext: str) -> list:
'\n \n '
fnames = []
for dir_ in tqdm(dirs):
for (root, subs, files) in os.walk(dir_):
for fn in files:
if fn.lower().endswith(ext):
fnames.append(os.path.join(root, fn))
return fnames | def open_all_files_with_ext(dirs, ext: str) -> list:
'\n \n '
fnames = []
for dir_ in tqdm(dirs):
for (root, subs, files) in os.walk(dir_):
for fn in files:
if fn.lower().endswith(ext):
fnames.append(os.path.join(root, fn))
return fnames<|docstring|>Retrieve all filenames ending with ext<|endoftext|>
0f73eb3345c4d6455c787ef8252fc34cb5393259be80a5bfccdeda232c69f94e | def tstamp(name: str='mugen', fmt: str='%d_%b_%Y_%H-%M-%S'):
"\n Concatenates the current date & time to a string.\n Format: '%d_%b_%Y_%H-%M-%S'\n "
return f'{name}_{datetime.datetime.now().strftime(fmt)}' | Concatenates the current date & time to a string.
Format: '%d_%b_%Y_%H-%M-%S' | mu7ron/utils.py | tstamp | eM7RON/mu7RON | 0 | python | def tstamp(name: str='mugen', fmt: str='%d_%b_%Y_%H-%M-%S'):
"\n Concatenates the current date & time to a string.\n Format: '%d_%b_%Y_%H-%M-%S'\n "
return f'{name}_{datetime.datetime.now().strftime(fmt)}' | def tstamp(name: str='mugen', fmt: str='%d_%b_%Y_%H-%M-%S'):
"\n Concatenates the current date & time to a string.\n Format: '%d_%b_%Y_%H-%M-%S'\n "
return f'{name}_{datetime.datetime.now().strftime(fmt)}'<|docstring|>Concatenates the current date & time to a string.
Format: '%d_%b_%Y_%H-%M-%S'<|endoftext|> |
e717482e53da084ecb945d66508d2e8f22aa0b8c45f1ba24e14c4bbe78f2069f | def safe_len(x):
'\n safely returns the length of an object without throwing an exception\n if the object is a number\n '
try:
ret = len(x)
except TypeError:
ret = False
return ret | safely returns the length of an object without throwing an exception
if the object is a number | mu7ron/utils.py | safe_len | eM7RON/mu7RON | 0 | python | def safe_len(x):
'\n safely returns the length of an object without throwing an exception\n if the object is a number\n '
try:
ret = len(x)
except TypeError:
ret = False
return ret | def safe_len(x):
'\n safely returns the length of an object without throwing an exception\n if the object is a number\n '
try:
ret = len(x)
except TypeError:
ret = False
return ret<|docstring|>safely returns the length of an object without throwing an exception
if the object is a number<|endoftext|> |
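One caveat worth spelling out with a self-contained copy of the helper: an empty container legitimately returns 0, which is just as falsy as the False sentinel, so truthiness alone cannot distinguish "no length" from "length zero".
def safe_len(x):
    try:
        return len(x)
    except TypeError:
        return False

print(safe_len(5), safe_len([]))                    # False 0
print(safe_len(5) is False, safe_len([]) is False)  # True False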
f34a649d6d94e993123112e77e88618607568fa476ae998064066a80681cf8dd | def flatten(alist, depth=0):
'\n A generator that flattens nested containers (list, tuple, set, np.ndarray) of any nested degree\n '
if (depth == 1):
for sublist in alist:
for item in sublist:
(yield item)
else:
for item in alist:
if (isinstance(item, (list, tuple, set, np.ndarray)) and (not isinstance(item, (str, bytes)))):
(yield from flatten(item))
else:
(yield item) | A generator that flattens nested containers (list, tuple, set, np.ndarray) of any nested degree | mu7ron/utils.py | flatten | eM7RON/mu7RON | 0 | python | def flatten(alist, depth=0):
'\n \n '
if (depth == 1):
for sublist in alist:
for item in sublist:
(yield item)
else:
for item in alist:
if (isinstance(item, (list, tuple, set, np.ndarray)) and (not isinstance(item, (str, bytes)))):
(yield from flatten(item))
else:
(yield item) | def flatten(alist, depth=0):
'\n \n '
if (depth == 1):
for sublist in alist:
for item in sublist:
(yield item)
else:
for item in alist:
if (isinstance(item, (list, tuple, set, np.ndarray)) and (not isinstance(item, (str, bytes)))):
(yield from flatten(item))
else:
(yield item)<|docstring|>A generator that flattens nested containers (list, tuple, set, np.ndarray) of any nested degree<|endoftext|> |
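Usage sketch, assuming the flatten generator above is in scope: depth=1 flattens exactly one level, while the default flattens fully but leaves strings and bytes intact.
print(list(flatten([1, [2, [3, 4]], 'ab'])))     # [1, 2, 3, 4, 'ab']
print(list(flatten([[1, 2], [3, 4]], depth=1)))  # [1, 2, 3, 4]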
b10a075db510705c35ece80aa07eec3612ba4b5ca9146a555e2ecb6a2f9b5838 | def play(x, t=None):
'\n A quick way to play a midi.Track or midi.Pattern. t=number\n of seconds to play the sequence.\n '
if isinstance(x, str):
sname = x
else:
if isinstance(x, midi.Track):
ptrn = midi.Pattern(format=1, resolution=480, tick_relative=True)
ptrn.append(x)
elif isinstance(x, midi.Pattern):
ptrn = x
else:
raise TypeError
working_dir = ''
for s in ['data', 'midi', 'temp', 'working']:
working_dir = os.path.join(working_dir, s)
if (not os.path.isdir(working_dir)):
os.mkdir(working_dir)
valid = False
while (not valid):
i = 0
sname = os.path.join(working_dir, f'temp{i}.mid')
try:
if os.path.exists(sname):
os.remove(sname)
except PermissionError:
i += 1
else:
break
midi.write_midifile(sname, ptrn)
pygame.init()
pygame.mixer.music.load(sname)
if (t is not None):
t_end = (time.time() + t)
pygame.mixer.music.play()
while (time.time() < t_end):
pass
pygame.mixer.music.stop()
else:
pygame.mixer.music.play() | A quick way to play a midi.Track or midi.Pattern. t=number
of seconds to play the sequence. | mu7ron/utils.py | play | eM7RON/mu7RON | 0 | python | def play(x, t=None):
'\n A quick way to play a midi.Track or midi.Pattern. t=number\n of seconds to play the sequence.\n '
if isinstance(x, str):
sname = x
else:
if isinstance(x, midi.Track):
ptrn = midi.Pattern(format=1, resolution=480, tick_relative=True)
ptrn.append(x)
elif isinstance(x, midi.Pattern):
ptrn = x
else:
raise TypeError
working_dir = ''
for s in ['data', 'midi', 'temp', 'working']:
working_dir = os.path.join(working_dir, s)
if (not os.path.isdir(working_dir)):
os.mkdir(working_dir)
valid = False
while (not valid):
i = 0
sname = os.path.join(working_dir, f'temp{i}.mid')
try:
if os.path.exists(sname):
os.remove(sname)
except PermissionError:
i += 1
else:
break
midi.write_midifile(sname, ptrn)
pygame.init()
pygame.mixer.music.load(sname)
if (t is not None):
t_end = (time.time() + t)
pygame.mixer.music.play()
while (time.time() < t_end):
pass
pygame.mixer.music.stop()
else:
pygame.mixer.music.play() | def play(x, t=None):
'\n A quick way to play a midi.Track or midi.Pattern. t=number\n of seconds to play the sequence.\n '
if isinstance(x, str):
sname = x
else:
if isinstance(x, midi.Track):
ptrn = midi.Pattern(format=1, resolution=480, tick_relative=True)
ptrn.append(x)
elif isinstance(x, midi.Pattern):
ptrn = x
else:
raise TypeError
working_dir = ''
for s in ['data', 'midi', 'temp', 'working']:
working_dir = os.path.join(working_dir, s)
if (not os.path.isdir(working_dir)):
os.mkdir(working_dir)
valid = False
while (not valid):
i = 0
sname = os.path.join(working_dir, f'temp{i}.mid')
try:
if os.path.exists(sname):
os.remove(sname)
except PermissionError:
i += 1
else:
break
midi.write_midifile(sname, ptrn)
pygame.init()
pygame.mixer.music.load(sname)
if (t is not None):
t_end = (time.time() + t)
pygame.mixer.music.play()
while (time.time() < t_end):
pass
pygame.mixer.music.stop()
else:
pygame.mixer.music.play()<|docstring|>A quick way to play a midi.Track or midi.Pattern. t=number
of seconds to play the sequence.<|endoftext|> |
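A minimal standalone playback sketch; it assumes pygame is installed, the host mixer can render MIDI, and 'example.mid' is a hypothetical path:
import time
import pygame

pygame.mixer.init()
pygame.mixer.music.load('example.mid')
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():  # block until playback finishes
    time.sleep(0.1)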
89c653b81cfd498749617b94bfea04f8aaeb76e1daadd83bbd928c40f892f7f1 | def trck_gen(x):
'\n loops through each track in x\n '
if isinstance(x, midi.Pattern):
for trck in x:
(yield trck)
elif isinstance(x, MidiObj):
for trck in x.ptrn:
(yield trck)
elif isinstance(x[0], MidiObj):
for obj in x:
for trck in obj.ptrn:
(yield trck)
else:
for ptrn in x:
for trck in ptrn:
(yield trck) | loops through each track in x | mu7ron/utils.py | trck_gen | eM7RON/mu7RON | 0 | python | def trck_gen(x):
'\n \n '
if isinstance(x, midi.Pattern):
for trck in x:
(yield trck)
elif isinstance(x, MidiObj):
for trck in x.ptrn:
(yield trck)
elif isinstance(x[0], MidiObj):
for obj in x:
for trck in obj.ptrn:
(yield trck)
else:
for ptrn in x:
for trck in ptrn:
(yield trck) | def trck_gen(x):
'\n \n '
if isinstance(x, midi.Pattern):
for trck in x:
(yield trck)
elif isinstance(x, MidiObj):
for trck in x.ptrn:
(yield trck)
elif isinstance(x[0], MidiObj):
for obj in x:
for trck in obj.ptrn:
(yield trck)
else:
for ptrn in x:
for trck in ptrn:
(yield trck)<|docstring|>loops through each track in x<|endoftext|> |
d92026c71622863b3ac7a8413913686d2fcfdf2ad79b3bf4445729163fb6b061 | def evnt_gen(x):
'\n loops through each event in x\n '
if isinstance(x, midi.Track):
for evnt in x:
(yield evnt)
else:
for trck in trck_gen(x):
for evnt in trck:
(yield evnt) | loops through each event in x | mu7ron/utils.py | evnt_gen | eM7RON/mu7RON | 0 | python | def evnt_gen(x):
'\n \n '
if isinstance(x, midi.Track):
for evnt in x:
(yield evnt)
else:
for trck in trck_gen(x):
for evnt in trck:
(yield evnt) | def evnt_gen(x):
'\n \n '
if isinstance(x, midi.Track):
for evnt in x:
(yield evnt)
else:
for trck in trck_gen(x):
for evnt in trck:
(yield evnt)<|docstring|>loops through each event in x<|endoftext|> |
8cd6b579e897bc23822777a2577c10bcdfc08a9b430d6156f0f18b525d4c2733 | def counter(alist, func=None):
'\n - counts the number of things in a list\n - can apply a function (func) to item\n '
adict = {}
for item in alist:
if (func is not None):
item = func(item)
if (item is not None):
adict[item] = (adict.get(item, 0) + 1)
return adict | - counts the number of things in a list
- can apply a function (func) to item | mu7ron/utils.py | counter | eM7RON/mu7RON | 0 | python | def counter(alist, func=None):
'\n - counts the number of things in a list\n - can apply a function (func) to item\n '
adict = {}
for item in alist:
if (func is not None):
item = func(item)
if (item is not None):
adict[item] = (adict.get(item, 0) + 1)
return adict | def counter(alist, func=None):
'\n - counts the number of things in a list\n - can apply a function (func) to item\n '
adict = {}
for item in alist:
if (func is not None):
item = func(item)
if (item is not None):
adict[item] = (adict.get(item, 0) + 1)
return adict<|docstring|>- counts the number of things in a list
- can apply a function (func) to item<|endoftext|> |
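Usage sketch, assuming counter from the record above is in scope; func is applied before tallying and None results are skipped:
print(counter(['a', 'b', 'a']))              # {'a': 2, 'b': 1}
print(counter([1.2, 1.4, 2.2], func=round))  # {1: 2, 2: 1}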
e301f74fa2ed648a5c3dfcaf80d411b584a88308d9b8dc221b6df48f6fe12e9f | def nptf(x):
'\n Negative Power To Fraction\n For converting the second value in midi.TimesignatureEvent data from \n a negative power to a fraction\n '
return round((1 // (2 ** (- x)))) | Negative Power To Fraction
For converting the second value in midi.TimesignatureEvent data from
a negative power to a fraction | mu7ron/utils.py | nptf | eM7RON/mu7RON | 0 | python | def nptf(x):
'\n Negative Power To Fraction\n For converting the second value in midi.TimesignatureEvent data from \n a negative power to a fraction\n '
return round((1 // (2 ** (- x)))) | def nptf(x):
'\n Negative Power To Fraction\n For converting the second value in midi.TimesignatureEvent data from \n a negative power to a fraction\n '
return round((1 // (2 ** (- x))))<|docstring|>Negative Power To Fraction
For converting the second value in midi.TimesignatureEvent data from
a negative power to a fraction<|endoftext|> |
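A worked check of the arithmetic, under the reading that x is the stored non-negative power: 1 // 2 ** (-x) then evaluates to 2 ** x, the time-signature denominator.
def nptf(x):
    return round(1 // (2 ** -x))

print(nptf(2), nptf(3))  # 4 8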