body_hash (string, 64 chars) | body (string, 23–109k chars) | docstring (string, 1–57k chars) | path (string, 4–198 chars) | name (string, 1–115 chars) | repository_name (string, 7–111 chars) | repository_stars (float64, 0–191k) | lang (string, 1 class) | body_without_docstring (string, 14–108k chars) | unified (string, 45–133k chars)
---|---|---|---|---|---|---|---|---|---|
f3fe689c45f0e47b5af7de3373ad0ba17ebff02c9534a334be69cc6e91b09054 | def bank(self, Delta_dB=50.0):
'Function [RIR_cell] = ISM_RIR_bank(setupstruc,RIRFileName,varargin)\n\n ISM_RIR_bank Bank of RIRs using Lehmann & Johansson\'s image-source method\n\n [RIR_CELL] = ISM_RIR_bank(SETUP_STRUC,RIR_FILE_NAME)\n\n This function generates a bank of room impulse responses (RIRs) for a\n particular user-defined room setup, using Lehmann and Johansson\'s\n implementation of the image-source method (see: "Prediction of energy\n decay in room impulse responses simulated with an image-source model", J.\n Acoust. Soc. Am., vol. 124(1), pp. 269-277, July 2008). The input\n SETUP_STRUC is a structure of enviromental parameters containing the\n following fields:\n\n Fs: sampling frequency (in Hz).\n room: 1-by-3 vector of enclosure dimensions (in m),\n [x_length y_length z_length].\n mic_pos: N-by-3 matrix, [x1 y1 z1; x2 y2 z2; ...] positions of N\n microphones (in m).\n src_traj: M-by-3 matrix, [x1 y1 z1; x2 y2 z2; ...] positions of M\n source trajectory points (in m).\n reverberation (T20 or T60): scalar value (in s), desired reverberation time.\n c: (optional) sound velocity (in m/s).\n abs_weights: (optional) 1-by-6 vector of absorption coefficients weights,\n [w_x1 w_x2 w_y1 w_y2 w_z1 w_z2].\n\n If the field SETUP_STRUC.c is undefined, the function assumes a default\n value of sound velocity of 343 m/s.\n\n The field \'abs_weight\' corresponds to the relative weights of each of the\n six absorption coefficients resulting from the desired reverberation time.\n For instance, defining \'abs_weights\' as [1 1 0.8 0.8 0.6 0.6] will result\n in the absorption coefficients (alpha) for the walls in the y-dimension\n being 20% smaller compared to the x-dimension walls, whereas the floor\n and ceiling will end up with absorption coefficients 40% smaller (e.g.,\n to simulate the effects of a concrete floor and ceiling). If this field\n is omitted, the parameter \'abs_weight\' will default to [1 1 1 1 1 1],\n which leads to uniform absorption coefficients for all room boundaries.\n\n The structure SETUP_STRUC may contain one of the two fields \'T60\' or\n \'T20\'. This function will automatically determine which reverberation\n type is used and compute the desired room absorption coefficients\n accordingly. T20 is defined as the time required for the impulse response\n energy to decay from -5 to -25dB, whereas T60 corresponds to the time\n required by the impulse response energy to decay by 60dB. Setting the\n corresponding field value to 0 achieves anechoic impulse responses\n (direct path only).\n\n In addition, a number of other (optional) parameters can be set using a\n series of \'argument\'--value pairs. The following parameters (arguments)\n can be used:\n\n \'Delta_dB\': scalar (in dB), parameter determining how much the resulting\n impulse response is cropped: the impulse response is\n computed until the time index where its overall energy\n content has decreased by \'Delta_dB\' decibels, after which\n the computations stop. Not relevant if the reverberation\n time is set to 0 (anechoic case). Defaults to 50.\n\n This function returns a 2-dimensional cell array RIR_CELL containing the\n RIRs for each source trajectory point and each microphone, organised as\n follows: RIR_CELL{mic_index,traj_index}. The resulting filter length\n may differ slightly in each computed RIR.\n\n This function also saves the computation results on file. The argument\n RIR_FILE_NAME determines the name of the .mat file where the variable\n RIR_CELL is to be saved. 
If a file already exists with the same name as\n the input argument, the user will be prompted to determine whether the\n file is to be overwritten or not. The given parameter RIR_FILE_NAME can\n be a full access path to the desired file. If no access path is given,\n the file is saved in the current working directory.\n '
if (self.abs_weights is None):
self.abs_weights = np.ones((1, 6))
elif (self.abs_weights.shape[1] != 6):
logging.warning('The given weights are not an array of 6 values; they will be set to 1')
self.abs_weights = np.ones((1, 6))
if (self.c is None):
self.c = 343.0
if (self.reverberation[0] == 60):
alpha = AbsCoeff('t60', self.reverberation[1], self.room, self.abs_weights, self.method, self.c)
elif (self.reverberation[0] == 20):
alpha = AbsCoeff('t20', self.reverberation[1], self.room, self.abs_weights, self.method, self.c)
else:
raise ValueError('Missing T60 or T20 field.')
rttype = self.reverberation[0]
rtval = self.reverberation[1]
beta = np.sqrt((1 - alpha))
nMics = self.mic_pos.shape[0]
nSPts = self.source_trajectory.shape[0]
RIR_cell = np.empty((nMics, nSPts), dtype=object)
logging.info('Computing room impulse responses. ')
mics_range = range(nMics)
if (self.processes > 1):
from pathos.multiprocessing import ProcessingPool as Pool
this_map = Pool(nodes=self.processes).map
X_src = (self.source_trajectory[tt, :] for mm in mics_range for tt in range(nSPts))
X_rcv = (self.mic_pos[mm, :] for mm in mics_range for tt in range(nSPts))
m_freq = (self.sampling_freq for mm in mics_range for tt in range(nSPts))
m_beta = (beta for mm in mics_range for tt in range(nSPts))
m_rttype = (rttype for mm in mics_range for tt in range(nSPts))
m_rtval = (rtval for mm in mics_range for tt in range(nSPts))
m_room = (self.room for mm in mics_range for tt in range(nSPts))
m_c = (self.c for mm in mics_range for tt in range(nSPts))
m_Delta_dB = (Delta_dB for mm in mics_range for tt in range(nSPts))
imps = this_map(ISM_RoomResp, m_freq, m_beta, m_rttype, m_rtval, X_src, X_rcv, m_room, m_c, m_Delta_dB)
for mm in mics_range:
for tt in range(nSPts):
RIR_cell[(mm, tt)] = imps[((mm * nSPts) + tt)]
logging.info('Room impulse responses completed. ')
else:
if self.verbose:
from tqdm import tqdm
mics_range = tqdm(mics_range)
for mm in mics_range:
X_rcv = self.mic_pos[mm, :]
for tt in range(nSPts):
X_src = self.source_trajectory[tt, :]
RIR_cell[(mm, tt)] = ISM_RoomResp(self.sampling_freq, beta, rttype, rtval, X_src, X_rcv, self.room, self.c, Delta_dB)
self.RIR_cell = RIR_cell
return RIR_cell | Function [RIR_cell] = ISM_RIR_bank(setupstruc,RIRFileName,varargin)
ISM_RIR_bank Bank of RIRs using Lehmann & Johansson's image-source method
[RIR_CELL] = ISM_RIR_bank(SETUP_STRUC,RIR_FILE_NAME)
This function generates a bank of room impulse responses (RIRs) for a
particular user-defined room setup, using Lehmann and Johansson's
implementation of the image-source method (see: "Prediction of energy
decay in room impulse responses simulated with an image-source model", J.
Acoust. Soc. Am., vol. 124(1), pp. 269-277, July 2008). The input
SETUP_STRUC is a structure of environmental parameters containing the
following fields:
Fs: sampling frequency (in Hz).
room: 1-by-3 vector of enclosure dimensions (in m),
[x_length y_length z_length].
mic_pos: N-by-3 matrix, [x1 y1 z1; x2 y2 z2; ...] positions of N
microphones (in m).
src_traj: M-by-3 matrix, [x1 y1 z1; x2 y2 z2; ...] positions of M
source trajectory points (in m).
reverberation (T20 or T60): scalar value (in s), desired reverberation time.
c: (optional) sound velocity (in m/s).
abs_weights: (optional) 1-by-6 vector of absorption coefficients weights,
[w_x1 w_x2 w_y1 w_y2 w_z1 w_z2].
If the field SETUP_STRUC.c is undefined, the function assumes a default
value of sound velocity of 343 m/s.
The field 'abs_weights' corresponds to the relative weights of each of the
six absorption coefficients resulting from the desired reverberation time.
For instance, defining 'abs_weights' as [1 1 0.8 0.8 0.6 0.6] will result
in the absorption coefficients (alpha) for the walls in the y-dimension
being 20% smaller compared to the x-dimension walls, whereas the floor
and ceiling will end up with absorption coefficients 40% smaller (e.g.,
to simulate the effects of a concrete floor and ceiling). If this field
is omitted, the parameter 'abs_weights' will default to [1 1 1 1 1 1],
which leads to uniform absorption coefficients for all room boundaries.
The structure SETUP_STRUC may contain one of the two fields 'T60' or
'T20'. This function will automatically determine which reverberation
type is used and compute the desired room absorption coefficients
accordingly. T20 is defined as the time required for the impulse response
energy to decay from -5 to -25dB, whereas T60 corresponds to the time
required by the impulse response energy to decay by 60dB. Setting the
corresponding field value to 0 achieves anechoic impulse responses
(direct path only).
In addition, a number of other (optional) parameters can be set using a
series of 'argument'--value pairs. The following parameters (arguments)
can be used:
'Delta_dB': scalar (in dB), parameter determining how much the resulting
impulse response is cropped: the impulse response is
computed until the time index where its overall energy
content has decreased by 'Delta_dB' decibels, after which
the computations stop. Not relevant if the reverberation
time is set to 0 (anechoic case). Defaults to 50.
This function returns a 2-dimensional cell array RIR_CELL containing the
RIRs for each source trajectory point and each microphone, organised as
follows: RIR_CELL{mic_index,traj_index}. The resulting filter length
may differ slightly in each computed RIR.
This function also saves the computation results on file. The argument
RIR_FILE_NAME determines the name of the .mat file where the variable
RIR_CELL is to be saved. If a file already exists with the same name as
the input argument, the user will be prompted to determine whether the
file is to be overwritten or not. The given parameter RIR_FILE_NAME can
be a full access path to the desired file. If no access path is given,
the file is saved in the current working directory. | pyimagesource/bank.py | bank | Fhrozen/pyimagesource | 6 | python | def bank(self, Delta_dB=50.0):
'Function [RIR_cell] = ISM_RIR_bank(setupstruc,RIRFileName,varargin)\n\n ISM_RIR_bank Bank of RIRs using Lehmann & Johansson\'s image-source method\n\n [RIR_CELL] = ISM_RIR_bank(SETUP_STRUC,RIR_FILE_NAME)\n\n This function generates a bank of room impulse responses (RIRs) for a\n particular user-defined room setup, using Lehmann and Johansson\'s\n implementation of the image-source method (see: "Prediction of energy\n decay in room impulse responses simulated with an image-source model", J.\n Acoust. Soc. Am., vol. 124(1), pp. 269-277, July 2008). The input\n SETUP_STRUC is a structure of enviromental parameters containing the\n following fields:\n\n Fs: sampling frequency (in Hz).\n room: 1-by-3 vector of enclosure dimensions (in m),\n [x_length y_length z_length].\n mic_pos: N-by-3 matrix, [x1 y1 z1; x2 y2 z2; ...] positions of N\n microphones (in m).\n src_traj: M-by-3 matrix, [x1 y1 z1; x2 y2 z2; ...] positions of M\n source trajectory points (in m).\n reverberation (T20 or T60): scalar value (in s), desired reverberation time.\n c: (optional) sound velocity (in m/s).\n abs_weights: (optional) 1-by-6 vector of absorption coefficients weights,\n [w_x1 w_x2 w_y1 w_y2 w_z1 w_z2].\n\n If the field SETUP_STRUC.c is undefined, the function assumes a default\n value of sound velocity of 343 m/s.\n\n The field \'abs_weight\' corresponds to the relative weights of each of the\n six absorption coefficients resulting from the desired reverberation time.\n For instance, defining \'abs_weights\' as [1 1 0.8 0.8 0.6 0.6] will result\n in the absorption coefficients (alpha) for the walls in the y-dimension\n being 20% smaller compared to the x-dimension walls, whereas the floor\n and ceiling will end up with absorption coefficients 40% smaller (e.g.,\n to simulate the effects of a concrete floor and ceiling). If this field\n is omitted, the parameter \'abs_weight\' will default to [1 1 1 1 1 1],\n which leads to uniform absorption coefficients for all room boundaries.\n\n The structure SETUP_STRUC may contain one of the two fields \'T60\' or\n \'T20\'. This function will automatically determine which reverberation\n type is used and compute the desired room absorption coefficients\n accordingly. T20 is defined as the time required for the impulse response\n energy to decay from -5 to -25dB, whereas T60 corresponds to the time\n required by the impulse response energy to decay by 60dB. Setting the\n corresponding field value to 0 achieves anechoic impulse responses\n (direct path only).\n\n In addition, a number of other (optional) parameters can be set using a\n series of \'argument\'--value pairs. The following parameters (arguments)\n can be used:\n\n \'Delta_dB\': scalar (in dB), parameter determining how much the resulting\n impulse response is cropped: the impulse response is\n computed until the time index where its overall energy\n content has decreased by \'Delta_dB\' decibels, after which\n the computations stop. Not relevant if the reverberation\n time is set to 0 (anechoic case). Defaults to 50.\n\n This function returns a 2-dimensional cell array RIR_CELL containing the\n RIRs for each source trajectory point and each microphone, organised as\n follows: RIR_CELL{mic_index,traj_index}. The resulting filter length\n may differ slightly in each computed RIR.\n\n This function also saves the computation results on file. The argument\n RIR_FILE_NAME determines the name of the .mat file where the variable\n RIR_CELL is to be saved. 
If a file already exists with the same name as\n the input argument, the user will be prompted to determine whether the\n file is to be overwritten or not. The given parameter RIR_FILE_NAME can\n be a full access path to the desired file. If no access path is given,\n the file is saved in the current working directory.\n '
if (self.abs_weights is None):
self.abs_weights = np.ones((1, 6))
elif (self.abs_weights.shape[1] != 6):
logging.warning('The given weights are not an array of 6 values; they will be set to 1')
self.abs_weights = np.ones((1, 6))
if (self.c is None):
self.c = 343.0
if (self.reverberation[0] == 60):
alpha = AbsCoeff('t60', self.reverberation[1], self.room, self.abs_weights, self.method, self.c)
elif (self.reverberation[0] == 20):
alpha = AbsCoeff('t20', self.reverberation[1], self.room, self.abs_weights, self.method, self.c)
else:
raise ValueError('Missing T60 or T20 field.')
rttype = self.reverberation[0]
rtval = self.reverberation[1]
beta = np.sqrt((1 - alpha))
nMics = self.mic_pos.shape[0]
nSPts = self.source_trajectory.shape[0]
RIR_cell = np.empty((nMics, nSPts), dtype=object)
logging.info('Computing room impulse responses. ')
mics_range = range(nMics)
if (self.processes > 1):
from pathos.multiprocessing import ProcessingPool as Pool
this_map = Pool(nodes=self.processes).map
X_src = (self.source_trajectory[tt, :] for mm in mics_range for tt in range(nSPts))
X_rcv = (self.mic_pos[mm, :] for mm in mics_range for tt in range(nSPts))
m_freq = (self.sampling_freq for mm in mics_range for tt in range(nSPts))
m_beta = (beta for mm in mics_range for tt in range(nSPts))
m_rttype = (rttype for mm in mics_range for tt in range(nSPts))
m_rtval = (rtval for mm in mics_range for tt in range(nSPts))
m_room = (self.room for mm in mics_range for tt in range(nSPts))
m_c = (self.c for mm in mics_range for tt in range(nSPts))
m_Delta_dB = (Delta_dB for mm in mics_range for tt in range(nSPts))
imps = this_map(ISM_RoomResp, m_freq, m_beta, m_rttype, m_rtval, X_src, X_rcv, m_room, m_c, m_Delta_dB)
for mm in mics_range:
for tt in range(nSPts):
RIR_cell[(mm, tt)] = imps[((mm * nSPts) + tt)]
logging.info('Room impulse responses completed. ')
else:
if self.verbose:
from tqdm import tqdm
mics_range = tqdm(mics_range)
for mm in mics_range:
X_rcv = self.mic_pos[mm, :]
for tt in range(nSPts):
X_src = self.source_trajectory[tt, :]
RIR_cell[(mm, tt)] = ISM_RoomResp(self.sampling_freq, beta, rttype, rtval, X_src, X_rcv, self.room, self.c, Delta_dB)
self.RIR_cell = RIR_cell
return RIR_cell | def bank(self, Delta_dB=50.0):
'Function [RIR_cell] = ISM_RIR_bank(setupstruc,RIRFileName,varargin)\n\n ISM_RIR_bank Bank of RIRs using Lehmann & Johansson\'s image-source method\n\n [RIR_CELL] = ISM_RIR_bank(SETUP_STRUC,RIR_FILE_NAME)\n\n This function generates a bank of room impulse responses (RIRs) for a\n particular user-defined room setup, using Lehmann and Johansson\'s\n implementation of the image-source method (see: "Prediction of energy\n decay in room impulse responses simulated with an image-source model", J.\n Acoust. Soc. Am., vol. 124(1), pp. 269-277, July 2008). The input\n SETUP_STRUC is a structure of enviromental parameters containing the\n following fields:\n\n Fs: sampling frequency (in Hz).\n room: 1-by-3 vector of enclosure dimensions (in m),\n [x_length y_length z_length].\n mic_pos: N-by-3 matrix, [x1 y1 z1; x2 y2 z2; ...] positions of N\n microphones (in m).\n src_traj: M-by-3 matrix, [x1 y1 z1; x2 y2 z2; ...] positions of M\n source trajectory points (in m).\n reverberation (T20 or T60): scalar value (in s), desired reverberation time.\n c: (optional) sound velocity (in m/s).\n abs_weights: (optional) 1-by-6 vector of absorption coefficients weights,\n [w_x1 w_x2 w_y1 w_y2 w_z1 w_z2].\n\n If the field SETUP_STRUC.c is undefined, the function assumes a default\n value of sound velocity of 343 m/s.\n\n The field \'abs_weight\' corresponds to the relative weights of each of the\n six absorption coefficients resulting from the desired reverberation time.\n For instance, defining \'abs_weights\' as [1 1 0.8 0.8 0.6 0.6] will result\n in the absorption coefficients (alpha) for the walls in the y-dimension\n being 20% smaller compared to the x-dimension walls, whereas the floor\n and ceiling will end up with absorption coefficients 40% smaller (e.g.,\n to simulate the effects of a concrete floor and ceiling). If this field\n is omitted, the parameter \'abs_weight\' will default to [1 1 1 1 1 1],\n which leads to uniform absorption coefficients for all room boundaries.\n\n The structure SETUP_STRUC may contain one of the two fields \'T60\' or\n \'T20\'. This function will automatically determine which reverberation\n type is used and compute the desired room absorption coefficients\n accordingly. T20 is defined as the time required for the impulse response\n energy to decay from -5 to -25dB, whereas T60 corresponds to the time\n required by the impulse response energy to decay by 60dB. Setting the\n corresponding field value to 0 achieves anechoic impulse responses\n (direct path only).\n\n In addition, a number of other (optional) parameters can be set using a\n series of \'argument\'--value pairs. The following parameters (arguments)\n can be used:\n\n \'Delta_dB\': scalar (in dB), parameter determining how much the resulting\n impulse response is cropped: the impulse response is\n computed until the time index where its overall energy\n content has decreased by \'Delta_dB\' decibels, after which\n the computations stop. Not relevant if the reverberation\n time is set to 0 (anechoic case). Defaults to 50.\n\n This function returns a 2-dimensional cell array RIR_CELL containing the\n RIRs for each source trajectory point and each microphone, organised as\n follows: RIR_CELL{mic_index,traj_index}. The resulting filter length\n may differ slightly in each computed RIR.\n\n This function also saves the computation results on file. The argument\n RIR_FILE_NAME determines the name of the .mat file where the variable\n RIR_CELL is to be saved. 
If a file already exists with the same name as\n the input argument, the user will be prompted to determine whether the\n file is to be overwritten or not. The given parameter RIR_FILE_NAME can\n be a full access path to the desired file. If no access path is given,\n the file is saved in the current working directory.\n '
if (self.abs_weights is None):
self.abs_weights = np.ones((1, 6))
elif (self.abs_weights.shape[1] != 6):
logging.warning('The given weights are not an array of 6 values; they will be set to 1')
self.abs_weights = np.ones((1, 6))
if (self.c is None):
self.c = 343.0
if (self.reverberation[0] == 60):
alpha = AbsCoeff('t60', self.reverberation[1], self.room, self.abs_weights, self.method, self.c)
elif (self.reverberation[0] == 20):
alpha = AbsCoeff('t20', self.reverberation[1], self.room, self.abs_weights, self.method, self.c)
else:
raise ValueError('Missing T60 or T20 field.')
rttype = self.reverberation[0]
rtval = self.reverberation[1]
beta = np.sqrt((1 - alpha))
nMics = self.mic_pos.shape[0]
nSPts = self.source_trajectory.shape[0]
RIR_cell = np.empty((nMics, nSPts), dtype=object)
logging.info('Computing room impulse responses. ')
mics_range = range(nMics)
if (self.processes > 1):
from pathos.multiprocessing import ProcessingPool as Pool
this_map = Pool(nodes=self.processes).map
X_src = (self.source_trajectory[tt, :] for mm in mics_range for tt in range(nSPts))
X_rcv = (self.mic_pos[mm, :] for mm in mics_range for tt in range(nSPts))
m_freq = (self.sampling_freq for mm in mics_range for tt in range(nSPts))
m_beta = (beta for mm in mics_range for tt in range(nSPts))
m_rttype = (rttype for mm in mics_range for tt in range(nSPts))
m_rtval = (rtval for mm in mics_range for tt in range(nSPts))
m_room = (self.room for mm in mics_range for tt in range(nSPts))
m_c = (self.c for mm in mics_range for tt in range(nSPts))
m_Delta_dB = (Delta_dB for mm in mics_range for tt in range(nSPts))
imps = this_map(ISM_RoomResp, m_freq, m_beta, m_rttype, m_rtval, X_src, X_rcv, m_room, m_c, m_Delta_dB)
for mm in mics_range:
for tt in range(nSPts):
RIR_cell[(mm, tt)] = imps[((mm * nSPts) + tt)]
logging.info('Room impulse responses completed. ')
else:
if self.verbose:
from tqdm import tqdm
mics_range = tqdm(mics_range)
for mm in mics_range:
X_rcv = self.mic_pos[mm, :]
for tt in range(nSPts):
X_src = self.source_trajectory[tt, :]
RIR_cell[(mm, tt)] = ISM_RoomResp(self.sampling_freq, beta, rttype, rtval, X_src, X_rcv, self.room, self.c, Delta_dB)
self.RIR_cell = RIR_cell
return RIR_cell<|docstring|>Function [RIR_cell] = ISM_RIR_bank(setupstruc,RIRFileName,varargin)
ISM_RIR_bank Bank of RIRs using Lehmann & Johansson's image-source method
[RIR_CELL] = ISM_RIR_bank(SETUP_STRUC,RIR_FILE_NAME)
This function generates a bank of room impulse responses (RIRs) for a
particular user-defined room setup, using Lehmann and Johansson's
implementation of the image-source method (see: "Prediction of energy
decay in room impulse responses simulated with an image-source model", J.
Acoust. Soc. Am., vol. 124(1), pp. 269-277, July 2008). The input
SETUP_STRUC is a structure of environmental parameters containing the
following fields:
Fs: sampling frequency (in Hz).
room: 1-by-3 vector of enclosure dimensions (in m),
[x_length y_length z_length].
mic_pos: N-by-3 matrix, [x1 y1 z1; x2 y2 z2; ...] positions of N
microphones (in m).
src_traj: M-by-3 matrix, [x1 y1 z1; x2 y2 z2; ...] positions of M
source trajectory points (in m).
reverberation (T20 or T60): scalar value (in s), desired reverberation time.
c: (optional) sound velocity (in m/s).
abs_weights: (optional) 1-by-6 vector of absorption coefficients weights,
[w_x1 w_x2 w_y1 w_y2 w_z1 w_z2].
If the field SETUP_STRUC.c is undefined, the function assumes a default
value of sound velocity of 343 m/s.
The field 'abs_weights' corresponds to the relative weights of each of the
six absorption coefficients resulting from the desired reverberation time.
For instance, defining 'abs_weights' as [1 1 0.8 0.8 0.6 0.6] will result
in the absorption coefficients (alpha) for the walls in the y-dimension
being 20% smaller compared to the x-dimension walls, whereas the floor
and ceiling will end up with absorption coefficients 40% smaller (e.g.,
to simulate the effects of a concrete floor and ceiling). If this field
is omitted, the parameter 'abs_weights' will default to [1 1 1 1 1 1],
which leads to uniform absorption coefficients for all room boundaries.
The structure SETUP_STRUC may contain one of the two fields 'T60' or
'T20'. This function will automatically determine which reverberation
type is used and compute the desired room absorption coefficients
accordingly. T20 is defined as the time required for the impulse response
energy to decay from -5 to -25dB, whereas T60 corresponds to the time
required by the impulse response energy to decay by 60dB. Setting the
corresponding field value to 0 achieves anechoic impulse responses
(direct path only).
In addition, a number of other (optional) parameters can be set using a
series of 'argument'--value pairs. The following parameters (arguments)
can be used:
'Delta_dB': scalar (in dB), parameter determining how much the resulting
impulse response is cropped: the impulse response is
computed until the time index where its overall energy
content has decreased by 'Delta_dB' decibels, after which
the computations stop. Not relevant if the reverberation
time is set to 0 (anechoic case). Defaults to 50.
This function returns a 2-dimensional cell array RIR_CELL containing the
RIRs for each source trajectory point and each microphone, organised as
follows: RIR_CELL{mic_index,traj_index}. The resulting filter length
may differ slightly in each computed RIR.
This function also saves the computation results on file. The argument
RIR_FILE_NAME determines the name of the .mat file where the variable
RIR_CELL is to be saved. If a file already exists with the same name as
the input argument, the user will be prompted to determine whether the
file is to be overwritten or not. The given parameter RIR_FILE_NAME can
be a full access path to the desired file. If no access path is given,
the file is saved in the current working directory.<|endoftext|> |
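
The parallel branch of `bank()` above flattens the (microphone, trajectory point) grid into one argument stream and later recovers the 2-D layout via `mm * nSPts + tt`. A minimal, self-contained sketch of that layout, using placeholder data and illustrative names rather than the package's API:

```python
import numpy as np
from itertools import product

mic_pos = np.zeros((3, 3))    # 3 microphones (placeholder positions)
src_traj = np.zeros((4, 3))   # 4 source trajectory points
n_mics, n_pts = len(mic_pos), len(src_traj)

# One (source, receiver) pair per grid cell, in the same mic-major order
pairs = [(src_traj[tt], mic_pos[mm])
         for mm, tt in product(range(n_mics), range(n_pts))]
results = [f'rir_{i}' for i in range(len(pairs))]  # stand-in for the mapped RIRs

# Recover the 2-D cell layout with the same index arithmetic as bank()
table = np.empty((n_mics, n_pts), dtype=object)
for mm in range(n_mics):
    for tt in range(n_pts):
        table[mm, tt] = results[mm * n_pts + tt]
assert table[1, 2] == 'rir_6'  # 1 * 4 + 2
```
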
47e66fbec8a8d9b206d2ea23a7c15dc179048dec48aca7a1597d66acaeea803a | def compress(x, shape, k, qTable):
'\n\tCompress an image x through CompressionCodecs\n\tin package "compression".\n\n\tArguments:\n\t\tx -- A flattened 1-D array representing \n\t\t\tthe input image.\n\t\tshape -- Original shape of x before being\n\t\t\tflattened.\n\t\tk, qTable -- Arguments for Quantization, see \n\t\t\tQuantizationCodec in compression.Codec\n\t\t\tfor further description.\n\t'
x = np.reshape(x, shape, 'F')
qTable = np.reshape(qTable, [8, 8], 'F')
c = CompressionCodecs(k=k, qTable=qTable)
y = c.compress(x)
return y | Compress an image x through CompressionCodecs
in package "compression".
Arguments:
x -- A flattened 1-D array representing
the input image.
shape -- Original shape of x before being
flattened.
k, qTable -- Arguments for Quantization, see
QuantizationCodec in compression.Codec
for further description. | report/Code/matlab.py | compress | daidahao/Loughborough-University-MSc-Advanced-Computer-Science-Report-Template | 0 | python | def compress(x, shape, k, qTable):
'\n\tCompress an image x through CompressionCodecs\n\tin package "compression".\n\n\tArguments:\n\t\tx -- A flattened 1-D array representing \n\t\t\tthe input image.\n\t\tshape -- Original shape of x before being\n\t\t\tflattened.\n\t\tk, qTable -- Arguments for Quantization, see \n\t\t\tQuantizationCodec in compression.Codec\n\t\t\tfor further description.\n\t'
x = np.reshape(x, shape, 'F')
qTable = np.reshape(qTable, [8, 8], 'F')
c = CompressionCodecs(k=k, qTable=qTable)
y = c.compress(x)
return y | def compress(x, shape, k, qTable):
'\n\tCompress an image x through CompressionCodecs\n\tin package "compression".\n\n\tArguments:\n\t\tx -- A flattened 1-D array representing \n\t\t\tthe input image.\n\t\tshape -- Original shape of x before being\n\t\t\tflattened.\n\t\tk, qTable -- Arguments for Quantization, see \n\t\t\tQuantizationCodec in compression.Codec\n\t\t\tfor further description.\n\t'
x = np.reshape(x, shape, 'F')
qTable = np.reshape(qTable, [8, 8], 'F')
c = CompressionCodecs(k=k, qTable=qTable)
y = c.compress(x)
return y<|docstring|>Compress an image x through CompressionCodecs
in package "compression".
Arguments:
x -- A flattened 1-D array representing
the input image.
shape -- Original shape of x before being
flattened.
k, qTable -- Arguments for Quantization, see
QuantizationCodec in compression.Codec
for further description.<|endoftext|> |
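
The `compress()` wrapper above evidently bridges from MATLAB, which hands arrays over flattened in column-major order; `np.reshape(..., 'F')` undoes that. A quick, self-contained check of the round trip (the `CompressionCodecs` call itself is package-specific and omitted):

```python
import numpy as np

img = np.arange(6).reshape(2, 3)           # a tiny 2x3 "image"
flat = img.flatten(order='F')              # column-major, as MATLAB would pass it
restored = np.reshape(flat, (2, 3), 'F')   # the wrapper's reshape
assert (restored == img).all()
```
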
c0a7c9ddc97d1c0814e7252cee3bb60c0b5854d52b344ff812f306f42db25689 | def unwrap(func, *, stop=None):
"This is the inspect.unwrap() method copied from Python 3.5's standard library."
if (stop is None):
def _is_wrapper(f):
return hasattr(f, '__wrapped__')
else:
def _is_wrapper(f):
return (hasattr(f, '__wrapped__') and (not stop(f)))
f = func
memo = {id(f)}
while _is_wrapper(func):
func = func.__wrapped__
id_func = id(func)
if (id_func in memo):
raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
memo.add(id_func)
return func | This is the inspect.unwrap() method copied from Python 3.5's standard library. | sphinx_autodoc_typehints.py | unwrap | kmyk/sphinx-autodoc-typehints | 0 | python | def unwrap(func, *, stop=None):
if (stop is None):
def _is_wrapper(f):
return hasattr(f, '__wrapped__')
else:
def _is_wrapper(f):
return (hasattr(f, '__wrapped__') and (not stop(f)))
f = func
memo = {id(f)}
while _is_wrapper(func):
func = func.__wrapped__
id_func = id(func)
if (id_func in memo):
raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
memo.add(id_func)
return func | def unwrap(func, *, stop=None):
if (stop is None):
def _is_wrapper(f):
return hasattr(f, '__wrapped__')
else:
def _is_wrapper(f):
return (hasattr(f, '__wrapped__') and (not stop(f)))
f = func
memo = {id(f)}
while _is_wrapper(func):
func = func.__wrapped__
id_func = id(func)
if (id_func in memo):
raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
memo.add(id_func)
return func<|docstring|>This is the inspect.unwrap() method copied from Python 3.5's standard library.<|endoftext|> |
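
`unwrap()` above walks the `__wrapped__` chain that `functools.wraps` installs; the same routine ships as `inspect.unwrap` in Python 3.4+. A short usage check against the standard-library version:

```python
import functools
import inspect

def deco(f):
    @functools.wraps(f)        # sets wrapper.__wrapped__ = f
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    return wrapper

@deco
def greet():
    return 'hi'

assert inspect.unwrap(greet) is greet.__wrapped__  # the undecorated function
assert inspect.unwrap(greet)() == 'hi'
```
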
b4a77d9cdb745c602f6c513da3d41ad823f84bcc52dd6ca8787f1204a38b2e84 | def partfile(path, raw=False):
'Split a path into directory, basename, and/or extension '
(dirpath, filename) = os.path.split(path)
if (not raw):
(basename, ext) = os.path.splitext(filename)
else:
(basename, ext) = (filename, '')
return (dirpath, basename, ext) | Split a path into directory, basename, and/or extension | batchren/renamer.py | partfile | matvign/batchrenamer | 0 | python | def partfile(path, raw=False):
' '
(dirpath, filename) = os.path.split(path)
if (not raw):
(basename, ext) = os.path.splitext(filename)
else:
(basename, ext) = (filename, '')
return (dirpath, basename, ext) | def partfile(path, raw=False):
' '
(dirpath, filename) = os.path.split(path)
if (not raw):
(basename, ext) = os.path.splitext(filename)
else:
(basename, ext) = (filename, '')
return (dirpath, basename, ext)<|docstring|>Split a path into directory, basename, and/or extension<|endoftext|>
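
Expected behaviour of `partfile()` above on a POSIX-style path, as a quick sanity check (assuming the function is imported into scope):

```python
assert partfile('a/b/c.mp3') == ('a/b', 'c', '.mp3')             # extension split off
assert partfile('a/b/c.mp3', raw=True) == ('a/b', 'c.mp3', '')   # extension kept attached
```
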
9e00f4ab83ffadf31a3bd6f649567b0edf105ae93f553d2f63610e5b13857ac1 | def initfilters(args):
'Create functions in a list '
filters = []
if args.regex:
try:
repl = _repl_decorator(*args.regex)
except re.error as re_err:
sys.exit(('A regex compilation error occurred: ' + str(re_err)))
except sre_constants.error as sre_err:
sys.exit(('A regex compilation error occurred: ' + str(sre_err)))
filters.append(repl)
if args.bracket_remove:
maps = helper.bracket_map(args.bracket_remove[0])
count = args.bracket_remove[1]
bracr = (lambda x: helper.bracket_remove(x, *maps, count))
filters.append(bracr)
if args.slice:
slash = (lambda x: x[args.slice])
filters.append(slash)
if args.shave:
shave = (lambda x: x[args.shave[0]][args.shave[1]])
filters.append(shave)
if args.translate:
translmap = str.maketrans(*args.translate)
translate = (lambda x: x.translate(translmap))
filters.append(translate)
if (args.spaces is not None):
space = (lambda x: re.sub('\\s+', args.spaces, x))
filters.append(space)
if args.case:
if (args.case == 'upper'):
case = (lambda x: x.upper())
elif (args.case == 'lower'):
case = (lambda x: x.lower())
elif (args.case == 'swap'):
case = (lambda x: x.swapcase())
elif (args.case == 'cap'):
case = (lambda x: str.title(x))
filters.append(case)
if args.sequence:
filters.append(args.sequence)
if (args.prepend is not None):
prepend = (lambda x: (args.prepend + x))
filters.append(prepend)
if (args.postpend is not None):
postpend = (lambda x: (x + args.postpend))
filters.append(postpend)
return filters | Create functions in a list | batchren/renamer.py | initfilters | matvign/batchrenamer | 0 | python | def initfilters(args):
' '
filters = []
if args.regex:
try:
repl = _repl_decorator(*args.regex)
except re.error as re_err:
sys.exit(('A regex compilation error occurred: ' + str(re_err)))
except sre_constants.error as sre_err:
sys.exit(('A regex compilation error occurred: ' + str(sre_err)))
filters.append(repl)
if args.bracket_remove:
maps = helper.bracket_map(args.bracket_remove[0])
count = args.bracket_remove[1]
bracr = (lambda x: helper.bracket_remove(x, *maps, count))
filters.append(bracr)
if args.slice:
slash = (lambda x: x[args.slice])
filters.append(slash)
if args.shave:
shave = (lambda x: x[args.shave[0]][args.shave[1]])
filters.append(shave)
if args.translate:
translmap = str.maketrans(*args.translate)
translate = (lambda x: x.translate(translmap))
filters.append(translate)
if (args.spaces is not None):
space = (lambda x: re.sub('\\s+', args.spaces, x))
filters.append(space)
if args.case:
if (args.case == 'upper'):
case = (lambda x: x.upper())
elif (args.case == 'lower'):
case = (lambda x: x.lower())
elif (args.case == 'swap'):
case = (lambda x: x.swapcase())
elif (args.case == 'cap'):
case = (lambda x: str.title(x))
filters.append(case)
if args.sequence:
filters.append(args.sequence)
if (args.prepend is not None):
prepend = (lambda x: (args.prepend + x))
filters.append(prepend)
if (args.postpend is not None):
postpend = (lambda x: (x + args.postpend))
filters.append(postpend)
return filters | def initfilters(args):
' '
filters = []
if args.regex:
try:
repl = _repl_decorator(*args.regex)
except re.error as re_err:
sys.exit(('A regex compilation error occurred: ' + str(re_err)))
except sre_constants.error as sre_err:
sys.exit(('A regex compilation error occurred: ' + str(sre_err)))
filters.append(repl)
if args.bracket_remove:
maps = helper.bracket_map(args.bracket_remove[0])
count = args.bracket_remove[1]
bracr = (lambda x: helper.bracket_remove(x, *maps, count))
filters.append(bracr)
if args.slice:
slash = (lambda x: x[args.slice])
filters.append(slash)
if args.shave:
shave = (lambda x: x[args.shave[0]][args.shave[1]])
filters.append(shave)
if args.translate:
translmap = str.maketrans(*args.translate)
translate = (lambda x: x.translate(translmap))
filters.append(translate)
if (args.spaces is not None):
space = (lambda x: re.sub('\\s+', args.spaces, x))
filters.append(space)
if args.case:
if (args.case == 'upper'):
case = (lambda x: x.upper())
elif (args.case == 'lower'):
case = (lambda x: x.lower())
elif (args.case == 'swap'):
case = (lambda x: x.swapcase())
elif (args.case == 'cap'):
case = (lambda x: str.title(x))
filters.append(case)
if args.sequence:
filters.append(args.sequence)
if (args.prepend is not None):
prepend = (lambda x: (args.prepend + x))
filters.append(prepend)
if (args.postpend is not None):
postpend = (lambda x: (x + args.postpend))
filters.append(postpend)
return filters<|docstring|>Create functions in a list<|endoftext|> |
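
`initfilters()` above builds a pipeline of `str -> str` callables that are applied in order to each basename. The pattern in miniature, with hypothetical filters standing in for the argparse-driven ones:

```python
import re

filters = [
    lambda x: re.sub(r'\s+', '_', x),  # collapse whitespace to underscores
    lambda x: x.lower(),               # lowercase
    lambda x: 'track_' + x,            # prepend a prefix
]

name = 'My  Song'
for f in filters:       # apply each filter in order, feeding the result forward
    name = f(name)
assert name == 'track_my_song'
```
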
e11a0dd6602361ea7c58552f1337a6f6f30786f3f07d5d35d8db32c3e3110f58 | def _repl_decorator(pattern, repl='', count=0):
'Decorator function for regex replacement\n\n Return one of two functions:\n\n 1. Normal re.sub\n 2. re.sub with counter to remove nth instance.\n '
def repl_all(x):
return re.sub(pattern, repl, x)
def repl_nth(x):
f = re.sub(pattern, replacer, x, count)
replacer._count = 1
return f
def replacer(matchobj):
'Function to be used with re.sub\n\n Replace string match with repl when the running counter equals count\n\n Otherwise return the string match\n\n '
if (matchobj.group() and (replacer._count == count)):
res = repl
else:
res = matchobj.group()
replacer._count += 1
return res
replacer._count = 1
return (repl_all if (not count) else repl_nth) | Decorator function for regex replacement
Return one of two functions:
1. Normal re.sub
2. re.sub with counter to remove nth instance. | batchren/renamer.py | _repl_decorator | matvign/batchrenamer | 0 | python | def _repl_decorator(pattern, repl=, count=0):
'Decorator function for regex replacement\n\n Return one of two functions:\n\n 1. Normal re.sub\n 2. re.sub with counter to remove nth instance.\n '
def repl_all(x):
return re.sub(pattern, repl, x)
def repl_nth(x):
f = re.sub(pattern, replacer, x, count)
replacer._count = 1
return f
def replacer(matchobj):
'Function to be used with re.sub\n\n Replace string match with repl when the running counter equals count\n\n Otherwise return the string match\n\n '
if (matchobj.group() and (replacer._count == count)):
res = repl
else:
res = matchobj.group()
replacer._count += 1
return res
replacer._count = 1
return (repl_all if (not count) else repl_nth) | def _repl_decorator(pattern, repl=, count=0):
'Decorator function for regex replacement\n\n Return one of two functions:\n\n 1. Normal re.sub\n 2. re.sub with counter to remove nth instance.\n '
def repl_all(x):
return re.sub(pattern, repl, x)
def repl_nth(x):
f = re.sub(pattern, replacer, x, count)
replacer._count = 1
return f
def replacer(matchobj):
'Function to be used with re.sub\n\n Replace string match with repl when the running counter equals count\n\n Otherwise return the string match\n\n '
if (matchobj.group() and (replacer._count == count)):
res = repl
else:
res = matchobj.group()
replacer._count += 1
return res
replacer._count = 1
return (repl_all if (not count) else repl_nth)<|docstring|>Decorator function for regex replacement
Return one of two functions:
1. Normal re.sub
2. re.sub with counter to remove nth instance.<|endoftext|> |
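
The nth-occurrence trick in `_repl_decorator()` above works because `re.sub` invokes a callable replacement once per match, so a running counter can single out one match. The idea in isolation:

```python
import re

def replace_nth(pattern, repl, count, text):
    state = {'n': 0}                   # running match counter
    def replacer(m):
        state['n'] += 1
        return repl if state['n'] == count else m.group()
    return re.sub(pattern, replacer, text)

# Only the 2nd 'o' is replaced; the others pass through unchanged.
assert replace_nth('o', '0', 2, 'foo bar boo') == 'fo0 bar boo'
```
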
a74c1467f5833c4b5ce31345990315b0c9408861c6b89723f928cced84fd0cbf | def get_renames(src_files, filters, ext, raw):
'Rename list of files with a list of functions '
dest_files = []
for src in src_files:
dest = runfilters(src, filters, ext, raw)
dest_files.append(dest)
return dest_files | Rename list of files with a list of functions | batchren/renamer.py | get_renames | matvign/batchrenamer | 0 | python | def get_renames(src_files, filters, ext, raw):
' '
dest_files = []
for src in src_files:
dest = runfilters(src, filters, ext, raw)
dest_files.append(dest)
return dest_files | def get_renames(src_files, filters, ext, raw):
' '
dest_files = []
for src in src_files:
dest = runfilters(src, filters, ext, raw)
dest_files.append(dest)
return dest_files<|docstring|>Rename list of files with a list of functions<|endoftext|> |
353994fbb90ec6ec544627f3ecd2108fc16972c7a0b2787a9d9c2bcb5920a219 | def runfilters(path, filters, extension=None, raw=False):
'Rename file with a list of functions '
(dirpath, bname, ext) = partfile(path, raw)
for runf in filters:
try:
if isinstance(runf, StringSeq.StringSequence):
bname = runf(path, dirpath, bname)
else:
bname = runf(bname)
except re.error as re_err:
sys.exit(('A regex error occurred: ' + str(re_err)))
except OSError as os_err:
sys.exit(('A filesystem error occurred: ' + str(os_err)))
except Exception as exc:
sys.exit(('An unforeseen error occurred: ' + str(exc)))
if (extension is not None):
ext = extension
res = joinparts(dirpath, bname, ext, raw)
return res | Rename file with a list of functions | batchren/renamer.py | runfilters | matvign/batchrenamer | 0 | python | def runfilters(path, filters, extension=None, raw=False):
' '
(dirpath, bname, ext) = partfile(path, raw)
for runf in filters:
try:
if isinstance(runf, StringSeq.StringSequence):
bname = runf(path, dirpath, bname)
else:
bname = runf(bname)
except re.error as re_err:
sys.exit(('A regex error occurred: ' + str(re_err)))
except OSError as os_err:
sys.exit(('A filesystem error occurred: ' + str(os_err)))
except Exception as exc:
sys.exit(('An unforeseen error occurred: ' + str(exc)))
if (extension is not None):
ext = extension
res = joinparts(dirpath, bname, ext, raw)
return res | def runfilters(path, filters, extension=None, raw=False):
' '
(dirpath, bname, ext) = partfile(path, raw)
for runf in filters:
try:
if isinstance(runf, StringSeq.StringSequence):
bname = runf(path, dirpath, bname)
else:
bname = runf(bname)
except re.error as re_err:
sys.exit(('A regex error occurred: ' + str(re_err)))
except OSError as os_err:
sys.exit(('A filesystem error occurred: ' + str(os_err)))
except Exception as exc:
sys.exit(('An unforeseen error occurred: ' + str(exc)))
if (extension is not None):
ext = extension
res = joinparts(dirpath, bname, ext, raw)
return res<|docstring|>Rename file with a list of functions<|endoftext|> |
db1eda78be65ec423f0c13c4f8e7ffc770ef1957bdbff0ec5abedbb93681cd7f | def generate_rentable(src_files, dest_files):
'Generate a table of files that can and cannot be renamed '
if (len(src_files) != len(dest_files)):
raise ValueError('src list and dest list must have the same length')
fileset = set(src_files)
rentable = {'renames': {}, 'conflicts': {}, 'unresolvable': set()}
for (src, dest) in zip(src_files, dest_files):
errset = set()
if (dest in rentable['conflicts']):
rentable['conflicts'][dest]['srcs'].append(src)
rentable['conflicts'][dest]['err'].add(6)
errset = rentable['conflicts'][dest]['err']
cascade(rentable, src)
elif (dest in rentable['renames']):
if (dest == src):
errset.add(0)
errset.add(6)
tmp = rentable['renames'][dest]
del rentable['renames'][dest]
rentable['conflicts'][dest] = {'srcs': [tmp, src], 'err': errset}
for n in rentable['conflicts'][dest]['srcs']:
cascade(rentable, n)
elif (dest in rentable['unresolvable']):
errset.add(6)
rentable['conflicts'][dest] = {'srcs': [src], 'err': errset}
cascade(rentable, src)
else:
(src_dir, _) = os.path.split(src)
(dest_dir, dest_bname) = os.path.split(dest)
if ((dest not in fileset) and os.path.exists(dest)):
errset.add(6)
if (dest == src):
errset.add(0)
if (src_dir != dest_dir):
if (dest and (dest[(- 1)] == '/')):
errset.add(4)
else:
errset.add(5)
if (dest_bname == ''):
errset.add(1)
elif (dest_bname[0] == '.'):
errset.add(2)
if (len(dest_bname) > 255):
errset.add(3)
if errset:
rentable['conflicts'][dest] = {'srcs': [src], 'err': errset}
cascade(rentable, src)
if (not errset):
rentable['renames'][dest] = src
return rentable | Generate a table of files that can and cannot be renamed | batchren/renamer.py | generate_rentable | matvign/batchrenamer | 0 | python | def generate_rentable(src_files, dest_files):
' '
if (len(src_files) != len(dest_files)):
raise ValueError('src list and dest list must have the same length')
fileset = set(src_files)
rentable = {'renames': {}, 'conflicts': {}, 'unresolvable': set()}
for (src, dest) in zip(src_files, dest_files):
errset = set()
if (dest in rentable['conflicts']):
rentable['conflicts'][dest]['srcs'].append(src)
rentable['conflicts'][dest]['err'].add(6)
errset = rentable['conflicts'][dest]['err']
cascade(rentable, src)
elif (dest in rentable['renames']):
if (dest == src):
errset.add(0)
errset.add(6)
tmp = rentable['renames'][dest]
del rentable['renames'][dest]
rentable['conflicts'][dest] = {'srcs': [tmp, src], 'err': errset}
for n in rentable['conflicts'][dest]['srcs']:
cascade(rentable, n)
elif (dest in rentable['unresolvable']):
errset.add(6)
rentable['conflicts'][dest] = {'srcs': [src], 'err': errset}
cascade(rentable, src)
else:
(src_dir, _) = os.path.split(src)
(dest_dir, dest_bname) = os.path.split(dest)
if ((dest not in fileset) and os.path.exists(dest)):
errset.add(6)
if (dest == src):
errset.add(0)
if (src_dir != dest_dir):
if (dest and (dest[(- 1)] == '/')):
errset.add(4)
else:
errset.add(5)
if (dest_bname == ''):
errset.add(1)
elif (dest_bname[0] == '.'):
errset.add(2)
if (len(dest_bname) > 255):
errset.add(3)
if errset:
rentable['conflicts'][dest] = {'srcs': [src], 'err': errset}
cascade(rentable, src)
if (not errset):
rentable['renames'][dest] = src
return rentable | def generate_rentable(src_files, dest_files):
' '
if (len(src_files) != len(dest_files)):
raise ValueError('src list and dest list must have the same length')
fileset = set(src_files)
rentable = {'renames': {}, 'conflicts': {}, 'unresolvable': set()}
for (src, dest) in zip(src_files, dest_files):
errset = set()
if (dest in rentable['conflicts']):
rentable['conflicts'][dest]['srcs'].append(src)
rentable['conflicts'][dest]['err'].add(6)
errset = rentable['conflicts'][dest]['err']
cascade(rentable, src)
elif (dest in rentable['renames']):
if (dest == src):
errset.add(0)
errset.add(6)
tmp = rentable['renames'][dest]
del rentable['renames'][dest]
rentable['conflicts'][dest] = {'srcs': [tmp, src], 'err': errset}
for n in rentable['conflicts'][dest]['srcs']:
cascade(rentable, n)
elif (dest in rentable['unresolvable']):
errset.add(6)
rentable['conflicts'][dest] = {'srcs': [src], 'err': errset}
cascade(rentable, src)
else:
(src_dir, _) = os.path.split(src)
(dest_dir, dest_bname) = os.path.split(dest)
if ((dest not in fileset) and os.path.exists(dest)):
errset.add(6)
if (dest == src):
errset.add(0)
if (src_dir != dest_dir):
if (dest and (dest[(- 1)] == '/')):
errset.add(4)
else:
errset.add(5)
if (dest_bname == ''):
errset.add(1)
elif (dest_bname[0] == '.'):
errset.add(2)
if (len(dest_bname) > 255):
errset.add(3)
if errset:
rentable['conflicts'][dest] = {'srcs': [src], 'err': errset}
cascade(rentable, src)
if (not errset):
rentable['renames'][dest] = src
return rentable<|docstring|>Generate a table of files that can and cannot be renamed<|endoftext|> |
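
The central check in `generate_rentable()` above is that no two sources may map to the same destination. Reduced to its essence:

```python
from collections import Counter

srcs = ['a.txt', 'b.txt', 'c.txt']
dests = ['x.txt', 'x.txt', 'y.txt']

# Any destination requested more than once marks all its sources as conflicts.
dupes = {d for d, n in Counter(dests).items() if n > 1}
conflicts = [(s, d) for s, d in zip(srcs, dests) if d in dupes]
assert conflicts == [('a.txt', 'x.txt'), ('b.txt', 'x.txt')]
```
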
250e29b36cda3d72ac88caac0be7e8e77afcce1eeeaf646769acc468541bd3a8 | def cascade(rentable, target):
'Search through rename table and cascade file errors.\n\n Mark src as unresolvable and cascade anything else\n that wants to rename to src.\n '
ndest = target
while True:
rentable['unresolvable'].add(ndest)
if (ndest in rentable['renames']):
tmp = rentable['renames'][ndest]
del rentable['renames'][ndest]
rentable['conflicts'][ndest] = {'srcs': [tmp], 'err': {6}}
ndest = tmp
continue
return | Search through rename table and cascade file errors.
Mark src as unresolvable and cascade anything else
that wants to rename to src. | batchren/renamer.py | cascade | matvign/batchrenamer | 0 | python | def cascade(rentable, target):
'Search through rename table and cascade file errors.\n\n Mark src as unresolvable and cascade anything else\n that wants to rename to src.\n '
ndest = target
while True:
rentable['unresolvable'].add(ndest)
if (ndest in rentable['renames']):
tmp = rentable['renames'][ndest]
del rentable['renames'][ndest]
rentable['conflicts'][ndest] = {'srcs': [tmp], 'err': {6}}
ndest = tmp
continue
return | def cascade(rentable, target):
'Search through rename table and cascade file errors.\n\n Mark src as unresolvable and cascade anything else\n that wants to rename to src.\n '
ndest = target
while True:
rentable['unresolvable'].add(ndest)
if (ndest in rentable['renames']):
tmp = rentable['renames'][ndest]
del rentable['renames'][ndest]
rentable['conflicts'][ndest] = {'srcs': [tmp], 'err': {6}}
ndest = tmp
continue
return<|docstring|>Search through rename table and cascade file errors.
Mark src as unresolvable and cascade anything else
that wants to rename to src.<|endoftext|> |
dc8cad46b694e9461e514284fea64c930963b193a0f886afe4a42b29b8999c18 | def print_rentable(rentable, quiet=False, verbose=False):
"Print contents of table.\n\n - quiet: don't show errors\n - verbose: show detailed errors\n - verbose and no errors: show message\n - not verbose and no errors: show nothing\n - not verbose and errors: show unrenamable files\n\n Always show output for renames\n "
ren = rentable['renames']
conf = rentable['conflicts']
unres = rentable['unresolvable']
if quiet:
pass
elif verbose:
print('{:-^30}'.format(((helper.BOLD + 'issues/conflicts') + helper.END)))
if unres:
print('the following files have conflicts:')
conflicts = natsorted(conf.items(), (lambda x: x[0].replace('.', '~')), alg=ns.PATH)
for (dest, obj) in conflicts:
srcOut = natsorted(obj['srcs'], alg=ns.PATH)
print(', '.join([repr(str(e)) for e in srcOut]))
print("--> '{}'\nerror(s): ".format(dest), end='')
print(', '.join([issues[e] for e in obj['err']]), '\n')
else:
print('no conflicts found', '\n')
elif unres:
print('{:-^30}'.format(((helper.BOLD + 'issues/conflicts') + helper.END)))
print('the following files will NOT be renamed:')
print(*["'{}'".format(s) for s in natsorted(unres, alg=ns.PATH)], '', sep='\n')
print('{:-^30}'.format(((helper.BOLD + 'rename') + helper.END)))
renames = natsorted(ren.items(), key=(lambda x: x[1]), alg=ns.PATH)
if renames:
print('the following files can be renamed:')
for (dest, src) in renames:
print("'{}' rename to '{}'".format(src, dest))
else:
print('no files to rename')
print()
return [(r[1], r[0]) for r in renames] | Print contents of table.
- quiet: don't show errors
- verbose: show detailed errors
- verbose and no errors: show message
- not verbose and no errors: show nothing
- not verbose and errors: show unrenamable files
Always show output for renames | batchren/renamer.py | print_rentable | matvign/batchrenamer | 0 | python | def print_rentable(rentable, quiet=False, verbose=False):
"Print contents of table.\n\n - quiet: don't show errors\n - verbose: show detailed errors\n - verbose and no errors: show message\n - not verbose and no errors: show nothing\n - not verbose and errors: show unrenamable files\n\n Always show output for renames\n "
ren = rentable['renames']
conf = rentable['conflicts']
unres = rentable['unresolvable']
if quiet:
pass
elif verbose:
print('{:-^30}'.format(((helper.BOLD + 'issues/conflicts') + helper.END)))
if unres:
print('the following files have conflicts:')
conflicts = natsorted(conf.items(), (lambda x: x[0].replace('.', '~')), alg=ns.PATH)
for (dest, obj) in conflicts:
srcOut = natsorted(obj['srcs'], alg=ns.PATH)
print(', '.join([repr(str(e)) for e in srcOut]))
print("--> '{}'\nerror(s): ".format(dest), end=)
print(', '.join([issues[e] for e in obj['err']]), '\n')
else:
print('no conflicts found', '\n')
elif unres:
print('{:-^30}'.format(((helper.BOLD + 'issues/conflicts') + helper.END)))
print('the following files will NOT be renamed:')
print(*["'{}'".format(s) for s in natsorted(unres, alg=ns.PATH)], , sep='\n')
print('{:-^30}'.format(((helper.BOLD + 'rename') + helper.END)))
renames = natsorted(ren.items(), key=(lambda x: x[1]), alg=ns.PATH)
if renames:
print('the following files can be renamed:')
for (dest, src) in renames:
print("'{}' rename to '{}'".format(src, dest))
else:
print('no files to rename')
print()
return [(r[1], r[0]) for r in renames] | def print_rentable(rentable, quiet=False, verbose=False):
"Print contents of table.\n\n - quiet: don't show errors\n - verbose: show detailed errors\n - verbose and no errors: show message\n - not verbose and no errors: show nothing\n - not verbose and errors: show unrenamable files\n\n Always show output for renames\n "
ren = rentable['renames']
conf = rentable['conflicts']
unres = rentable['unresolvable']
if quiet:
pass
elif verbose:
print('{:-^30}'.format(((helper.BOLD + 'issues/conflicts') + helper.END)))
if unres:
print('the following files have conflicts:')
conflicts = natsorted(conf.items(), (lambda x: x[0].replace('.', '~')), alg=ns.PATH)
for (dest, obj) in conflicts:
srcOut = natsorted(obj['srcs'], alg=ns.PATH)
print(', '.join([repr(str(e)) for e in srcOut]))
print("--> '{}'\nerror(s): ".format(dest), end=)
print(', '.join([issues[e] for e in obj['err']]), '\n')
else:
print('no conflicts found', '\n')
elif unres:
print('{:-^30}'.format(((helper.BOLD + 'issues/conflicts') + helper.END)))
print('the following files will NOT be renamed:')
print(*["'{}'".format(s) for s in natsorted(unres, alg=ns.PATH)], , sep='\n')
print('{:-^30}'.format(((helper.BOLD + 'rename') + helper.END)))
renames = natsorted(ren.items(), key=(lambda x: x[1]), alg=ns.PATH)
if renames:
print('the following files can be renamed:')
for (dest, src) in renames:
print("'{}' rename to '{}'".format(src, dest))
else:
print('no files to rename')
print()
return [(r[1], r[0]) for r in renames]<|docstring|>Print contents of table.
- quiet: don't show errors
- verbose: show detailed errors
- verbose and no errors: show message
- not verbose and no errors: show nothing
- not verbose and errors: show unrenamable files
Always show output for renames<|endoftext|> |
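
`print_rentable()` above leans on natsort's path-aware natural ordering (`ns.PATH`), which compares digit runs numerically rather than lexicographically:

```python
from natsort import natsorted, ns

names = ['file10.txt', 'file2.txt', 'file1.txt']
# Natural order: file2 sorts before file10, unlike plain string sorting.
assert natsorted(names, alg=ns.PATH) == ['file1.txt', 'file2.txt', 'file10.txt']
```
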
e61e57ed11805344c1b4b4421a0c04207c8e2ee842113cbf83159d86ce0fd255 | def rename_queue(queue, dryrun=False, verbose=False):
'Rename src to dest from a list of tuples [(src, dest), ...] '
q = deque(queue)
rollback_queue = []
n = name_gen()
next(n)
if dryrun:
print('Running with dryrun, files will NOT be renamed.')
try:
while q:
(src, dest) = q.popleft()
if os.path.exists(dest):
(dirpath, _) = os.path.split(dest)
tmp = n.send(dirpath)
if (verbose or dryrun):
print("Conflict found, temporarily renaming '{}' to '{}'.".format(src, tmp))
if (not dryrun):
rename_file(src, tmp)
rollback_queue.append((tmp, src))
q.append((tmp, dest))
else:
if (verbose or dryrun):
print("rename '{}' to '{}'.".format(src, dest))
if (not dryrun):
rename_file(src, dest)
rollback_queue.append((dest, src))
except Exception:
if dryrun:
sys.exit('An error occurred but no files were renamed as the dryrun option is enabled.')
elif (not rollback_queue):
sys.exit('No files were renamed due to an error.')
else:
rollback(rollback_queue)
print('Finished renaming...') | Rename src to dest from a list of tuples [(src, dest), ...] | batchren/renamer.py | rename_queue | matvign/batchrenamer | 0 | python | def rename_queue(queue, dryrun=False, verbose=False):
' '
q = deque(queue)
rollback_queue = []
n = name_gen()
next(n)
if dryrun:
print('Running with dryrun, files will NOT be renamed.')
try:
while q:
(src, dest) = q.popleft()
if os.path.exists(dest):
(dirpath, _) = os.path.split(dest)
tmp = n.send(dirpath)
if (verbose or dryrun):
print("Conflict found, temporarily renaming '{}' to '{}'.".format(src, tmp))
if (not dryrun):
rename_file(src, tmp)
rollback_queue.append((tmp, src))
q.append((tmp, dest))
else:
if (verbose or dryrun):
print("rename '{}' to '{}'.".format(src, dest))
if (not dryrun):
rename_file(src, dest)
rollback_queue.append((dest, src))
except Exception:
if dryrun:
sys.exit('An error occurred but no files were renamed as the dryrun option is enabled.')
elif (not rollback_queue):
sys.exit('No files were renamed due to an error.')
else:
rollback(rollback_queue)
print('Finished renaming...') | def rename_queue(queue, dryrun=False, verbose=False):
' '
q = deque(queue)
rollback_queue = []
n = name_gen()
next(n)
if dryrun:
print('Running with dryrun, files will NOT be renamed.')
try:
while q:
(src, dest) = q.popleft()
if os.path.exists(dest):
(dirpath, _) = os.path.split(dest)
tmp = n.send(dirpath)
if (verbose or dryrun):
print("Conflict found, temporarily renaming '{}' to '{}'.".format(src, tmp))
if (not dryrun):
rename_file(src, tmp)
rollback_queue.append((tmp, src))
q.append((tmp, dest))
else:
if (verbose or dryrun):
print("rename '{}' to '{}'.".format(src, dest))
if (not dryrun):
rename_file(src, dest)
rollback_queue.append((dest, src))
except Exception:
if dryrun:
sys.exit('An error occurred but no files were renamed as the dryrun option is enabled.')
elif (not rollback_queue):
sys.exit('No files were renamed due to an error.')
else:
rollback(rollback_queue)
print('Finished renaming...')<|docstring|>Rename src to dest from a list of tuples [(src, dest), ...]<|endoftext|> |
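
`rename_queue()` above parks conflicting sources under temporary names so that rename cycles (e.g. swapping two files) terminate. The same scheme, simulated on a dict instead of the filesystem:

```python
from collections import deque

fs = {'a': 1, 'b': 2}                     # pretend filesystem: name -> content
queue = deque([('a', 'b'), ('b', 'a')])   # swap a and b
tmp_id = 0
while queue:
    src, dest = queue.popleft()
    if dest in fs:                        # conflict: park src under a temp name
        tmp_id += 1
        tmp = 'tmp%d' % tmp_id
        fs[tmp] = fs.pop(src)
        queue.append((tmp, dest))         # retry the move later
    else:
        fs[dest] = fs.pop(src)
assert fs == {'a': 2, 'b': 1}             # contents swapped, no data lost
```
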
4e9822a6e16ae8e95d07c917a7cf793829770ce60ee69f4afd8a230e230027b0 | def replacer(matchobj):
'Function to be used with re.sub\n\n Replace string match with repl when the running counter equals count\n\n Otherwise return the string match\n\n '
if (matchobj.group() and (replacer._count == count)):
res = repl
else:
res = matchobj.group()
replacer._count += 1
return res | Function to be used with re.sub
Replace string match with repl when the running counter equals count
Otherwise return the string match | batchren/renamer.py | replacer | matvign/batchrenamer | 0 | python | def replacer(matchobj):
'Function to be used with re.sub\n\n Replace string match with repl if count = count\n\n Otherwise return the string match\n\n '
if (matchobj.group() and (replacer._count == count)):
res = repl
else:
res = matchobj.group()
replacer._count += 1
return res | def replacer(matchobj):
'Function to be used with re.sub\n\n Replace string match with repl if count = count\n\n Otherwise return the string match\n\n '
if (matchobj.group() and (replacer._count == count)):
res = repl
else:
res = matchobj.group()
replacer._count += 1
return res<|docstring|>Function to be used with re.sub
Replace string match with repl when the running counter equals count
Otherwise return the string match<|endoftext|> |
eb4a422881349dc2ea5b87ec34bdb57815e0be9db2d1f4a570c342600a0e06ea | def __init__(self, space=None, pass_attr=None, postproc=None, **kwargs):
'\n Parameters\n ----------\n space : str, optional\n Name of the \'processing space\'. The actual meaning of this argument\n heavily depends on the sub-class implementation. In general, this is\n a trigger that tells the node to compute and store information about\n the input data that is "interesting" in the context of the\n corresponding processing in the output dataset.\n pass_attr : str, list of str|tuple, optional\n Additional attributes to pass on to an output dataset. Attributes can\n be taken from all three attribute collections of an input dataset\n (sa, fa, a -- see :meth:`Dataset.get_attr`), or from the collection\n of conditional attributes (ca) of a node instance. Corresponding\n collection name prefixes should be used to identify attributes, e.g.\n \'ca.null_prob\' for the conditional attribute \'null_prob\', or\n \'fa.stats\' for the feature attribute stats. In addition to a plain\n attribute identifier it is possible to use a tuple to trigger more\n complex operations. The first tuple element is the attribute\n identifier, as described before. The second element is the name of the\n target attribute collection (sa, fa, or a). The third element is the\n axis number of a multidimensional array that shall be swapped with the\n current first axis. The fourth element is a new name that shall be\n used for an attribute in the output dataset.\n Example: (\'ca.null_prob\', \'fa\', 1, \'pvalues\') will take the\n conditional attribute \'null_prob\' and store it as a feature attribute\n \'pvalues\', while swapping the first and second axes. Simplified\n instructions can be given by leaving out consecutive tuple elements\n starting from the end.\n postproc : Node instance, optional\n Node to perform post-processing of results. This node is applied\n in `__call__()` to perform a final processing step on the result\n dataset before it is returned. If None, nothing is done.\n '
ClassWithCollections.__init__(self, **kwargs)
if __debug__:
debug('NO', "Init node '%s' (space: '%s', postproc: '%s')", (self.__class__.__name__, space, str(postproc)))
self.set_space(space)
self.set_postproc(postproc)
if isinstance(pass_attr, basestring):
pass_attr = (pass_attr,)
self.__pass_attr = pass_attr | Parameters
----------
space : str, optional
Name of the 'processing space'. The actual meaning of this argument
heavily depends on the sub-class implementation. In general, this is
a trigger that tells the node to compute and store information about
the input data that is "interesting" in the context of the
corresponding processing in the output dataset.
pass_attr : str, list of str|tuple, optional
Additional attributes to pass on to an output dataset. Attributes can
be taken from all three attribute collections of an input dataset
(sa, fa, a -- see :meth:`Dataset.get_attr`), or from the collection
of conditional attributes (ca) of a node instance. Corresponding
collection name prefixes should be used to identify attributes, e.g.
'ca.null_prob' for the conditional attribute 'null_prob', or
'fa.stats' for the feature attribute stats. In addition to a plain
attribute identifier it is possible to use a tuple to trigger more
complex operations. The first tuple element is the attribute
identifier, as described before. The second element is the name of the
target attribute collection (sa, fa, or a). The third element is the
axis number of a multidimensional array that shall be swapped with the
current first axis. The fourth element is a new name that shall be
used for an attribute in the output dataset.
Example: ('ca.null_prob', 'fa', 1, 'pvalues') will take the
conditional attribute 'null_prob' and store it as a feature attribute
'pvalues', while swapping the first and second axes. Simplified
instructions can be given by leaving out consecutive tuple elements
starting from the end.
postproc : Node instance, optional
Node to perform post-processing of results. This node is applied
in `__call__()` to perform a final processing step on the result
dataset before it is returned. If None, nothing is done. | mvpa2/base/node.py | __init__ | mikiec84/PyMVPA | 227 | python | def __init__(self, space=None, pass_attr=None, postproc=None, **kwargs):
'\n Parameters\n ----------\n space : str, optional\n Name of the \'processing space\'. The actual meaning of this argument\n heavily depends on the sub-class implementation. In general, this is\n a trigger that tells the node to compute and store information about\n the input data that is "interesting" in the context of the\n corresponding processing in the output dataset.\n pass_attr : str, list of str|tuple, optional\n Additional attributes to pass on to an output dataset. Attributes can\n be taken from all three attribute collections of an input dataset\n (sa, fa, a -- see :meth:`Dataset.get_attr`), or from the collection\n of conditional attributes (ca) of a node instance. Corresponding\n collection name prefixes should be used to identify attributes, e.g.\n \'ca.null_prob\' for the conditional attribute \'null_prob\', or\n \'fa.stats\' for the feature attribute stats. In addition to a plain\n attribute identifier it is possible to use a tuple to trigger more\n complex operations. The first tuple element is the attribute\n identifier, as described before. The second element is the name of the\n target attribute collection (sa, fa, or a). The third element is the\n axis number of a multidimensional array that shall be swapped with the\n current first axis. The fourth element is a new name that shall be\n used for an attribute in the output dataset.\n Example: (\'ca.null_prob\', \'fa\', 1, \'pvalues\') will take the\n conditional attribute \'null_prob\' and store it as a feature attribute\n \'pvalues\', while swapping the first and second axes. Simplified\n instructions can be given by leaving out consecutive tuple elements\n starting from the end.\n postproc : Node instance, optional\n Node to perform post-processing of results. This node is applied\n in `__call__()` to perform a final processing step on the result\n dataset before it is returned. If None, nothing is done.\n '
ClassWithCollections.__init__(self, **kwargs)
if __debug__:
debug('NO', "Init node '%s' (space: '%s', postproc: '%s')", (self.__class__.__name__, space, str(postproc)))
self.set_space(space)
self.set_postproc(postproc)
if isinstance(pass_attr, basestring):
pass_attr = (pass_attr,)
self.__pass_attr = pass_attr | def __init__(self, space=None, pass_attr=None, postproc=None, **kwargs):
'\n Parameters\n ----------\n space : str, optional\n Name of the \'processing space\'. The actual meaning of this argument\n heavily depends on the sub-class implementation. In general, this is\n a trigger that tells the node to compute and store information about\n the input data that is "interesting" in the context of the\n corresponding processing in the output dataset.\n pass_attr : str, list of str|tuple, optional\n Additional attributes to pass on to an output dataset. Attributes can\n be taken from all three attribute collections of an input dataset\n (sa, fa, a -- see :meth:`Dataset.get_attr`), or from the collection\n of conditional attributes (ca) of a node instance. Corresponding\n collection name prefixes should be used to identify attributes, e.g.\n \'ca.null_prob\' for the conditional attribute \'null_prob\', or\n \'fa.stats\' for the feature attribute stats. In addition to a plain\n attribute identifier it is possible to use a tuple to trigger more\n complex operations. The first tuple element is the attribute\n identifier, as described before. The second element is the name of the\n target attribute collection (sa, fa, or a). The third element is the\n axis number of a multidimensional array that shall be swapped with the\n current first axis. The fourth element is a new name that shall be\n used for an attribute in the output dataset.\n Example: (\'ca.null_prob\', \'fa\', 1, \'pvalues\') will take the\n conditional attribute \'null_prob\' and store it as a feature attribute\n \'pvalues\', while swapping the first and second axes. Simplified\n instructions can be given by leaving out consecutive tuple elements\n starting from the end.\n postproc : Node instance, optional\n Node to perform post-processing of results. This node is applied\n in `__call__()` to perform a final processing step on the result\n dataset before it is returned. If None, nothing is done.\n '
ClassWithCollections.__init__(self, **kwargs)
if __debug__:
debug('NO', "Init node '%s' (space: '%s', postproc: '%s')", (self.__class__.__name__, space, str(postproc)))
self.set_space(space)
self.set_postproc(postproc)
if isinstance(pass_attr, basestring):
pass_attr = (pass_attr,)
self.__pass_attr = pass_attr<|docstring|>Parameters
----------
space : str, optional
Name of the 'processing space'. The actual meaning of this argument
heavily depends on the sub-class implementation. In general, this is
a trigger that tells the node to compute and store information about
the input data that is "interesting" in the context of the
corresponding processing in the output dataset.
pass_attr : str, list of str|tuple, optional
Additional attributes to pass on to an output dataset. Attributes can
be taken from all three attribute collections of an input dataset
(sa, fa, a -- see :meth:`Dataset.get_attr`), or from the collection
of conditional attributes (ca) of a node instance. Corresponding
collection name prefixes should be used to identify attributes, e.g.
'ca.null_prob' for the conditional attribute 'null_prob', or
'fa.stats' for the feature attribute stats. In addition to a plain
attribute identifier it is possible to use a tuple to trigger more
complex operations. The first tuple element is the attribute
identifier, as described before. The second element is the name of the
target attribute collection (sa, fa, or a). The third element is the
axis number of a multidimensional array that shall be swapped with the
current first axis. The fourth element is a new name that shall be
used for an attribute in the output dataset.
Example: ('ca.null_prob', 'fa', 1, 'pvalues') will take the
conditional attribute 'null_prob' and store it as a feature attribute
'pvalues', while swapping the first and second axes. Simplified
instructions can be given by leaving out consecutive tuple elements
starting from the end.
postproc : Node instance, optional
Node to perform post-processing of results. This node is applied
in `__call__()` to perform a final processing step on the result
dataset before it is returned. If None, nothing is done.<|endoftext|>
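The `pass_attr` tuple format documented above is easiest to read as a normalization step: every entry expands to (identifier, target collection, swap axis, new name), with omitted trailing elements filled from defaults. A sketch of that expansion; the function name is illustrative, not PyMVPA API:

def normalize_pass_attr(entry):
    # Expand a pass_attr entry to (identifier, collection, swap_axis, new_name).
    if isinstance(entry, str):
        entry = (entry,)
    identifier = entry[0]
    collection = entry[1] if len(entry) > 1 else None  # None: infer from source
    swap_axis = entry[2] if len(entry) > 2 else 0      # 0: no axis swap
    new_name = entry[3] if len(entry) > 3 else None    # None: keep original name
    return identifier, collection, swap_axis, new_name

# normalize_pass_attr('ca.null_prob') -> ('ca.null_prob', None, 0, None)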
ae23c794ad2b20d738390efc6f2013284587ee29ea4620a9662a452077d7ebd3 | def _get_call_kwargs(self, ds):
'Helper to provide _call kwargs, to be overridden in sub-classes\n\n To be used if the same state variables should be set/used by\n .generate or direct __call__\n '
return {} | Helper to provide _call kwargs, to be overridden in sub-classes
To be used if the same state variables should be set/used by
.generate or direct __call__ | mvpa2/base/node.py | _get_call_kwargs | mikiec84/PyMVPA | 227 | python | def _get_call_kwargs(self, ds):
'Helper to provide _call kwargs, to be overridden in sub-classes\n\n To be used if the same state variables should be set/used by\n .generate or direct __call__\n '
return {} | def _get_call_kwargs(self, ds):
'Helper to provide _call kwargs, to be overridden in sub-classes\n\n To be used if the same state variables should be set/used by\n .generate or direct __call__\n '
return {}<|docstring|>Helper to provide _call kwargs, to be overridden in sub-classes
To be used if the same state variables should be set/used by
.generate or direct __call__<|endoftext|> |
75286a6cbf3bcb395a70be946b982a4a962a777f03d86a08b68134d1ffd4727d | def __call__(self, ds, _call_kwargs={}):
'\n The default implementation calls ``_precall()``, ``_call()``, and\n finally returns the output of ``_postcall()``.\n\n Parameters\n ----------\n ds: Dataset\n Input dataset.\n _call_kwargs: dict, optional\n Used internally to pass "state" keyword arguments into _call,\n primarily used internally (e.g. by the `generate` method). It is up\n to a subclass to implement/use it where necessary. The `_get_call_kwargs()`\n method will be used to provide the set of kwargs to be set/used by\n `generate` or direct `__call__` calls.\n\n Returns\n -------\n Dataset\n '
t0 = time.time()
self._precall(ds)
result = self._call(ds, **(_call_kwargs or self._get_call_kwargs(ds)))
result = self._postcall(ds, result)
self.ca.calling_time = (time.time() - t0)
return result | The default implementation calls ``_precall()``, ``_call()``, and
finally returns the output of ``_postcall()``.
Parameters
----------
ds: Dataset
Input dataset.
_call_kwargs: dict, optional
Used internally to pass "state" keyword arguments into _call,
primarily used internally (e.g. by the `generate` method). It is up
to a subclass to implement/use it where necessary. The `_get_call_kwargs()`
method will be used to provide the set of kwargs to be set/used by
`generate` or direct `__call__` calls.
Returns
-------
Dataset | mvpa2/base/node.py | __call__ | mikiec84/PyMVPA | 227 | python | def __call__(self, ds, _call_kwargs={}):
'\n The default implementation calls ``_precall()``, ``_call()``, and\n finally returns the output of ``_postcall()``.\n\n Parameters\n ----------\n ds: Dataset\n Input dataset.\n _call_kwargs: dict, optional\n Used internally to pass "state" keyword arguments into _call,\n primarily used internally (e.g. by the `generate` method). It is up\n to a subclass to implement/use it where necessary. The `_get_call_kwargs()`\n method will be used to provide the set of kwargs to be set/used by\n `generate` or direct `__call__` calls.\n\n Returns\n -------\n Dataset\n '
t0 = time.time()
self._precall(ds)
result = self._call(ds, **(_call_kwargs or self._get_call_kwargs(ds)))
result = self._postcall(ds, result)
self.ca.calling_time = (time.time() - t0)
return result | def __call__(self, ds, _call_kwargs={}):
'\n The default implementation calls ``_precall()``, ``_call()``, and\n finally returns the output of ``_postcall()``.\n\n Parameters\n ----------\n ds: Dataset\n Input dataset.\n _call_kwargs: dict, optional\n Used internally to pass "state" keyword arguments into _call,\n primarily used internally (e.g. by the `generate` method). It is up\n to a subclass to implement/use it where necessary. The `_get_call_kwargs()`\n method will be used to provide the set of kwargs to be set/used by\n `generate` or direct `__call__` calls.\n\n Returns\n -------\n Dataset\n '
t0 = time.time()
self._precall(ds)
result = self._call(ds, **(_call_kwargs or self._get_call_kwargs(ds)))
result = self._postcall(ds, result)
self.ca.calling_time = (time.time() - t0)
return result<|docstring|>The default implementation calls ``_precall()``, ``_call()``, and
finally returns the output of ``_postcall()``.
Parameters
----------
ds: Dataset
Input dataset.
_call_kwargs: dict, optional
Used internally to pass "state" keyword arguments into _call,
primarily used internally (e.g. by the `generate` method). It is up
to a subclass to implement/use it where necessary. The `_get_call_kwargs()`
method will be used to provide the set of kwargs to be set/used by
`generate` or direct `__call__` calls.
Returns
-------
Dataset<|endoftext|> |
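`__call__` is a template method: fixed pre- and post-processing hooks wrap an overridable `_call` core, and the wall-clock time is recorded afterwards. Stripped of the PyMVPA machinery, the skeleton looks roughly like this (a sketch, not the library's API):

import time

class MiniNode:
    # Template-method skeleton mirroring Node.__call__ above.
    def __call__(self, ds):
        t0 = time.time()
        self._precall(ds)                    # hook: prepare/validate input
        result = self._call(ds)              # core: overridden by subclasses
        result = self._postcall(ds, result)  # hook: attribute passing, postproc
        self.calling_time = time.time() - t0
        return result

    def _precall(self, ds):
        return ds

    def _call(self, ds):
        raise NotImplementedError

    def _postcall(self, ds, result):
        return result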
8ecaf24ab4df985ec23087be275eb30d2102cb3db19f6be2c9d8c252b6fedd1c | def _precall(self, ds):
'Preprocessing of data\n\n By default, does nothing.\n\n Parameters\n ----------\n ds: Dataset\n Original input dataset.\n\n Returns\n -------\n Dataset\n '
return ds | Preprocessing of data
By default, does nothing.
Parameters
----------
ds: Dataset
Original input dataset.
Returns
-------
Dataset | mvpa2/base/node.py | _precall | mikiec84/PyMVPA | 227 | python | def _precall(self, ds):
'Preprocessing of data\n\n By default, does nothing.\n\n Parameters\n ----------\n ds: Dataset\n Original input dataset.\n\n Returns\n -------\n Dataset\n '
return ds | def _precall(self, ds):
'Preprocessing of data\n\n By default, does nothing.\n\n Parameters\n ----------\n ds: Dataset\n Original input dataset.\n\n Returns\n -------\n Dataset\n '
return ds<|docstring|>Preprocessing of data
By default, does nothing.
Parameters
----------
ds: Dataset
Original input dataset.
Returns
-------
Dataset<|endoftext|> |
1bf0dde1445b54198d786f6d13cb1b702b7373dd51e9d2ed4178b0185ffc8359 | def _postcall(self, ds, result):
'Postprocessing of results.\n\n By default, passes on configured attributes and applies any\n post-processing node.\n\n Parameters\n ----------\n ds: Dataset\n Original input dataset.\n result: Dataset\n Preliminary result dataset (as produced by ``_call()``).\n\n Returns\n -------\n Dataset\n '
result = self._pass_attr(ds, result)
result = self._apply_postproc(ds, result)
return result | Postprocessing of results.
By default, passes on configured attributes and applies any post-processing node.
Parameters
----------
ds: Dataset
Original input dataset.
result: Dataset
Preliminary result dataset (as produced by ``_call()``).
Returns
-------
Dataset | mvpa2/base/node.py | _postcall | mikiec84/PyMVPA | 227 | python | def _postcall(self, ds, result):
'Postprocessing of results.\n\n By default, passes on configured attributes and applies any\n post-processing node.\n\n Parameters\n ----------\n ds: Dataset\n Original input dataset.\n result: Dataset\n Preliminary result dataset (as produced by ``_call()``).\n\n Returns\n -------\n Dataset\n '
result = self._pass_attr(ds, result)
result = self._apply_postproc(ds, result)
return result | def _postcall(self, ds, result):
'Postprocessing of results.\n\n By default, passes on configured attributes and applies any\n post-processing node.\n\n Parameters\n ----------\n ds: Dataset\n Original input dataset.\n result: Dataset\n Preliminary result dataset (as produced by ``_call()``).\n\n Returns\n -------\n Dataset\n '
result = self._pass_attr(ds, result)
result = self._apply_postproc(ds, result)
return result<|docstring|>Postprocessing of results.
By default, passes on configured attributes and applies any post-processing node.
Parameters
----------
ds: Dataset
Original input dataset.
result: Dataset
Preliminary result dataset (as produced by ``_call()``).
Returns
-------
Dataset<|endoftext|> |
3a2188c5e4004c7d4f9f69cb2a1d7ca78bcef5e81b696a4837df0f854c097b61 | def _pass_attr(self, ds, result):
'Pass a configured set of attributes on to the output dataset'
pass_attr = self.__pass_attr
if (pass_attr is not None):
ca = self.ca
ca_keys = self.ca.keys()
for a in pass_attr:
maxis = 0
rcol = None
attr_newname = None
if isinstance(a, tuple):
if (len(a) > 1):
colswitch = {'sa': result.sa, 'fa': result.fa, 'a': result.a}
rcol = colswitch[a[1]]
if (len(a) > 2):
maxis = a[2]
if (len(a) > 3):
attr_newname = a[3]
a = a[0]
if a.startswith('ca.'):
a = a[3:]
if (a in ca_keys):
if (rcol is None):
rcol = result.sa
attr = ca[a]
else:
(attr, col) = ds.get_attr(a)
if (rcol is None):
col_class = col.__class__
if (col_class is SampleAttributesCollection):
rcol = result.sa
elif (col_class is FeatureAttributesCollection):
rcol = result.fa
elif (col_class is DatasetAttributesCollection):
rcol = result.a
else:
raise ValueError(('Cannot determine origin of %s collection' % col))
if (attr_newname is None):
attr_newname = attr.name
if (maxis == 0):
value = attr.value
else:
value = np.swapaxes(attr.value, 0, maxis)
rcol[attr_newname] = value
return result | Pass a configured set of attributes on to the output dataset | mvpa2/base/node.py | _pass_attr | mikiec84/PyMVPA | 227 | python | def _pass_attr(self, ds, result):
pass_attr = self.__pass_attr
if (pass_attr is not None):
ca = self.ca
ca_keys = self.ca.keys()
for a in pass_attr:
maxis = 0
rcol = None
attr_newname = None
if isinstance(a, tuple):
if (len(a) > 1):
colswitch = {'sa': result.sa, 'fa': result.fa, 'a': result.a}
rcol = colswitch[a[1]]
if (len(a) > 2):
maxis = a[2]
if (len(a) > 3):
attr_newname = a[3]
a = a[0]
if a.startswith('ca.'):
a = a[3:]
if (a in ca_keys):
if (rcol is None):
rcol = result.sa
attr = ca[a]
else:
(attr, col) = ds.get_attr(a)
if (rcol is None):
col_class = col.__class__
if (col_class is SampleAttributesCollection):
rcol = result.sa
elif (col_class is FeatureAttributesCollection):
rcol = result.fa
elif (col_class is DatasetAttributesCollection):
rcol = result.a
else:
raise ValueError(('Cannot determine origin of %s collection' % col))
if (attr_newname is None):
attr_newname = attr.name
if (maxis == 0):
value = attr.value
else:
value = np.swapaxes(attr.value, 0, maxis)
rcol[attr_newname] = value
return result | def _pass_attr(self, ds, result):
pass_attr = self.__pass_attr
if (pass_attr is not None):
ca = self.ca
ca_keys = self.ca.keys()
for a in pass_attr:
maxis = 0
rcol = None
attr_newname = None
if isinstance(a, tuple):
if (len(a) > 1):
colswitch = {'sa': result.sa, 'fa': result.fa, 'a': result.a}
rcol = colswitch[a[1]]
if (len(a) > 2):
maxis = a[2]
if (len(a) > 3):
attr_newname = a[3]
a = a[0]
if a.startswith('ca.'):
a = a[3:]
if (a in ca_keys):
if (rcol is None):
rcol = result.sa
attr = ca[a]
else:
(attr, col) = ds.get_attr(a)
if (rcol is None):
col_class = col.__class__
if (col_class is SampleAttributesCollection):
rcol = result.sa
elif (col_class is FeatureAttributesCollection):
rcol = result.fa
elif (col_class is DatasetAttributesCollection):
rcol = result.a
else:
raise ValueError(('Cannot determine origin of %s collection' % col))
if (attr_newname is None):
attr_newname = attr.name
if (maxis == 0):
value = attr.value
else:
value = np.swapaxes(attr.value, 0, maxis)
rcol[attr_newname] = value
return result<|docstring|>Pass a configured set of attributes on to the output dataset<|endoftext|> |
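The axis handling in `_pass_attr` boils down to `np.swapaxes(value, 0, maxis)`: for the ('ca.null_prob', 'fa', 1, 'pvalues') example, an array of shape (n_samples, n_features) becomes (n_features, n_samples) so it can be stored per feature. A quick check of just that step (array shapes here are made up):

import numpy as np

null_prob = np.zeros((10, 4))           # e.g. (n_samples, n_features)
pvalues = np.swapaxes(null_prob, 0, 1)  # maxis=1 swaps the first two axes
print(pvalues.shape)                    # (4, 10): one entry per feature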
7f86d55c1bdc621ad3cb340ef46da91671fc035a66d440639b33f9a620516bc3 | def _apply_postproc(self, ds, result):
'Apply any post-processing to an output dataset'
if (self.__postproc is not None):
if __debug__:
debug('NO', 'Applying post-processing node %s', (self.__postproc,))
self.ca.raw_results = result
result = self.__postproc(result)
return result | Apply any post-processing to an output dataset | mvpa2/base/node.py | _apply_postproc | mikiec84/PyMVPA | 227 | python | def _apply_postproc(self, ds, result):
if (self.__postproc is not None):
if __debug__:
debug('NO', 'Applying post-processing node %s', (self.__postproc,))
self.ca.raw_results = result
result = self.__postproc(result)
return result | def _apply_postproc(self, ds, result):
if (self.__postproc is not None):
if __debug__:
debug('NO', 'Applying post-processing node %s', (self.__postproc,))
self.ca.raw_results = result
result = self.__postproc(result)
return result<|docstring|>Apply any post-processing to an output dataset<|endoftext|> |
baf7bcb33ab515c62a11d4810180739deea04d9ebd349242a4f457492b6e182a | def generate(self, ds):
'Yield processing results.\n\n This method causes the node to behave like a generator. By default it\n simply yields a single result of its processing -- identical to the\n output of calling the node with a dataset. Subclasses might implement\n generators that yield multiple results.\n\n Parameters\n ----------\n ds: Dataset\n Input dataset\n\n Returns\n -------\n generator\n the generator yields the result of the processing.\n '
(yield self(ds)) | Yield processing results.
This method causes the node to behave like a generator. By default it
simply yields a single result of its processing -- identical to the
output of calling the node with a dataset. Subclasses might implement
generators that yield multiple results.
Parameters
----------
ds: Dataset
Input dataset
Returns
-------
generator
the generator yields the result of the processing. | mvpa2/base/node.py | generate | mikiec84/PyMVPA | 227 | python | def generate(self, ds):
'Yield processing results.\n\n This method causes the node to behave like a generator. By default it\n simply yields a single result of its processing -- identical to the\n output of calling the node with a dataset. Subclasses might implement\n generators that yield multiple results.\n\n Parameters\n ----------\n ds: Dataset\n Input dataset\n\n Returns\n -------\n generator\n the generator yields the result of the processing.\n '
(yield self(ds)) | def generate(self, ds):
'Yield processing results.\n\n This method causes the node to behave like a generator. By default it\n simply yields a single result of its processing -- identical to the\n output of calling the node with a dataset. Subclasses might implement\n generators that yield multiple results.\n\n Parameters\n ----------\n ds: Dataset\n Input dataset\n\n Returns\n -------\n generator\n the generator yields the result of the processing.\n '
(yield self(ds))<|docstring|>Yield processing results.
This method causes the node to behave like a generator. By default it
simply yields a single result of its processing -- identical to the
output of calling the node with a dataset. Subclasses might implement
generators that yield multiple results.
Parameters
----------
ds: Dataset
Input dataset
Returns
-------
generator
the generator yields the result of the processing.<|endoftext|> |
9bbea8d64f1186f5cef308e53eb28134528a61dbbb2b0967ab06a9d7c3b69f2e | def get_space(self):
'Query the processing space name of this node.'
return self.__space | Query the processing space name of this node. | mvpa2/base/node.py | get_space | mikiec84/PyMVPA | 227 | python | def get_space(self):
return self.__space | def get_space(self):
return self.__space<|docstring|>Query the processing space name of this node.<|endoftext|> |
59799c1bd7c051282e286b3a3b4ce3a9a2c51db5341e53e5b6ba7237ce4dd0e1 | def set_space(self, name):
'Set the processing space name of this node.'
self.__space = name | Set the processing space name of this node. | mvpa2/base/node.py | set_space | mikiec84/PyMVPA | 227 | python | def set_space(self, name):
self.__space = name | def set_space(self, name):
self.__space = name<|docstring|>Set the processing space name of this node.<|endoftext|> |
cadff6639b653192f37ffe4e711de802ddc4c3c3615363763e10723e3e02c4de | def get_postproc(self):
'Returns the post-processing node or None.'
return self.__postproc | Returns the post-processing node or None. | mvpa2/base/node.py | get_postproc | mikiec84/PyMVPA | 227 | python | def get_postproc(self):
return self.__postproc | def get_postproc(self):
return self.__postproc<|docstring|>Returns the post-processing node or None.<|endoftext|> |
9aea72fcb71aea6776c20c0ded45222f231b9820a12be7dad36486fd95519402 | def set_postproc(self, node):
'Assigns a post-processing node\n\n Set to `None` to disable postprocessing.\n '
self.__postproc = node | Assigns a post-processing node
Set to `None` to disable postprocessing. | mvpa2/base/node.py | set_postproc | mikiec84/PyMVPA | 227 | python | def set_postproc(self, node):
'Assigns a post-processing node\n\n Set to `None` to disable postprocessing.\n '
self.__postproc = node | def set_postproc(self, node):
'Assigns a post-processing node\n\n Set to `None` to disable postprocessing.\n '
self.__postproc = node<|docstring|>Assigns a post-processing node
Set to `None` to disable postprocessing.<|endoftext|> |
17c281bf6f5e84998c361f101b6f537f64e418e3eaaf95863327759556894a75 | def __init__(self, nodes, **kwargs):
'\n Parameters\n ----------\n nodes: list\n Node instances.\n '
if (not len(nodes)):
raise ValueError(('%s needs at least one embedded node.' % self.__class__.__name__))
self._nodes = nodes
Node.__init__(self, **kwargs) | Parameters
----------
nodes: list
Node instances. | mvpa2/base/node.py | __init__ | mikiec84/PyMVPA | 227 | python | def __init__(self, nodes, **kwargs):
'\n Parameters\n ----------\n nodes: list\n Node instances.\n '
if (not len(nodes)):
raise ValueError(('%s needs at least one embedded node.' % self.__class__.__name__))
self._nodes = nodes
Node.__init__(self, **kwargs) | def __init__(self, nodes, **kwargs):
'\n Parameters\n ----------\n nodes: list\n Node instances.\n '
if (not len(nodes)):
raise ValueError(('%s needs at least one embedded node.' % self.__class__.__name__))
self._nodes = nodes
Node.__init__(self, **kwargs)<|docstring|>Parameters
----------
nodes: list
Node instances.<|endoftext|> |
530d7ab23db6d59a8f1c3b05a885a0471dcd365f654c818941a806f6ccac0a9c | def generate(self, ds, startnode=0):
'\n Parameters\n ----------\n ds: Dataset\n Dataset to be processed\n startnode: int\n First node in the chain that shall be considered. This argument is\n mostly useful for internal optimization.\n '
first_node = self[startnode]
if __debug__:
debug('MAP', "%s: input (%s) -> generator (%i/%i): '%s'", (self.__class__.__name__, ds.shape, (startnode + 1), len(self), first_node))
for gds in first_node.generate(ds):
if (startnode == (len(self) - 1)):
(yield gds)
else:
for rgds in self.generate(gds, startnode=(startnode + 1)):
(yield rgds) | Parameters
----------
ds: Dataset
Dataset to be processed
startnode: int
First node in the chain that shall be considered. This argument is
mostly useful for internal optimization. | mvpa2/base/node.py | generate | mikiec84/PyMVPA | 227 | python | def generate(self, ds, startnode=0):
'\n Parameters\n ----------\n ds: Dataset\n Dataset to be processed\n startnode: int\n First node in the chain that shall be considered. This argument is\n mostly useful for internal optimization.\n '
first_node = self[startnode]
if __debug__:
debug('MAP', "%s: input (%s) -> generator (%i/%i): '%s'", (self.__class__.__name__, ds.shape, (startnode + 1), len(self), first_node))
for gds in first_node.generate(ds):
if (startnode == (len(self) - 1)):
(yield gds)
else:
for rgds in self.generate(gds, startnode=(startnode + 1)):
(yield rgds) | def generate(self, ds, startnode=0):
'\n Parameters\n ----------\n ds: Dataset\n Dataset to be processed\n startnode: int\n First node in the chain that shall be considered. This argument is\n mostly useful for internal optimization.\n '
first_node = self[startnode]
if __debug__:
debug('MAP', "%s: input (%s) -> generator (%i/%i): '%s'", (self.__class__.__name__, ds.shape, (startnode + 1), len(self), first_node))
for gds in first_node.generate(ds):
if (startnode == (len(self) - 1)):
(yield gds)
else:
for rgds in self.generate(gds, startnode=(startnode + 1)):
(yield rgds)<|docstring|>Parameters
----------
ds: Dataset
Dataset to be processed
startnode: int
First node in the chain that shall be considered. This argument is
mostly useful for internal optimization.<|endoftext|> |
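The recursion means every output of node i is fed through nodes i+1..n, so the chain yields the full cartesian product of the per-node generator outputs. The same control flow in miniature, assuming each node exposes a generate(ds) iterable:

def chain_generate(nodes, ds, startnode=0):
    # Depth-first recursion over per-node generators, as in ChainNode.generate.
    for out in nodes[startnode].generate(ds):
        if startnode == len(nodes) - 1:
            yield out
        else:
            for final in chain_generate(nodes, out, startnode + 1):
                yield final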
9bec15542e62430a4fc844a337e9455fab6aea7be44a073835b5e5c61c11d494 | def append(self, node):
'Append a node to the chain.'
self._nodes.append(node) | Append a node to the chain. | mvpa2/base/node.py | append | mikiec84/PyMVPA | 227 | python | def append(self, node):
self._nodes.append(node) | def append(self, node):
self._nodes.append(node)<|docstring|>Append a node to the chain.<|endoftext|> |
4d23ccf26dced938862260bb09f55f7a8b3b91e944e0e0b089a509bf8804d545 | def __init__(self, nodes, **kwargs):
'\n Parameters\n ----------\n nodes: list\n Node instances.\n '
CompoundNode.__init__(self, nodes=nodes, **kwargs) | Parameters
----------
nodes: list
Node instances. | mvpa2/base/node.py | __init__ | mikiec84/PyMVPA | 227 | python | def __init__(self, nodes, **kwargs):
'\n Parameters\n ----------\n nodes: list\n Node instances.\n '
CompoundNode.__init__(self, nodes=nodes, **kwargs) | def __init__(self, nodes, **kwargs):
'\n Parameters\n ----------\n nodes: list\n Node instances.\n '
CompoundNode.__init__(self, nodes=nodes, **kwargs)<|docstring|>Parameters
----------
nodes: list
Node instances.<|endoftext|> |
a2bfccd515f1b37541644153e2f74859ae2af4bc747f186546bc17587a4e6ec0 | def __init__(self, nodes, combine_axis, a=None, **kwargs):
"\n Parameters\n ----------\n mappers : list\n combine_axis : ['h', 'v']\n a: {'unique','drop_nonunique','uniques','all'} or True or False or None (default: None)\n Indicates which dataset attributes from datasets are stored\n in merged_dataset. If an int k, then the dataset attributes from\n datasets[k] are taken. If 'unique' then it is assumed that any\n attribute common to more than one dataset in datasets is unique;\n if not an exception is raised. If 'drop_nonunique' then as 'unique',\n except that exceptions are not raised. If 'uniques' then, for each\n attribute, any unique value across the datasets is stored in a tuple\n in merged_datasets. If 'all' then each attribute present in any\n dataset across datasets is stored as a tuple in merged_datasets;\n missing values are replaced by None. If None (the default) then no\n attributes are stored in merged_dataset. True is equivalent to\n 'drop_nonunique'. False is equivalent to None.\n "
CompoundNode.__init__(self, nodes=nodes, **kwargs)
self._combine_axis = combine_axis
self._a = a | Parameters
----------
nodes : list
combine_axis : ['h', 'v']
a: {'unique','drop_nonunique','uniques','all'} or True or False or None (default: None)
Indicates which dataset attributes from datasets are stored
in merged_dataset. If an int k, then the dataset attributes from
datasets[k] are taken. If 'unique' then it is assumed that any
attribute common to more than one dataset in datasets is unique;
if not an exception is raised. If 'drop_nonunique' then as 'unique',
except that exceptions are not raised. If 'uniques' then, for each
attribute, any unique value across the datasets is stored in a tuple
in merged_datasets. If 'all' then each attribute present in any
dataset across datasets is stored as a tuple in merged_datasets;
missing values are replaced by None. If None (the default) then no
attributes are stored in merged_dataset. True is equivalent to
'drop_nonunique'. False is equivalent to None. | mvpa2/base/node.py | __init__ | mikiec84/PyMVPA | 227 | python | def __init__(self, nodes, combine_axis, a=None, **kwargs):
"\n Parameters\n ----------\n mappers : list\n combine_axis : ['h', 'v']\n a: {'unique','drop_nonunique','uniques','all'} or True or False or None (default: None)\n Indicates which dataset attributes from datasets are stored\n in merged_dataset. If an int k, then the dataset attributes from\n datasets[k] are taken. If 'unique' then it is assumed that any\n attribute common to more than one dataset in datasets is unique;\n if not an exception is raised. If 'drop_nonunique' then as 'unique',\n except that exceptions are not raised. If 'uniques' then, for each\n attribute, any unique value across the datasets is stored in a tuple\n in merged_datasets. If 'all' then each attribute present in any\n dataset across datasets is stored as a tuple in merged_datasets;\n missing values are replaced by None. If None (the default) then no\n attributes are stored in merged_dataset. True is equivalent to\n 'drop_nonunique'. False is equivalent to None.\n "
CompoundNode.__init__(self, nodes=nodes, **kwargs)
self._combine_axis = combine_axis
self._a = a | def __init__(self, nodes, combine_axis, a=None, **kwargs):
"\n Parameters\n ----------\n mappers : list\n combine_axis : ['h', 'v']\n a: {'unique','drop_nonunique','uniques','all'} or True or False or None (default: None)\n Indicates which dataset attributes from datasets are stored\n in merged_dataset. If an int k, then the dataset attributes from\n datasets[k] are taken. If 'unique' then it is assumed that any\n attribute common to more than one dataset in datasets is unique;\n if not an exception is raised. If 'drop_nonunique' then as 'unique',\n except that exceptions are not raised. If 'uniques' then, for each\n attribute, any unique value across the datasets is stored in a tuple\n in merged_datasets. If 'all' then each attribute present in any\n dataset across datasets is stored as a tuple in merged_datasets;\n missing values are replaced by None. If None (the default) then no\n attributes are stored in merged_dataset. True is equivalent to\n 'drop_nonunique'. False is equivalent to None.\n "
CompoundNode.__init__(self, nodes=nodes, **kwargs)
self._combine_axis = combine_axis
self._a = a<|docstring|>Parameters
----------
nodes : list
combine_axis : ['h', 'v']
a: {'unique','drop_nonunique','uniques','all'} or True or False or None (default: None)
Indicates which dataset attributes from datasets are stored
in merged_dataset. If an int k, then the dataset attributes from
datasets[k] are taken. If 'unique' then it is assumed that any
attribute common to more than one dataset in datasets is unique;
if not an exception is raised. If 'drop_nonunique' then as 'unique',
except that exceptions are not raised. If 'uniques' then, for each
attribute, any unique value across the datasets is stored in a tuple
in merged_datasets. If 'all' then each attribute present in any
dataset across datasets is stored as a tuple in merged_datasets;
missing values are replaced by None. If None (the default) then no
attributes are stored in merged_dataset. True is equivalent to
'drop_nonunique'. False is equivalent to None.<|endoftext|> |
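For plain arrays, the combination step amounts to stacking each node's output along the chosen axis. A sketch of the 'h'/'v' convention in bare NumPy terms; this ignores the mvpa2 attribute-merging controlled by `a`:

import numpy as np

def combine(outputs, combine_axis):
    # 'h': stack horizontally (features); 'v': stack vertically (samples).
    return np.concatenate(outputs, axis=1 if combine_axis == 'h' else 0)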
374281d3427df6761c0d47d63527950e682b8d31325f196eb8ddddb6f5970875 | def _dc_dt(c, t, x, derivs_0, derivs_L, diff_coeff_fun, diff_coeff_params, rxn_fun, rxn_params, n_species, h):
'\n Time derivative of concentrations in an R-D system\n for constant flux BCs.\n\n Parameters\n ----------\n c : ndarray, shape (n_species * n_gridpoints)\n The concentration of the chemical species interleaved in\n a NumPy array. The interleaving allows us to take advantage\n of the banded structure of the Jacobian when using the\n Hindmarsh algorithm for integrating in time.\n t : float\n Time.\n derivs_0 : ndarray, shape (n_species)\n derivs_0[i] is the value of the diffusive flux,\n D dc_i/dx, at x = 0, the leftmost boundary of the domain of x.\n derivs_L : ndarray, shape (n_species)\n derivs_L[i] is the value of the diffusive flux,\n D dc_i/dx, at x = L, the rightmost boundary of the domain of x.\n diff_coeff_fun : function\n Function of the form diff_coeff_fun(c_tuple, t, x, *diff_coeff_params).\n Returns a tuple where entry i is a NumPy array containing\n the diffusion coefficient of species i at the grid points.\n c_tuple[i] is a NumPy array containing the concentrations of\n species i at the grid points.\n diff_coeff_params : arbitrary\n Tuple of parameters to be passed into diff_coeff_fun.\n rxn_fun : function\n Function of the form rxn_fun(c_tuple, t, *rxn_params).\n Returns a tuple where entry i is a NumPy array containing\n the net rate of production of species i by chemical reaction\n at the grid points. c_tuple[i] is a NumPy array containing\n the concentrations of species i at the grid points.\n rxn_params : arbitrary\n Tuple of parameters to be passed into rxn_fun.\n n_species : int\n Number of chemical species.\n h : float\n Grid spacing (assumed to be constant)\n\n Returns\n -------\n dc_dt : ndarray, shape (n_species * n_gridpoints)\n The time derivatives of the concentrations of the chemical\n species at the grid points interleaved in a NumPy array.\n '
c_tuple = tuple([c[i::n_species] for i in range(n_species)])
D_tuple = diff_coeff_fun(c_tuple, t, x, *diff_coeff_params)
rxn_tuple = rxn_fun(c_tuple, t, *rxn_params)
conc_deriv = np.empty_like(c)
da_dt = np.empty(len(c_tuple[0]))
h2 = (h ** 2)
for i in range(n_species):
a = np.copy(c_tuple[i])
D = np.copy(D_tuple[i])
da_dt[0] = (((D[0] / h2) * 2) * ((a[1] - a[0]) - (h * derivs_0[i])))
dD_dx = ((D[2:] - D[:(- 2)]) / (2 * h))
da_dx = ((a[2:] - a[:(- 2)]) / (2 * h))
da_dt[1:(- 1)] = (((D[1:(- 1)] * np.diff(a, 2)) / h2) + (dD_dx * da_dx))
da_dt[(- 1)] = (((D[(- 1)] / h2) * 2) * ((a[(- 2)] - a[(- 1)]) + (h * derivs_L[i])))
conc_deriv[i::n_species] = (da_dt + rxn_tuple[i])
return conc_deriv | Time derivative of concentrations in an R-D system
for constant flux BCs.
Parameters
----------
c : ndarray, shape (n_species * n_gridpoints)
The concentration of the chemical species interleaved in
a NumPy array. The interleaving allows us to take advantage
of the banded structure of the Jacobian when using the
Hindmarsh algorithm for integrating in time.
t : float
Time.
derivs_0 : ndarray, shape (n_species)
derivs_0[i] is the value of the diffusive flux,
D dc_i/dx, at x = 0, the leftmost boundary of the domain of x.
derivs_L : ndarray, shape (n_species)
derivs_L[i] is the value of the diffusive flux,
D dc_i/dx, at x = L, the rightmost boundary of the domain of x.
diff_coeff_fun : function
Function of the form diff_coeff_fun(c_tuple, t, x, *diff_coeff_params).
Returns a tuple where entry i is a NumPy array containing
the diffusion coefficient of species i at the grid points.
c_tuple[i] is a NumPy array containing the concentrations of
species i at the grid points.
diff_coeff_params : arbitrary
Tuple of parameters to be passed into diff_coeff_fun.
rxn_fun : function
Function of the form rxn_fun(c_tuple, t, *rxn_params).
Returns a tuple where entry i is a NumPy array containing
the net rate of production of species i by chemical reaction
at the grid points. c_tuple[i] is a NumPy array containing
the concentrations of species i at the grid points.
rxn_params : arbitrary
Tuple of parameters to be passed into rxn_fun.
n_species : int
Number of chemical species.
h : float
Grid spacing (assumed to be constant)
Returns
-------
dc_dt : ndarray, shape (n_species * n_gridpoints)
The time derivatives of the concentrations of the chemical
species at the grid points interleaved in a NumPy array. | biocircuits/rd.py | _dc_dt | justinbois/biocircuits | 3 | python | def _dc_dt(c, t, x, derivs_0, derivs_L, diff_coeff_fun, diff_coeff_params, rxn_fun, rxn_params, n_species, h):
'\n Time derivative of concentrations in an R-D system\n for constant flux BCs.\n\n Parameters\n ----------\n c : ndarray, shape (n_species * n_gridpoints)\n The concentration of the chemical species interleaved in\n a NumPy array. The interleaving allows us to take advantage\n of the banded structure of the Jacobian when using the\n Hindmarsh algorithm for integrating in time.\n t : float\n Time.\n derivs_0 : ndarray, shape (n_species)\n derivs_0[i] is the value of the diffusive flux,\n D dc_i/dx, at x = 0, the leftmost boundary of the domain of x.\n derivs_L : ndarray, shape (n_species)\n derivs_L[i] is the value of the diffusive flux,\n D dc_i/dx, at x = L, the rightmost boundary of the domain of x.\n diff_coeff_fun : function\n Function of the form diff_coeff_fun(c_tuple, t, x, *diff_coeff_params).\n Returns a tuple where entry i is a NumPy array containing\n the diffusion coefficient of species i at the grid points.\n c_tuple[i] is a NumPy array containing the concentrations of\n species i at the grid points.\n diff_coeff_params : arbitrary\n Tuple of parameters to be passed into diff_coeff_fun.\n rxn_fun : function\n Function of the form rxn_fun(c_tuple, t, *rxn_params).\n Returns a tuple where entry i is a NumPy array containing\n the net rate of production of species i by chemical reaction\n at the grid points. c_tuple[i] is a NumPy array containing\n the concentrations of species i at the grid points.\n rxn_params : arbitrary\n Tuple of parameters to be passed into rxn_fun.\n n_species : int\n Number of chemical species.\n h : float\n Grid spacing (assumed to be constant)\n\n Returns\n -------\n dc_dt : ndarray, shape (n_species * n_gridpoints)\n The time derivatives of the concentrations of the chemical\n species at the grid points interleaved in a NumPy array.\n '
c_tuple = tuple([c[i::n_species] for i in range(n_species)])
D_tuple = diff_coeff_fun(c_tuple, t, x, *diff_coeff_params)
rxn_tuple = rxn_fun(c_tuple, t, *rxn_params)
conc_deriv = np.empty_like(c)
da_dt = np.empty(len(c_tuple[0]))
h2 = (h ** 2)
for i in range(n_species):
a = np.copy(c_tuple[i])
D = np.copy(D_tuple[i])
da_dt[0] = (((D[0] / h2) * 2) * ((a[1] - a[0]) - (h * derivs_0[i])))
dD_dx = ((D[2:] - D[:(- 2)]) / (2 * h))
da_dx = ((a[2:] - a[:(- 2)]) / (2 * h))
da_dt[1:(- 1)] = (((D[1:(- 1)] * np.diff(a, 2)) / h2) + (dD_dx * da_dx))
da_dt[(- 1)] = (((D[(- 1)] / h2) * 2) * ((a[(- 2)] - a[(- 1)]) + (h * derivs_L[i])))
conc_deriv[i::n_species] = (da_dt + rxn_tuple[i])
return conc_deriv | def _dc_dt(c, t, x, derivs_0, derivs_L, diff_coeff_fun, diff_coeff_params, rxn_fun, rxn_params, n_species, h):
'\n Time derivative of concentrations in an R-D system\n for constant flux BCs.\n\n Parameters\n ----------\n c : ndarray, shape (n_species * n_gridpoints)\n The concentration of the chemical species interleaved in\n a NumPy array. The interleaving allows us to take advantage\n of the banded structure of the Jacobian when using the\n Hindmarsh algorithm for integrating in time.\n t : float\n Time.\n derivs_0 : ndarray, shape (n_species)\n derivs_0[i] is the value of the diffusive flux,\n D dc_i/dx, at x = 0, the leftmost boundary of the domain of x.\n derivs_L : ndarray, shape (n_species)\n derivs_L[i] is the value of the diffusive flux,\n D dc_i/dx, at x = L, the rightmost boundary of the domain of x.\n diff_coeff_fun : function\n Function of the form diff_coeff_fun(c_tuple, t, x, *diff_coeff_params).\n Returns a tuple where entry i is a NumPy array containing\n the diffusion coefficient of species i at the grid points.\n c_tuple[i] is a NumPy array containing the concentrations of\n species i at the grid points.\n diff_coeff_params : arbitrary\n Tuple of parameters to be passed into diff_coeff_fun.\n rxn_fun : function\n Function of the form rxn_fun(c_tuple, t, *rxn_params).\n Returns a tuple where entry i is a NumPy array containing\n the net rate of production of species i by chemical reaction\n at the grid points. c_tuple[i] is a NumPy array containing\n the concentrations of species i at the grid points.\n rxn_params : arbitrary\n Tuple of parameters to be passed into rxn_fun.\n n_species : int\n Number of chemical species.\n h : float\n Grid spacing (assumed to be constant)\n\n Returns\n -------\n dc_dt : ndarray, shape (n_species * n_gridpoints)\n The time derivatives of the concentrations of the chemical\n species at the grid points interleaved in a NumPy array.\n '
c_tuple = tuple([c[i::n_species] for i in range(n_species)])
D_tuple = diff_coeff_fun(c_tuple, t, x, *diff_coeff_params)
rxn_tuple = rxn_fun(c_tuple, t, *rxn_params)
conc_deriv = np.empty_like(c)
da_dt = np.empty(len(c_tuple[0]))
h2 = (h ** 2)
for i in range(n_species):
a = np.copy(c_tuple[i])
D = np.copy(D_tuple[i])
da_dt[0] = (((D[0] / h2) * 2) * ((a[1] - a[0]) - (h * derivs_0[i])))
dD_dx = ((D[2:] - D[:(- 2)]) / (2 * h))
da_dx = ((a[2:] - a[:(- 2)]) / (2 * h))
da_dt[1:(- 1)] = (((D[1:(- 1)] * np.diff(a, 2)) / h2) + (dD_dx * da_dx))
da_dt[(- 1)] = (((D[(- 1)] / h2) * 2) * ((a[(- 2)] - a[(- 1)]) + (h * derivs_L[i])))
conc_deriv[i::n_species] = (da_dt + rxn_tuple[i])
return conc_deriv<|docstring|>Time derivative of concentrations in an R-D system
for constant flux BCs.
Parameters
----------
c : ndarray, shape (n_species * n_gridpoints)
The concentration of the chemical species interleaved in
a NumPy array. The interleaving allows us to take advantage
of the banded structure of the Jacobian when using the
Hindmarsh algorithm for integrating in time.
t : float
Time.
derivs_0 : ndarray, shape (n_species)
derivs_0[i] is the value of the diffusive flux,
D dc_i/dx, at x = 0, the leftmost boundary of the domain of x.
derivs_L : ndarray, shape (n_species)
derivs_L[i] is the value of the diffusive flux,
D dc_i/dx, at x = L, the rightmost boundary of the domain of x.
diff_coeff_fun : function
Function of the form diff_coeff_fun(c_tuple, t, x, *diff_coeff_params).
Returns a tuple where entry i is a NumPy array containing
the diffusion coefficient of species i at the grid points.
c_tuple[i] is a NumPy array containing the concentrations of
species i at the grid points.
diff_coeff_params : arbitrary
Tuple of parameters to be passed into diff_coeff_fun.
rxn_fun : function
Function of the form rxn_fun(c_tuple, t, *rxn_params).
Returns a tuple where entry i is a NumPy array containing
the net rate of production of species i by chemical reaction
at the grid points. c_tuple[i] is a NumPy array containing
the concentrations of species i at the grid points.
rxn_params : arbitrary
Tuple of parameters to be passed into rxn_fun.
n_species : int
Number of chemical species.
h : float
Grid spacing (assumed to be constant)
Returns
-------
dc_dt : ndarray, shape (n_species * n_gridpoints)
The time derivatives of the concentrations of the chemical
species at the grid points interleaved in a NumPy array.<|endoftext|> |
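The interleaved layout c = [c_0(x_0), c_1(x_0), c_0(x_1), c_1(x_1), ...] is what keeps the Jacobian banded with bandwidth n_species, and species i is recovered with the stride slice c[i::n_species]. A small demonstration of the round trip:

import numpy as np

n_species, n_gridpoints = 2, 4
a = np.arange(n_gridpoints, dtype=float)       # species 0 at the grid points
b = 10 + np.arange(n_gridpoints, dtype=float)  # species 1 at the grid points

c = np.empty(n_species * n_gridpoints)
c[0::n_species] = a                            # interleave: a0, b0, a1, b1, ...
c[1::n_species] = b

assert np.array_equal(c[0::n_species], a)      # stride slicing deinterleaves
assert np.array_equal(c[1::n_species], b)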
440ad3c3831a45729696642be68e5f78038b1979219410d982a08e4e7034bf7a | def rd_solve(c_0_tuple, t, L=1, derivs_0=0, derivs_L=0, diff_coeff_fun=None, diff_coeff_params=(), rxn_fun=None, rxn_params=(), rtol=1.49012e-08, atol=1.49012e-08):
"Solve a system of reaction-diffusion equations in space and time.\n\n Parameters\n ----------\n c_0_tuple : tuple\n c_0_tuple[i] is a NumPy array of length n_gridpoints with the\n initial concentrations of chemical species i at the grid points.\n t : ndarray\n An array of time points for which the solution is desired.\n L : float\n Total length of the x-domain.\n derivs_0 : ndarray, shape (n_species)\n derivs_0[i] is the value of dc_i/dx at x = 0.\n derivs_L : ndarray, shape (n_species)\n derivs_L[i] is the value of dc_i/dx at x = L, the rightmost\n boundary of the domain of x.\n diff_coeff_fun : function\n Function of the form\n diff_coeff_fun(c_tuple, t, x, *diff_coeff_params).\n Returns an tuple where entry i is a NumPy array containing\n the diffusion coefficient of species i at the grid points.\n c_tuple[i] is a NumPy array containing the concentrations of\n species i at the grid points.\n diff_coeff_params : arbitrary\n Tuple of parameters to be passed into diff_coeff_fun.\n rxn_fun : function\n Function of the form rxn_fun(c_tuple, t, *rxn_params).\n Returns an tuple where entry i is a NumPy array containing\n the net rate of production of species i by chemical reaction\n at the grid points. c_tuple[i] is a NumPy array containing\n the concentrations of species i at the grid poitns.\n rxn_params : arbitrary\n Tuple of parameters to be passed into rxn_fun.\n rtol : float\n Relative tolerance for solver. Default os odeint's default.\n atol : float\n Absolute tolerance for solver. Default os odeint's default.\n\n Returns\n -------\n c_tuple : tuple\n c_tuple[i] is a NumPy array of shape (len(t), n_gridpoints)\n with the initial concentrations of chemical species i at\n the grid points over time.\n\n Notes\n -----\n .. When intergrating for long times near a steady state, you\n may need to lower the absolute tolerance (atol) because the\n solution does not change much over time and it may be difficult\n for the solver to maintain tight tolerances.\n "
n_gridpoints = len(c_0_tuple[0])
n_species = len(c_0_tuple)
h = (L / (n_gridpoints - 1))
x = np.linspace(0, L, n_gridpoints)
if np.isscalar(derivs_0):
derivs_0 = np.array((n_species * [derivs_0]))
if np.isscalar(derivs_L):
derivs_L = np.array((n_species * [derivs_L]))
params = (x, derivs_0, derivs_L, diff_coeff_fun, diff_coeff_params, rxn_fun, rxn_params, n_species, h)
c0 = np.empty((n_species * n_gridpoints))
for i in range(n_species):
c0[i::n_species] = c_0_tuple[i]
c = scipy.integrate.odeint(_dc_dt, c0, t, args=params, ml=n_species, mu=n_species, rtol=rtol, atol=atol)
return tuple([c[:, i::n_species] for i in range(n_species)]) | Solve a system of reaction-diffusion equations in space and time.
Parameters
----------
c_0_tuple : tuple
c_0_tuple[i] is a NumPy array of length n_gridpoints with the
initial concentrations of chemical species i at the grid points.
t : ndarray
An array of time points for which the solution is desired.
L : float
Total length of the x-domain.
derivs_0 : ndarray, shape (n_species)
derivs_0[i] is the value of dc_i/dx at x = 0.
derivs_L : ndarray, shape (n_species)
derivs_L[i] is the value of dc_i/dx at x = L, the rightmost
boundary of the domain of x.
diff_coeff_fun : function
Function of the form
diff_coeff_fun(c_tuple, t, x, *diff_coeff_params).
Returns a tuple where entry i is a NumPy array containing
the diffusion coefficient of species i at the grid points.
c_tuple[i] is a NumPy array containing the concentrations of
species i at the grid points.
diff_coeff_params : arbitrary
Tuple of parameters to be passed into diff_coeff_fun.
rxn_fun : function
Function of the form rxn_fun(c_tuple, t, *rxn_params).
Returns a tuple where entry i is a NumPy array containing
the net rate of production of species i by chemical reaction
at the grid points. c_tuple[i] is a NumPy array containing
the concentrations of species i at the grid points.
rxn_params : arbitrary
Tuple of parameters to be passed into rxn_fun.
rtol : float
Relative tolerance for solver. Default is odeint's default.
atol : float
Absolute tolerance for solver. Default is odeint's default.
Returns
-------
c_tuple : tuple
c_tuple[i] is a NumPy array of shape (len(t), n_gridpoints)
with the concentrations of chemical species i at
the grid points over time.
Notes
-----
.. When integrating for long times near a steady state, you
may need to lower the absolute tolerance (atol) because the
solution does not change much over time and it may be difficult
for the solver to maintain tight tolerances. | biocircuits/rd.py | rd_solve | justinbois/biocircuits | 3 | python | def rd_solve(c_0_tuple, t, L=1, derivs_0=0, derivs_L=0, diff_coeff_fun=None, diff_coeff_params=(), rxn_fun=None, rxn_params=(), rtol=1.49012e-08, atol=1.49012e-08):
"Solve a system of reaction-diffusion equations in space and time.\n\n Parameters\n ----------\n c_0_tuple : tuple\n c_0_tuple[i] is a NumPy array of length n_gridpoints with the\n initial concentrations of chemical species i at the grid points.\n t : ndarray\n An array of time points for which the solution is desired.\n L : float\n Total length of the x-domain.\n derivs_0 : ndarray, shape (n_species)\n derivs_0[i] is the value of dc_i/dx at x = 0.\n derivs_L : ndarray, shape (n_species)\n derivs_L[i] is the value of dc_i/dx at x = L, the rightmost\n boundary of the domain of x.\n diff_coeff_fun : function\n Function of the form\n diff_coeff_fun(c_tuple, t, x, *diff_coeff_params).\n Returns an tuple where entry i is a NumPy array containing\n the diffusion coefficient of species i at the grid points.\n c_tuple[i] is a NumPy array containing the concentrations of\n species i at the grid points.\n diff_coeff_params : arbitrary\n Tuple of parameters to be passed into diff_coeff_fun.\n rxn_fun : function\n Function of the form rxn_fun(c_tuple, t, *rxn_params).\n Returns an tuple where entry i is a NumPy array containing\n the net rate of production of species i by chemical reaction\n at the grid points. c_tuple[i] is a NumPy array containing\n the concentrations of species i at the grid poitns.\n rxn_params : arbitrary\n Tuple of parameters to be passed into rxn_fun.\n rtol : float\n Relative tolerance for solver. Default os odeint's default.\n atol : float\n Absolute tolerance for solver. Default os odeint's default.\n\n Returns\n -------\n c_tuple : tuple\n c_tuple[i] is a NumPy array of shape (len(t), n_gridpoints)\n with the initial concentrations of chemical species i at\n the grid points over time.\n\n Notes\n -----\n .. When intergrating for long times near a steady state, you\n may need to lower the absolute tolerance (atol) because the\n solution does not change much over time and it may be difficult\n for the solver to maintain tight tolerances.\n "
n_gridpoints = len(c_0_tuple[0])
n_species = len(c_0_tuple)
h = (L / (n_gridpoints - 1))
x = np.linspace(0, L, n_gridpoints)
if np.isscalar(derivs_0):
derivs_0 = np.array((n_species * [derivs_0]))
if np.isscalar(derivs_L):
derivs_L = np.array((n_species * [derivs_L]))
params = (x, derivs_0, derivs_L, diff_coeff_fun, diff_coeff_params, rxn_fun, rxn_params, n_species, h)
c0 = np.empty((n_species * n_gridpoints))
for i in range(n_species):
c0[i::n_species] = c_0_tuple[i]
c = scipy.integrate.odeint(_dc_dt, c0, t, args=params, ml=n_species, mu=n_species, rtol=rtol, atol=atol)
return tuple([c[:, i::n_species] for i in range(n_species)]) | def rd_solve(c_0_tuple, t, L=1, derivs_0=0, derivs_L=0, diff_coeff_fun=None, diff_coeff_params=(), rxn_fun=None, rxn_params=(), rtol=1.49012e-08, atol=1.49012e-08):
"Solve a system of reaction-diffusion equations in space and time.\n\n Parameters\n ----------\n c_0_tuple : tuple\n c_0_tuple[i] is a NumPy array of length n_gridpoints with the\n initial concentrations of chemical species i at the grid points.\n t : ndarray\n An array of time points for which the solution is desired.\n L : float\n Total length of the x-domain.\n derivs_0 : ndarray, shape (n_species)\n derivs_0[i] is the value of dc_i/dx at x = 0.\n derivs_L : ndarray, shape (n_species)\n derivs_L[i] is the value of dc_i/dx at x = L, the rightmost\n boundary of the domain of x.\n diff_coeff_fun : function\n Function of the form\n diff_coeff_fun(c_tuple, t, x, *diff_coeff_params).\n Returns an tuple where entry i is a NumPy array containing\n the diffusion coefficient of species i at the grid points.\n c_tuple[i] is a NumPy array containing the concentrations of\n species i at the grid points.\n diff_coeff_params : arbitrary\n Tuple of parameters to be passed into diff_coeff_fun.\n rxn_fun : function\n Function of the form rxn_fun(c_tuple, t, *rxn_params).\n Returns an tuple where entry i is a NumPy array containing\n the net rate of production of species i by chemical reaction\n at the grid points. c_tuple[i] is a NumPy array containing\n the concentrations of species i at the grid poitns.\n rxn_params : arbitrary\n Tuple of parameters to be passed into rxn_fun.\n rtol : float\n Relative tolerance for solver. Default os odeint's default.\n atol : float\n Absolute tolerance for solver. Default os odeint's default.\n\n Returns\n -------\n c_tuple : tuple\n c_tuple[i] is a NumPy array of shape (len(t), n_gridpoints)\n with the initial concentrations of chemical species i at\n the grid points over time.\n\n Notes\n -----\n .. When intergrating for long times near a steady state, you\n may need to lower the absolute tolerance (atol) because the\n solution does not change much over time and it may be difficult\n for the solver to maintain tight tolerances.\n "
n_gridpoints = len(c_0_tuple[0])
n_species = len(c_0_tuple)
h = (L / (n_gridpoints - 1))
x = np.linspace(0, L, n_gridpoints)
if np.isscalar(derivs_0):
derivs_0 = np.array((n_species * [derivs_0]))
if np.isscalar(derivs_L):
derivs_L = np.array((n_species * [derivs_L]))
params = (x, derivs_0, derivs_L, diff_coeff_fun, diff_coeff_params, rxn_fun, rxn_params, n_species, h)
c0 = np.empty((n_species * n_gridpoints))
for i in range(n_species):
c0[i::n_species] = c_0_tuple[i]
c = scipy.integrate.odeint(_dc_dt, c0, t, args=params, ml=n_species, mu=n_species, rtol=rtol, atol=atol)
return tuple([c[:, i::n_species] for i in range(n_species)])<|docstring|>Solve a system of reaction-diffusion equations in space and time.
Parameters
----------
c_0_tuple : tuple
c_0_tuple[i] is a NumPy array of length n_gridpoints with the
initial concentrations of chemical species i at the grid points.
t : ndarray
An array of time points for which the solution is desired.
L : float
Total length of the x-domain.
derivs_0 : ndarray, shape (n_species)
derivs_0[i] is the value of dc_i/dx at x = 0.
derivs_L : ndarray, shape (n_species)
derivs_L[i] is the value of dc_i/dx at x = L, the rightmost
boundary of the domain of x.
diff_coeff_fun : function
Function of the form
diff_coeff_fun(c_tuple, t, x, *diff_coeff_params).
Returns a tuple where entry i is a NumPy array containing
the diffusion coefficient of species i at the grid points.
c_tuple[i] is a NumPy array containing the concentrations of
species i at the grid points.
diff_coeff_params : arbitrary
Tuple of parameters to be passed into diff_coeff_fun.
rxn_fun : function
Function of the form rxn_fun(c_tuple, t, *rxn_params).
Returns a tuple where entry i is a NumPy array containing
the net rate of production of species i by chemical reaction
at the grid points. c_tuple[i] is a NumPy array containing
the concentrations of species i at the grid points.
rxn_params : arbitrary
Tuple of parameters to be passed into rxn_fun.
rtol : float
Relative tolerance for solver. Default is odeint's default.
atol : float
Absolute tolerance for solver. Default is odeint's default.
Returns
-------
c_tuple : tuple
c_tuple[i] is a NumPy array of shape (len(t), n_gridpoints)
with the concentrations of chemical species i at
the grid points over time.
Notes
-----
.. When integrating for long times near a steady state, you
may need to lower the absolute tolerance (atol) because the
solution does not change much over time and it may be difficult
for the solver to maintain tight tolerances.<|endoftext|> |
b98c0221feee2c61ca0bee199b78b465b43f8334c61cc3d9e5b4d4d8ff54bc99 | def constant_diff_coeffs(c_tuple, t, x, diff_coeffs):
'Function for use with `rd_solve()` for constant diffusion\n coefficients.\n\n Parameters\n ----------\n c_tuple : tuple\n c_tuple[i] is a NumPy array containing the concentrations of\n species i at the grid points.\n t : dummy argument\n Dummy argument for the time.\n x : dummy argument\n Dummy argument for position.\n diff_coeffs : tuple\n diff_coeffs[i] is the diffusion coefficient of species i.\n\n Returns\n -------\n output : tuple of NumPy arrays\n A tuple containing the (constant) diffusion coefficient for all\n species at all positions.\n\n Notes\n -----\n .. The `t` and `x` arguments are dummy arguments so that the call\n signature matches what is required by `rd_solve()`.\n\n '
n = len(c_tuple[0])
return tuple([(diff_coeffs[i] * np.ones(n)) for i in range(len(c_tuple))]) | Function for use with `rd_solve()` for constant diffusion
coefficients.
Parameters
----------
c_tuple : tuple
c_tuple[i] is a NumPy array containing the concentrations of
species i at the grid points.
t : dummy argument
Dummy argument for the time.
x : dummy argument
Dummy argument for position.
diff_coeffs : tuple
diff_coeffs[i] is the diffusion coefficient of species i.
Returns
-------
output : tuple of NumPy arrays
A tuple containing the (constant) diffusion coefficient for all
species at all positions.
Notes
-----
.. The `t` and `x` arguments are dummy arguments so that the call
signature matches what is required by `rd_solve()`. | biocircuits/rd.py | constant_diff_coeffs | justinbois/biocircuits | 3 | python | def constant_diff_coeffs(c_tuple, t, x, diff_coeffs):
'Function for use with `rd_solve()` for constant diffusion\n coefficients.\n\n Parameters\n ----------\n c_tuple : tuple\n c_tuple[i] is a NumPy array containing the concentrations of\n species i at the grid points.\n t : dummy argument\n Dummy argument for the time.\n x : dummy argument\n Dummy argument for position.\n diff_coeffs : tuple\n diff_coeffs[i] is the diffusion coefficient of species i.\n\n Returns\n -------\n output : tuple of NumPy arrays\n A tuple containing the (constant) diffusion coefficient for all\n species at all positions.\n\n Notes\n -----\n .. The `t` and `x` arguments are dummy arguments so that the call\n signature matches what is required by `rd_solve()`.\n\n '
n = len(c_tuple[0])
return tuple([(diff_coeffs[i] * np.ones(n)) for i in range(len(c_tuple))]) | def constant_diff_coeffs(c_tuple, t, x, diff_coeffs):
'Function for use with `rd_solve()` for constant diffusion\n coefficients.\n\n Parameters\n ----------\n c_tuple : tuple\n c_tuple[i] is a NumPy array containing the concentrations of\n species i at the grid points.\n t : dummy argument\n Dummy argument for the time.\n x : dummy argument\n Dummy argument for position.\n diff_coeffs : tuple\n diff_coeffs[i] is the diffusion coefficient of species i.\n\n Returns\n -------\n output : tuple of NumPy arrays\n A tuple containing the (constant) diffusion coefficient for all\n species at all positions.\n\n Notes\n -----\n .. The `t` and `x` arguments are dummy arguments so that the call\n signature matches what is required by `rd_solve()`.\n\n '
n = len(c_tuple[0])
return tuple([(diff_coeffs[i] * np.ones(n)) for i in range(len(c_tuple))])<|docstring|>Function for use with `rd_solve()` for constant diffusion
coefficients.
Parameters
----------
c_tuple : tuple
c_tuple[i] is a NumPy array containing the concentrations of
species i at the grid points.
t : dummy argument
Dummy argument for the time.
x : dummy argument
Dummy argument for position.
diff_coeffs : tuple
diff_coeffs[i] is the diffusion coefficient of species i.
Returns
-------
output : tuple of NumPy arrays
A tuple containing the (constant) diffusion coefficient for all
species at all positions.
Notes
-----
.. The `t` and `x` arguments are dummy arguments so that the call
signature matches what is required by `rd_solve()`.<|endoftext|> |
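Why rd_solve hands odeint ml=mu=n_species: the state is packed species-interleaved (c0[i::n_species] = c_0_tuple[i]), so nearest-neighbour diffusion coupling only reaches n_species entries away and the Jacobian stays banded. A small sketch of that packing convention with made-up numbers:

import numpy as np

n_species, n_gridpoints = 2, 4
a = np.array([1.0, 2.0, 3.0, 4.0])
b = np.array([10.0, 20.0, 30.0, 40.0])

packed = np.empty(n_species * n_gridpoints)
packed[0::n_species] = a
packed[1::n_species] = b
# packed == [1, 10, 2, 20, 3, 30, 4, 40]: all species at one grid point sit
# next to each other, keeping finite-difference coupling within a narrow band.

c = np.vstack([packed, packed])  # stand-in for an odeint result with len(t) == 2
a_t, b_t = (c[:, i::n_species] for i in range(n_species))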
2cdddaddbc1b8b0420959d12aec109ff15fd07f496fcc7547808e3fd38e8eded | def load(self):
'Load crawlers from paths'
for crawler_path in self.running_config.paths:
crawler_path_abs = os.path.abspath(crawler_path)
if (not os.path.isdir(crawler_path_abs)):
self.crawlers[crawler_path_abs] = Crawlino(crawler_path_abs, self.running_config)
else:
for (root, dirs, files) in os.walk(crawler_path_abs):
for f in files:
if (any((f.startswith(x) for x in ('crawlino', 'crawler'))) and any((f.endswith(x) for x in Crawlino.VALID_CRAWLER_EXTENSIONS))):
self.crawlers[f] = Crawlino(f, self.running_config)
for c in self.crawlers.values():
self.update_global_config(c)
log.info(f'Loaded {len(self.crawlers)} crawlers') | Load crawlers from paths | crawlino/crawlino_managers/simple_manager.py | load | BBVA/crawlino | 1 | python | def load(self):
for crawler_path in self.running_config.paths:
crawler_path_abs = os.path.abspath(crawler_path)
if (not os.path.isdir(crawler_path_abs)):
self.crawlers[crawler_path_abs] = Crawlino(crawler_path_abs, self.running_config)
else:
for (root, dirs, files) in os.walk(crawler_path_abs):
for f in files:
if (any((f.startswith(x) for x in ('crawlino', 'crawler'))) and any((f.endswith(x) for x in Crawlino.VALID_CRAWLER_EXTENSIONS))):
self.crawlers[f] = Crawlino(f, self.running_config)
for c in self.crawlers.values():
self.update_global_config(c)
log.info(f'Loaded {len(self.crawlers)} crawlers') | def load(self):
for crawler_path in self.running_config.paths:
crawler_path_abs = os.path.abspath(crawler_path)
if (not os.path.isdir(crawler_path_abs)):
self.crawlers[crawler_path_abs] = Crawlino(crawler_path_abs, self.running_config)
else:
for (root, dirs, files) in os.walk(crawler_path_abs):
for f in files:
if (any((f.startswith(x) for x in ('crawlino', 'crawler'))) and any((f.endswith(x) for x in Crawlino.VALID_CRAWLER_EXTENSIONS))):
self.crawlers[f] = Crawlino(f, self.running_config)
for c in self.crawlers.values():
self.update_global_config(c)
log.info(f'Loaded {len(self.crawlers)} crawlers')<|docstring|>Load crawlers from paths<|endoftext|> |
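A standalone sketch of the filename filter load() applies while walking directories; the extension list below is an assumed value for the example, the real one lives on the Crawlino class:

VALID_CRAWLER_EXTENSIONS = ('.yaml', '.yml', '.json')  # assumed values

def looks_like_crawler(filename):
    # Mirrors the startswith/endswith test in load()
    return (any(filename.startswith(p) for p in ('crawlino', 'crawler'))
            and any(filename.endswith(e) for e in VALID_CRAWLER_EXTENSIONS))

print(looks_like_crawler('crawler_site.yaml'))  # True
print(looks_like_crawler('notes.txt'))          # False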
a24edb14f68b51335ced1013f89669f365235ee82d1a0f6b7e3ec127975e98a8 | def stateEquations(x, u, theta, tstep):
'\n Description:\n ODE system for computing the time derivatives of the state variables at time step tstep.\n The output is a vector.\n '
AL = theta[0]
AB = theta[1]
AF = theta[2]
C = theta[3]
t = 1000.0
k_ex = 0.4
k_in = 1.6
H_ex = (8.0 / t)
H_in = (32.0 / t)
gamma1 = 128.0
gamma2 = 128.0
gamma3 = 64.0
gamma4 = 64.0
gamma5 = 64.0
N = (np.size(x[:, 0]) // 12)
'\n x denotes a potential, v a current; p marks pyramidal cells, i inhibitory interneurons, s spiny cells\n the indices ex and in stand for the excitatory and inhibitory parts, respectively\n '
xp_ex = np.vsplit(x, (0, N))[1]
vp_ex = np.vsplit(x, (N, (2 * N)))[1]
xp_in = np.vsplit(x, ((2 * N), (3 * N)))[1]
vp_in = np.vsplit(x, ((3 * N), (4 * N)))[1]
xp_ges = np.vsplit(x, ((4 * N), (5 * N)))[1]
xs = np.vsplit(x, ((5 * N), (6 * N)))[1]
vs = np.vsplit(x, ((6 * N), (7 * N)))[1]
xi_ex = np.vsplit(x, ((7 * N), (8 * N)))[1]
vi_ex = np.vsplit(x, ((8 * N), (9 * N)))[1]
xi_in = np.vsplit(x, ((9 * N), (10 * N)))[1]
vi_in = np.vsplit(x, ((10 * N), (11 * N)))[1]
xi_ges = np.vsplit(x, ((11 * N), (12 * N)))[1]
xp_ex_dot = vp_ex[:, tstep]
vp_ex_dot = ((((k_ex * H_ex) * (np.dot((AB + AL), sig(xp_ges[:, tstep])) + (gamma2 * sig(xs[:, tstep])))) - ((2.0 * k_ex) * vp_ex[:, tstep])) - ((k_ex ** 2.0) * xp_ex[:, tstep]))
xp_in_dot = vp_in[:, tstep]
vp_in_dot = (((((k_in * H_in) * gamma4) * sig(xi_ges[:, tstep])) - ((2.0 * k_in) * vp_in[:, tstep])) - ((k_in ** 2.0) * xp_in[:, tstep]))
xp_ges_dot = (xp_ex[:, tstep] - xp_in[:, tstep])
xs_dot = vs[:, tstep]
vs_dot = ((((k_ex * H_ex) * (np.dot(((AF + AL) + (gamma1 * np.eye(3))), sig(xp_ges[:, tstep])) + np.dot(C, u[:, tstep]))) - ((2.0 * k_ex) * vs[:, tstep])) - ((k_ex ** 2.0) * xs[:, tstep]))
xi_ex_dot = vi_ex[:, tstep]
vi_ex_dot = ((((k_ex * H_ex) * np.dot(((AB + AL) + (gamma3 * np.eye(3))), sig(xp_ges[:, tstep]))) - ((2.0 * k_ex) * vi_ex[:, tstep])) - ((k_ex ** 2.0) * xi_ex[:, tstep]))
xi_in_dot = vi_in[:, tstep]
vi_in_dot = (((((k_in * H_in) * gamma5) * sig(xi_ges[:, tstep])) - ((2.0 * k_in) * vi_in[:, tstep])) - ((k_in ** 2.0) * xi_in[:, tstep]))
xi_ges_dot = (xi_ex[:, tstep] - xi_in[:, tstep])
x_dot = np.hstack([[xp_ex_dot], [vp_ex_dot], [xp_in_dot], [vp_in_dot], [xp_ges_dot], [xs_dot], [vs_dot], [xi_ex_dot], [vi_ex_dot], [xi_in_dot], [vi_in_dot], [xi_ges_dot]]).T
return x_dot | Description:
ODE system for computing the time derivatives of the state variables at time step tstep.
The output is a vector. | DCM/programs/EEGModel.py | stateEquations | l-althueser/NiMoNa_DCM16 | 1 | python | def stateEquations(x, u, theta, tstep):
'\n Description:\n ODE system for computing the time derivatives of the state variables at time step tstep.\n The output is a vector.\n '
AL = theta[0]
AB = theta[1]
AF = theta[2]
C = theta[3]
t = 1000.0
k_ex = 0.4
k_in = 1.6
H_ex = (8.0 / t)
H_in = (32.0 / t)
gamma1 = 128.0
gamma2 = 128.0
gamma3 = 64.0
gamma4 = 64.0
gamma5 = 64.0
N = (np.size(x[:, 0]) // 12)
'\n x denotes a potential, v a current; p marks pyramidal cells, i inhibitory interneurons, s spiny cells\n the indices ex and in stand for the excitatory and inhibitory parts, respectively\n '
xp_ex = np.vsplit(x, (0, N))[1]
vp_ex = np.vsplit(x, (N, (2 * N)))[1]
xp_in = np.vsplit(x, ((2 * N), (3 * N)))[1]
vp_in = np.vsplit(x, ((3 * N), (4 * N)))[1]
xp_ges = np.vsplit(x, ((4 * N), (5 * N)))[1]
xs = np.vsplit(x, ((5 * N), (6 * N)))[1]
vs = np.vsplit(x, ((6 * N), (7 * N)))[1]
xi_ex = np.vsplit(x, ((7 * N), (8 * N)))[1]
vi_ex = np.vsplit(x, ((8 * N), (9 * N)))[1]
xi_in = np.vsplit(x, ((9 * N), (10 * N)))[1]
vi_in = np.vsplit(x, ((10 * N), (11 * N)))[1]
xi_ges = np.vsplit(x, ((11 * N), (12 * N)))[1]
xp_ex_dot = vp_ex[:, tstep]
vp_ex_dot = ((((k_ex * H_ex) * (np.dot((AB + AL), sig(xp_ges[:, tstep])) + (gamma2 * sig(xs[:, tstep])))) - ((2.0 * k_ex) * vp_ex[:, tstep])) - ((k_ex ** 2.0) * xp_ex[:, tstep]))
xp_in_dot = vp_in[:, tstep]
vp_in_dot = (((((k_in * H_in) * gamma4) * sig(xi_ges[:, tstep])) - ((2.0 * k_in) * vp_in[:, tstep])) - ((k_in ** 2.0) * xp_in[:, tstep]))
xp_ges_dot = (xp_ex[:, tstep] - xp_in[:, tstep])
xs_dot = vs[:, tstep]
vs_dot = ((((k_ex * H_ex) * (np.dot(((AF + AL) + (gamma1 * np.eye(3))), sig(xp_ges[:, tstep])) + np.dot(C, u[:, tstep]))) - ((2.0 * k_ex) * vs[:, tstep])) - ((k_ex ** 2.0) * xs[:, tstep]))
xi_ex_dot = vi_ex[:, tstep]
vi_ex_dot = ((((k_ex * H_ex) * np.dot(((AB + AL) + (gamma3 * np.eye(3))), sig(xp_ges[:, tstep]))) - ((2.0 * k_ex) * vi_ex[:, tstep])) - ((k_ex ** 2.0) * xi_ex[:, tstep]))
xi_in_dot = vi_in[:, tstep]
vi_in_dot = (((((k_in * H_in) * gamma5) * sig(xi_ges[:, tstep])) - ((2.0 * k_in) * vi_in[:, tstep])) - ((k_in ** 2.0) * xi_in[:, tstep]))
xi_ges_dot = (xi_ex[:, tstep] - xi_in[:, tstep])
x_dot = np.hstack([[xp_ex_dot], [vp_ex_dot], [xp_in_dot], [vp_in_dot], [xp_ges_dot], [xs_dot], [vs_dot], [xi_ex_dot], [vi_ex_dot], [xi_in_dot], [vi_in_dot], [xi_ges_dot]]).T
return x_dot | def stateEquations(x, u, theta, tstep):
'\n Description:\n ODE system for computing the time derivatives of the state variables at time step tstep.\n The output is a vector.\n '
AL = theta[0]
AB = theta[1]
AF = theta[2]
C = theta[3]
t = 1000.0
k_ex = 0.4
k_in = 1.6
H_ex = (8.0 / t)
H_in = (32.0 / t)
gamma1 = 128.0
gamma2 = 128.0
gamma3 = 64.0
gamma4 = 64.0
gamma5 = 64.0
N = (np.size(x[:, 0]) // 12)
'\n x denotes a potential, v a current; p marks pyramidal cells, i inhibitory interneurons, s spiny cells\n the indices ex and in stand for the excitatory and inhibitory parts, respectively\n '
xp_ex = np.vsplit(x, (0, N))[1]
vp_ex = np.vsplit(x, (N, (2 * N)))[1]
xp_in = np.vsplit(x, ((2 * N), (3 * N)))[1]
vp_in = np.vsplit(x, ((3 * N), (4 * N)))[1]
xp_ges = np.vsplit(x, ((4 * N), (5 * N)))[1]
xs = np.vsplit(x, ((5 * N), (6 * N)))[1]
vs = np.vsplit(x, ((6 * N), (7 * N)))[1]
xi_ex = np.vsplit(x, ((7 * N), (8 * N)))[1]
vi_ex = np.vsplit(x, ((8 * N), (9 * N)))[1]
xi_in = np.vsplit(x, ((9 * N), (10 * N)))[1]
vi_in = np.vsplit(x, ((10 * N), (11 * N)))[1]
xi_ges = np.vsplit(x, ((11 * N), (12 * N)))[1]
xp_ex_dot = vp_ex[:, tstep]
vp_ex_dot = ((((k_ex * H_ex) * (np.dot((AB + AL), sig(xp_ges[:, tstep])) + (gamma2 * sig(xs[:, tstep])))) - ((2.0 * k_ex) * vp_ex[:, tstep])) - ((k_ex ** 2.0) * xp_ex[:, tstep]))
xp_in_dot = vp_in[:, tstep]
vp_in_dot = (((((k_in * H_in) * gamma4) * sig(xi_ges[:, tstep])) - ((2.0 * k_in) * vp_in[:, tstep])) - ((k_in ** 2.0) * xp_in[:, tstep]))
xp_ges_dot = (xp_ex[:, tstep] - xp_in[:, tstep])
xs_dot = vs[:, tstep]
vs_dot = ((((k_ex * H_ex) * (np.dot(((AF + AL) + (gamma1 * np.eye(3))), sig(xp_ges[:, tstep])) + np.dot(C, u[:, tstep]))) - ((2.0 * k_ex) * vs[:, tstep])) - ((k_ex ** 2.0) * xs[:, tstep]))
xi_ex_dot = vi_ex[:, tstep]
vi_ex_dot = ((((k_ex * H_ex) * np.dot(((AB + AL) + (gamma3 * np.eye(3))), sig(xp_ges[:, tstep]))) - ((2.0 * k_ex) * vi_ex[:, tstep])) - ((k_ex ** 2.0) * xi_ex[:, tstep]))
xi_in_dot = vi_in[:, tstep]
vi_in_dot = (((((k_in * H_in) * gamma5) * sig(xi_ges[:, tstep])) - ((2.0 * k_in) * vi_in[:, tstep])) - ((k_in ** 2.0) * xi_in[:, tstep]))
xi_ges_dot = (xi_ex[:, tstep] - xi_in[:, tstep])
x_dot = np.hstack([[xp_ex_dot], [vp_ex_dot], [xp_in_dot], [vp_in_dot], [xp_ges_dot], [xs_dot], [vs_dot], [xi_ex_dot], [vi_ex_dot], [xi_in_dot], [vi_in_dot], [xi_ges_dot]]).T
return x_dot<|docstring|>Beschreibung:
DGL zur Berechnung der Zetableitungen der Zustandsgrößen zu einem Zeitpunkt tstep.
Die Ausgabe ist ein Vektor.<|endoftext|> |
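stateEquations only returns the time derivatives at column tstep, so a caller has to hold the state history and step it forward itself. A forward-Euler driver sketch under assumed inputs (the hard-coded np.eye(3) terms above imply N = 3 regions; sig() and the coupling matrices below are stand-ins, not values from the repository):

import numpy as np

def sig(v):
    # Stand-in sigmoid; the real sig() is defined elsewhere in EEGModel.py
    return 1.0 / (1.0 + np.exp(-v))

N, steps, dt = 3, 200, 1.0
AL, AB, AF = (np.zeros((N, N)) for _ in range(3))  # assumed coupling matrices
C = np.eye(N)
theta = (AL, AB, AF, C)
u = np.zeros((N, steps))
u[0, :10] = 1.0                     # brief stimulus into region 0

x = np.zeros((12 * N, steps))       # 12 state blocks of N regions each
for k in range(steps - 1):
    # stateEquations returns shape (12*N, 1), hence the ravel()
    x[:, k + 1] = x[:, k] + dt * stateEquations(x, u, theta, k).ravel()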
586857ebdcb7a5c9962ece7e0d8b3468c144a48c015b0da7ee691fc32e7a3c83 | @property
def distributions(self):
'Dictionary that contains the distributions of :attr:`params`.'
return self._distributions | Dictionary that contains the distributions of :attr:`params`. | boexplain/optuna/optuna/structs.py | distributions | sfu-db/BOExplain | 8 | python | @property
def distributions(self):
return self._distributions | @property
def distributions(self):
return self._distributions<|docstring|>Dictionary that contains the distributions of :attr:`params`.<|endoftext|> |
285e345ac8053b3d5cc80a5e8b89a736c020a6a1f81767a48c73b6f3b5c72710 | @property
def duration(self):
'Return the elapsed time taken to complete the trial.\n\n Returns:\n The duration.\n '
if (self.datetime_start and self.datetime_complete):
return (self.datetime_complete - self.datetime_start)
else:
return None | Return the elapsed time taken to complete the trial.
Returns:
The duration. | boexplain/optuna/optuna/structs.py | duration | sfu-db/BOExplain | 8 | python | @property
def duration(self):
'Return the elapsed time taken to complete the trial.\n\n Returns:\n The duration.\n '
if (self.datetime_start and self.datetime_complete):
return (self.datetime_complete - self.datetime_start)
else:
return None | @property
def duration(self):
'Return the elapsed time taken to complete the trial.\n\n Returns:\n The duration.\n '
if (self.datetime_start and self.datetime_complete):
return (self.datetime_complete - self.datetime_start)
else:
return None<|docstring|>Return the elapsed time taken to complete the trial.
Returns:
The duration.<|endoftext|> |
4444533e4755e4fd5b8695678bb6f0d5a2bcc0929441dd900cabe7d9d3ca957b | def connect(self):
' Try to connect to MQTT broker\n '
print('Initializing telemetry via MQTT ...')
self.sta_if = network.WLAN(network.STA_IF)
if (not self.sta_if.isconnected()):
print('Error: Not connected to network')
else:
from NETWORK import my_mqtt_usr, my_mqtt_pwd, my_mqtt_srv
if (len(self._broker) == 0):
self._broker = my_mqtt_srv
self._client = MQTTClient(self._clientID, self._broker)
self._client.set_last_will(self._rootTopic, b'link/down')
try:
if (self._client.connect() == 0):
print('[{0:>12}] {1}'.format('topic', self._rootTopic))
self._client.publish(self._rootTopic, b'link/up')
self._isReady = True
except:
print('Error: MQTT broker {} not responding'.format(self._broker))
print(('... done.' if self._isReady else '... FAILED')) | Try to connect to MQTT broker | code/robotling/remote/telemetry.py | connect | boeh-da/robotling | 0 | python | def connect(self):
' \n '
print('Initializing telemetry via MQTT ...')
self.sta_if = network.WLAN(network.STA_IF)
if (not self.sta_if.isconnected()):
print('Error: Not connected to network')
else:
from NETWORK import my_mqtt_usr, my_mqtt_pwd, my_mqtt_srv
if (len(self._broker) == 0):
self._broker = my_mqtt_srv
self._client = MQTTClient(self._clientID, self._broker)
self._client.set_last_will(self._rootTopic, b'link/down')
try:
if (self._client.connect() == 0):
print('[{0:>12}] {1}'.format('topic', self._rootTopic))
self._client.publish(self._rootTopic, b'link/up')
self._isReady = True
except:
print('Error: MQTT broker {} not responding'.format(self._broker))
print(('... done.' if self._isReady else '... FAILED')) | def connect(self):
' \n '
print('Initializing telemetry via MQTT ...')
self.sta_if = network.WLAN(network.STA_IF)
if (not self.sta_if.isconnected()):
print('Error: Not connected to network')
else:
from NETWORK import my_mqtt_usr, my_mqtt_pwd, my_mqtt_srv
if (len(self._broker) == 0):
self._broker = my_mqtt_srv
self._client = MQTTClient(self._clientID, self._broker)
self._client.set_last_will(self._rootTopic, b'link/down')
try:
if (self._client.connect() == 0):
print('[{0:>12}] {1}'.format('topic', self._rootTopic))
self._client.publish(self._rootTopic, b'link/up')
self._isReady = True
except:
print('Error: MQTT broker {} not responding'.format(self._broker))
print(('... done.' if self._isReady else '... FAILED'))<|docstring|>Try to connect to MQTT broker<|endoftext|> |
37845e98139c0a6181a79b27b8e54c279eba2a82fba5ac302cd3a1a099c2a0de | def publishDict(self, t, d):
' Publish a dictionary as a message under <standard topic>/<t>\n '
if self._isReady:
self._client.publish((self._rootTopic + t), ujson.dumps(d)) | Publish a dictionary as a message under <standard topic>/<t> | code/robotling/remote/telemetry.py | publishDict | boeh-da/robotling | 0 | python | def publishDict(self, t, d):
' \n '
if self._isReady:
self._client.publish((self._rootTopic + t), ujson.dumps(d)) | def publishDict(self, t, d):
' \n '
if self._isReady:
self._client.publish((self._rootTopic + t), ujson.dumps(d))<|docstring|>Publish a dictionary as a message under <standard topic>/<t><|endoftext|> |
1475bfb69660eb68f8ff6d3844afcd1525fe04371ca0857aec05c7a3cd5838c1 | def publish(self, t, m):
' Publish a message under <standard topic>/<t>\n '
if self._isReady:
try:
self._client.publish((self._rootTopic + t), m)
except OSError as error:
if (error.args[0] != errno.ECONNRESET):
print('Error: publish caused {0}'.format(error.args[0])) | Publish a message under <standard topic>/<t> | code/robotling/remote/telemetry.py | publish | boeh-da/robotling | 0 | python | def publish(self, t, m):
' \n '
if self._isReady:
try:
self._client.publish((self._rootTopic + t), m)
except OSError as error:
if (error.args[0] != errno.ECONNRESET):
print('Error: publish caused {0}'.format(error.args[0])) | def publish(self, t, m):
' \n '
if self._isReady:
try:
self._client.publish((self._rootTopic + t), m)
except OSError as error:
if (error.args[0] != errno.ECONNRESET):
print('Error: publish caused {0}'.format(error.args[0]))<|docstring|>Publish a message under <standard topic>/<t><|endoftext|> |
c4daedbaa991af1c8d4c1a6f89eaec102b50109ebbfb07d6c5b9258e56c42361 | def disconnect(self):
' Disconnect from MQTT broker\n '
if self._isReady:
self._client.disconnect()
self._isReady = False | Disconnect from MQTT broker | code/robotling/remote/telemetry.py | disconnect | boeh-da/robotling | 0 | python | def disconnect(self):
' \n '
if self._isReady:
self._client.disconnect()
self._isReady = False | def disconnect(self):
' \n '
if self._isReady:
self._client.disconnect()
self._isReady = False<|docstring|>Disconnect from MQTT broker<|endoftext|> |
c45be1604f7af87f6e571a5824001b4b6d104b834386e2272bb4b13691520f4c | def format_hmac(user, hash_data, hashcat):
'Take hash data and return a valid hashcat hash for the\n PBKDF2-HMAC-SHA512 hash mode (12100) if hashcat is set to True;\n otherwise it will return a valid John-format hash for the\n JtR pbkdf2-hmac-sha512 hash format'
hcat_fmt_str = 'sha512:{}:{}:{}'
john_fmt_str = '{}:$pbkdf2-hmac-sha512${}.{}.{}'
(junk, morejunk, iterations, salt, digest) = hash_data.split('$')
if hashcat:
return hcat_fmt_str.format(iterations, salt, digest)
hex_digest = hexlify(b64decode(digest)).decode().upper()
hex_salt = hexlify(b64decode(salt)).decode().upper()
return john_fmt_str.format(user, iterations, hex_salt, hex_digest) | Take hash data and return a valid hashcat hash for the
PBKDF2-HMAC-SHA512 hash mode (12100) if hashcat is set to True;
otherwise it will return a valid John-format hash for the
JtR pbkdf2-hmac-sha512 hash format | mosquitto2john.py | format_hmac | ghostking2802/John_conversion_scripts | 0 | python | def format_hmac(user, hash_data, hashcat):
'Take hash data and return a valid hashcat hash for the\n PBKDF2-HMAC-SHA512 hash mode (12100) if hashcat is set to True;\n otherwise it will return a valid John-format hash for the\n JtR pbkdf2-hmac-sha512 hash format'
hcat_fmt_str = 'sha512:{}:{}:{}'
john_fmt_str = '{}:$pbkdf2-hmac-sha512${}.{}.{}'
(junk, morejunk, iterations, salt, digest) = hash_data.split('$')
if hashcat:
return hcat_fmt_str.format(iterations, salt, digest)
hex_digest = hexlify(b64decode(digest)).decode().upper()
hex_salt = hexlify(b64decode(salt)).decode().upper()
return john_fmt_str.format(user, iterations, hex_salt, hex_digest) | def format_hmac(user, hash_data, hashcat):
'Take hash data and return a valid hashcat hash for the\n PBKDF2-HMAC-SHA512 hash mode (12100) if hashcat is set to True;\n otherwise it will return a valid John-format hash for the\n JtR pbkdf2-hmac-sha512 hash format'
hcat_fmt_str = 'sha512:{}:{}:{}'
john_fmt_str = '{}:$pbkdf2-hmac-sha512${}.{}.{}'
(junk, morejunk, iterations, salt, digest) = hash_data.split('$')
if hashcat:
return hcat_fmt_str.format(iterations, salt, digest)
hex_digest = hexlify(b64decode(digest)).decode().upper()
hex_salt = hexlify(b64decode(salt)).decode().upper()
return john_fmt_str.format(user, iterations, hex_salt, hex_digest)<|docstring|>Take hash data and return a valid hashcat hash for the
PBKDF2-HMAC-SHA512 hash mode (12100) if hashcat is set to True;
otherwise it will return a valid John-format hash for the
JtR pbkdf2-hmac-sha512 hash format<|endoftext|> |
b3751235729bd9191e3e2c92f096a49e2f6425d5a33923ee3361802ddf83d952 | def format_sha512(user, hash_data, hashcat):
'Take hash data and return a valid hashcat hash for the\n salted SHA512 hash mode (1710) if hashcat is set to True;\n otherwise it will return a valid John hash for the\n dynamic_82 John mode - sha512($password.$salt) '
hcat_fmt_str = '{}:{}'
john_fmt_str = '{}:$dynamic_82${}$HEX${}'
(junk, morejunk, salt, digest) = hash_data.split('$')
hex_digest = hexlify(b64decode(digest)).decode().upper()
hex_salt = hexlify(b64decode(salt)).decode().upper()
if hashcat:
return hcat_fmt_str.format(hex_digest, hex_salt)
return john_fmt_str.format(user, hex_digest, hex_salt) | Take hash data and return a valid hashcat hash for the
salted SHA512 hash mode (1710) if hashcat is set to True;
otherwise it will return a valid John hash for the
dynamic_82 John mode - sha512($password.$salt) | mosquitto2john.py | format_sha512 | ghostking2802/John_conversion_scripts | 0 | python | def format_sha512(user, hash_data, hashcat):
'Take hash data and return a valid hashcat hash for the\n salted SHA512 hash mode (1710) if hashcat is set to True;\n otherwise it will return a valid John hash for the\n dynamic_82 John mode - sha512($password.$salt) '
hcat_fmt_str = '{}:{}'
john_fmt_str = '{}:$dynamic_82${}$HEX${}'
(junk, morejunk, salt, digest) = hash_data.split('$')
hex_digest = hexlify(b64decode(digest)).decode().upper()
hex_salt = hexlify(b64decode(salt)).decode().upper()
if hashcat:
return hcat_fmt_str.format(hex_digest, hex_salt)
return john_fmt_str.format(user, hex_digest, hex_salt) | def format_sha512(user, hash_data, hashcat):
'Take hash data and return a valid hashcat hash for the\n salted SHA512 hash mode (1710) if hashcat is set to True;\n otherwise it will return a valid John hash for the\n dynamic_82 John mode - sha512($password.$salt) '
hcat_fmt_str = '{}:{}'
john_fmt_str = '{}:$dynamic_82${}$HEX${}'
(junk, morejunk, salt, digest) = hash_data.split('$')
hex_digest = hexlify(b64decode(digest)).decode().upper()
hex_salt = hexlify(b64decode(salt)).decode().upper()
if hashcat:
return hcat_fmt_str.format(hex_digest, hex_salt)
return john_fmt_str.format(user, hex_digest, hex_salt)<|docstring|>Take hash data and return a valid hashcat hash for the
salted SHA512 hash mode (1710) if hashcat is set to True;
otherwise it will return a valid John hash for the
dynamic_82 John mode - sha512($password.$salt)<|endoftext|> |
83716cc6d7e412a97be0da94c1538f0ead4f77d3d0dca4788bcc1cab3f149406 | def extract_hash(line, hmac_list, sha512_list, regex, hashcat):
'Do basic parsing on a given passwd file line; if a valid hash is found,\n format it according to its type and, once properly formatted,\n append it to the corresponding hash list. Hash identification is managed\n by a pretty basic regex passed from the calling function.'
line = line.strip()
if (not line):
return
m = regex.match(line)
if (not m):
return
if (m.group().count(':') > 1):
stderr.write("Invalid input. Try removing ':' from username:\n {}\n".format(m.group()))
return
(user, hash_data) = m.group().split(':')
if (hash_data.count('$') == 4):
hmac_list.append(format_hmac(user, hash_data, hashcat))
elif (hash_data.count('$') == 3):
sha512_list.append(format_sha512(user, hash_data, hashcat))
else:
stderr.write('Error parsing hash - bad format:\n{}'.format(hash_data)) | Do basic parsing on a given passwd file line; if a valid hash is found,
format it according to its type and, once properly formatted,
append it to the corresponding hash list. Hash identification is managed
by a pretty basic regex passed from the calling function. | mosquitto2john.py | extract_hash | ghostking2802/John_conversion_scripts | 0 | python | def extract_hash(line, hmac_list, sha512_list, regex, hashcat):
'Do basic parsing on a given passwd file line; if a valid hash is found,\n format it according to its type and, once properly formatted,\n append it to the corresponding hash list. Hash identification is managed\n by a pretty basic regex passed from the calling function.'
line = line.strip()
if (not line):
return
m = regex.match(line)
if (not m):
return
if (m.group().count(':') > 1):
stderr.write("Invalid input. Try removing ':' from username:\n {}\n".format(m.group()))
return
(user, hash_data) = m.group().split(':')
if (hash_data.count('$') == 4):
hmac_list.append(format_hmac(user, hash_data, hashcat))
elif (hash_data.count('$') == 3):
sha512_list.append(format_sha512(user, hash_data, hashcat))
else:
stderr.write('Error parsing hash - bad format:\n{}'.format(hash_data)) | def extract_hash(line, hmac_list, sha512_list, regex, hashcat):
'Do basic parsing on a given passwd file line; if a valid hash is found,\n format it according to its type and, once properly formatted,\n append it to the corresponding hash list. Hash identification is managed\n by a pretty basic regex passed from the calling function.'
line = line.strip()
if (not line):
return
m = regex.match(line)
if (not m):
return
if (m.group().count(':') > 1):
stderr.write("Invalid input. Try removing ':' from username:\n {}\n".format(m.group()))
return
(user, hash_data) = m.group().split(':')
if (hash_data.count('$') == 4):
hmac_list.append(format_hmac(user, hash_data, hashcat))
elif (hash_data.count('$') == 3):
sha512_list.append(format_sha512(user, hash_data, hashcat))
else:
stderr.write('Error parsing hash - bad format:\n{}'.format(hash_data))<|docstring|>Do basic parsing on a given passwd file line; if a valid hash is found,
format it according to its type and, once properly formatted,
append it to the corresponding hash list. Hash identification is managed
by a pretty basic regex passed from the calling function.<|endoftext|>
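Feeding one synthetic line through extract_hash with the same regex that process_file compiles; the entry is generated on the fly and is not a real credential:

import re
import hashlib
from base64 import b64encode

regex = re.compile('.+:\\$[6-7](\\$[0-9]+)*\\$[a-zA-Z0-9+/=]+\\$[a-zA-Z0-9+/=]{80,90}')
hmac_list, sha512_list = [], []

salt = b'saltsalt'
digest = hashlib.sha512(b'secret' + salt).digest()
line = 'carol:$6${}${}\n'.format(b64encode(salt).decode(), b64encode(digest).decode())

extract_hash(line, hmac_list, sha512_list, regex, hashcat=False)
print(sha512_list)   # one dynamic_82-style John entry for user carol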
52f75f2f5c337011254259790c0becc1e0bcca226cfcf6200308aaf1792bdd15 | def process_file(hashfile, hashcat):
"Take a mosquitto_passwd file and convert to John/Hashcat compatible \n format.Can handle both SHA512 and PBKDF2_HMAC_SHA512 output formats.\n Uses raw hex or base64 for hash and salt because 'bad' bytes are possible.\n\n Some versions of mosquitto_passwd can use mixed hash types, so we\n manage the two possible variants in simple lists, up until writing\n them out. \n\n See https://github.com/eclipse/mosquitto/search?q=pw_sha512_pbkdf2 for\n info on HMAC format Hashes. An equivalent search can be made for SHA512.\n \n Hashes have been assumed to always be of the format:\n username:$[HASHNO][$ITER(HMAC ONLY)]$SALT$HASH\n Where salt and hash are always B64 encoded and usernames can be .+\n Any usernames with a colon are out of spec, but possible, so we handle\n them by alerting the user and advising manual management.\n "
hmac_list = []
sha512_list = []
regex = re.compile('.+:\\$[6-7](\\$[0-9]+)*\\$[a-zA-Z0-9+/=]+\\$[a-zA-Z0-9+/=]{80,90}')
with open(hashfile, 'r') as h:
for line in h:
extract_hash(line, hmac_list, sha512_list, regex, hashcat)
if ((len(sha512_list) > 0) or (len(hmac_list) > 0)):
for h in sha512_list:
stdout.write((h + '\n'))
for h in hmac_list:
stdout.write((h + '\n'))
else:
stderr.write('No hashes found. Is this a valid mosquitto_passwd file?\n') | Take a mosquitto_passwd file and convert to John/Hashcat compatible
format. Can handle both SHA512 and PBKDF2_HMAC_SHA512 output formats.
Uses raw hex or base64 for hash and salt because 'bad' bytes are possible.
Some versions of mosquitto_passwd can use mixed hash types, so we
manage the two possible variants in simple lists, up until writing
them out.
See https://github.com/eclipse/mosquitto/search?q=pw_sha512_pbkdf2 for
info on HMAC format Hashes. An equivalent search can be made for SHA512.
Hashes have been assumed to always be of the format:
username:$[HASHNO][$ITER(HMAC ONLY)]$SALT$HASH
Where salt and hash are always B64 encoded and usernames can be .+
Any usernames with a colon are out of spec, but possible, so we handle
them by alerting the user and advising manual management. | mosquitto2john.py | process_file | ghostking2802/John_conversion_scripts | 0 | python | def process_file(hashfile, hashcat):
"Take a mosquitto_passwd file and convert to John/Hashcat compatible \n format.Can handle both SHA512 and PBKDF2_HMAC_SHA512 output formats.\n Uses raw hex or base64 for hash and salt because 'bad' bytes are possible.\n\n Some versions of mosquitto_passwd can use mixed hash types, so we\n manage the two possible variants in simple lists, up until writing\n them out. \n\n See https://github.com/eclipse/mosquitto/search?q=pw_sha512_pbkdf2 for\n info on HMAC format Hashes. An equivalent search can be made for SHA512.\n \n Hashes have been assumed to always be of the format:\n username:$[HASHNO][$ITER(HMAC ONLY)]$SALT$HASH\n Where salt and hash are always B64 encoded and usernames can be .+\n Any usernames with a colon are out of spec, but possible, so we handle\n them by alerting the user and advising manual management.\n "
hmac_list = []
sha512_list = []
regex = re.compile('.+:\\$[6-7](\\$[0-9]+)*\\$[a-zA-Z0-9+/=]+\\$[a-zA-Z0-9+/=]{80,90}')
with open(hashfile, 'r') as h:
for line in h:
extract_hash(line, hmac_list, sha512_list, regex, hashcat)
if ((len(sha512_list) > 0) or (len(hmac_list) > 0)):
for h in sha512_list:
stdout.write((h + '\n'))
for h in hmac_list:
stdout.write((h + '\n'))
else:
stderr.write('No hashes found. Is this a valid mosquitto_passwd file?\n') | def process_file(hashfile, hashcat):
"Take a mosquitto_passwd file and convert to John/Hashcat compatible \n format.Can handle both SHA512 and PBKDF2_HMAC_SHA512 output formats.\n Uses raw hex or base64 for hash and salt because 'bad' bytes are possible.\n\n Some versions of mosquitto_passwd can use mixed hash types, so we\n manage the two possible variants in simple lists, up until writing\n them out. \n\n See https://github.com/eclipse/mosquitto/search?q=pw_sha512_pbkdf2 for\n info on HMAC format Hashes. An equivalent search can be made for SHA512.\n \n Hashes have been assumed to always be of the format:\n username:$[HASHNO][$ITER(HMAC ONLY)]$SALT$HASH\n Where salt and hash are always B64 encoded and usernames can be .+\n Any usernames with a colon are out of spec, but possible, so we handle\n them by alerting the user and advising manual management.\n "
hmac_list = []
sha512_list = []
regex = re.compile('.+:\\$[6-7](\\$[0-9]+)*\\$[a-zA-Z0-9+/=]+\\$[a-zA-Z0-9+/=]{80,90}')
with open(hashfile, 'r') as h:
for line in h:
extract_hash(line, hmac_list, sha512_list, regex, hashcat)
if ((len(sha512_list) > 0) or (len(hmac_list) > 0)):
for h in sha512_list:
stdout.write((h + '\n'))
for h in hmac_list:
stdout.write((h + '\n'))
else:
stderr.write('No hashes found. Is this a valid mosquitto_passwd file?\n')<|docstring|>Take a mosquitto_passwd file and convert to John/Hashcat compatible
format. Can handle both SHA512 and PBKDF2_HMAC_SHA512 output formats.
Uses raw hex or base64 for hash and salt because 'bad' bytes are possible.
Some versions of mosquitto_passwd can use mixed hash types, so we
manage the two possible variants in simple lists, up until writing
them out.
See https://github.com/eclipse/mosquitto/search?q=pw_sha512_pbkdf2 for
info on HMAC format Hashes. An equivalent search can be made for SHA512.
Hashes have been assumed to always be of the format:
username:$[HASHNO][$ITER(HMAC ONLY)]$SALT$HASH
Where salt and hash are always B64 encoded and usernames can be .+
Any usernames with a colon are out of spec, but possible, so we handle
them by alerting the user and advising manual management.<|endoftext|> |
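Driving the whole converter against a temporary passwd file, reusing the synthetic line from the extract_hash sketch above; process_file itself expects stdout and stderr imported from sys at module level, as in the original script:

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.pw', delete=False) as f:
    f.write(line)
    path = f.name

process_file(path, hashcat=False)   # prints John-format hashes to stdout
os.unlink(path)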
de1b9703a0e0f3b972df99526c7dd0de5c69062bb2b1e5fcbab8a8478d8be5a2 | def setUp(self):
'[summary]'
self.db_mock = [Food(email='this is an email', food='This is the 1st meal'), Food(email='this is an email', food='This is the 2nd meal')] | [summary] | test/unittest_mocked.py | setUp | bryanchau11/HealthyLifestyle | 0 | python | def setUp(self):
self.db_mock = [Food(email='this is an email', food='This is the 1st meal'), Food(email='this is an email', food='This is the 2nd meal')] | def setUp(self):
self.db_mock = [Food(email='this is an email', food='This is the 1st meal'), Food(email='this is an email', food='This is the 2nd meal')]<|docstring|>[summary]<|endoftext|> |
a82e0219b922ae356df5791f2065d815060586c23a9f30f06c61235ca06aa6c5 | def mock_add_to_db(self, meal):
'[summary]\n\n Args:\n meal ([type]): [description]\n '
self.db_mock.append(meal) | [summary]
Args:
meal ([type]): [description] | test/unittest_mocked.py | mock_add_to_db | bryanchau11/HealthyLifestyle | 0 | python | def mock_add_to_db(self, meal):
'[summary]\n\n Args:\n meal ([type]): [description]\n '
self.db_mock.append(meal) | def mock_add_to_db(self, meal):
'[summary]\n\n Args:\n meal ([type]): [description]\n '
self.db_mock.append(meal)<|docstring|>[summary]
Args:
meal ([type]): [description]<|endoftext|> |
ffbd359dfed9cd5e1eb35a9f61c5634d92f12e2e15f6dba141372c75064217be | def mock_delete_from_db(self, meal):
'[summary]\n\n Args:\n meal ([type]): [description]\n '
self.db_mock = [entry for entry in self.db_mock if (entry.email != meal.email)] | [summary]
Args:
meal ([type]): [description] | test/unittest_mocked.py | mock_delete_from_db | bryanchau11/HealthyLifestyle | 0 | python | def mock_delete_from_db(self, meal):
'[summary]\n\n Args:\n meal ([type]): [description]\n '
self.db_mock = [entry for entry in self.db_mock if (entry.email != meal.email)] | def mock_delete_from_db(self, meal):
'[summary]\n\n Args:\n meal ([type]): [description]\n '
self.db_mock = [entry for entry in self.db_mock if (entry.email != meal.email)]<|docstring|>[summary]
Args:
meal ([type]): [description]<|endoftext|> |
37e881acffb2782998a5634ae9fa7a2b6abb98dc02f60660aa560d3885d91e23 | def mock_db_commit(self):
'[summary]'
pass | [summary] | test/unittest_mocked.py | mock_db_commit | bryanchau11/HealthyLifestyle | 0 | python | def mock_db_commit(self):
pass | def mock_db_commit(self):
pass<|docstring|>[summary]<|endoftext|> |
025fdecf64970529a4b0db6c05a5d54250e83b6dfec2df44a8639d2b1bce37c9 | def test_save_meal(self):
'[summary]'
with patch('app.db.session.add', self.mock_add_to_db):
with patch('app.db.session.commit', self.mock_db_commit):
mock_filtered = MagicMock()
mock_filtered.all.return_value = self.db_mock
self.mock_add_to_db(Food(email='this is an email', food='This is the 3rd meal'))
self.mock_db_commit()
self.assertEqual(len(self.db_mock), 3)
self.assertEqual(self.db_mock[2].food, 'This is the 3rd meal') | [summary] | test/unittest_mocked.py | test_save_meal | bryanchau11/HealthyLifestyle | 0 | python | def test_save_meal(self):
with patch('app.db.session.add', self.mock_add_to_db):
with patch('app.db.session.commit', self.mock_db_commit):
mock_filtered = MagicMock()
mock_filtered.all.return_value = self.db_mock
self.mock_add_to_db(Food(email='this is an email', food='This is the 3rd meal'))
self.mock_db_commit()
self.assertEqual(len(self.db_mock), 3)
self.assertEqual(self.db_mock[2].food, 'This is the 3rd meal') | def test_save_meal(self):
with patch('app.db.session.add', self.mock_add_to_db):
with patch('app.db.session.commit', self.mock_db_commit):
mock_filtered = MagicMock()
mock_filtered.all.return_value = self.db_mock
self.mock_add_to_db(Food(email='this is an email', food='This is the 3rd meal'))
self.mock_db_commit()
self.assertEqual(len(self.db_mock), 3)
self.assertEqual(self.db_mock[2].food, 'This is the 3rd meal')<|docstring|>[summary]<|endoftext|> |
58e2916bc3a678e4d8dde54d43e78bb053f6a3d28dc42e28b5417c770e825350 | def test_delete_meal_db(self):
'[summary]'
with patch('app.Food.query') as mock_query:
with patch('app.db.session.add', self.mock_add_to_db):
with patch('app.db.session.delete', self.mock_delete_from_db):
with patch('app.db.session.commit', self.mock_db_commit):
mock_filtered = MagicMock()
mock_filtered.all.return_value = self.db_mock
mock_filtered.filter.return_value = [Food(email='this is an email', food='This is the 2nd meal')]
mock_query.filter_by.return_value = mock_filtered
delete_meal_db('this is an email', 'This is the 1st meal')
self.assertEqual(len(self.db_mock), 2)
self.assertEqual(self.db_mock[0].food, 'This is the 1st meal') | [summary] | test/unittest_mocked.py | test_delete_meal_db | bryanchau11/HealthyLifestyle | 0 | python | def test_delete_meal_db(self):
with patch('app.Food.query') as mock_query:
with patch('app.db.session.add', self.mock_add_to_db):
with patch('app.db.session.delete', self.mock_delete_from_db):
with patch('app.db.session.commit', self.mock_db_commit):
mock_filtered = MagicMock()
mock_filtered.all.return_value = self.db_mock
mock_filtered.filter.return_value = [Food(email='this is an email', food='This is the 2nd meal')]
mock_query.filter_by.return_value = mock_filtered
delete_meal_db('this is an email', 'This is the 1st meal')
self.assertEqual(len(self.db_mock), 2)
self.assertEqual(self.db_mock[0].food, 'This is the 1st meal') | def test_delete_meal_db(self):
with patch('app.Food.query') as mock_query:
with patch('app.db.session.add', self.mock_add_to_db):
with patch('app.db.session.delete', self.mock_delete_from_db):
with patch('app.db.session.commit', self.mock_db_commit):
mock_filtered = MagicMock()
mock_filtered.all.return_value = self.db_mock
mock_filtered.filter.return_value = [Food(email='this is an email', food='This is the 2nd meal')]
mock_query.filter_by.return_value = mock_filtered
delete_meal_db('this is an email', 'This is the 1st meal')
self.assertEqual(len(self.db_mock), 2)
self.assertEqual(self.db_mock[0].food, 'This is the 1st meal')<|docstring|>[summary]<|endoftext|> |
fb86de7f97fa835b1aca7dfa0935ac6462ed3a68a93a8a4a36b5f46a0f316ce0 | def test_get_meal(self):
'[summary]'
with patch('function.recommendedMeals.requests.get') as mock_requests_get:
mock_response = MagicMock()
mock_response.json.side_effect = [{}, {'response': {'hits': [{'result': {'name': 'Beef and Mustard Pie', 'image': 'https://www.themealdb.com/images/media/meals/sytuqu1511553755.jpg'}}]}}]
mock_requests_get.return_value = mock_response
self.assertEqual(get_meal('Beef and Mustard Pie')[0], 'Beef and Mustard Pie')
self.assertEqual(get_meal('Beef and Mustard Pie')[1], 'https://www.themealdb.com/images/media/meals/sytuqu1511553755.jpg') | [summary] | test/unittest_mocked.py | test_get_meal | bryanchau11/HealthyLifestyle | 0 | python | def test_get_meal(self):
with patch('function.recommendedMeals.requests.get') as mock_requests_get:
mock_response = MagicMock()
mock_response.json.side_effect = [{}, {'response': {'hits': [{'result': {'name': 'Beef and Mustard Pie', 'image': 'https://www.themealdb.com/images/media/meals/sytuqu1511553755.jpg'}}]}}]
mock_requests_get.return_value = mock_response
self.assertEqual(get_meal('Beef and Mustard Pie')[0], 'Beef and Mustard Pie')
self.assertEqual(get_meal('Beef and Mustard Pie')[1], 'https://www.themealdb.com/images/media/meals/sytuqu1511553755.jpg') | def test_get_meal(self):
with patch('function.recommendedMeals.requests.get') as mock_requests_get:
mock_response = MagicMock()
mock_response.json.side_effect = [{}, {'response': {'hits': [{'result': {'name': 'Beef and Mustard Pie', 'image': 'https://www.themealdb.com/images/media/meals/sytuqu1511553755.jpg'}}]}}]
mock_requests_get.return_value = mock_response
self.assertEqual(get_meal('Beef and Mustard Pie')[0], 'Beef and Mustard Pie')
self.assertEqual(get_meal('Beef and Mustard Pie')[1], 'https://www.themealdb.com/images/media/meals/sytuqu1511553755.jpg')<|docstring|>[summary]<|endoftext|> |
e8588007ebb8798a16c29bd8b76bb2e778efc6b4d655ca227fba684241de683f | def mock_add_to_db(self, user):
'[summary]\n\n Args:\n meal ([type]): [description]\n '
self.db_mock.append(user) | [summary]
Args:
meal ([type]): [description] | test/unittest_mocked.py | mock_add_to_db | bryanchau11/HealthyLifestyle | 0 | python | def mock_add_to_db(self, user):
'[summary]\n\n Args:\n meal ([type]): [description]\n '
self.db_mock.append(user) | def mock_add_to_db(self, user):
'[summary]\n\n Args:\n meal ([type]): [description]\n '
self.db_mock.append(user)<|docstring|>[summary]
Args:
meal ([type]): [description]<|endoftext|> |
b6ebb18b52200472678d042dcd28267f74b4404a11597f502a31989f923caf48 | def mock_delete_from_db(self, user):
'[summary]\n\n Args:\n meal ([type]): [description]\n '
self.db_mock = [entry for entry in self.db_mock if (entry.email != user.email)] | [summary]
Args:
meal ([type]): [description] | test/unittest_mocked.py | mock_delete_from_db | bryanchau11/HealthyLifestyle | 0 | python | def mock_delete_from_db(self, user):
'[summary]\n\n Args:\n meal ([type]): [description]\n '
self.db_mock = [entry for entry in self.db_mock if (entry.email != user.email)] | def mock_delete_from_db(self, user):
'[summary]\n\n Args:\n meal ([type]): [description]\n '
self.db_mock = [entry for entry in self.db_mock if (entry.email != user.email)]<|docstring|>[summary]
Args:
meal ([type]): [description]<|endoftext|> |
37e881acffb2782998a5634ae9fa7a2b6abb98dc02f60660aa560d3885d91e23 | def mock_db_commit(self):
'[summary]'
pass | [summary] | test/unittest_mocked.py | mock_db_commit | bryanchau11/HealthyLifestyle | 0 | python | def mock_db_commit(self):
pass | def mock_db_commit(self):
pass<|docstring|>[summary]<|endoftext|> |
b4a731cd78b90ed16197b6c2f379dd4540ae4e098342ca293af47a27d6d374cb | def test_signup(self):
'[summary]'
with patch('app.db.session.add', self.mock_add_to_db):
with patch('app.db.session.commit', self.mock_db_commit):
mock_filtered = MagicMock()
mock_filtered.all.return_value = self.db_mock
self.mock_add_to_db(User(email='email3', username='user3', password='pass3'))
self.assertEqual(len(self.db_mock), 3)
self.assertEqual(self.db_mock[2].email, 'email3') | [summary] | test/unittest_mocked.py | test_signup | bryanchau11/HealthyLifestyle | 0 | python | def test_signup(self):
with patch('app.db.session.add', self.mock_add_to_db):
with patch('app.db.session.commit', self.mock_db_commit):
mock_filtered = MagicMock()
mock_filtered.all.return_value = self.db_mock
self.mock_add_to_db(User(email='email3', username='user3', password='pass3'))
self.assertEqual(len(self.db_mock), 3)
self.assertEqual(self.db_mock[2].email, 'email3') | def test_signup(self):
with patch('app.db.session.add', self.mock_add_to_db):
with patch('app.db.session.commit', self.mock_db_commit):
mock_filtered = MagicMock()
mock_filtered.all.return_value = self.db_mock
self.mock_add_to_db(User(email='email3', username='user3', password='pass3'))
self.assertEqual(len(self.db_mock), 3)
self.assertEqual(self.db_mock[2].email, 'email3')<|docstring|>[summary]<|endoftext|> |
12bdcc2d46616f95bb2633edcd867d06455226e7ec8433852a31612f7dcb47dd | def test_change_user_info(self):
'[summary]'
with patch('app.db.session.commit', self.mock_db_commit):
mock_filtered = MagicMock()
mock_filtered.all.return_value = self.db_mock
self.db_mock[0].username = 'test'
self.db_mock[0].height = '72'
self.db_mock[0].weight = '130'
self.db_mock[0].age = '22'
self.assertEqual(self.db_mock[0], User(email='email1', username='test', password='pass1', height='72', weight='130', age='22')) | [summary] | test/unittest_mocked.py | test_change_user_info | bryanchau11/HealthyLifestyle | 0 | python | def test_change_user_info(self):
with patch('app.db.session.commit', self.mock_db_commit):
mock_filtered = MagicMock()
mock_filtered.all.return_value = self.db_mock
self.db_mock[0].username = 'test'
self.db_mock[0].height = '72'
self.db_mock[0].weight = '130'
self.db_mock[0].age = '22'
self.assertEqual(self.db_mock[0], User(email='email1', username='test', password='pass1', height='72', weight='130', age='22')) | def test_change_user_info(self):
with patch('app.db.session.commit', self.mock_db_commit):
mock_filtered = MagicMock()
mock_filtered.all.return_value = self.db_mock
self.db_mock[0].username = 'test'
self.db_mock[0].height = '72'
self.db_mock[0].weight = '130'
self.db_mock[0].age = '22'
self.assertEqual(self.db_mock[0], User(email='email1', username='test', password='pass1', height='72', weight='130', age='22'))<|docstring|>[summary]<|endoftext|> |
6988c086a33dc0c391ef8d5c87134166325ed14f100eefa2d948967ed299a449 | def _get_exchange_current_density(self, variables):
'\n A private function to obtain the exchange current density\n\n Parameters\n ----------\n variables: dict\n The variables in the full model.\n\n Returns\n -------\n j0 : :class: `pybamm.Symbol`\n The exchange current density.\n '
param = self.param
c_e = variables[(self.domain + ' electrolyte concentration')]
T = variables[(self.domain + ' electrode temperature')]
if (self.reaction == 'lithium-ion main'):
if (self.options['particle size'] == 'distribution'):
c_s_surf = variables[(self.domain + ' particle surface concentration distribution')]
if (isinstance(c_s_surf, pybamm.Broadcast) and isinstance(c_e, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_s_surf = c_s_surf.orphans[0]
c_e = c_e.orphans[0]
T = T.orphans[0]
c_e = pybamm.PrimaryBroadcast(c_e, ['current collector'])
c_e = pybamm.PrimaryBroadcast(c_e, [(self.domain.lower() + ' particle size')])
T = pybamm.PrimaryBroadcast(T, [(self.domain.lower() + ' particle size')])
else:
c_s_surf = variables[(self.domain + ' particle surface concentration')]
if (isinstance(c_s_surf, pybamm.Broadcast) and isinstance(c_e, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_s_surf = c_s_surf.orphans[0]
c_e = c_e.orphans[0]
T = T.orphans[0]
tol = 1e-08
c_e = pybamm.maximum(tol, c_e)
c_s_surf = pybamm.maximum(tol, pybamm.minimum(c_s_surf, (1 - tol)))
if (self.domain == 'Negative'):
j0 = (param.j0_n(c_e, c_s_surf, T) / param.C_r_n)
elif (self.domain == 'Positive'):
j0 = ((param.gamma_p * param.j0_p(c_e, c_s_surf, T)) / param.C_r_p)
elif (self.reaction == 'lithium metal plating'):
j0 = param.j0_plating(c_e, 1, T)
elif (self.reaction == 'lead-acid main'):
if (isinstance(c_e, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_e = c_e.orphans[0]
T = T.orphans[0]
if (self.domain == 'Negative'):
j0 = param.j0_n(c_e, T)
elif (self.domain == 'Positive'):
j0 = param.j0_p(c_e, T)
elif (self.reaction == 'lead-acid oxygen'):
if (isinstance(c_e, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_e = c_e.orphans[0]
T = T.orphans[0]
if (self.domain == 'Negative'):
j0 = pybamm.Scalar(0)
elif (self.domain == 'Positive'):
j0 = param.j0_p_Ox(c_e, T)
else:
j0 = pybamm.Scalar(0)
return j0 | A private function to obtain the exchange current density
Parameters
----------
variables: dict
The variables in the full model.
Returns
-------
j0 : :class: `pybamm.Symbol`
The exchange current density. | pybamm/models/submodels/interface/base_interface.py | _get_exchange_current_density | ehtec/PyBaMM | 330 | python | def _get_exchange_current_density(self, variables):
'\n A private function to obtain the exchange current density\n\n Parameters\n ----------\n variables: dict\n The variables in the full model.\n\n Returns\n -------\n j0 : :class: `pybamm.Symbol`\n The exchange current density.\n '
param = self.param
c_e = variables[(self.domain + ' electrolyte concentration')]
T = variables[(self.domain + ' electrode temperature')]
if (self.reaction == 'lithium-ion main'):
if (self.options['particle size'] == 'distribution'):
c_s_surf = variables[(self.domain + ' particle surface concentration distribution')]
if (isinstance(c_s_surf, pybamm.Broadcast) and isinstance(c_e, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_s_surf = c_s_surf.orphans[0]
c_e = c_e.orphans[0]
T = T.orphans[0]
c_e = pybamm.PrimaryBroadcast(c_e, ['current collector'])
c_e = pybamm.PrimaryBroadcast(c_e, [(self.domain.lower() + ' particle size')])
T = pybamm.PrimaryBroadcast(T, [(self.domain.lower() + ' particle size')])
else:
c_s_surf = variables[(self.domain + ' particle surface concentration')]
if (isinstance(c_s_surf, pybamm.Broadcast) and isinstance(c_e, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_s_surf = c_s_surf.orphans[0]
c_e = c_e.orphans[0]
T = T.orphans[0]
tol = 1e-08
c_e = pybamm.maximum(tol, c_e)
c_s_surf = pybamm.maximum(tol, pybamm.minimum(c_s_surf, (1 - tol)))
if (self.domain == 'Negative'):
j0 = (param.j0_n(c_e, c_s_surf, T) / param.C_r_n)
elif (self.domain == 'Positive'):
j0 = ((param.gamma_p * param.j0_p(c_e, c_s_surf, T)) / param.C_r_p)
elif (self.reaction == 'lithium metal plating'):
j0 = param.j0_plating(c_e, 1, T)
elif (self.reaction == 'lead-acid main'):
if (isinstance(c_e, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_e = c_e.orphans[0]
T = T.orphans[0]
if (self.domain == 'Negative'):
j0 = param.j0_n(c_e, T)
elif (self.domain == 'Positive'):
j0 = param.j0_p(c_e, T)
elif (self.reaction == 'lead-acid oxygen'):
if (isinstance(c_e, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_e = c_e.orphans[0]
T = T.orphans[0]
if (self.domain == 'Negative'):
j0 = pybamm.Scalar(0)
elif (self.domain == 'Positive'):
j0 = param.j0_p_Ox(c_e, T)
else:
j0 = pybamm.Scalar(0)
return j0 | def _get_exchange_current_density(self, variables):
'\n A private function to obtain the exchange current density\n\n Parameters\n ----------\n variables: dict\n The variables in the full model.\n\n Returns\n -------\n j0 : :class: `pybamm.Symbol`\n The exchange current density.\n '
param = self.param
c_e = variables[(self.domain + ' electrolyte concentration')]
T = variables[(self.domain + ' electrode temperature')]
if (self.reaction == 'lithium-ion main'):
if (self.options['particle size'] == 'distribution'):
c_s_surf = variables[(self.domain + ' particle surface concentration distribution')]
if (isinstance(c_s_surf, pybamm.Broadcast) and isinstance(c_e, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_s_surf = c_s_surf.orphans[0]
c_e = c_e.orphans[0]
T = T.orphans[0]
c_e = pybamm.PrimaryBroadcast(c_e, ['current collector'])
c_e = pybamm.PrimaryBroadcast(c_e, [(self.domain.lower() + ' particle size')])
T = pybamm.PrimaryBroadcast(T, [(self.domain.lower() + ' particle size')])
else:
c_s_surf = variables[(self.domain + ' particle surface concentration')]
if (isinstance(c_s_surf, pybamm.Broadcast) and isinstance(c_e, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_s_surf = c_s_surf.orphans[0]
c_e = c_e.orphans[0]
T = T.orphans[0]
tol = 1e-08
c_e = pybamm.maximum(tol, c_e)
c_s_surf = pybamm.maximum(tol, pybamm.minimum(c_s_surf, (1 - tol)))
if (self.domain == 'Negative'):
j0 = (param.j0_n(c_e, c_s_surf, T) / param.C_r_n)
elif (self.domain == 'Positive'):
j0 = ((param.gamma_p * param.j0_p(c_e, c_s_surf, T)) / param.C_r_p)
elif (self.reaction == 'lithium metal plating'):
j0 = param.j0_plating(c_e, 1, T)
elif (self.reaction == 'lead-acid main'):
if (isinstance(c_e, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_e = c_e.orphans[0]
T = T.orphans[0]
if (self.domain == 'Negative'):
j0 = param.j0_n(c_e, T)
elif (self.domain == 'Positive'):
j0 = param.j0_p(c_e, T)
elif (self.reaction == 'lead-acid oxygen'):
if (isinstance(c_e, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_e = c_e.orphans[0]
T = T.orphans[0]
if (self.domain == 'Negative'):
j0 = pybamm.Scalar(0)
elif (self.domain == 'Positive'):
j0 = param.j0_p_Ox(c_e, T)
else:
j0 = pybamm.Scalar(0)
return j0<|docstring|>A private function to obtain the exchange current density
Parameters
----------
variables: dict
The variables in the full model.
Returns
-------
j0 : :class: `pybamm.Symbol`
The exchange current density.<|endoftext|> |
701d6534c0010ab3f9662eeeafe813d20fe7576c35c4275dc5c4b540a5356666 | def _get_open_circuit_potential(self, variables):
'\n A private function to obtain the open circuit potential and entropic change\n\n Parameters\n ----------\n variables: dict\n The variables in the full model.\n\n Returns\n -------\n ocp : :class:`pybamm.Symbol`\n The open-circuit potential\n dUdT : :class:`pybamm.Symbol`\n The entropic change in open-circuit potential due to temperature\n\n '
if (self.reaction == 'lithium-ion main'):
T = variables[(self.domain + ' electrode temperature')]
if (self.options['particle size'] == 'distribution'):
c_s_surf = variables[(self.domain + ' particle surface concentration distribution')]
if (isinstance(c_s_surf, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_s_surf = c_s_surf.orphans[0]
T = T.orphans[0]
T = pybamm.PrimaryBroadcast(T, [(self.domain.lower() + ' particle size')])
else:
c_s_surf = variables[(self.domain + ' particle surface concentration')]
if (isinstance(c_s_surf, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_s_surf = c_s_surf.orphans[0]
T = T.orphans[0]
if (self.domain == 'Negative'):
ocp = self.param.U_n(c_s_surf, T)
dUdT = self.param.dUdT_n(c_s_surf)
elif (self.domain == 'Positive'):
ocp = self.param.U_p(c_s_surf, T)
dUdT = self.param.dUdT_p(c_s_surf)
elif (self.reaction == 'lithium metal plating'):
T = variables[(self.domain + ' electrode temperature')]
ocp = self.param.U_n_ref
dUdT = (0 * T)
elif (self.reaction == 'lead-acid main'):
c_e = variables[(self.domain + ' electrolyte concentration')]
if isinstance(c_e, pybamm.Broadcast):
c_e = c_e.orphans[0]
if (self.domain == 'Negative'):
ocp = self.param.U_n(c_e, self.param.T_init)
elif (self.domain == 'Positive'):
ocp = self.param.U_p(c_e, self.param.T_init)
dUdT = pybamm.Scalar(0)
elif (self.reaction == 'lead-acid oxygen'):
if (self.domain == 'Negative'):
ocp = self.param.U_n_Ox
elif (self.domain == 'Positive'):
ocp = self.param.U_p_Ox
dUdT = pybamm.Scalar(0)
else:
ocp = pybamm.Scalar(0)
dUdT = pybamm.Scalar(0)
return (ocp, dUdT) | A private function to obtain the open circuit potential and entropic change
Parameters
----------
variables: dict
The variables in the full model.
Returns
-------
ocp : :class:`pybamm.Symbol`
The open-circuit potential
dUdT : :class:`pybamm.Symbol`
The entropic change in open-circuit potential due to temperature | pybamm/models/submodels/interface/base_interface.py | _get_open_circuit_potential | ehtec/PyBaMM | 330 | python | def _get_open_circuit_potential(self, variables):
'\n A private function to obtain the open circuit potential and entropic change\n\n Parameters\n ----------\n variables: dict\n The variables in the full model.\n\n Returns\n -------\n ocp : :class:`pybamm.Symbol`\n The open-circuit potential\n dUdT : :class:`pybamm.Symbol`\n The entropic change in open-circuit potential due to temperature\n\n '
if (self.reaction == 'lithium-ion main'):
T = variables[(self.domain + ' electrode temperature')]
if (self.options['particle size'] == 'distribution'):
c_s_surf = variables[(self.domain + ' particle surface concentration distribution')]
if (isinstance(c_s_surf, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_s_surf = c_s_surf.orphans[0]
T = T.orphans[0]
T = pybamm.PrimaryBroadcast(T, [(self.domain.lower() + ' particle size')])
else:
c_s_surf = variables[(self.domain + ' particle surface concentration')]
if (isinstance(c_s_surf, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_s_surf = c_s_surf.orphans[0]
T = T.orphans[0]
if (self.domain == 'Negative'):
ocp = self.param.U_n(c_s_surf, T)
dUdT = self.param.dUdT_n(c_s_surf)
elif (self.domain == 'Positive'):
ocp = self.param.U_p(c_s_surf, T)
dUdT = self.param.dUdT_p(c_s_surf)
elif (self.reaction == 'lithium metal plating'):
T = variables[(self.domain + ' electrode temperature')]
ocp = self.param.U_n_ref
dUdT = (0 * T)
elif (self.reaction == 'lead-acid main'):
c_e = variables[(self.domain + ' electrolyte concentration')]
if isinstance(c_e, pybamm.Broadcast):
c_e = c_e.orphans[0]
if (self.domain == 'Negative'):
ocp = self.param.U_n(c_e, self.param.T_init)
elif (self.domain == 'Positive'):
ocp = self.param.U_p(c_e, self.param.T_init)
dUdT = pybamm.Scalar(0)
elif (self.reaction == 'lead-acid oxygen'):
if (self.domain == 'Negative'):
ocp = self.param.U_n_Ox
elif (self.domain == 'Positive'):
ocp = self.param.U_p_Ox
dUdT = pybamm.Scalar(0)
else:
ocp = pybamm.Scalar(0)
dUdT = pybamm.Scalar(0)
return (ocp, dUdT) | def _get_open_circuit_potential(self, variables):
'\n A private function to obtain the open circuit potential and entropic change\n\n Parameters\n ----------\n variables: dict\n The variables in the full model.\n\n Returns\n -------\n ocp : :class:`pybamm.Symbol`\n The open-circuit potential\n dUdT : :class:`pybamm.Symbol`\n The entropic change in open-circuit potential due to temperature\n\n '
if (self.reaction == 'lithium-ion main'):
T = variables[(self.domain + ' electrode temperature')]
if (self.options['particle size'] == 'distribution'):
c_s_surf = variables[(self.domain + ' particle surface concentration distribution')]
if (isinstance(c_s_surf, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_s_surf = c_s_surf.orphans[0]
T = T.orphans[0]
T = pybamm.PrimaryBroadcast(T, [(self.domain.lower() + ' particle size')])
else:
c_s_surf = variables[(self.domain + ' particle surface concentration')]
if (isinstance(c_s_surf, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast)):
c_s_surf = c_s_surf.orphans[0]
T = T.orphans[0]
if (self.domain == 'Negative'):
ocp = self.param.U_n(c_s_surf, T)
dUdT = self.param.dUdT_n(c_s_surf)
elif (self.domain == 'Positive'):
ocp = self.param.U_p(c_s_surf, T)
dUdT = self.param.dUdT_p(c_s_surf)
elif (self.reaction == 'lithium metal plating'):
T = variables[(self.domain + ' electrode temperature')]
ocp = self.param.U_n_ref
dUdT = (0 * T)
elif (self.reaction == 'lead-acid main'):
c_e = variables[(self.domain + ' electrolyte concentration')]
if isinstance(c_e, pybamm.Broadcast):
c_e = c_e.orphans[0]
if (self.domain == 'Negative'):
ocp = self.param.U_n(c_e, self.param.T_init)
elif (self.domain == 'Positive'):
ocp = self.param.U_p(c_e, self.param.T_init)
dUdT = pybamm.Scalar(0)
elif (self.reaction == 'lead-acid oxygen'):
if (self.domain == 'Negative'):
ocp = self.param.U_n_Ox
elif (self.domain == 'Positive'):
ocp = self.param.U_p_Ox
dUdT = pybamm.Scalar(0)
else:
ocp = pybamm.Scalar(0)
dUdT = pybamm.Scalar(0)
return (ocp, dUdT)<|docstring|>A private function to obtain the open circuit potential and entropic change
Parameters
----------
variables: dict
The variables in the full model.
Returns
-------
ocp : :class:`pybamm.Symbol`
The open-circuit potential
dUdT : :class:`pybamm.Symbol`
The entropic change in open-circuit potential due to temperature<|endoftext|> |
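Downstream kinetics submodels typically combine the returned pair as a first-order temperature correction, U(c, T) ≈ U(c, T_ref) + (T − T_ref)·dU/dT. A plain-float sketch with hypothetical values (in the model these are dimensionless pybamm symbols, and U_ref and dUdT come from param.U_n / param.dUdT_n):

U_ref = 0.17  # open-circuit potential at the reference temperature [V] (hypothetical)
dUdT = -0.00011  # entropic coefficient [V.K-1] (hypothetical)
T = 318.15  # cell temperature [K]
T_ref = 298.15  # reference temperature [K]
U = U_ref + ((T - T_ref) * dUdT)
print(round(U, 4))  # 0.1678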
2b2f24d03fd0cb5c63ae35c20346dc85b11737948b81a46b662fec412058d093 | def _get_number_of_electrons_in_reaction(self):
'Returns the number of electrons in the reaction.'
if (self.reaction in ['lead-acid main', 'lithium-ion main', 'lithium metal plating']):
if (self.domain == 'Negative'):
return self.param.ne_n
elif (self.domain == 'Positive'):
return self.param.ne_p
elif (self.reaction == 'lead-acid oxygen'):
return self.param.ne_Ox
else:
return pybamm.Scalar(0) | Returns the number of electrons in the reaction. | pybamm/models/submodels/interface/base_interface.py | _get_number_of_electrons_in_reaction | ehtec/PyBaMM | 330 | python | def _get_number_of_electrons_in_reaction(self):
if (self.reaction in ['lead-acid main', 'lithium-ion main', 'lithium metal plating']):
if (self.domain == 'Negative'):
return self.param.ne_n
elif (self.domain == 'Positive'):
return self.param.ne_p
elif (self.reaction == 'lead-acid oxygen'):
return self.param.ne_Ox
else:
return pybamm.Scalar(0) | def _get_number_of_electrons_in_reaction(self):
if (self.reaction in ['lead-acid main', 'lithium-ion main', 'lithium metal plating']):
if (self.domain == 'Negative'):
return self.param.ne_n
elif (self.domain == 'Positive'):
return self.param.ne_p
elif (self.reaction == 'lead-acid oxygen'):
return self.param.ne_Ox
else:
return pybamm.Scalar(0)<|docstring|>Returns the number of electrons in the reaction.<|endoftext|> |
5dc63c3eb173784fdb5fe6f31afd17f81de40bec8eec1e866bb73c5134b3f697 | def _get_electrolyte_reaction_signed_stoichiometry(self):
'Returns the signed stoichiometry of the electrolyte reaction for each electrode.'
if (self.reaction in ['lithium-ion main', 'SEI', 'lithium plating', 'lithium metal plating']):
return (pybamm.Scalar(1), pybamm.Scalar(1))
elif (self.reaction == 'lead-acid main'):
return (self.param.s_plus_n_S, self.param.s_plus_p_S)
elif (self.reaction == 'lead-acid oxygen'):
return (self.param.s_plus_Ox, self.param.s_plus_Ox)
else:
return (pybamm.Scalar(0), pybamm.Scalar(0)) | Returns the signed stoichiometry of the electrolyte reaction for each electrode. | pybamm/models/submodels/interface/base_interface.py | _get_electrolyte_reaction_signed_stoichiometry | ehtec/PyBaMM | 330 | python | def _get_electrolyte_reaction_signed_stoichiometry(self):
if (self.reaction in ['lithium-ion main', 'SEI', 'lithium plating', 'lithium metal plating']):
return (pybamm.Scalar(1), pybamm.Scalar(1))
elif (self.reaction == 'lead-acid main'):
return (self.param.s_plus_n_S, self.param.s_plus_p_S)
elif (self.reaction == 'lead-acid oxygen'):
return (self.param.s_plus_Ox, self.param.s_plus_Ox)
else:
return (pybamm.Scalar(0), pybamm.Scalar(0)) | def _get_electrolyte_reaction_signed_stoichiometry(self):
if (self.reaction in ['lithium-ion main', 'SEI', 'lithium plating', 'lithium metal plating']):
return (pybamm.Scalar(1), pybamm.Scalar(1))
elif (self.reaction == 'lead-acid main'):
return (self.param.s_plus_n_S, self.param.s_plus_p_S)
elif (self.reaction == 'lead-acid oxygen'):
return (self.param.s_plus_Ox, self.param.s_plus_Ox)
else:
return (pybamm.Scalar(0), pybamm.Scalar(0))<|docstring|>Returns the signed stoichiometry of the electrolyte reaction for each electrode.<|endoftext|>
f617b4b50c41846e6bc430f9d63f3f8ae159b1262482ee3bb01d864d91ec426d | def _get_average_total_interfacial_current_density(self, variables):
'\n Method to obtain the average total interfacial current density.\n\n Note: for lithium-ion models this is only exact if all the particles have\n the same radius. For the current set of models implemented in pybamm,\n having the radius as a function of through-cell distance only makes sense\n for the DFN model. In the DFN, the correct average interfacial current density\n is computed in \'base_kinetics.py\' by averaging the actual interfacial current\n density. The approximation here is only used to get the approximate constant\n additional resistance term for the "average" SEI film resistance model\n (if using), where only negligible errors will be introduced.\n\n For "leading-order" and "composite" submodels (as used in the SPM and SPMe)\n there is only a single particle radius, so this method returns the correct result.\n '
i_boundary_cc = variables['Current collector current density']
if (self.half_cell and (self.domain == 'Negative')):
i_boundary_cc = variables['Current collector current density']
j_total_average = i_boundary_cc
else:
a_av = variables[(('X-averaged ' + self.domain.lower()) + ' electrode surface area to volume ratio')]
if (self.domain == 'Negative'):
j_total_average = (i_boundary_cc / (a_av * self.param.l_n))
elif (self.domain == 'Positive'):
j_total_average = ((- i_boundary_cc) / (a_av * self.param.l_p))
return j_total_average | Method to obtain the average total interfacial current density.
Note: for lithium-ion models this is only exact if all the particles have
the same radius. For the current set of models implemented in pybamm,
having the radius as a function of through-cell distance only makes sense
for the DFN model. In the DFN, the correct average interfacial current density
is computed in 'base_kinetics.py' by averaging the actual interfacial current
density. The approximation here is only used to get the approximate constant
additional resistance term for the "average" SEI film resistance model
(if using), where only negligible errors will be introduced.
For "leading-order" and "composite" submodels (as used in the SPM and SPMe)
there is only a single particle radius, so this method returns the correct result.
'\n Method to obtain the average total interfacial current density.\n\n Note: for lithium-ion models this is only exact if all the particles have\n the same radius. For the current set of models implemented in pybamm,\n having the radius as a function of through-cell distance only makes sense\n for the DFN model. In the DFN, the correct average interfacial current density\n is computed in \'base_kinetics.py\' by averaging the actual interfacial current\n density. The approximation here is only used to get the approximate constant\n additional resistance term for the "average" SEI film resistance model\n (if using), where only negligible errors will be introduced.\n\n For "leading-order" and "composite" submodels (as used in the SPM and SPMe)\n there is only a single particle radius, so this method returns the correct result.\n '
i_boundary_cc = variables['Current collector current density']
if (self.half_cell and (self.domain == 'Negative')):
i_boundary_cc = variables['Current collector current density']
j_total_average = i_boundary_cc
else:
a_av = variables[(('X-averaged ' + self.domain.lower()) + ' electrode surface area to volume ratio')]
if (self.domain == 'Negative'):
j_total_average = (i_boundary_cc / (a_av * self.param.l_n))
elif (self.domain == 'Positive'):
j_total_average = ((- i_boundary_cc) / (a_av * self.param.l_p))
return j_total_average | def _get_average_total_interfacial_current_density(self, variables):
'\n Method to obtain the average total interfacial current density.\n\n Note: for lithium-ion models this is only exact if all the particles have\n the same radius. For the current set of models implemented in pybamm,\n having the radius as a function of through-cell distance only makes sense\n for the DFN model. In the DFN, the correct average interfacial current density\n is computed in \'base_kinetics.py\' by averaging the actual interfacial current\n density. The approximation here is only used to get the approximate constant\n additional resistance term for the "average" SEI film resistance model\n (if using), where only negligible errors will be introduced.\n\n For "leading-order" and "composite" submodels (as used in the SPM and SPMe)\n there is only a single particle radius, so this method returns the correct result.\n '
i_boundary_cc = variables['Current collector current density']
if (self.half_cell and (self.domain == 'Negative')):
i_boundary_cc = variables['Current collector current density']
j_total_average = i_boundary_cc
else:
a_av = variables[(('X-averaged ' + self.domain.lower()) + ' electrode surface area to volume ratio')]
if (self.domain == 'Negative'):
j_total_average = (i_boundary_cc / (a_av * self.param.l_n))
elif (self.domain == 'Positive'):
j_total_average = ((- i_boundary_cc) / (a_av * self.param.l_p))
return j_total_average<|docstring|>Method to obtain the average total interfacial current density.
Note: for lithium-ion models this is only exact if all the particles have
the same radius. For the current set of models implemented in pybamm,
having the radius as a function of through-cell distance only makes sense
for the DFN model. In the DFN, the correct average interfacial current density
is computed in 'base_kinetics.py' by averaging the actual interfacial current
density. The approximation here is only used to get the approximate constant
additional resistance term for the "average" SEI film resistance model
(if using), where only negligible errors will be introduced.
For "leading-order" and "composite" submodels (as used in the SPM and SPMe)
there is only a single particle radius, so this method returns the correct result.<|endoftext|>
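A dimensionless numeric sketch of the averaging above, using hypothetical magnitudes (in the model these are symbols on the current-collector domain):

# Negative electrode: j_av = i_cc / (a_av * l_n); the positive electrode carries the opposite sign.
i_boundary_cc = 1.0  # current collector current density (dimensionless, hypothetical)
a_av = 1.2  # x-averaged surface area to volume ratio (hypothetical)
l_n = 0.3  # negative electrode thickness fraction (hypothetical)
j_total_average = i_boundary_cc / (a_av * l_n)
print(round(j_total_average, 3))  # 2.778

The half-cell branch skips the division entirely because the plated lithium surface carries the whole current collector current density directly.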
fbcd3c68ae098c1e15fe716dd1e97a073f01e1e03aaf55c7ec589100e387fb80 | def _get_standard_whole_cell_interfacial_current_variables(self, variables):
'\n Get variables associated with interfacial current over the whole cell domain\n This function also automatically increments the "total source term" variables\n '
param = self.param
i_typ = param.i_typ
L_x = param.L_x
j_n_scale = param.j_scale_n
j_p_scale = param.j_scale_p
j_p_av = variables[(('X-averaged positive electrode' + self.reaction_name) + ' interfacial current density')]
zero_s = pybamm.FullBroadcast(0, 'separator', 'current collector')
j_p = variables[(('Positive electrode' + self.reaction_name) + ' interfacial current density')]
if self.half_cell:
j = pybamm.concatenation(zero_s, j_p)
j_dim = pybamm.concatenation(zero_s, (j_p_scale * j_p))
else:
j_n_av = variables[(('X-averaged negative electrode' + self.reaction_name) + ' interfacial current density')]
j_n = variables[(('Negative electrode' + self.reaction_name) + ' interfacial current density')]
j = pybamm.concatenation(j_n, zero_s, j_p)
j_dim = pybamm.concatenation((j_n_scale * j_n), zero_s, (j_p_scale * j_p))
new_variables = variables.copy()
if (self.reaction not in ['SEI', 'lithium plating']):
new_variables.update({self.Reaction_icd: j, (self.Reaction_icd + ' [A.m-2]'): j_dim, (self.Reaction_icd + ' per volume [A.m-3]'): ((i_typ / L_x) * j)})
a_p = new_variables['Positive electrode surface area to volume ratio']
(s_n, s_p) = self._get_electrolyte_reaction_signed_stoichiometry()
if self.half_cell:
a_n = pybamm.Scalar(1)
a = pybamm.concatenation(zero_s, a_p)
s = pybamm.concatenation(zero_s, pybamm.FullBroadcast(s_p, 'positive electrode', 'current collector'))
else:
a_n = new_variables['Negative electrode surface area to volume ratio']
a = pybamm.concatenation(a_n, zero_s, a_p)
s = pybamm.concatenation(pybamm.FullBroadcast(s_n, 'negative electrode', 'current collector'), zero_s, pybamm.FullBroadcast(s_p, 'positive electrode', 'current collector'))
j.print_name = 'J'
a.print_name = 'a'
j_p.print_name = 'j_p'
new_variables['Sum of electrolyte reaction source terms'] += ((a * s) * j)
new_variables['Sum of positive electrode electrolyte reaction source terms'] += ((a_p * s_p) * j_p)
new_variables['Sum of x-averaged positive electrode electrolyte reaction source terms'] += pybamm.x_average(((a_p * s_p) * j_p))
new_variables['Sum of interfacial current densities'] += j
new_variables['Sum of positive electrode interfacial current densities'] += j_p
new_variables['Sum of x-averaged positive electrode interfacial current densities'] += j_p_av
if (not self.half_cell):
j_n.print_name = 'j_n'
new_variables['Sum of negative electrode electrolyte reaction source terms'] += ((a_n * s_n) * j_n)
new_variables['Sum of x-averaged negative electrode electrolyte reaction source terms'] += pybamm.x_average(((a_n * s_n) * j_n))
new_variables['Sum of negative electrode interfacial current densities'] += j_n
new_variables['Sum of x-averaged negative electrode interfacial current densities'] += j_n_av
variables.update(new_variables)
return variables | Get variables associated with interfacial current over the whole cell domain
This function also automatically increments the "total source term" variables | pybamm/models/submodels/interface/base_interface.py | _get_standard_whole_cell_interfacial_current_variables | ehtec/PyBaMM | 330 | python | def _get_standard_whole_cell_interfacial_current_variables(self, variables):
'\n Get variables associated with interfacial current over the whole cell domain\n This function also automatically increments the "total source term" variables\n '
param = self.param
i_typ = param.i_typ
L_x = param.L_x
j_n_scale = param.j_scale_n
j_p_scale = param.j_scale_p
j_p_av = variables[(('X-averaged positive electrode' + self.reaction_name) + ' interfacial current density')]
zero_s = pybamm.FullBroadcast(0, 'separator', 'current collector')
j_p = variables[(('Positive electrode' + self.reaction_name) + ' interfacial current density')]
if self.half_cell:
j = pybamm.concatenation(zero_s, j_p)
j_dim = pybamm.concatenation(zero_s, (j_p_scale * j_p))
else:
j_n_av = variables[(('X-averaged negative electrode' + self.reaction_name) + ' interfacial current density')]
j_n = variables[(('Negative electrode' + self.reaction_name) + ' interfacial current density')]
j = pybamm.concatenation(j_n, zero_s, j_p)
j_dim = pybamm.concatenation((j_n_scale * j_n), zero_s, (j_p_scale * j_p))
new_variables = variables.copy()
if (self.reaction not in ['SEI', 'lithium plating']):
new_variables.update({self.Reaction_icd: j, (self.Reaction_icd + ' [A.m-2]'): j_dim, (self.Reaction_icd + ' per volume [A.m-3]'): ((i_typ / L_x) * j)})
a_p = new_variables['Positive electrode surface area to volume ratio']
(s_n, s_p) = self._get_electrolyte_reaction_signed_stoichiometry()
if self.half_cell:
a_n = pybamm.Scalar(1)
a = pybamm.concatenation(zero_s, a_p)
s = pybamm.concatenation(zero_s, pybamm.FullBroadcast(s_p, 'positive electrode', 'current collector'))
else:
a_n = new_variables['Negative electrode surface area to volume ratio']
a = pybamm.concatenation(a_n, zero_s, a_p)
s = pybamm.concatenation(pybamm.FullBroadcast(s_n, 'negative electrode', 'current collector'), zero_s, pybamm.FullBroadcast(s_p, 'positive electrode', 'current collector'))
j.print_name = 'J'
a.print_name = 'a'
j_p.print_name = 'j_p'
new_variables['Sum of electrolyte reaction source terms'] += ((a * s) * j)
new_variables['Sum of positive electrode electrolyte reaction source terms'] += ((a_p * s_p) * j_p)
new_variables['Sum of x-averaged positive electrode electrolyte reaction source terms'] += pybamm.x_average(((a_p * s_p) * j_p))
new_variables['Sum of interfacial current densities'] += j
new_variables['Sum of positive electrode interfacial current densities'] += j_p
new_variables['Sum of x-averaged positive electrode interfacial current densities'] += j_p_av
if (not self.half_cell):
j_n.print_name = 'j_n'
new_variables['Sum of negative electrode electrolyte reaction source terms'] += ((a_n * s_n) * j_n)
new_variables['Sum of x-averaged negative electrode electrolyte reaction source terms'] += pybamm.x_average(((a_n * s_n) * j_n))
new_variables['Sum of negative electrode interfacial current densities'] += j_n
new_variables['Sum of x-averaged negative electrode interfacial current densities'] += j_n_av
variables.update(new_variables)
return variables | def _get_standard_whole_cell_interfacial_current_variables(self, variables):
'\n Get variables associated with interfacial current over the whole cell domain\n This function also automatically increments the "total source term" variables\n '
param = self.param
i_typ = param.i_typ
L_x = param.L_x
j_n_scale = param.j_scale_n
j_p_scale = param.j_scale_p
j_p_av = variables[(('X-averaged positive electrode' + self.reaction_name) + ' interfacial current density')]
zero_s = pybamm.FullBroadcast(0, 'separator', 'current collector')
j_p = variables[(('Positive electrode' + self.reaction_name) + ' interfacial current density')]
if self.half_cell:
j = pybamm.concatenation(zero_s, j_p)
j_dim = pybamm.concatenation(zero_s, (j_p_scale * j_p))
else:
j_n_av = variables[(('X-averaged negative electrode' + self.reaction_name) + ' interfacial current density')]
j_n = variables[(('Negative electrode' + self.reaction_name) + ' interfacial current density')]
j = pybamm.concatenation(j_n, zero_s, j_p)
j_dim = pybamm.concatenation((j_n_scale * j_n), zero_s, (j_p_scale * j_p))
new_variables = variables.copy()
if (self.reaction not in ['SEI', 'lithium plating']):
new_variables.update({self.Reaction_icd: j, (self.Reaction_icd + ' [A.m-2]'): j_dim, (self.Reaction_icd + ' per volume [A.m-3]'): ((i_typ / L_x) * j)})
a_p = new_variables['Positive electrode surface area to volume ratio']
(s_n, s_p) = self._get_electrolyte_reaction_signed_stoichiometry()
if self.half_cell:
a_n = pybamm.Scalar(1)
a = pybamm.concatenation(zero_s, a_p)
s = pybamm.concatenation(zero_s, pybamm.FullBroadcast(s_p, 'positive electrode', 'current collector'))
else:
a_n = new_variables['Negative electrode surface area to volume ratio']
a = pybamm.concatenation(a_n, zero_s, a_p)
s = pybamm.concatenation(pybamm.FullBroadcast(s_n, 'negative electrode', 'current collector'), zero_s, pybamm.FullBroadcast(s_p, 'positive electrode', 'current collector'))
j.print_name = 'J'
a.print_name = 'a'
j_p.print_name = 'j_p'
new_variables['Sum of electrolyte reaction source terms'] += ((a * s) * j)
new_variables['Sum of positive electrode electrolyte reaction source terms'] += ((a_p * s_p) * j_p)
new_variables['Sum of x-averaged positive electrode electrolyte reaction source terms'] += pybamm.x_average(((a_p * s_p) * j_p))
new_variables['Sum of interfacial current densities'] += j
new_variables['Sum of positive electrode interfacial current densities'] += j_p
new_variables['Sum of x-averaged positive electrode interfacial current densities'] += j_p_av
if (not self.half_cell):
j_n.print_name = 'j_n'
new_variables['Sum of negative electrode electrolyte reaction source terms'] += ((a_n * s_n) * j_n)
new_variables['Sum of x-averaged negative electrode electrolyte reaction source terms'] += pybamm.x_average(((a_n * s_n) * j_n))
new_variables['Sum of negative electrode interfacial current densities'] += j_n
new_variables['Sum of x-averaged negative electrode interfacial current densities'] += j_n_av
variables.update(new_variables)
return variables<|docstring|>Get variables associated with interfacial current over the whole cell domain
This function also automatically increments the "total source term" variables<|endoftext|> |
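The whole-cell assembly hinges on broadcasting each electrode quantity onto its own subdomain and concatenating across the cell; a minimal sketch mirroring the calls above (values are hypothetical):

import pybamm

zero_s = pybamm.FullBroadcast(0, 'separator', 'current collector')
j_n = pybamm.FullBroadcast(0.5, 'negative electrode', 'current collector')
j_p = pybamm.FullBroadcast(-0.5, 'positive electrode', 'current collector')
j = pybamm.concatenation(j_n, zero_s, j_p)
print(j.domain)  # ['negative electrode', 'separator', 'positive electrode']

The separator contributes a zero block so that j is defined on the full x-domain expected by the electrolyte equations.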
e2eca7f5a963777763ab60ea6b93f291855d243cab341b0940aaf3b3c882f38c | def _get_standard_ocp_variables(self, ocp, dUdT):
'\n A private function to obtain the open circuit potential and\n related standard variables.\n\n Parameters\n ----------\n ocp : :class:`pybamm.Symbol`\n The open-circuit potential\n dUdT : :class:`pybamm.Symbol`\n The entropic change in ocp\n\n Returns\n -------\n variables : dict\n The variables dictionary including the open circuit potentials\n and related standard variables.\n '
if (ocp.domain in [['negative particle size'], ['positive particle size']]):
ocp = pybamm.size_average(ocp)
if (dUdT.domain in [['negative particle size'], ['positive particle size']]):
dUdT = pybamm.size_average(dUdT)
dUdT_av = pybamm.x_average(dUdT)
ocp_av = pybamm.x_average(ocp)
if (self.half_cell and (self.domain == 'Negative')):
pass
elif (ocp.domain == []):
ocp = pybamm.FullBroadcast(ocp, self.domain_for_broadcast, 'current collector')
elif (ocp.domain == ['current collector']):
ocp = pybamm.PrimaryBroadcast(ocp, self.domain_for_broadcast)
pot_scale = self.param.potential_scale
if (self.domain == 'Negative'):
ocp_dim = (self.param.U_n_ref + (pot_scale * ocp))
ocp_av_dim = (self.param.U_n_ref + (pot_scale * ocp_av))
elif (self.domain == 'Positive'):
ocp_dim = (self.param.U_p_ref + (pot_scale * ocp))
ocp_av_dim = (self.param.U_p_ref + (pot_scale * ocp_av))
variables = {(((self.domain + ' electrode') + self.reaction_name) + ' open circuit potential'): ocp, (((self.domain + ' electrode') + self.reaction_name) + ' open circuit potential [V]'): ocp_dim, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' open circuit potential'): ocp_av, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' open circuit potential [V]'): ocp_av_dim}
if (self.reaction in ['lithium-ion main', 'lead-acid main']):
variables.update({(self.domain + ' electrode entropic change'): dUdT, (self.domain + ' electrode entropic change [V.K-1]'): ((pot_scale * dUdT) / self.param.Delta_T), (('X-averaged ' + self.domain.lower()) + ' electrode entropic change'): dUdT_av, (('X-averaged ' + self.domain.lower()) + ' electrode entropic change [V.K-1]'): ((pot_scale * dUdT_av) / self.param.Delta_T)})
return variables | A private function to obtain the open circuit potential and
related standard variables.
Parameters
----------
ocp : :class:`pybamm.Symbol`
The open-circuit potential
dUdT : :class:`pybamm.Symbol`
The entropic change in ocp
Returns
-------
variables : dict
The variables dictionary including the open circuit potentials
and related standard variables. | pybamm/models/submodels/interface/base_interface.py | _get_standard_ocp_variables | ehtec/PyBaMM | 330 | python | def _get_standard_ocp_variables(self, ocp, dUdT):
'\n A private function to obtain the open circuit potential and\n related standard variables.\n\n Parameters\n ----------\n ocp : :class:`pybamm.Symbol`\n The open-circuit potential\n dUdT : :class:`pybamm.Symbol`\n The entropic change in ocp\n\n Returns\n -------\n variables : dict\n The variables dictionary including the open circuit potentials\n and related standard variables.\n '
if (ocp.domain in [['negative particle size'], ['positive particle size']]):
ocp = pybamm.size_average(ocp)
if (dUdT.domain in [['negative particle size'], ['positive particle size']]):
dUdT = pybamm.size_average(dUdT)
dUdT_av = pybamm.x_average(dUdT)
ocp_av = pybamm.x_average(ocp)
if (self.half_cell and (self.domain == 'Negative')):
pass
elif (ocp.domain == []):
ocp = pybamm.FullBroadcast(ocp, self.domain_for_broadcast, 'current collector')
elif (ocp.domain == ['current collector']):
ocp = pybamm.PrimaryBroadcast(ocp, self.domain_for_broadcast)
pot_scale = self.param.potential_scale
if (self.domain == 'Negative'):
ocp_dim = (self.param.U_n_ref + (pot_scale * ocp))
ocp_av_dim = (self.param.U_n_ref + (pot_scale * ocp_av))
elif (self.domain == 'Positive'):
ocp_dim = (self.param.U_p_ref + (pot_scale * ocp))
ocp_av_dim = (self.param.U_p_ref + (pot_scale * ocp_av))
variables = {(((self.domain + ' electrode') + self.reaction_name) + ' open circuit potential'): ocp, (((self.domain + ' electrode') + self.reaction_name) + ' open circuit potential [V]'): ocp_dim, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' open circuit potential'): ocp_av, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' open circuit potential [V]'): ocp_av_dim}
if (self.reaction in ['lithium-ion main', 'lead-acid main']):
variables.update({(self.domain + ' electrode entropic change'): dUdT, (self.domain + ' electrode entropic change [V.K-1]'): ((pot_scale * dUdT) / self.param.Delta_T), (('X-averaged ' + self.domain.lower()) + ' electrode entropic change'): dUdT_av, (('X-averaged ' + self.domain.lower()) + ' electrode entropic change [V.K-1]'): ((pot_scale * dUdT_av) / self.param.Delta_T)})
return variables | def _get_standard_ocp_variables(self, ocp, dUdT):
'\n A private function to obtain the open circuit potential and\n related standard variables.\n\n Parameters\n ----------\n ocp : :class:`pybamm.Symbol`\n The open-circuit potential\n dUdT : :class:`pybamm.Symbol`\n The entropic change in ocp\n\n Returns\n -------\n variables : dict\n The variables dictionary including the open circuit potentials\n and related standard variables.\n '
if (ocp.domain in [['negative particle size'], ['positive particle size']]):
ocp = pybamm.size_average(ocp)
if (dUdT.domain in [['negative particle size'], ['positive particle size']]):
dUdT = pybamm.size_average(dUdT)
dUdT_av = pybamm.x_average(dUdT)
ocp_av = pybamm.x_average(ocp)
if (self.half_cell and (self.domain == 'Negative')):
pass
elif (ocp.domain == []):
ocp = pybamm.FullBroadcast(ocp, self.domain_for_broadcast, 'current collector')
elif (ocp.domain == ['current collector']):
ocp = pybamm.PrimaryBroadcast(ocp, self.domain_for_broadcast)
pot_scale = self.param.potential_scale
if (self.domain == 'Negative'):
ocp_dim = (self.param.U_n_ref + (pot_scale * ocp))
ocp_av_dim = (self.param.U_n_ref + (pot_scale * ocp_av))
elif (self.domain == 'Positive'):
ocp_dim = (self.param.U_p_ref + (pot_scale * ocp))
ocp_av_dim = (self.param.U_p_ref + (pot_scale * ocp_av))
variables = {(((self.domain + ' electrode') + self.reaction_name) + ' open circuit potential'): ocp, (((self.domain + ' electrode') + self.reaction_name) + ' open circuit potential [V]'): ocp_dim, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' open circuit potential'): ocp_av, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' open circuit potential [V]'): ocp_av_dim}
if (self.reaction in ['lithium-ion main', 'lead-acid main']):
variables.update({(self.domain + ' electrode entropic change'): dUdT, (self.domain + ' electrode entropic change [V.K-1]'): ((pot_scale * dUdT) / self.param.Delta_T), (('X-averaged ' + self.domain.lower()) + ' electrode entropic change'): dUdT_av, (('X-averaged ' + self.domain.lower()) + ' electrode entropic change [V.K-1]'): ((pot_scale * dUdT_av) / self.param.Delta_T)})
return variables<|docstring|>A private function to obtain the open circuit potential and
related standard variables.
Parameters
----------
ocp : :class:`pybamm.Symbol`
The open-circuit potential
dUdT : :class:`pybamm.Symbol`
The entropic change in ocp
Returns
-------
variables : dict
The variables dictionary including the open circuit potentials
and related standard variables.<|endoftext|> |
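Undoing the nondimensionalisation used above amounts to U_dim = U_ref + pot_scale · u, where pot_scale is typically the thermal voltage R·T_ref/F. A plain-float sketch with hypothetical values:

R = 8.314  # gas constant [J.mol-1.K-1]
F = 96485.33  # Faraday constant [C.mol-1]
T_ref = 298.15  # reference temperature [K]
pot_scale = (R * T_ref) / F  # ~0.0257 V
U_n_ref = 0.17  # reference OCP [V] (hypothetical)
ocp = 1.5  # dimensionless open circuit potential (hypothetical)
ocp_dim = U_n_ref + (pot_scale * ocp)
print(round(ocp_dim, 4))  # 0.2085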
6d90299a7adfa4164b1adabc1bc27005e3c1a298eaa71cdc0d7365444818a59a | def _get_standard_size_distribution_interfacial_current_variables(self, j):
'\n Interfacial current density variables that depend on particle size R,\n relevant if "particle size" option is "distribution".\n '
if (j.domains['secondary'] == [(self.domain.lower() + ' electrode')]):
j_xav = pybamm.x_average(j)
else:
j_xav = j
j = pybamm.SecondaryBroadcast(j_xav, [(self.domain.lower() + ' electrode')])
i_typ = self.param.i_typ
L_x = self.param.L_x
if (self.domain == 'Negative'):
j_scale = (i_typ / (self.param.a_n_typ * L_x))
elif (self.domain == 'Positive'):
j_scale = (i_typ / (self.param.a_p_typ * L_x))
variables = {(((self.domain + ' electrode') + self.reaction_name) + ' interfacial current density distribution'): j, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' interfacial current density distribution'): j_xav, ((((self.domain + ' electrode') + self.reaction_name) + ' interfacial current density') + ' distribution [A.m-2]'): (j_scale * j), ((((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' interfacial current density') + ' distribution [A.m-2]'): (j_scale * j_xav)}
return variables | Interfacial current density variables that depend on particle size R,
relevant if "particle size" option is "distribution". | pybamm/models/submodels/interface/base_interface.py | _get_standard_size_distribution_interfacial_current_variables | ehtec/PyBaMM | 330 | python | def _get_standard_size_distribution_interfacial_current_variables(self, j):
'\n Interfacial current density variables that depend on particle size R,\n relevant if "particle size" option is "distribution".\n '
if (j.domains['secondary'] == [(self.domain.lower() + ' electrode')]):
j_xav = pybamm.x_average(j)
else:
j_xav = j
j = pybamm.SecondaryBroadcast(j_xav, [(self.domain.lower() + ' electrode')])
i_typ = self.param.i_typ
L_x = self.param.L_x
if (self.domain == 'Negative'):
j_scale = (i_typ / (self.param.a_n_typ * L_x))
elif (self.domain == 'Positive'):
j_scale = (i_typ / (self.param.a_p_typ * L_x))
variables = {(((self.domain + ' electrode') + self.reaction_name) + ' interfacial current density distribution'): j, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' interfacial current density distribution'): j_xav, ((((self.domain + ' electrode') + self.reaction_name) + ' interfacial current density') + ' distribution [A.m-2]'): (j_scale * j), ((((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' interfacial current density') + ' distribution [A.m-2]'): (j_scale * j_xav)}
return variables | def _get_standard_size_distribution_interfacial_current_variables(self, j):
'\n Interfacial current density variables that depend on particle size R,\n relevant if "particle size" option is "distribution".\n '
if (j.domains['secondary'] == [(self.domain.lower() + ' electrode')]):
j_xav = pybamm.x_average(j)
else:
j_xav = j
j = pybamm.SecondaryBroadcast(j_xav, [(self.domain.lower() + ' electrode')])
i_typ = self.param.i_typ
L_x = self.param.L_x
if (self.domain == 'Negative'):
j_scale = (i_typ / (self.param.a_n_typ * L_x))
elif (self.domain == 'Positive'):
j_scale = (i_typ / (self.param.a_p_typ * L_x))
variables = {(((self.domain + ' electrode') + self.reaction_name) + ' interfacial current density distribution'): j, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' interfacial current density distribution'): j_xav, ((((self.domain + ' electrode') + self.reaction_name) + ' interfacial current density') + ' distribution [A.m-2]'): (j_scale * j), ((((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' interfacial current density') + ' distribution [A.m-2]'): (j_scale * j_xav)}
return variables<|docstring|>Interfacial current density variables that depend on particle size R,
relevant if "particle size" option is "distribution".<|endoftext|> |
963aadd48327fea76d297db6cf92bdae1711eff17da97c8932cc05dcb5ca3003 | def _get_standard_size_distribution_exchange_current_variables(self, j0):
'\n Exchange current variables that depend on particle size.\n '
i_typ = self.param.i_typ
L_x = self.param.L_x
if (self.domain == 'Negative'):
j_scale = (i_typ / (self.param.a_n_typ * L_x))
elif (self.domain == 'Positive'):
j_scale = (i_typ / (self.param.a_p_typ * L_x))
if (j0.domains['secondary'] != [(self.domain.lower() + ' electrode')]):
j0_av = j0
j0 = pybamm.SecondaryBroadcast(j0, self.domain_for_broadcast)
else:
j0_av = pybamm.x_average(j0)
variables = {(((self.domain + ' electrode') + self.reaction_name) + ' exchange current density distribution'): j0, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' exchange current density distribution'): j0_av, (((self.domain + ' electrode') + self.reaction_name) + ' exchange current density distribution [A.m-2]'): (j_scale * j0), (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' exchange current density distribution [A.m-2]'): (j_scale * j0_av), ((((self.domain + ' electrode') + self.reaction_name) + ' exchange current density distribution') + ' per volume [A.m-3]'): ((i_typ / L_x) * j0), ((((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' exchange current density distribution') + ' per volume [A.m-3]'): ((i_typ / L_x) * j0_av)}
return variables | Exchange current variables that depend on particle size. | pybamm/models/submodels/interface/base_interface.py | _get_standard_size_distribution_exchange_current_variables | ehtec/PyBaMM | 330 | python | def _get_standard_size_distribution_exchange_current_variables(self, j0):
'\n Exchange current variables that depend on particle size.\n '
i_typ = self.param.i_typ
L_x = self.param.L_x
if (self.domain == 'Negative'):
j_scale = (i_typ / (self.param.a_n_typ * L_x))
elif (self.domain == 'Positive'):
j_scale = (i_typ / (self.param.a_p_typ * L_x))
if (j0.domains['secondary'] != [(self.domain.lower() + ' electrode')]):
j0_av = j0
j0 = pybamm.SecondaryBroadcast(j0, self.domain_for_broadcast)
else:
j0_av = pybamm.x_average(j0)
variables = {(((self.domain + ' electrode') + self.reaction_name) + ' exchange current density distribution'): j0, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' exchange current density distribution'): j0_av, (((self.domain + ' electrode') + self.reaction_name) + ' exchange current density distribution [A.m-2]'): (j_scale * j0), (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' exchange current density distribution [A.m-2]'): (j_scale * j0_av), ((((self.domain + ' electrode') + self.reaction_name) + ' exchange current density distribution') + ' per volume [A.m-3]'): ((i_typ / L_x) * j0), ((((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' exchange current density distribution') + ' per volume [A.m-3]'): ((i_typ / L_x) * j0_av)}
return variables | def _get_standard_size_distribution_exchange_current_variables(self, j0):
'\n Exchange current variables that depend on particle size.\n '
i_typ = self.param.i_typ
L_x = self.param.L_x
if (self.domain == 'Negative'):
j_scale = (i_typ / (self.param.a_n_typ * L_x))
elif (self.domain == 'Positive'):
j_scale = (i_typ / (self.param.a_p_typ * L_x))
if (j0.domains['secondary'] != [(self.domain.lower() + ' electrode')]):
j0_av = j0
j0 = pybamm.SecondaryBroadcast(j0, self.domain_for_broadcast)
else:
j0_av = pybamm.x_average(j0)
variables = {(((self.domain + ' electrode') + self.reaction_name) + ' exchange current density distribution'): j0, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' exchange current density distribution'): j0_av, (((self.domain + ' electrode') + self.reaction_name) + ' exchange current density distribution [A.m-2]'): (j_scale * j0), (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' exchange current density distribution [A.m-2]'): (j_scale * j0_av), ((((self.domain + ' electrode') + self.reaction_name) + ' exchange current density distribution') + ' per volume [A.m-3]'): ((i_typ / L_x) * j0), ((((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' exchange current density distribution') + ' per volume [A.m-3]'): ((i_typ / L_x) * j0_av)}
return variables<|docstring|>Exchange current variables that depend on particle size.<|endoftext|> |
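The scale that redimensionalises these distributions is j_scale = i_typ / (a_typ · L_x); hypothetical magnitudes give a feel for it:

i_typ = 24.0  # typical current density [A.m-2] (hypothetical)
a_n_typ = 180000.0  # typical surface area to volume ratio [m-1] (hypothetical)
L_x = 0.0001  # electrode-stack thickness scale [m] (hypothetical)
j_scale = i_typ / (a_n_typ * L_x)
print(round(j_scale, 3))  # 1.333 [A.m-2]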
9e5530dc25aaa05dfe56decc4ddcbb52666a58f568cb3a465fd85aa443cd16b1 | def _get_standard_size_distribution_overpotential_variables(self, eta_r):
'\n Overpotential variables that depend on particle size.\n '
pot_scale = self.param.potential_scale
if (eta_r.domains['secondary'] != [(self.domain.lower() + ' electrode')]):
eta_r_av = eta_r
eta_r = pybamm.SecondaryBroadcast(eta_r, self.domain_for_broadcast)
else:
eta_r_av = pybamm.x_average(eta_r)
domain_reaction = (((self.domain + ' electrode') + self.reaction_name) + ' reaction overpotential')
variables = {domain_reaction: eta_r, (('X-averaged ' + domain_reaction.lower()) + ' distribution'): eta_r_av, (domain_reaction + ' [V]'): (eta_r * pot_scale), (('X-averaged ' + domain_reaction.lower()) + ' distribution [V]'): (eta_r_av * pot_scale)}
return variables | Overpotential variables that depend on particle size. | pybamm/models/submodels/interface/base_interface.py | _get_standard_size_distribution_overpotential_variables | ehtec/PyBaMM | 330 | python | def _get_standard_size_distribution_overpotential_variables(self, eta_r):
'\n Overpotential variables that depend on particle size.\n '
pot_scale = self.param.potential_scale
if (eta_r.domains['secondary'] != [(self.domain.lower() + ' electrode')]):
eta_r_av = eta_r
eta_r = pybamm.SecondaryBroadcast(eta_r, self.domain_for_broadcast)
else:
eta_r_av = pybamm.x_average(eta_r)
domain_reaction = (((self.domain + ' electrode') + self.reaction_name) + ' reaction overpotential')
variables = {domain_reaction: eta_r, (('X-averaged ' + domain_reaction.lower()) + ' distribution'): eta_r_av, (domain_reaction + ' [V]'): (eta_r * pot_scale), (('X-averaged ' + domain_reaction.lower()) + ' distribution [V]'): (eta_r_av * pot_scale)}
return variables | def _get_standard_size_distribution_overpotential_variables(self, eta_r):
'\n Overpotential variables that depend on particle size.\n '
pot_scale = self.param.potential_scale
if (eta_r.domains['secondary'] != [(self.domain.lower() + ' electrode')]):
eta_r_av = eta_r
eta_r = pybamm.SecondaryBroadcast(eta_r, self.domain_for_broadcast)
else:
eta_r_av = pybamm.x_average(eta_r)
domain_reaction = (((self.domain + ' electrode') + self.reaction_name) + ' reaction overpotential')
variables = {domain_reaction: eta_r, (('X-averaged ' + domain_reaction.lower()) + ' distribution'): eta_r_av, (domain_reaction + ' [V]'): (eta_r * pot_scale), (('X-averaged ' + domain_reaction.lower()) + ' distribution [V]'): (eta_r_av * pot_scale)}
return variables<|docstring|>Overpotential variables that depend on particle size.<|endoftext|> |
7bb0c6768a0c96d29fd41f58649f6b6c2d09b811a2fd48d4ffb04b13492d9f62 | def _get_standard_size_distribution_ocp_variables(self, ocp, dUdT):
'\n A private function to obtain the open circuit potential and\n related standard variables when there is a distribution of particle sizes.\n '
if (ocp.domains['secondary'] != [(self.domain.lower() + ' electrode')]):
ocp_av = ocp
ocp = pybamm.SecondaryBroadcast(ocp, self.domain_for_broadcast)
else:
ocp_av = pybamm.x_average(ocp)
if (dUdT.domains['secondary'] != [(self.domain.lower() + ' electrode')]):
dUdT_av = dUdT
dUdT = pybamm.SecondaryBroadcast(dUdT, self.domain_for_broadcast)
else:
dUdT_av = pybamm.x_average(dUdT)
pot_scale = self.param.potential_scale
if (self.domain == 'Negative'):
ocp_dim = (self.param.U_n_ref + (pot_scale * ocp))
ocp_av_dim = (self.param.U_n_ref + (pot_scale * ocp_av))
elif (self.domain == 'Positive'):
ocp_dim = (self.param.U_p_ref + (pot_scale * ocp))
ocp_av_dim = (self.param.U_p_ref + (pot_scale * ocp_av))
variables = {(((self.domain + ' electrode') + self.reaction_name) + ' open circuit potential distribution'): ocp, (((self.domain + ' electrode') + self.reaction_name) + ' open circuit potential distribution [V]'): ocp_dim, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' open circuit potential distribution'): ocp_av, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' open circuit potential distribution [V]'): ocp_av_dim}
if (self.reaction_name == ''):
variables.update({((self.domain + ' electrode entropic change') + ' (size-dependent)'): dUdT, ((self.domain + ' electrode entropic change') + ' (size-dependent) [V.K-1]'): ((pot_scale * dUdT) / self.param.Delta_T), ((('X-averaged ' + self.domain.lower()) + ' electrode entropic change') + ' (size-dependent)'): dUdT_av, ((('X-averaged ' + self.domain.lower()) + ' electrode entropic change') + ' (size-dependent) [V.K-1]'): ((pot_scale * dUdT_av) / self.param.Delta_T)})
return variables | A private function to obtain the open circuit potential and
related standard variables when there is a distribution of particle sizes. | pybamm/models/submodels/interface/base_interface.py | _get_standard_size_distribution_ocp_variables | ehtec/PyBaMM | 330 | python | def _get_standard_size_distribution_ocp_variables(self, ocp, dUdT):
'\n A private function to obtain the open circuit potential and\n related standard variables when there is a distribution of particle sizes.\n '
if (ocp.domains['secondary'] != [(self.domain.lower() + ' electrode')]):
ocp_av = ocp
ocp = pybamm.SecondaryBroadcast(ocp, self.domain_for_broadcast)
else:
ocp_av = pybamm.x_average(ocp)
if (dUdT.domains['secondary'] != [(self.domain.lower() + ' electrode')]):
dUdT_av = dUdT
dUdT = pybamm.SecondaryBroadcast(dUdT, self.domain_for_broadcast)
else:
dUdT_av = pybamm.x_average(dUdT)
pot_scale = self.param.potential_scale
if (self.domain == 'Negative'):
ocp_dim = (self.param.U_n_ref + (pot_scale * ocp))
ocp_av_dim = (self.param.U_n_ref + (pot_scale * ocp_av))
elif (self.domain == 'Positive'):
ocp_dim = (self.param.U_p_ref + (pot_scale * ocp))
ocp_av_dim = (self.param.U_p_ref + (pot_scale * ocp_av))
variables = {(((self.domain + ' electrode') + self.reaction_name) + ' open circuit potential distribution'): ocp, (((self.domain + ' electrode') + self.reaction_name) + ' open circuit potential distribution [V]'): ocp_dim, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' open circuit potential distribution'): ocp_av, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' open circuit potential distribution [V]'): ocp_av_dim}
if (self.reaction_name == ''):
variables.update({((self.domain + ' electrode entropic change') + ' (size-dependent)'): dUdT, ((self.domain + ' electrode entropic change') + ' (size-dependent) [V.K-1]'): ((pot_scale * dUdT) / self.param.Delta_T), ((('X-averaged ' + self.domain.lower()) + ' electrode entropic change') + ' (size-dependent)'): dUdT_av, ((('X-averaged ' + self.domain.lower()) + ' electrode entropic change') + ' (size-dependent) [V.K-1]'): ((pot_scale * dUdT_av) / self.param.Delta_T)})
return variables | def _get_standard_size_distribution_ocp_variables(self, ocp, dUdT):
'\n A private function to obtain the open circuit potential and\n related standard variables when there is a distribution of particle sizes.\n '
if (ocp.domains['secondary'] != [(self.domain.lower() + ' electrode')]):
ocp_av = ocp
ocp = pybamm.SecondaryBroadcast(ocp, self.domain_for_broadcast)
else:
ocp_av = pybamm.x_average(ocp)
if (dUdT.domains['secondary'] != [(self.domain.lower() + ' electrode')]):
dUdT_av = dUdT
dUdT = pybamm.SecondaryBroadcast(dUdT, self.domain_for_broadcast)
else:
dUdT_av = pybamm.x_average(dUdT)
pot_scale = self.param.potential_scale
if (self.domain == 'Negative'):
ocp_dim = (self.param.U_n_ref + (pot_scale * ocp))
ocp_av_dim = (self.param.U_n_ref + (pot_scale * ocp_av))
elif (self.domain == 'Positive'):
ocp_dim = (self.param.U_p_ref + (pot_scale * ocp))
ocp_av_dim = (self.param.U_p_ref + (pot_scale * ocp_av))
variables = {(((self.domain + ' electrode') + self.reaction_name) + ' open circuit potential distribution'): ocp, (((self.domain + ' electrode') + self.reaction_name) + ' open circuit potential distribution [V]'): ocp_dim, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' open circuit potential distribution'): ocp_av, (((('X-averaged ' + self.domain.lower()) + ' electrode') + self.reaction_name) + ' open circuit potential distribution [V]'): ocp_av_dim}
if (self.reaction_name == ''):
variables.update({((self.domain + ' electrode entropic change') + ' (size-dependent)'): dUdT, ((self.domain + ' electrode entropic change') + ' (size-dependent) [V.K-1]'): ((pot_scale * dUdT) / self.param.Delta_T), ((('X-averaged ' + self.domain.lower()) + ' electrode entropic change') + ' (size-dependent)'): dUdT_av, ((('X-averaged ' + self.domain.lower()) + ' electrode entropic change') + ' (size-dependent) [V.K-1]'): ((pot_scale * dUdT_av) / self.param.Delta_T)})
return variables<|docstring|>A private function to obtain the open circuit potential and
related standard variables when there is a distribution of particle sizes.<|endoftext|> |
2ef471a416bcdfdf1dd37a52d62036c624ec1d8be99fcc86dc82e61a09b12e9c | def strip_html(html):
'\n Removes HTML tags from the specified string\n Args:\n html: the string that contains the HTML tags to remove\n Returns:\n The specified string without the HTML tags\n '
s = MLStripper()
s.feed(html)
return s.get_data() | Removes HTML tags from the specified string
Args:
html: the string that contains the HTML tags to remove
Returns:
The specified string without the HTML tags | idfm_api/utils.py | strip_html | droso-hass/idfm-api | 0 | python | def strip_html(html):
'\n Removes HTML tags from the specified string\n Args:\n html: the string that contains the HTML tags to remove\n Returns:\n The specified string without the HTML tags\n '
s = MLStripper()
s.feed(html)
return s.get_data() | def strip_html(html):
'\n Removes HTML tags from the specified string\n Args:\n html: the string that contains the HTML tags to remove\n Returns:\n The specified string without the HTML tags\n '
s = MLStripper()
s.feed(html)
return s.get_data()<|docstring|>Removes HTML tags from the specified string
Args:
html: the string that contains the HTML tags to remove
Returns:
The specified string without the HTML tags<|endoftext|> |
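MLStripper is not shown in this excerpt; it most likely follows the classic html.parser recipe, collecting text nodes and discarding tags. A sketch of what such a class looks like (an assumption, not the project's actual code):

from html.parser import HTMLParser
from io import StringIO

class MLStripper(HTMLParser):
    def __init__(self):
        super().__init__()
        self.convert_charrefs = True  # decode entities like & into text
        self.text = StringIO()

    def handle_data(self, data):
        # Only text nodes reach handle_data; tags are silently dropped.
        self.text.write(data)

    def get_data(self):
        return self.text.getvalue()

# strip_html('<p>Hello <b>world</b></p>') would then return 'Hello world'.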
b9912559f2d525a41bbbbf4b6f422e65d1dea1985a31eb535de8034cc82908dc | def add_nodes_metadata(node_list):
'Extracts the metadata dict from each node document, decoding the JSON-encoded cc_licenses field'
nodes = []
for node in node_list:
curr_node = node['metadata']
if curr_node['cc_licenses']:
curr_node['cc_licenses'] = json.loads(curr_node['cc_licenses'])
nodes.append(curr_node)
return nodes | Extracts the metadata dict from each node document, decoding the JSON-encoded cc_licenses field | backend/src/linked_commons/views.py | add_nodes_metadata | next2nothing-cloud/https-github.com-creativecommons-cccatalog-dataviz | 24 | python | def add_nodes_metadata(node_list):
nodes = []
for node in node_list:
curr_node = node['metadata']
if curr_node['cc_licenses']:
curr_node['cc_licenses'] = json.loads(curr_node['cc_licenses'])
nodes.append(curr_node)
return nodes | def add_nodes_metadata(node_list):
nodes = []
for node in node_list:
curr_node = node['metadata']
if curr_node['cc_licenses']:
curr_node['cc_licenses'] = json.loads(curr_node['cc_licenses'])
nodes.append(curr_node)
return nodes<|docstring|>Extracts the metadata dict from each node document, decoding the JSON-encoded cc_licenses field<|endoftext|>
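A usage sketch with a hypothetical document shape (the _id/metadata layout is inferred from the queries elsewhere in this file; the field values are invented):

import json
from linked_commons.views import add_nodes_metadata  # assuming the views module is importable

node_list = [{'_id': 'flickr.com', 'metadata': {'id': 'flickr.com', 'cc_licenses': json.dumps({'by': 120, 'by-sa': 30})}}]
nodes = add_nodes_metadata(node_list)
print(nodes[0]['cc_licenses']['by'])  # 120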
d083e59fb72e9571a581cdf81badb060ec092586e33957fd8baec033aac07f06 | def get_filtered_data(node_name, collection_instance):
'\n Filters the Graph using node_name and returns the D1 list\n '
nodes_id = {node_name}
links = []
node_list = collection_instance.find_one(node_name)
for link in node_list['D1']:
links.append({**link, 'source': node_name})
nodes_id.add(link['target'])
for link in node_list['RD1']:
links.append({**link, 'target': node_name})
nodes_id.add(link['source'])
node_list = collection_instance.find({'_id': {'$in': list(nodes_id)}}, projection=['metadata'])
nodes = add_nodes_metadata(node_list)
return {'links': links, 'nodes': nodes} | Filters the Graph using node_name and returns the D1 list | backend/src/linked_commons/views.py | get_filtered_data | next2nothing-cloud/https-github.com-creativecommons-cccatalog-dataviz | 24 | python | def get_filtered_data(node_name, collection_instance):
'\n Filters the Graph using node_name and returns the D1 list\n '
nodes_id = {node_name}
links = []
node_list = collection_instance.find_one(node_name)
for link in node_list['D1']:
links.append({**link, 'source': node_name})
nodes_id.add(link['target'])
for link in node_list['RD1']:
links.append({**link, 'target': node_name})
nodes_id.add(link['source'])
node_list = collection_instance.find({'_id': {'$in': list(nodes_id)}}, projection=['metadata'])
nodes = add_nodes_metadata(node_list)
return {'links': links, 'nodes': nodes} | def get_filtered_data(node_name, collection_instance):
'\n \n '
nodes_id = {node_name}
links = []
node_list = collection_instance.find_one(node_name)
for link in node_list['D1']:
links.append({**link, 'source': node_name})
nodes_id.add(link['target'])
for link in node_list['RD1']:
links.append({**link, 'target': node_name})
nodes_id.add(link['source'])
node_list = collection_instance.find({'_id': {'$in': list(nodes_id)}}, projection=['metadata'])
nodes = add_nodes_metadata(node_list)
return {'links': links, 'nodes': nodes}<|docstring|>Filters the Graph using node_name and returns the D1 list<|endoftext|> |
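The function assumes each node document stores its depth-1 adjacency in two lists: D1 (outgoing links, where this node is the source) and RD1 (incoming links, where this node is the target). A hypothetical document illustrating that shape (the 'value' weight field is an assumption):

doc = {
    '_id': 'example.org',
    'metadata': {'id': 'example.org', 'cc_licenses': None},
    'D1': [{'target': 'flickr.com', 'value': 3}],
    'RD1': [{'source': 'wikipedia.org', 'value': 7}],
}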
fe62a3edacc7965fc40678ea3860c96f77bc96a034a021ab02d3cd4798e12f9b | def build_random_landing_graph(db, landing_graph_size=LANDING_GRAPH_META):
'Builds a random graph of size landing_graph_size'
links = []
total_nodes = db.count()
index = random.randint(a=1, b=(total_nodes - 1))
root_node = db.find(projection=[]).limit((- 1)).skip(index).next()['_id']
base_node = root_node
nodes_id = set()
nodes_id.add(root_node)
while ((len(nodes_id) < landing_graph_size['nodes']) and (len(links) < landing_graph_size['links'])):
temp_nodes_id = []
node_list = db.find_one(root_node)
for link in node_list['D1']:
links.append({**link, 'source': root_node})
temp_nodes_id.append(link['target'])
if ((len(nodes_id) + len(temp_nodes_id)) > 500):
break
for link in node_list['RD1']:
links.append({**link, 'target': root_node})
temp_nodes_id.append(link['source'])
if ((len(nodes_id) + len(temp_nodes_id)) > 500):
break
nodes_id.update(temp_nodes_id)
if (temp_nodes_id and (len(nodes_id) < 500)):
root_node = random.choice(temp_nodes_id)
else:
break
node_list = db.find({'_id': {'$in': list(nodes_id)}}, projection=['metadata'])
nodes = add_nodes_metadata(node_list)
return {'root_node': base_node, 'links': links, 'nodes': nodes} | Builds a random graph of size landing_graph_size | backend/src/linked_commons/views.py | build_random_landing_graph | next2nothing-cloud/https-github.com-creativecommons-cccatalog-dataviz | 24 | python | def build_random_landing_graph(db, landing_graph_size=LANDING_GRAPH_META):
links = []
total_nodes = db.count()
index = random.randint(a=1, b=(total_nodes - 1))
root_node = db.find(projection=[]).limit((- 1)).skip(index).next()['_id']
base_node = root_node
nodes_id = set()
nodes_id.add(root_node)
while ((len(nodes_id) < landing_graph_size['nodes']) and (len(links) < landing_graph_size['links'])):
temp_nodes_id = []
node_list = db.find_one(root_node)
for link in node_list['D1']:
links.append({**link, 'source': root_node})
temp_nodes_id.append(link['target'])
if ((len(nodes_id) + len(temp_nodes_id)) > 500):
break
for link in node_list['RD1']:
links.append({**link, 'target': root_node})
temp_nodes_id.append(link['source'])
if ((len(nodes_id) + len(temp_nodes_id)) > 500):
break
nodes_id.update(temp_nodes_id)
if (temp_nodes_id and (len(nodes_id) < 500)):
root_node = random.choice(temp_nodes_id)
else:
break
node_list = db.find({'_id': {'$in': list(nodes_id)}}, projection=['metadata'])
nodes = add_nodes_metadata(node_list)
return {'root_node': base_node, 'links': links, 'nodes': nodes} | def build_random_landing_graph(db, landing_graph_size=LANDING_GRAPH_META):
links = []
total_nodes = db.count()
index = random.randint(a=1, b=(total_nodes - 1))
root_node = db.find(projection=[]).limit((- 1)).skip(index).next()['_id']
base_node = root_node
nodes_id = set()
nodes_id.add(root_node)
while ((len(nodes_id) < landing_graph_size['nodes']) and (len(links) < landing_graph_size['links'])):
temp_nodes_id = []
node_list = db.find_one(root_node)
for link in node_list['D1']:
links.append({**link, 'source': root_node})
temp_nodes_id.append(link['target'])
if ((len(nodes_id) + len(temp_nodes_id)) > 500):
break
for link in node_list['RD1']:
links.append({**link, 'target': root_node})
temp_nodes_id.append(link['source'])
if ((len(nodes_id) + len(temp_nodes_id)) > 500):
break
nodes_id.update(temp_nodes_id)
if (temp_nodes_id and (len(nodes_id) < 500)):
root_node = random.choice(temp_nodes_id)
else:
break
node_list = db.find({'_id': {'$in': list(nodes_id)}}, projection=['metadata'])
nodes = add_nodes_metadata(node_list)
return {'root_node': base_node, 'links': links, 'nodes': nodes}<|docstring|>Builds a random graph of size landing_graph_size<|endoftext|> |
596eaf32045a9b9574315c8bab0b19c105b68b0f49584a98c42c5321902beb92 | def serve_graph_data(request):
"Returns the Graph In {'nodes':[], 'links':[]} format\n \n Returns a random graph if 'name' query param is None\n else returns the filtered graph with 'name' as root node\n "
node_name = request.GET.get('name')
client = pymongo.MongoClient(f'mongodb://{USERNAME}:{PASSWORD}@{HOSTNAME}')
db = client.get_database(name=DB_NAME)
collection_instance = db.get_collection(name=COLLECTION_NAME)
if (node_name == None):
data = build_random_landing_graph(collection_instance)
client.close()
return JsonResponse(data)
else:
node_count = collection_instance.count_documents({'_id': node_name})
if (node_count == 0):
return JsonResponse({'error': True, 'message': (('node ' + node_name) + " doesn't exist")}, json_dumps_params={'indent': 2})
data = get_filtered_data(node_name, collection_instance)
client.close()
return JsonResponse(data)
client.close()
return JsonResponse({'error': True, 'message': 'Server Error'}) | Returns the Graph in {'nodes':[], 'links':[]} format
Returns a random graph if 'name' query param is None
else returns the filtered graph with 'name' as root node | backend/src/linked_commons/views.py | serve_graph_data | next2nothing-cloud/https-github.com-creativecommons-cccatalog-dataviz | 24 | python | def serve_graph_data(request):
"Returns the Graph In {'nodes':[], 'links':[]} format\n \n Returns a random graph if 'name' query param is None\n else returns the filtered graph with 'name' as root node\n "
node_name = request.GET.get('name')
client = pymongo.MongoClient(f'mongodb://{USERNAME}:{PASSWORD}@{HOSTNAME}')
db = client.get_database(name=DB_NAME)
collection_instance = db.get_collection(name=COLLECTION_NAME)
if (node_name == None):
data = build_random_landing_graph(collection_instance)
client.close()
return JsonResponse(data)
else:
node_count = collection_instance.count_documents({'_id': node_name})
if (node_count == 0):
return JsonResponse({'error': True, 'message': (('node ' + node_name) + " doesn't exist")}, json_dumps_params={'indent': 2})
data = get_filtered_data(node_name, collection_instance)
client.close()
return JsonResponse(data)
client.close()
return JsonResponse({'error': True, 'message': 'Server Error'}) | def serve_graph_data(request):
"Returns the Graph In {'nodes':[], 'links':[]} format\n \n Returns a random graph if 'name' query param is None\n else returns the filtered graph with 'name' as root node\n "
node_name = request.GET.get('name')
client = pymongo.MongoClient(f'mongodb://{USERNAME}:{PASSWORD}@{HOSTNAME}')
db = client.get_database(name=DB_NAME)
collection_instance = db.get_collection(name=COLLECTION_NAME)
if (node_name == None):
data = build_random_landing_graph(collection_instance)
client.close()
return JsonResponse(data)
else:
node_count = collection_instance.count_documents({'_id': node_name})
if (node_count == 0):
return JsonResponse({'error': True, 'message': (('node ' + node_name) + " doesn't exist")}, json_dumps_params={'indent': 2})
data = get_filtered_data(node_name, collection_instance)
client.close()
return JsonResponse(data)
client.close()
return JsonResponse({'error': True, 'message': 'Server Error'})<|docstring|>Returns the Graph in {'nodes':[], 'links':[]} format
Returns a random graph if 'name' query param is None
else returns the filtered graph with 'name' as root node<|endoftext|> |
f0740247d3dcced41897f80569613c086a67c5df58ff8c1aa4b7251fd3bfc295 | def serve_suggestions(request):
'Returns a list of nodes matching the given query'
query = request.GET.get('q')
if query:
client = pymongo.MongoClient(f'mongodb://{USERNAME}:{PASSWORD}@{HOSTNAME}')
db = client.get_database(name=DB_NAME)
collection_instance = db.get_collection(name=COLLECTION_NAME)
res = collection_instance.find({'_id': {'$regex': query}}, return_key=True).limit(8)
query_set = []
for node in res:
query_set.append({'id': node['_id']})
return JsonResponse({'error': False, 'suggestions': query_set})
else:
return JsonResponse({'error': True, 'message': 'No query params passed'}) | Returns a list of nodes matching the given query | backend/src/linked_commons/views.py | serve_suggestions | next2nothing-cloud/https-github.com-creativecommons-cccatalog-dataviz | 24 | python | def serve_suggestions(request):
query = request.GET.get('q')
if query:
client = pymongo.MongoClient(f'mongodb://{USERNAME}:{PASSWORD}@{HOSTNAME}')
db = client.get_database(name=DB_NAME)
collection_instance = db.get_collection(name=COLLECTION_NAME)
res = collection_instance.find({'_id': {'$regex': query}}, return_key=True).limit(8)
query_set = []
for node in res:
query_set.append({'id': node['_id']})
return JsonResponse({'error': False, 'suggestions': query_set})
else:
return JsonResponse({'error': True, 'message': 'No query params passed'}) | def serve_suggestions(request):
query = request.GET.get('q')
if query:
client = pymongo.MongoClient(f'mongodb://{USERNAME}:{PASSWORD}@{HOSTNAME}')
db = client.get_database(name=DB_NAME)
collection_instance = db.get_collection(name=COLLECTION_NAME)
res = collection_instance.find({'_id': {'$regex': query}}, return_key=True).limit(8)
query_set = []
for node in res:
query_set.append({'id': node['_id']})
return JsonResponse({'error': False, 'suggestions': query_set})
else:
return JsonResponse({'error': True, 'message': 'No query params passed'})<|docstring|>Returns a list of nodes matching the given query<|endoftext|> |
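A caveat on the suggestion lookup above: MongoDB's $regex is unanchored, so the query matches anywhere inside '_id'. A prefix-anchored variant (a sketch, not what this repository does) restricts matches to leading substrings and escapes regex metacharacters in user input:

import re

query = 'creative'                                        # example user input
filter_doc = {'_id': {'$regex': f'^{re.escape(query)}'}}  # hypothetical prefix-anchored match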
e1c3b9216fdaebd832dd0b02a2c8416147437a5c642f299018ece4f24ec54634 | def initialize_log(self):
'\n Initialize logging\n '
log_path = os.path.join(config.LOG_PATH, config.LOG_NAME).strip()
existed = os.path.exists(config.LOG_PATH)
if (not existed):
os.makedirs(config.LOG_PATH)
existed = os.path.exists(log_path)
if existed:
os.remove(log_path)
logging.basicConfig(filename=os.path.join(config.LOG_PATH, config.LOG_NAME), level=logging.DEBUG, format=config.LOG_FORMAT, datefmt=config.DATE_FORMAT) | Initialize logging | server/server/init.py | initialize_log | aweijx/MMW_YNU | 2 | python | def initialize_log(self):
'\n \n '
log_path = os.path.join(config.LOG_PATH, config.LOG_NAME).strip()
existed = os.path.exists(config.LOG_PATH)
if (not existed):
os.makedirs(config.LOG_PATH)
existed = os.path.exists(log_path)
if existed:
os.remove(log_path)
logging.basicConfig(filename=os.path.join(config.LOG_PATH, config.LOG_NAME), level=logging.DEBUG, format=config.LOG_FORMAT, datefmt=config.DATE_FORMAT) | def initialize_log(self):
'\n \n '
log_path = os.path.join(config.LOG_PATH, config.LOG_NAME).strip()
existed = os.path.exists(config.LOG_PATH)
if (not existed):
os.makedirs(config.LOG_PATH)
existed = os.path.exists(log_path)
if existed:
os.remove(log_path)
logging.basicConfig(filename=os.path.join(config.LOG_PATH, config.LOG_NAME), level=logging.DEBUG, format=config.LOG_FORMAT, datefmt=config.DATE_FORMAT)<|docstring|>Initialize logging<|endoftext|>
26cbc9ccfeba4f81dd953c46d9e08490132261e02e2f136d899891f75fec4451 | def initialize_tables(self):
'\n Initialize database tables\n '
if ('clear' == self.method):
logging.info('Initialize database tables. File: init.py initialize_tables, mode: clear all.')
users.drop_table()
users.create_table()
coding.drop_table()
coding.create_table()
papers.drop_table()
papers.create_table()
postgraduate.drop_table()
postgraduate.create_table()
contest.drop_table()
contest.create_table()
found.drop_table()
found.create_table()
message.drop_table()
message.create_table()
attention.drop_table()
attention.create_table()
play.drop_table()
play.create_table()
second_hand.drop_table()
second_hand.create_table()
study.drop_table()
study.create_table()
logging.info('Initialize database tables. File: init.py initialize_tables, mode: clear all, done.')
elif ('remain' == self.method):
users.create_table()
found.create_table()
message.create_table()
attention.create_table()
play.create_table()
study.create_table()
message.create_table()
banner.create_table()
contest.create_table()
papers.create_table()
coding.create_table()
postgraduate.create_table()
logging.info('Initialize database tables. File: init.py initialize_tables, mode: retain data, done.') | Initialize database tables | server/server/init.py | initialize_tables | aweijx/MMW_YNU | 2 | python | def initialize_tables(self):
'\n \n '
if ('clear' == self.method):
logging.info('File: init.py initialize_tables, mode: clear all.')
users.drop_table()
users.create_table()
coding.drop_table()
coding.create_table()
papers.drop_table()
papers.create_table()
postgraduate.drop_table()
postgraduate.create_table()
contest.drop_table()
contest.create_table()
found.drop_table()
found.create_table()
message.drop_table()
message.create_table()
attention.drop_table()
attention.create_table()
play.drop_table()
play.create_table()
second_hand.drop_table()
second_hand.create_table()
study.drop_table()
study.create_table()
logging.info('File: init.py initialize_tables, mode: clear all, done.')
elif ('remain' == self.method):
users.create_table()
found.create_table()
message.create_table()
attention.create_table()
play.create_table()
study.create_table()
message.create_table()
banner.create_table()
contest.create_table()
papers.create_table()
coding.create_table()
postgraduate.create_table()
logging.info('File: init.py initialize_tables, mode: retain data, done.') | def initialize_tables(self):
'\n \n '
if ('clear' == self.method):
logging.info('File: init.py initialize_tables, mode: clear all.')
users.drop_table()
users.create_table()
coding.drop_table()
coding.create_table()
papers.drop_table()
papers.create_table()
postgraduate.drop_table()
postgraduate.create_table()
contest.drop_table()
contest.create_table()
found.drop_table()
found.create_table()
message.drop_table()
message.create_table()
attention.drop_table()
attention.create_table()
play.drop_table()
play.create_table()
second_hand.drop_table()
second_hand.create_table()
study.drop_table()
study.create_table()
logging.info('File: init.py initialize_tables, mode: clear all, done.')
elif ('remain' == self.method):
users.create_table()
found.create_table()
message.create_table()
attention.create_table()
play.create_table()
study.create_table()
message.create_table()
banner.create_table()
contest.create_table()
papers.create_table()
coding.create_table()
postgraduate.create_table()
logging.info('File: init.py initialize_tables, mode: retain data, done.')<|docstring|>Initialize database tables<|endoftext|>
f2d879bb70a7d617b79f4396ded96d24977a55ab2e3713e1dae5cfba33fa2052 | def fully_mixed(n_qubits=1):
'Return fully mixed state.'
dim = (2 ** n_qubits)
return Qobj((np.eye(dim, dtype=np.complex128) / dim)) | Return fully mixed state. | quantpy/qobj.py | fully_mixed | esthete88/quantpy | 0 | python | def fully_mixed(n_qubits=1):
dim = (2 ** n_qubits)
return Qobj((np.eye(dim, dtype=np.complex128) / dim)) | def fully_mixed(n_qubits=1):
dim = (2 ** n_qubits)
return Qobj((np.eye(dim, dtype=np.complex128) / dim))<|docstring|>Return fully mixed state.<|endoftext|> |
63bac03cd57bd638456379a04b33d25a0da81adc8d0a4f0d959f0893e8fad984 | def GHZ(n_qubits=3):
'Return GHZ state.'
ket = ((([1] + ([0] * ((2 ** n_qubits) - 2))) + [1]) / np.sqrt(2))
return Qobj(ket, is_ket=True) | Return GHZ state. | quantpy/qobj.py | GHZ | esthete88/quantpy | 0 | python | def GHZ(n_qubits=3):
ket = ((([1] + ([0] * ((2 ** n_qubits) - 2))) + [1]) / np.sqrt(2))
return Qobj(ket, is_ket=True) | def GHZ(n_qubits=3):
ket = ((([1] + ([0] * ((2 ** n_qubits) - 2))) + [1]) / np.sqrt(2))
return Qobj(ket, is_ket=True)<|docstring|>Return GHZ state.<|endoftext|> |
add1778eb431b2fdb7efb1e607102581986e9428dccb6b018f0f5745445cf7eb | def zero(n_qubits=1):
'Return zero state.'
ket = ([1] + ([0] * ((2 ** n_qubits) - 1)))
return Qobj(ket, is_ket=True) | Return zero state. | quantpy/qobj.py | zero | esthete88/quantpy | 0 | python | def zero(n_qubits=1):
ket = ([1] + ([0] * ((2 ** n_qubits) - 1)))
return Qobj(ket, is_ket=True) | def zero(n_qubits=1):
ket = ([1] + ([0] * ((2 ** n_qubits) - 1)))
return Qobj(ket, is_ket=True)<|docstring|>Return zero state.<|endoftext|> |
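As a quick sanity check of the constructors above, a standalone numpy sketch of the 2-qubit GHZ ket, which puts equal amplitude on |00> and |11> and nothing elsewhere:

import numpy as np

n = 2
ghz = np.array([1] + [0] * (2 ** n - 2) + [1]) / np.sqrt(2)
print(ghz)                      # [0.7071 0. 0. 0.7071]
print(np.eye(2 ** n) / 2 ** n)  # the fully mixed state on the same 2 qubits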
ad21236dcc89d7e931a035425fd2adc27c817b3adbd47ffd706458c9a8f5bc10 | @property
def matrix(self):
'Quantum object in a matrix form'
if ('matrix' not in self._types):
self._types.add('matrix')
basis = generate_pauli(self.n_qubits)
self._matrix = np.zeros(((2 ** self.n_qubits), (2 ** self.n_qubits)), dtype=np.complex128)
for i in range((4 ** self.n_qubits)):
self._matrix += (basis[i] * self._bloch[i])
return self._matrix | Quantum object in a matrix form | quantpy/qobj.py | matrix | esthete88/quantpy | 0 | python | @property
def matrix(self):
if ('matrix' not in self._types):
self._types.add('matrix')
basis = generate_pauli(self.n_qubits)
self._matrix = np.zeros(((2 ** self.n_qubits), (2 ** self.n_qubits)), dtype=np.complex128)
for i in range((4 ** self.n_qubits)):
self._matrix += (basis[i] * self._bloch[i])
return self._matrix | @property
def matrix(self):
if ('matrix' not in self._types):
self._types.add('matrix')
basis = generate_pauli(self.n_qubits)
self._matrix = np.zeros(((2 ** self.n_qubits), (2 ** self.n_qubits)), dtype=np.complex128)
for i in range((4 ** self.n_qubits)):
self._matrix += (basis[i] * self._bloch[i])
return self._matrix<|docstring|>Quantum object in a matrix form<|endoftext|> |
047267b2ce1508e29f82286ea83fc779114f0847bc7fae839fabb547607c9f0f | @property
def bloch(self):
'A vector, representing the quantum object in Pauli basis'
if ('bloch' not in self._types):
self._types.add('bloch')
basis = generate_pauli(self.n_qubits)
self._bloch = (np.array([np.real(product(basis_element, self._matrix)) for basis_element in basis]) / (2 ** self.n_qubits))
return self._bloch | A vector, representing the quantum object in Pauli basis | quantpy/qobj.py | bloch | esthete88/quantpy | 0 | python | @property
def bloch(self):
if ('bloch' not in self._types):
self._types.add('bloch')
basis = generate_pauli(self.n_qubits)
self._bloch = (np.array([np.real(product(basis_element, self._matrix)) for basis_element in basis]) / (2 ** self.n_qubits))
return self._bloch | @property
def bloch(self):
if ('bloch' not in self._types):
self._types.add('bloch')
basis = generate_pauli(self.n_qubits)
self._bloch = (np.array([np.real(product(basis_element, self._matrix)) for basis_element in basis]) / (2 ** self.n_qubits))
return self._bloch<|docstring|>A vector, representing the quantum object in Pauli basis<|endoftext|> |
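The two properties above are inverse maps between a density matrix and its Pauli (Bloch) coefficients. A single-qubit round trip, assuming generate_pauli(1) returns [I, X, Y, Z] and product(a, b) computes Tr(a @ b):

import numpy as np

I2 = np.eye(2)
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.diag([1.0, -1.0])
basis = [I2, X, Y, Z]

rho = np.array([[0.5, 0.5], [0.5, 0.5]])                       # |+><+|
bloch = np.array([np.trace(b @ rho).real for b in basis]) / 2  # divide by 2 ** n_qubits
print(bloch)                                                   # [0.5 0.5 0.  0. ]
rebuilt = sum(c * b for c, b in zip(bloch, basis))
print(np.allclose(rebuilt, rho))                               # True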
fb782af2d44ae2d6ee9e16da199a14f74e333e0d4a11ee2a35f80fc131471379 | def ptrace(self, keep=(0,)):
'Partial trace of the quantum object\n\n Parameters\n ----------\n keep : array-like, default=[0]\n List of indices of subsystems to keep after being traced.\n\n Returns\n -------\n rho : Qobj\n Traced quantum object\n '
keep = np.array(keep)
bra_idx = list(range(self.n_qubits))
ket_idx = [((self.n_qubits + i) if (i in keep) else i) for i in range(self.n_qubits)]
rho = self.matrix.reshape(([2] * (2 * self.n_qubits)))
rho = np.einsum(rho, (bra_idx + ket_idx))
return Qobj(rho.reshape((2 ** len(keep)), (2 ** len(keep)))) | Partial trace of the quantum object
Parameters
----------
keep : array-like, default=[0]
List of indices of subsystems to keep after being traced.
Returns
-------
rho : Qobj
Traced quantum object | quantpy/qobj.py | ptrace | esthete88/quantpy | 0 | python | def ptrace(self, keep=(0,)):
'Partial trace of the quantum object\n\n Parameters\n ----------\n keep : array-like, default=[0]\n List of indices of subsystems to keep after being traced.\n\n Returns\n -------\n rho : Qobj\n Traced quantum object\n '
keep = np.array(keep)
bra_idx = list(range(self.n_qubits))
ket_idx = [((self.n_qubits + i) if (i in keep) else i) for i in range(self.n_qubits)]
rho = self.matrix.reshape(([2] * (2 * self.n_qubits)))
rho = np.einsum(rho, (bra_idx + ket_idx))
return Qobj(rho.reshape((2 ** len(keep)), (2 ** len(keep)))) | def ptrace(self, keep=(0,)):
'Partial trace of the quantum object\n\n Parameters\n ----------\n keep : array-like, default=[0]\n List of indices of subsystems to keep after being traced.\n\n Returns\n -------\n rho : Qobj\n Traced quantum object\n '
keep = np.array(keep)
bra_idx = list(range(self.n_qubits))
ket_idx = [((self.n_qubits + i) if (i in keep) else i) for i in range(self.n_qubits)]
rho = self.matrix.reshape(([2] * (2 * self.n_qubits)))
rho = np.einsum(rho, (bra_idx + ket_idx))
return Qobj(rho.reshape((2 ** len(keep)), (2 ** len(keep))))<|docstring|>Partial trace of the quantum object
Parameters
----------
keep : array-like, default=[0]
List of indices of subsystems to keep after being traced.
Returns
-------
rho : Qobj
Traced quantum object<|endoftext|> |
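The einsum trick in ptrace gives the bra and ket of every traced subsystem the same index so that einsum sums it out. Tracing qubit 1 out of a Bell state, as a standalone check of the index bookkeeping:

import numpy as np

bell = np.array([1, 0, 0, 1]) / np.sqrt(2)
rho = np.outer(bell, bell.conj()).reshape(2, 2, 2, 2)  # axes: [bra0, bra1, ket0, ket1]
reduced = np.einsum(rho, [0, 1, 2, 1])                 # keep=[0]: index 1 repeats, so qubit 1 is summed out
print(reduced)                                         # 0.5 * I, the fully mixed qubit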
8909a00445ab54d4a3578b76dc12e4ec63393d7228feb2bb131d2879a8f46afd | def schmidt(self):
'Return Schmidt decomposition of the quantum object, if it is pure and consists of 2\n subsystems.\n\n Returns\n -------\n U : complex numpy 2-D array\n Unitary matrix having first subsystem vectors as columns\n s : complex numpy 1-D array\n Singular values of the decomposition, sorted in non-increasing order\n Vh : complex 2-D array\n Unitary matrix having second subsystem vectors as rows\n '
matrix_dim = (2 ** int((self.n_qubits / 2)))
matrix_repr = np.reshape(self.ket(), (matrix_dim, matrix_dim))
return la.svd(matrix_repr) | Return Schmidt decomposition of the quantum object, if it is pure and consists of 2
subsystems.
Returns
-------
U : complex numpy 2-D array
Unitary matrix having first subsystem vectors as columns
s : complex numpy 1-D array
Singular values of the decomposition, sorted in non-increasing order
Vh : complex 2-D array
Unitary matrix having second subsystem vectors as rows | quantpy/qobj.py | schmidt | esthete88/quantpy | 0 | python | def schmidt(self):
'Return Schmidt decomposition of the quantum object, if it is pure and consists of 2\n subsystems.\n\n Returns\n -------\n U : complex numpy 2-D array\n Unitary matrix having first subsystem vectors as columns\n s : complex numpy 1-D array\n Singular values of the decomposition, sorted in non-increasing order\n Vh : complex 2-D array\n Unitary matrix having second subsystem vectors as rows\n '
matrix_dim = (2 ** int((self.n_qubits / 2)))
matrix_repr = np.reshape(self.ket(), (matrix_dim, matrix_dim))
return la.svd(matrix_repr) | def schmidt(self):
'Return Schmidt decomposition of the quantum object, if it is pure and consists of 2\n subsystems.\n\n Returns\n -------\n U : complex numpy 2-D array\n Unitary matrix having first subsystem vectors as columns\n s : complex numpy 1-D array\n Singular values of the decomposition, sorted in non-increasing order\n Vh : complex 2-D array\n Unitary matrix having second subsystem vectors as rows\n '
matrix_dim = (2 ** int((self.n_qubits / 2)))
matrix_repr = np.reshape(self.ket(), (matrix_dim, matrix_dim))
return la.svd(matrix_repr)<|docstring|>Return Schmidt decomposition of the quantum object, if it is pure and consists of 2
subsystems.
Returns
-------
U : complex numpy 2-D array
Unitary matrix having first subsystem vectors as columns
s : complex numpy 1-D array
Singular values of the decomposition, sorted in non-increasing order
Vh : complex 2-D array
Unitary matrix having second subsystem vectors as rows<|endoftext|> |
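For a pure bipartite state the singular values returned by schmidt are the Schmidt coefficients. A maximally entangled two-qubit state yields two equal coefficients:

import numpy as np

bell = np.array([1, 0, 0, 1]) / np.sqrt(2)
U, s, Vh = np.linalg.svd(bell.reshape(2, 2))
print(s)   # [0.7071 0.7071] -> Schmidt rank 2, maximal entanglement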
48219887264d12b307c27a9c29fc96c71d32a8aa21720fb3db5fdf5042f4b356 | def eig(self):
'Find eigenvalues and eigenvectors of the quantum object\n\n Returns\n -------\n v : complex numpy 1-D array\n The eigenvalues, each repeated according to its multiplicity\n U : complex numpy 2-D array\n The normalized right eigenvector corresponding to the eigenvalue `v[i]`\n is the column `U[:, i]`\n\n Raises\n ------\n LinAlgError\n If eigenvalue computation does not converge\n '
return la.eig(self.matrix) | Find eigenvalues and eigenvectors of the quantum object
Returns
-------
v : complex numpy 1-D array
The eigenvalues, each repeated according to its multiplicity
U : complex numpy 2-D array
The normalized right eigenvector corresponding to the eigenvalue `v[i]`
is the column `U[:, i]`
Raises
------
LinAlgError
If eigenvalue computation does not converge | quantpy/qobj.py | eig | esthete88/quantpy | 0 | python | def eig(self):
'Find eigenvalues and eigenvectors of the quantum object\n\n Returns\n -------\n v : complex numpy 1-D array\n The eigenvalues, each repeated according to its multiplicity\n U : complex numpy 2-D array\n The normalized right eigenvector corresponding to the eigenvalue `v[i]`\n is the column `U[:, i]`\n\n Raises\n ------\n LinAlgError\n If eigenvalue computation does not converge\n '
return la.eig(self.matrix) | def eig(self):
'Find eigenvalues and eigenvectors of the quantum object\n\n Returns\n -------\n v : complex numpy 1-D array\n The eigenvalues, each repeated according to its multiplicity\n U : complex numpy 2-D array\n The normalized right eigenvector corresponding to the eigenvalue `v[i]`\n is the column `U[:, i]`\n\n Raises\n ------\n LinAlgError\n If eigenvalue computation does not converge\n '
return la.eig(self.matrix)<|docstring|>Find eigenvalues and eigenvectors of the quantum object
Returns
-------
v : complex numpy 1-D array
The eigenvalues, each repeated according to its multiplicity
U : complex numpy 2-D array
The normalized right eigenvector corresponding to the eigenvalue `v[i]`
is the column `U[:, i]`
Raises
------
LinAlgError
If eigenvalue computation does not converge<|endoftext|> |
cbc14342a90b7e641f7aea68f0be0b82c718f9c6970adaaee564ed25deba912e | def is_density_matrix(self, verbose=True):
'Check if the quantum object is a valid density matrix.\n Perform a test for hermiticity, positive semi-definiteness and unit trace.\n Alert the user about violations of the specific properties.\n '
herm_flag = np.allclose(self.matrix, self.matrix.T.conj())
pos_flag = np.allclose(np.minimum(np.real(self.eig()[0]), 0), 0)
trace_flag = np.allclose(np.trace(self.matrix), 1)
if (herm_flag and pos_flag and trace_flag):
return True
if ((not herm_flag) and verbose):
print('Non-hermitian', file=sys.stderr)
if ((not pos_flag) and verbose):
print('Non-positive', file=sys.stderr)
if ((not trace_flag) and verbose):
print('Trace is not 1', file=sys.stderr)
return False | Check if the quantum object is a valid density matrix.
Perform a test for hermiticity, positive semi-definiteness and unit trace.
Alert the user about violations of the specific properties. | quantpy/qobj.py | is_density_matrix | esthete88/quantpy | 0 | python | def is_density_matrix(self, verbose=True):
'Check if the quantum object is a valid density matrix.\n Perform a test for hermiticity, positive semi-definiteness and unit trace.\n Alert the user about violations of the specific properties.\n '
herm_flag = np.allclose(self.matrix, self.matrix.T.conj())
pos_flag = np.allclose(np.minimum(np.real(self.eig()[0]), 0), 0)
trace_flag = np.allclose(np.trace(self.matrix), 1)
if (herm_flag and pos_flag and trace_flag):
return True
if ((not herm_flag) and verbose):
print('Non-hermitian', file=sys.stderr)
if ((not pos_flag) and verbose):
print('Non-positive', file=sys.stderr)
if ((not trace_flag) and verbose):
print('Trace is not 1', file=sys.stderr)
return False | def is_density_matrix(self, verbose=True):
'Check if the quantum object is a valid density matrix.\n Perform a test for hermiticity, positive semi-definiteness and unit trace.\n Alert the user about violations of the specific properties.\n '
herm_flag = np.allclose(self.matrix, self.matrix.T.conj())
pos_flag = np.allclose(np.minimum(np.real(self.eig()[0]), 0), 0)
trace_flag = np.allclose(np.trace(self.matrix), 1)
if (herm_flag and pos_flag and trace_flag):
return True
if ((not herm_flag) and verbose):
print('Non-hermitian', file=sys.stderr)
if ((not pos_flag) and verbose):
print('Non-positive', file=sys.stderr)
if ((not trace_flag) and verbose):
print('Trace is not 1', file=sys.stderr)
return False<|docstring|>Check if the quantum object is a valid density matrix.
Perform a test for hermiticity, positive semi-definiteness and unit trace.
Alert the user about violations of the specific properties.<|endoftext|> |
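The three checks above, spelled out on a concrete matrix (|+><+| passes all of them):

import numpy as np

rho = np.array([[0.5, 0.5], [0.5, 0.5]])                       # |+><+|
print(np.allclose(rho, rho.T.conj()))                          # hermitian: True
print(np.allclose(np.minimum(np.linalg.eigvalsh(rho), 0), 0))  # positive semi-definite: True
print(np.allclose(np.trace(rho), 1))                           # unit trace: True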
d38bd9f2053bde20eb4c62283293a9bddae24ea7f858e8e73fe18de0495c2747 | def trace(self):
'Trace of the quantum object'
return np.trace(self.matrix) | Trace of the quantum object | quantpy/qobj.py | trace | esthete88/quantpy | 0 | python | def trace(self):
return np.trace(self.matrix) | def trace(self):
return np.trace(self.matrix)<|docstring|>Trace of the quantum object<|endoftext|> |
dfef5d59c6471eee3b05db27a8295fbcb4ea4737d9eab71515363fc29810ea6f | def impurity(self):
'Return impurity measure 1-Tr(rho^2)'
return (1 - (self @ self).trace()) | Return impurity measure 1-Tr(rho^2) | quantpy/qobj.py | impurity | esthete88/quantpy | 0 | python | def impurity(self):
return (1 - (self @ self).trace()) | def impurity(self):
return (1 - (self @ self).trace())<|docstring|>Return impurity measure 1-Tr(rho^2)<|endoftext|> |
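impurity evaluates to 0 for pure states and to 1 - 1/2^n for the fully mixed state on n qubits; a one-qubit check with plain numpy:

import numpy as np

mixed = np.eye(2) / 2
print(1 - np.trace(mixed @ mixed).real)    # 0.5 -> maximally mixed qubit
pure = np.array([[1.0, 0.0], [0.0, 0.0]])
print(1 - np.trace(pure @ pure).real)      # 0.0 -> pure state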
17d47bbe92798d182fd27230bf536137286a34850964cabfa40b563ecd7f2fb9 | def is_pure(self):
'Check if the quantum object is a valid rank-1 density matrix'
return (np.allclose(self.impurity(), 0) and self.is_density_matrix()) | Check if the quantum object is a valid rank-1 density matrix | quantpy/qobj.py | is_pure | esthete88/quantpy | 0 | python | def is_pure(self):
return (np.allclose(self.impurity(), 0) and self.is_density_matrix()) | def is_pure(self):
return (np.allclose(self.impurity(), 0) and self.is_density_matrix())<|docstring|>Check if the quantum object is a valid rank-1 density matrix<|endoftext|> |
f4aff2e61c369d727ac3b3c3fcbf1870a5fc2aeacf1b243a930024257f61e5c1 | def ket(self):
'Return ket vector representation of the quantum object if it is pure'
if (not self.is_pure()):
raise ValueError('Quantum object is not pure')
return self.eig()[1][:, 0] | Return ket vector representation of the quantum object if it is pure | quantpy/qobj.py | ket | esthete88/quantpy | 0 | python | def ket(self):
if (not self.is_pure()):
raise ValueError('Quantum object is not pure')
return self.eig()[1][:, 0] | def ket(self):
if (not self.is_pure()):
raise ValueError('Quantum object is not pure')
return self.eig()[1][:, 0]<|docstring|>Return ket vector representation of the quantum object if it is pure<|endoftext|>
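One caution about ket: la.eig does not guarantee any eigenvalue ordering, so column 0 is not necessarily the eigenvalue-1 eigenvector of a pure state. A defensive variant (a sketch, not the repository's code) selects the largest eigenvalue explicitly:

import numpy as np

rho_matrix = np.array([[0.5, 0.5], [0.5, 0.5]])  # |+><+|, a pure density matrix
vals, vecs = np.linalg.eig(rho_matrix)
ket = vecs[:, np.argmax(vals.real)]              # eigenvector for the eigenvalue near 1
print(ket)                                       # ~[0.7071, 0.7071], up to global sign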
ddc9734a3f06a4f676d74fb51db89652243da8548e94e7e4dd555e455917cc9a | def _repr_latex_(self):
'Generate a LaTeX representation of the Qobj instance. Can be used for\n formatted output in IPython notebook.\n '
s = 'Quantum object: '
(M, N) = self.matrix.shape
s += '\\begin{equation*}\\left(\\begin{array}{*{11}c}'
def _format_float(value):
if (value == 0.0):
return '0.0'
elif ((abs(value) > 1000.0) or (abs(value) < 0.001)):
return (('%.3e' % value).replace('e', '\\times10^{') + '}')
elif (abs((value - int(value))) < 0.001):
return ('%.1f' % value)
else:
return ('%.3f' % value)
def _format_element(m, n, d):
s = (' & ' if (n > 0) else '')
if (type(d) == str):
return (s + d)
else:
atol = 0.0001
if (abs(np.imag(d)) < atol):
return (s + _format_float(np.real(d)))
elif (abs(np.real(d)) < atol):
return ((s + _format_float(np.imag(d))) + 'j')
else:
s_re = _format_float(np.real(d))
s_im = _format_float(np.imag(d))
if (np.imag(d) > 0.0):
return (((((s + '(') + s_re) + '+') + s_im) + 'j)')
else:
return ((((s + '(') + s_re) + s_im) + 'j)')
if ((M > 10) and (N > 10)):
for m in range(5):
for n in range(5):
s += _format_element(m, n, self.matrix[(m, n)])
s += ' & \\cdots'
for n in range((N - 5), N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
for n in range(5):
s += _format_element(m, n, '\\vdots')
s += ' & \\ddots'
for n in range((N - 5), N):
s += _format_element(m, n, '\\vdots')
s += '\\\\'
for m in range((M - 5), M):
for n in range(5):
s += _format_element(m, n, self.matrix[(m, n)])
s += ' & \\cdots'
for n in range((N - 5), N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
elif ((M > 10) and (N <= 10)):
for m in range(5):
for n in range(N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
for n in range(N):
s += _format_element(m, n, '\\vdots')
s += '\\\\'
for m in range((M - 5), M):
for n in range(N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
elif ((M <= 10) and (N > 10)):
for m in range(M):
for n in range(5):
s += _format_element(m, n, self.matrix[(m, n)])
s += ' & \\cdots'
for n in range((N - 5), N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
else:
for m in range(M):
for n in range(N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
s += '\\end{array}\\right)\\end{equation*}'
return s | Generate a LaTeX representation of the Qobj instance. Can be used for
formatted output in IPython notebook. | quantpy/qobj.py | _repr_latex_ | esthete88/quantpy | 0 | python | def _repr_latex_(self):
'Generate a LaTeX representation of the Qobj instance. Can be used for\n formatted output in IPython notebook.\n '
s = 'Quantum object: '
(M, N) = self.matrix.shape
s += '\\begin{equation*}\\left(\\begin{array}{*{11}c}'
def _format_float(value):
if (value == 0.0):
return '0.0'
elif ((abs(value) > 1000.0) or (abs(value) < 0.001)):
return (('%.3e' % value).replace('e', '\\times10^{') + '}')
elif (abs((value - int(value))) < 0.001):
return ('%.1f' % value)
else:
return ('%.3f' % value)
def _format_element(m, n, d):
s = (' & ' if (n > 0) else '')
if (type(d) == str):
return (s + d)
else:
atol = 0.0001
if (abs(np.imag(d)) < atol):
return (s + _format_float(np.real(d)))
elif (abs(np.real(d)) < atol):
return ((s + _format_float(np.imag(d))) + 'j')
else:
s_re = _format_float(np.real(d))
s_im = _format_float(np.imag(d))
if (np.imag(d) > 0.0):
return (((((s + '(') + s_re) + '+') + s_im) + 'j)')
else:
return ((((s + '(') + s_re) + s_im) + 'j)')
if ((M > 10) and (N > 10)):
for m in range(5):
for n in range(5):
s += _format_element(m, n, self.matrix[(m, n)])
s += ' & \\cdots'
for n in range((N - 5), N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
for n in range(5):
s += _format_element(m, n, '\\vdots')
s += ' & \\ddots'
for n in range((N - 5), N):
s += _format_element(m, n, '\\vdots')
s += '\\\\'
for m in range((M - 5), M):
for n in range(5):
s += _format_element(m, n, self.matrix[(m, n)])
s += ' & \\cdots'
for n in range((N - 5), N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
elif ((M > 10) and (N <= 10)):
for m in range(5):
for n in range(N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
for n in range(N):
s += _format_element(m, n, '\\vdots')
s += '\\\\'
for m in range((M - 5), M):
for n in range(N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
elif ((M <= 10) and (N > 10)):
for m in range(M):
for n in range(5):
s += _format_element(m, n, self.matrix[(m, n)])
s += ' & \\cdots'
for n in range((N - 5), N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
else:
for m in range(M):
for n in range(N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
s += '\\end{array}\\right)\\end{equation*}'
return s | def _repr_latex_(self):
'Generate a LaTeX representation of the Qobj instance. Can be used for\n formatted output in IPython notebook.\n '
s = 'Quantum object: '
(M, N) = self.matrix.shape
s += '\\begin{equation*}\\left(\\begin{array}{*{11}c}'
def _format_float(value):
if (value == 0.0):
return '0.0'
elif ((abs(value) > 1000.0) or (abs(value) < 0.001)):
return (('%.3e' % value).replace('e', '\\times10^{') + '}')
elif (abs((value - int(value))) < 0.001):
return ('%.1f' % value)
else:
return ('%.3f' % value)
def _format_element(m, n, d):
s = (' & ' if (n > 0) else '')
if (type(d) == str):
return (s + d)
else:
atol = 0.0001
if (abs(np.imag(d)) < atol):
return (s + _format_float(np.real(d)))
elif (abs(np.real(d)) < atol):
return ((s + _format_float(np.imag(d))) + 'j')
else:
s_re = _format_float(np.real(d))
s_im = _format_float(np.imag(d))
if (np.imag(d) > 0.0):
return (((((s + '(') + s_re) + '+') + s_im) + 'j)')
else:
return ((((s + '(') + s_re) + s_im) + 'j)')
if ((M > 10) and (N > 10)):
for m in range(5):
for n in range(5):
s += _format_element(m, n, self.matrix[(m, n)])
s += ' & \\cdots'
for n in range((N - 5), N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
for n in range(5):
s += _format_element(m, n, '\\vdots')
s += ' & \\ddots'
for n in range((N - 5), N):
s += _format_element(m, n, '\\vdots')
s += '\\\\'
for m in range((M - 5), M):
for n in range(5):
s += _format_element(m, n, self.matrix[(m, n)])
s += ' & \\cdots'
for n in range((N - 5), N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
elif ((M > 10) and (N <= 10)):
for m in range(5):
for n in range(N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
for n in range(N):
s += _format_element(m, n, '\\vdots')
s += '\\\\'
for m in range((M - 5), M):
for n in range(N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
elif ((M <= 10) and (N > 10)):
for m in range(M):
for n in range(5):
s += _format_element(m, n, self.matrix[(m, n)])
s += ' & \\cdots'
for n in range((N - 5), N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
else:
for m in range(M):
for n in range(N):
s += _format_element(m, n, self.matrix[(m, n)])
s += '\\\\'
s += '\\end{array}\\right)\\end{equation*}'
return s<|docstring|>Generate a LaTeX representation of the Qobj instance. Can be used for
formatted output in IPython notebook.<|endoftext|> |
7fd176e8a04c1de4c50a40e674b7ea84640b99a5da6f543a20614d8fcead2760 | def group_by_dir(paths: Iterable[str]) -> dict[(str, set[str])]:
'For a list of file paths, returns a dict of directory path -> files in that dir.'
ret = defaultdict(set)
for path in paths:
(dirname, filename) = os.path.split(path)
ret[dirname].add(filename)
return ret | For a list of file paths, returns a dict of directory path -> files in that dir. | src/python/pants/core/goals/tailor.py | group_by_dir | chebbyChefNEQ/pants | 1 | python | def group_by_dir(paths: Iterable[str]) -> dict[(str, set[str])]:
ret = defaultdict(set)
for path in paths:
(dirname, filename) = os.path.split(path)
ret[dirname].add(filename)
return ret | def group_by_dir(paths: Iterable[str]) -> dict[(str, set[str])]:
ret = defaultdict(set)
for path in paths:
(dirname, filename) = os.path.split(path)
ret[dirname].add(filename)
return ret<|docstring|>For a list of file paths, returns a dict of directory path -> files in that dir.<|endoftext|> |
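Expected behavior of group_by_dir, as a quick illustration (files with no directory component land under the empty-string key; assumes the function above is in scope):

print(dict(group_by_dir(['src/a.py', 'src/b.py', 'README.md'])))
# -> {'src': {'a.py', 'b.py'}, '': {'README.md'}}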
00eac214ecb3820817e1850fffe380c56ad3ee07fcfca9628c6cc6a23a079b9f | @rule
async def rename_conflicting_targets(ptgts: PutativeTargets) -> UniquelyNamedPutativeTargets:
'Ensure that no target addresses collide.'
all_existing_tgts = (await Get(UnexpandedTargets, AddressSpecs([MaybeEmptyDescendantAddresses('')])))
existing_addrs: Set[str] = {tgt.address.spec for tgt in all_existing_tgts}
uniquely_named_putative_targets: List[PutativeTarget] = []
for ptgt in ptgts:
if (not ptgt.addressable):
uniquely_named_putative_targets.append(ptgt)
continue
idx = 0
possibly_renamed_ptgt = ptgt
if ((possibly_renamed_ptgt.path == '') and (possibly_renamed_ptgt.kwargs.get('name') is None)):
possibly_renamed_ptgt = possibly_renamed_ptgt.rename('root')
while (possibly_renamed_ptgt.address.spec in existing_addrs):
possibly_renamed_ptgt = ptgt.rename(f'{ptgt.name}{idx}')
idx += 1
uniquely_named_putative_targets.append(possibly_renamed_ptgt)
existing_addrs.add(possibly_renamed_ptgt.address.spec)
return UniquelyNamedPutativeTargets(PutativeTargets(uniquely_named_putative_targets)) | Ensure that no target addresses collide. | src/python/pants/core/goals/tailor.py | rename_conflicting_targets | chebbyChefNEQ/pants | 1 | python | @rule
async def rename_conflicting_targets(ptgts: PutativeTargets) -> UniquelyNamedPutativeTargets:
all_existing_tgts = (await Get(UnexpandedTargets, AddressSpecs([MaybeEmptyDescendantAddresses('')])))
existing_addrs: Set[str] = {tgt.address.spec for tgt in all_existing_tgts}
uniquely_named_putative_targets: List[PutativeTarget] = []
for ptgt in ptgts:
if (not ptgt.addressable):
uniquely_named_putative_targets.append(ptgt)
continue
idx = 0
possibly_renamed_ptgt = ptgt
if ((possibly_renamed_ptgt.path == '') and (possibly_renamed_ptgt.kwargs.get('name') is None)):
possibly_renamed_ptgt = possibly_renamed_ptgt.rename('root')
while (possibly_renamed_ptgt.address.spec in existing_addrs):
possibly_renamed_ptgt = ptgt.rename(f'{ptgt.name}{idx}')
idx += 1
uniquely_named_putative_targets.append(possibly_renamed_ptgt)
existing_addrs.add(possibly_renamed_ptgt.address.spec)
return UniquelyNamedPutativeTargets(PutativeTargets(uniquely_named_putative_targets)) | @rule
async def rename_conflicting_targets(ptgts: PutativeTargets) -> UniquelyNamedPutativeTargets:
all_existing_tgts = (await Get(UnexpandedTargets, AddressSpecs([MaybeEmptyDescendantAddresses('')])))
existing_addrs: Set[str] = {tgt.address.spec for tgt in all_existing_tgts}
uniquely_named_putative_targets: List[PutativeTarget] = []
for ptgt in ptgts:
if (not ptgt.addressable):
uniquely_named_putative_targets.append(ptgt)
continue
idx = 0
possibly_renamed_ptgt = ptgt
if ((possibly_renamed_ptgt.path == '') and (possibly_renamed_ptgt.kwargs.get('name') is None)):
possibly_renamed_ptgt = possibly_renamed_ptgt.rename('root')
while (possibly_renamed_ptgt.address.spec in existing_addrs):
possibly_renamed_ptgt = ptgt.rename(f'{ptgt.name}{idx}')
idx += 1
uniquely_named_putative_targets.append(possibly_renamed_ptgt)
existing_addrs.add(possibly_renamed_ptgt.address.spec)
return UniquelyNamedPutativeTargets(PutativeTargets(uniquely_named_putative_targets))<|docstring|>Ensure that no target addresses collide.<|endoftext|> |
d8b2e3269b3c09e7dfab5b93ad3540c9a28761076b142ca1c3311ebe8af50b37 | def specs_to_dirs(specs: Specs) -> tuple[(str, ...)]:
'Extract cmd-line specs that look like directories.\n\n Error on all other specs.\n\n This is a hack that allows us to emulate "directory specs", until we are able to\n support those more intrinsically.\n\n TODO: If other goals need "directory specs", move this logic to a rule that produces them.\n '
dir_specs = []
other_specs: list[Spec] = [*specs.filesystem_specs.includes, *specs.filesystem_specs.ignores, *specs.address_specs.globs]
for spec in specs.address_specs.literals:
if spec.is_directory_shorthand:
dir_specs.append(spec)
else:
other_specs.append(spec)
if other_specs:
raise ValueError(f"The tailor goal only accepts literal directories as arguments, but you specified: {', '.join((str(spec) for spec in other_specs))}. You can also specify no arguments to run against the entire repository.")
return (tuple((spec.path_component for spec in specs.address_specs.literals)) or ('',)) | Extract cmd-line specs that look like directories.
Error on all other specs.
This is a hack that allows us to emulate "directory specs", until we are able to
support those more intrinsically.
TODO: If other goals need "directory specs", move this logic to a rule that produces them. | src/python/pants/core/goals/tailor.py | specs_to_dirs | chebbyChefNEQ/pants | 1 | python | def specs_to_dirs(specs: Specs) -> tuple[(str, ...)]:
'Extract cmd-line specs that look like directories.\n\n Error on all other specs.\n\n This is a hack that allows us to emulate "directory specs", until we are able to\n support those more intrinsically.\n\n TODO: If other goals need "directory specs", move this logic to a rule that produces them.\n '
dir_specs = []
other_specs: list[Spec] = [*specs.filesystem_specs.includes, *specs.filesystem_specs.ignores, *specs.address_specs.globs]
for spec in specs.address_specs.literals:
if spec.is_directory_shorthand:
dir_specs.append(spec)
else:
other_specs.append(spec)
if other_specs:
raise ValueError(f"The tailor goal only accepts literal directories as arguments, but you specified: {', '.join((str(spec) for spec in other_specs))}. You can also specify no arguments to run against the entire repository.")
return (tuple((spec.path_component for spec in specs.address_specs.literals)) or ('',)) | def specs_to_dirs(specs: Specs) -> tuple[(str, ...)]:
'Extract cmd-line specs that look like directories.\n\n Error on all other specs.\n\n This is a hack that allows us to emulate "directory specs", until we are able to\n support those more intrinsically.\n\n TODO: If other goals need "directory specs", move this logic to a rule that produces them.\n '
dir_specs = []
other_specs: list[Spec] = [*specs.filesystem_specs.includes, *specs.filesystem_specs.ignores, *specs.address_specs.globs]
for spec in specs.address_specs.literals:
if spec.is_directory_shorthand:
dir_specs.append(spec)
else:
other_specs.append(spec)
if other_specs:
raise ValueError(f"The tailor goal only accepts literal directories as arguments, but you specified: {', '.join((str(spec) for spec in other_specs))}. You can also specify no arguments to run against the entire repository.")
return (tuple((spec.path_component for spec in specs.address_specs.literals)) or ('',))<|docstring|>Extract cmd-line specs that look like directories.
Error on all other specs.
This is a hack that allows us to emulate "directory specs", until we are able to
support those more intrinsically.
TODO: If other goals need "directory specs", move this logic to a rule that produces them.<|endoftext|> |
24fc693e426cb24ac36f7c798058c793e795e11c147f9a76702033b79dfb7e11 | def realias(self, new_alias: (str | None)) -> PutativeTarget:
'A copy of this object with the alias replaced to the given alias.\n\n Returns this object if the alias is None or is identical to this object\'s existing alias.\n '
return (self if ((new_alias is None) or (new_alias == self.type_alias)) else dataclasses.replace(self, type_alias=new_alias)) | A copy of this object with the alias replaced to the given alias.
Returns this object if the alias is None or is identical to this object's existing alias. | src/python/pants/core/goals/tailor.py | realias | chebbyChefNEQ/pants | 1 | python | def realias(self, new_alias: (str | None)) -> PutativeTarget:
'A copy of this object with the alias replaced to the given alias.\n\n Returns this object if the alias is None or is identical to this object\'s existing alias.\n '
return (self if ((new_alias is None) or (new_alias == self.type_alias)) else dataclasses.replace(self, type_alias=new_alias)) | def realias(self, new_alias: (str | None)) -> PutativeTarget:
'A copy of this object with the alias replaced to the given alias.\n\n Returns this object if the alias is None or is identical to this object\'s existing alias.\n '
return (self if ((new_alias is None) or (new_alias == self.type_alias)) else dataclasses.replace(self, type_alias=new_alias))<|docstring|>A copy of this object with the alias replaced to the given alias.
Returns this object if the alias is None or is identical to this object's existing alias.<|endoftext|>
50b9bb2bdf4ef0a3f76a2d87716e0b44c8d9085f6f086fe858c264ae8907f3a2 | def rename(self, new_name: str) -> PutativeTarget:
'A copy of this object with the name replaced to the given name.'
return dataclasses.replace(self, name=new_name, kwargs={**self.kwargs, 'name': new_name}) | A copy of this object with the name replaced to the given name. | src/python/pants/core/goals/tailor.py | rename | chebbyChefNEQ/pants | 1 | python | def rename(self, new_name: str) -> PutativeTarget:
return dataclasses.replace(self, name=new_name, kwargs={**self.kwargs, 'name': new_name}) | def rename(self, new_name: str) -> PutativeTarget:
return dataclasses.replace(self, name=new_name, kwargs={**self.kwargs, 'name': new_name})<|docstring|>A copy of this object with the name replaced to the given name.<|endoftext|> |
555cfd4a229c2b781124de8820c44e6e250452fbf0ee604cb4442723ed6ab7dd | def restrict_sources(self) -> PutativeTarget:
'A copy of this object with the sources explicitly set to just the triggering sources.'
owned_sources = self.triggering_sources
return dataclasses.replace(self, owned_sources=owned_sources, kwargs={**self.kwargs, 'sources': owned_sources}) | A copy of this object with the sources explicitly set to just the triggering sources. | src/python/pants/core/goals/tailor.py | restrict_sources | chebbyChefNEQ/pants | 1 | python | def restrict_sources(self) -> PutativeTarget:
owned_sources = self.triggering_sources
return dataclasses.replace(self, owned_sources=owned_sources, kwargs={**self.kwargs, 'sources': owned_sources}) | def restrict_sources(self) -> PutativeTarget:
owned_sources = self.triggering_sources
return dataclasses.replace(self, owned_sources=owned_sources, kwargs={**self.kwargs, 'sources': owned_sources})<|docstring|>A copy of this object with the sources explicitly set to just the triggering sources.<|endoftext|> |
21feb3b6e668fb156f23aa77629fabc95ff58cc23a7d43c913ae446fdbb743a6 | def avg(grades):
'Assertions; an example of good defensive programming\n\n >>> avg([68, 89, 96, 88, 75])\n 83.2\n >>> avg([68, 50, 91, 80, 65])\n 70.8\n >>> avg([49, 60, 81, 97, 55])\n 68.4\n '
assert (not (len(grades) == 0)), 'no grades data'
return (sum(grades) / len(grades)) | Assertions; an example of good defensive programming
>>> avg([68, 89, 96, 88, 75])
83.2
>>> avg([68, 50, 91, 80, 65])
70.8
>>> avg([49, 60, 81, 97, 55])
68.4 | grades_average.py | avg | xeroxzen/MIT-Computer-Science-and-Programming-Using-Python | 0 | python | def avg(grades):
'Assertions; an example of good defensive programming\n\n >>> avg([68, 89, 96, 88, 75])\n 83.2\n >>> avg([68, 50, 91, 80, 65])\n 70.8\n >>> avg([49, 60, 81, 97, 55])\n 68.4\n '
assert (not (len(grades) == 0)), 'no grades data'
return (sum(grades) / len(grades)) | def avg(grades):
'Assertions; an example of good defensive programming\n\n >>> avg([68, 89, 96, 88, 75])\n 83.2\n >>> avg([68, 50, 91, 80, 65])\n 70.8\n >>> avg([49, 60, 81, 97, 55])\n 68.4\n '
assert (not (len(grades) == 0)), 'no grades data'
return (sum(grades) / len(grades))<|docstring|>Assertions; an example of good defensive programming
>>> avg([68, 89, 96, 88, 75])
83.2
>>> avg([68, 50, 91, 80, 65])
70.8
>>> avg([49, 60, 81, 97, 55])
68.4<|endoftext|> |
0f2af86cd277913c6309e5da7e0bbb5b87950a007e56e1cc63fcceda9a99163b | def download_species(self, uri, release, staging, download, species: str):
'\n Download protein homology for species and suffix if file does not already exist.\n\n Most entries do not require suffix to be provided, but some such as sus_scrofa_usmarc have no standard ftp\n entries requiring a custom suffix.\n '
protein_uri = (uri + f'{species}/{species}.json')
resource_stage = Dict()
resource_stage.uri = protein_uri
resource_stage.output_filename = f'{release}-{species}.json'
resource_stage.output_dir = staging
return download.ftp_download(resource_stage) | Download protein homology for species and suffix if file does not already exist.
Most entries do not require suffix to be provided, but some such as sus_scrofa_usmarc have no standard ftp
entries requiring a custom suffix. | plugins/Homologues.py | download_species | opentargets/platform-input-support | 4 | python | def download_species(self, uri, release, staging, download, species: str):
'\n Download protein homology for species and suffix if file does not already exist.\n\n Most entries do not require suffix to be provided, but some such as sus_scrofa_usmarc have no standard ftp\n entries requiring a custom suffix.\n '
protein_uri = (uri + f'{species}/{species}.json')
resource_stage = Dict()
resource_stage.uri = protein_uri
resource_stage.output_filename = f'{release}-{species}.json'
resource_stage.output_dir = staging
return download.ftp_download(resource_stage) | def download_species(self, uri, release, staging, download, species: str):
'\n Download protein homology for species and suffix if file does not already exist.\n\n Most entries do not require suffix to be provided, but some such as sus_scrofa_usmarc have no standard ftp\n entries requiring a custom suffix.\n '
protein_uri = (uri + f'{species}/{species}.json')
resource_stage = Dict()
resource_stage.uri = protein_uri
resource_stage.output_filename = f'{release}-{species}.json'
resource_stage.output_dir = staging
return download.ftp_download(resource_stage)<|docstring|>Download protein homology for species and suffix if file does not already exist.
Most entries do not require suffix to be provided, but some such as sus_scrofa_usmarc have no standard ftp
entries requiring a custom suffix.<|endoftext|> |
4962104ac8ae58e5635cf89908bc8f12160668c1933f52760b82b4bc45591d8f | def test_predictor_torch_export(self):
'Verify that q-values before model export equal q-values after\n model export. Meant to catch issues with export logic.'
environment = Gridworld()
samples = Samples(mdp_ids=['0'], sequence_numbers=[0], sequence_number_ordinals=[1], states=[{0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0, 15: 1.0, 24: 1.0}], actions=['D'], action_probabilities=[0.5], rewards=[0], possible_actions=[['R', 'D']], next_states=[{5: 1.0}], next_actions=['U'], terminals=[False], possible_next_actions=[['R', 'U', 'D']])
tdps = environment.preprocess_samples(samples, 1)
assert (len(tdps) == 1), 'Invalid number of data pages'
trainer = self.get_trainer(environment, {}, False, False, False)
input = rlt.PreprocessedState.from_tensor(tdps[0].states)
pre_export_q_values = trainer.q_network(input).q_values.detach().numpy()
preprocessor = Preprocessor(environment.normalization, False)
cpu_q_network = trainer.q_network.cpu_model()
cpu_q_network.eval()
dqn_with_preprocessor = DiscreteDqnWithPreprocessor(cpu_q_network, preprocessor)
serving_module = DiscreteDqnPredictorWrapper(dqn_with_preprocessor, action_names=environment.ACTIONS)
with tempfile.TemporaryDirectory() as tmpdirname:
buf = export_module_to_buffer(serving_module)
tmp_path = os.path.join(tmpdirname, 'model')
with open(tmp_path, 'wb') as f:
f.write(buf.getvalue())
f.close()
predictor = DiscreteDqnTorchPredictor(torch.jit.load(tmp_path))
post_export_q_values = predictor.predict([samples.states[0]])
for (i, action) in enumerate(environment.ACTIONS):
self.assertAlmostEqual(float(pre_export_q_values[0][i]), float(post_export_q_values[0][action]), places=4) | Verify that q-values before model export equal q-values after
model export. Meant to catch issues with export logic. | ml/rl/test/gridworld/test_gridworld_pytorch.py | test_predictor_torch_export | joshrose/Horizon | 2 | python | def test_predictor_torch_export(self):
'Verify that q-values before model export equal q-values after\n model export. Meant to catch issues with export logic.'
environment = Gridworld()
samples = Samples(mdp_ids=['0'], sequence_numbers=[0], sequence_number_ordinals=[1], states=[{0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0, 15: 1.0, 24: 1.0}], actions=['D'], action_probabilities=[0.5], rewards=[0], possible_actions=[['R', 'D']], next_states=[{5: 1.0}], next_actions=['U'], terminals=[False], possible_next_actions=[['R', 'U', 'D']])
tdps = environment.preprocess_samples(samples, 1)
assert (len(tdps) == 1), 'Invalid number of data pages'
trainer = self.get_trainer(environment, {}, False, False, False)
input = rlt.PreprocessedState.from_tensor(tdps[0].states)
pre_export_q_values = trainer.q_network(input).q_values.detach().numpy()
preprocessor = Preprocessor(environment.normalization, False)
cpu_q_network = trainer.q_network.cpu_model()
cpu_q_network.eval()
dqn_with_preprocessor = DiscreteDqnWithPreprocessor(cpu_q_network, preprocessor)
serving_module = DiscreteDqnPredictorWrapper(dqn_with_preprocessor, action_names=environment.ACTIONS)
with tempfile.TemporaryDirectory() as tmpdirname:
buf = export_module_to_buffer(serving_module)
tmp_path = os.path.join(tmpdirname, 'model')
with open(tmp_path, 'wb') as f:
f.write(buf.getvalue())
f.close()
predictor = DiscreteDqnTorchPredictor(torch.jit.load(tmp_path))
post_export_q_values = predictor.predict([samples.states[0]])
for (i, action) in enumerate(environment.ACTIONS):
self.assertAlmostEqual(float(pre_export_q_values[0][i]), float(post_export_q_values[0][action]), places=4) | def test_predictor_torch_export(self):
'Verify that q-values before model export equal q-values after\n model export. Meant to catch issues with export logic.'
environment = Gridworld()
samples = Samples(mdp_ids=['0'], sequence_numbers=[0], sequence_number_ordinals=[1], states=[{0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0, 15: 1.0, 24: 1.0}], actions=['D'], action_probabilities=[0.5], rewards=[0], possible_actions=[['R', 'D']], next_states=[{5: 1.0}], next_actions=['U'], terminals=[False], possible_next_actions=[['R', 'U', 'D']])
tdps = environment.preprocess_samples(samples, 1)
assert (len(tdps) == 1), 'Invalid number of data pages'
trainer = self.get_trainer(environment, {}, False, False, False)
input = rlt.PreprocessedState.from_tensor(tdps[0].states)
pre_export_q_values = trainer.q_network(input).q_values.detach().numpy()
preprocessor = Preprocessor(environment.normalization, False)
cpu_q_network = trainer.q_network.cpu_model()
cpu_q_network.eval()
dqn_with_preprocessor = DiscreteDqnWithPreprocessor(cpu_q_network, preprocessor)
serving_module = DiscreteDqnPredictorWrapper(dqn_with_preprocessor, action_names=environment.ACTIONS)
with tempfile.TemporaryDirectory() as tmpdirname:
buf = export_module_to_buffer(serving_module)
tmp_path = os.path.join(tmpdirname, 'model')
with open(tmp_path, 'wb') as f:
f.write(buf.getvalue())
f.close()
predictor = DiscreteDqnTorchPredictor(torch.jit.load(tmp_path))
post_export_q_values = predictor.predict([samples.states[0]])
for (i, action) in enumerate(environment.ACTIONS):
self.assertAlmostEqual(float(pre_export_q_values[0][i]), float(post_export_q_values[0][action]), places=4)<|docstring|>Verify that q-values before model export equal q-values after
model export. Meant to catch issues with export logic.<|endoftext|> |
6f2a789ff60c189f18ae53912a7d1768130cbaa0b7a6765ba639b3178c19d193 | def build_generator(model_name, logger=None):
'Builds generator module by model name.'
if (model_name not in MODEL_POOL):
raise ValueError(f'Model `{model_name}` is not registered in `MODEL_POOL` in `model_settings.py`!')
gan_type = model_name.split('_')[0]
if (gan_type in ['styleganinv']):
return StyleGANGenerator(model_name, logger=logger)
raise NotImplementedError(f'Unsupported GAN type `{gan_type}`!') | Builds generator module by model name. | models/helper.py | build_generator | Tommy-Ngx/In-domainGAN | 319 | python | def build_generator(model_name, logger=None):
if (model_name not in MODEL_POOL):
raise ValueError(f'Model `{model_name}` is not registered in `MODEL_POOL` in `model_settings.py`!')
gan_type = model_name.split('_')[0]
if (gan_type in ['styleganinv']):
return StyleGANGenerator(model_name, logger=logger)
raise NotImplementedError(f'Unsupported GAN type `{gan_type}`!') | def build_generator(model_name, logger=None):
if (model_name not in MODEL_POOL):
raise ValueError(f'Model `{model_name}` is not registered in `MODEL_POOL` in `model_settings.py`!')
gan_type = model_name.split('_')[0]
if (gan_type in ['styleganinv']):
return StyleGANGenerator(model_name, logger=logger)
raise NotImplementedError(f'Unsupported GAN type `{gan_type}`!')<|docstring|>Builds generator module by model name.<|endoftext|> |
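Typical usage of build_generator, with a hypothetical model key; 'styleganinv_ffhq256' stands in for whatever names MODEL_POOL actually registers in model_settings.py, and the part before the first '_' must be 'styleganinv':

generator = build_generator('styleganinv_ffhq256')  # hypothetical MODEL_POOL key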