body_hash (string, 64 chars) | body (string, 23-109k chars) | docstring (string, 1-57k chars) | path (string, 4-198 chars) | name (string, 1-115 chars) | repository_name (string, 7-111 chars) | repository_stars (float64, 0-191k) | lang (1 class: python) | body_without_docstring (string, 14-108k chars) | unified (string, 45-133k chars)
---|---|---|---|---|---|---|---|---|---|
aee873fb8317d0977e176c074d81a07a52a9e246a2d6f89ee1e031fb641e6629 | def grep_data(cutoff, files):
'Return a dictionary of data in the form\n \n data = {\n config : {\n root : {\n peaks: { \n iter1: [(wavelength_1, intensity_1),\n (wavelength_2, intensity_2)],\n iter2: [(wavelength_1, intensity_1),\n (wavelength_2, intensity_2)],\n }\n }\n }\n '
res = {}
for file in files:
if (is_gaussian(file) and is_fluorescence(file)):
(res, name) = update_dict_with_name(file, res)
(res, root) = find_root(file, res, name)
res = find_spectral_data(file, res, name, root, cutoff)
return res | Return a dictionary of data in the form
data = {
config : {
root : {
peaks: {
iter1: [(wavelength_1, intensity_1),
(wavelength_2, intensity_2)],
iter2: [(wavelength_1, intensity_1),
(wavelength_2, intensity_2)],
}
}
} | autochem/scripts/fluorescence.py | grep_data | tommason14/monash_automation | 7 | python | def grep_data(cutoff, files):
'Return a dictionary of data in the form\n \n data = {\n config : {\n root : {\n peaks: { \n iter1: [(wavelength_1, intensity_1),\n (wavelength_2, intensity_2)],\n iter2: [(wavelength_1, intensity_1),\n (wavelength_2, intensity_2)],\n }\n }\n }\n '
res = {}
for file in files:
if (is_gaussian(file) and is_fluorescence(file)):
(res, name) = update_dict_with_name(file, res)
(res, root) = find_root(file, res, name)
res = find_spectral_data(file, res, name, root, cutoff)
return res | def grep_data(cutoff, files):
'Return a dictionary of data in the form\n \n data = {\n config : {\n root : {\n peaks: { \n iter1: [(wavelength_1, intensity_1),\n (wavelength_2, intensity_2)],\n iter2: [(wavelength_1, intensity_1),\n (wavelength_2, intensity_2)],\n }\n }\n }\n '
res = {}
for file in files:
if (is_gaussian(file) and is_fluorescence(file)):
(res, name) = update_dict_with_name(file, res)
(res, root) = find_root(file, res, name)
res = find_spectral_data(file, res, name, root, cutoff)
return res<|docstring|>Return a dictionary of data in the form
data = {
config : {
root : {
peaks: {
iter1: [(wavelength_1, intensity_1),
(wavelength_2, intensity_2)],
iter2: [(wavelength_1, intensity_1),
(wavelength_2, intensity_2)],
}
}
}<|endoftext|> |
7733c201c835c6f16ea814f833d9b64f9575c02c4240e74752d52b157068dbc7 | def transform(res):
' Transforms dictionary to a list of lists '
flattened = []
for name in sorted(res):
for root in sorted(res[name]):
for iteration in res[name][root]['peaks']:
for peak in res[name][root]['peaks'][iteration]:
(energy, wave, intensity) = peak
flattened.append([name, root, iteration, energy, wave, intensity])
return flattened | Transforms dictionary to a list of lists | autochem/scripts/fluorescence.py | transform | tommason14/monash_automation | 7 | python | def transform(res):
' '
flattened = []
for name in sorted(res):
for root in sorted(res[name]):
for iteration in res[name][root]['peaks']:
for peak in res[name][root]['peaks'][iteration]:
(energy, wave, intensity) = peak
flattened.append([name, root, iteration, energy, wave, intensity])
return flattened | def transform(res):
' '
flattened = []
for name in sorted(res):
for root in sorted(res[name]):
for iteration in res[name][root]['peaks']:
for peak in res[name][root]['peaks'][iteration]:
(energy, wave, intensity) = peak
flattened.append([name, root, iteration, energy, wave, intensity])
return flattened<|docstring|>Transforms dictionary to a list of lists<|endoftext|> |
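The `transform` row above flattens the nested result dictionary produced by `grep_data` into a list of rows. A minimal, self-contained sketch (the input values below are invented; the function body is the one shown in the row) illustrates the output shape:

```python
# Hypothetical nested input of the shape transform() expects:
# {config: {root: {'peaks': {iteration: [(energy, wavelength, intensity), ...]}}}}
res = {'conf_a': {'root1': {'peaks': {'iter1': [(3.10, 400.0, 0.80)],
                                      'iter2': [(2.90, 428.0, 0.55)]}}}}

def transform(res):
    flattened = []
    for name in sorted(res):
        for root in sorted(res[name]):
            for iteration in res[name][root]['peaks']:
                for peak in res[name][root]['peaks'][iteration]:
                    energy, wave, intensity = peak
                    flattened.append([name, root, iteration, energy, wave, intensity])
    return flattened

print(transform(res))
# [['conf_a', 'root1', 'iter1', 3.1, 400.0, 0.8],
#  ['conf_a', 'root1', 'iter2', 2.9, 428.0, 0.55]]
```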
7ea029146a3c048778b0a780366679159c199fd6b1c76ea48bfae60166a0efd0 | def one_level_dict(res):
' Transforms nested dict to dictionary only one level deep '
configs = []
roots = []
iters = []
energies = []
waves = []
ints = []
for config in res:
for root in res[config]:
for iteration in res[config][root]['peaks']:
data = res[config][root]['peaks'][iteration]
for val in data:
(energy, wave, intensity) = val
configs.append(config)
roots.append(root)
iters.append(iteration)
energies.append(energy)
waves.append(wave)
ints.append(intensity)
output = {}
output['Config'] = configs
output['Root'] = roots
output['Iteration'] = iters
output['Transition Energies (eV)'] = energies
output['Wavelength (nm)'] = waves
output['Intensity (au)'] = ints
return output | Transforms nested dict to dictionary only one level deep | autochem/scripts/fluorescence.py | one_level_dict | tommason14/monash_automation | 7 | python | def one_level_dict(res):
' '
configs = []
roots = []
iters = []
energies = []
waves = []
ints = []
for config in res:
for root in res[config]:
for iteration in res[config][root]['peaks']:
data = res[config][root]['peaks'][iteration]
for val in data:
(energy, wave, intensity) = val
configs.append(config)
roots.append(root)
iters.append(iteration)
energies.append(energy)
waves.append(wave)
ints.append(intensity)
output = {}
output['Config'] = configs
output['Root'] = roots
output['Iteration'] = iters
output['Transition Energies (eV)'] = energies
output['Wavelength (nm)'] = waves
output['Intensity (au)'] = ints
return output | def one_level_dict(res):
' '
configs = []
roots = []
iters = []
energies = []
waves = []
ints = []
for config in res:
for root in res[config]:
for iteration in res[config][root]['peaks']:
data = res[config][root]['peaks'][iteration]
for val in data:
(energy, wave, intensity) = val
configs.append(config)
roots.append(root)
iters.append(iteration)
energies.append(energy)
waves.append(wave)
ints.append(intensity)
output = {}
output['Config'] = configs
output['Root'] = roots
output['Iteration'] = iters
output['Transition Energies (eV)'] = energies
output['Wavelength (nm)'] = waves
output['Intensity (au)'] = ints
return output<|docstring|>Transforms nested dict to dictionary only one level deep<|endoftext|> |
df206d48f8730af89d9016e99ae28a64099a66e212ed66ba037be2b6e3c42d5e | @data_ingredient.config
def data_config():
' Dataset configuration '
dataset = 'PASCAL'
base_dir = ''
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
height = 401
width = 401
bs = 4
test_bs = 1
num_workers = min(bs, 4)
pin_memory = True
train_n = 5000
test_n = 1000
seed = 1234
test_seed = 5678
one_cls = 0
cache = True | Dataset configuration | data_kits/datasets.py | data_config | Jarvis73/PEMP | 8 | python | @data_ingredient.config
def data_config():
' '
dataset = 'PASCAL'
    base_dir = ''
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
height = 401
width = 401
bs = 4
test_bs = 1
num_workers = min(bs, 4)
pin_memory = True
train_n = 5000
test_n = 1000
seed = 1234
test_seed = 5678
one_cls = 0
cache = True | @data_ingredient.config
def data_config():
' '
dataset = 'PASCAL'
    base_dir = ''
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
height = 401
width = 401
bs = 4
test_bs = 1
num_workers = min(bs, 4)
pin_memory = True
train_n = 5000
test_n = 1000
seed = 1234
test_seed = 5678
one_cls = 0
cache = True<|docstring|>Dataset configuration<|endoftext|> |
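The row above is a Sacred ingredient config: any function captured by the same ingredient receives these values as default arguments. A rough sketch of the usual wiring (the ingredient and experiment names below are assumptions, not taken from the row):

```python
# Sketch only: how an ingredient config like data_config() is typically consumed.
from sacred import Experiment, Ingredient

data_ingredient = Ingredient('data')

@data_ingredient.config
def data_config():
    dataset = 'PASCAL'
    bs = 4

@data_ingredient.capture
def make_loader(dataset, bs):
    # dataset and bs are injected from the config unless the caller overrides them
    print(dataset, bs)

ex = Experiment('example', ingredients=[data_ingredient])

@ex.automain
def main():
    make_loader()   # prints: PASCAL 4
```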
38579c9e6345957990691d33194c65ed8462e5abaa5060f3f65e9034a1e33aa3 | @data_ingredient.capture
def get_val_labels(split, dataset):
'\n Get validation label list\n\n Parameters\n ----------\n split: int\n Split number\n dataset: str\n Dataset name. [PASCAL, COCO]\n\n Returns\n -------\n\n '
if (dataset == 'PASCAL'):
return list(range(((split * 5) + 1), ((split * 5) + 6)))
elif (dataset == 'COCO'):
return list(range(((split * 20) + 1), ((split * 20) + 21)))
else:
raise ValueError(error_info) | Get validation label list
Parameters
----------
split: int
Split number
dataset: str
Dataset name. [PASCAL, COCO]
Returns
------- | data_kits/datasets.py | get_val_labels | Jarvis73/PEMP | 8 | python | @data_ingredient.capture
def get_val_labels(split, dataset):
'\n Get validation label list\n\n Parameters\n ----------\n split: int\n Split number\n dataset: str\n Dataset name. [PASCAL, COCO]\n\n Returns\n -------\n\n '
if (dataset == 'PASCAL'):
return list(range(((split * 5) + 1), ((split * 5) + 6)))
elif (dataset == 'COCO'):
return list(range(((split * 20) + 1), ((split * 20) + 21)))
else:
raise ValueError(error_info) | @data_ingredient.capture
def get_val_labels(split, dataset):
'\n Get validation label list\n\n Parameters\n ----------\n split: int\n Split number\n dataset: str\n Dataset name. [PASCAL, COCO]\n\n Returns\n -------\n\n '
if (dataset == 'PASCAL'):
return list(range(((split * 5) + 1), ((split * 5) + 6)))
elif (dataset == 'COCO'):
return list(range(((split * 20) + 1), ((split * 20) + 21)))
else:
raise ValueError(error_info)<|docstring|>Get validation label list
Parameters
----------
split: int
Split number
dataset: str
Dataset name. [PASCAL, COCO]
Returns
-------<|endoftext|> |
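The capture above maps a fold index to the held-out class labels. A plain restatement with two illustrative calls; reading the arithmetic as 4 folds of 5 PASCAL classes and 4 folds of 20 COCO classes is an inference, not stated in the row:

```python
def val_labels(split, dataset):
    # PASCAL: labels 1-20 in 4 folds of 5; COCO: labels 1-80 in 4 folds of 20
    if dataset == 'PASCAL':
        return list(range(split * 5 + 1, split * 5 + 6))
    if dataset == 'COCO':
        return list(range(split * 20 + 1, split * 20 + 21))
    raise ValueError('unknown dataset')

print(val_labels(0, 'PASCAL'))   # [1, 2, 3, 4, 5]
print(val_labels(3, 'COCO'))     # [61, 62, ..., 80]
```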
f8db957c0fab5596981ac455437199f33854a4981de53bb78578b3e9a78f01cb | def logistic_growth_fit(t, y, y_0_sigma=0.01, K_sigma=1, sigma_sigma=0.001, log_lambda_mu=(- 1.5), log_lambda_sigma=0.75, chains=4, iter_sampling=1000, iter_warmup=1000, t_ppc=[], return_samples=False):
'\n Fit a logistic growth model to a single growth curve.\n\n Parameters\n ----------\n t : array-like\n Time measurements.\n y : array-like\n Population measurements, e.g., OD or cell count.\n y_0_sigma : float, default 0.01\n Standard Deviation of half normal prior for initial measurement at t=0. Default is chosen for OD measurements.\n K_sigma : float, default 5\n Standard Deviation of half normal prior for carrying capacity. Default is chosen for OD measurements.\n sigma_sigma : float, default 0.01\n Standard Deviation of half normal prior for standard deviation of the normal likelihood.\n log_lambda_mu : float, default -1.5\n Mean of Normal prior on the log of the growth rate.\n sigma_lambda_mu : float, default 0.75\n Standard Deviation of Normal prior on the log of the growth rate.\n chains : int, default 4\n Number chains used in Stan.\n iter_sampling : int, default 1000\n Number if sampling steps in Stan.\n iter_warmup : int, default 1000\n Number of warmup steps in Stan.\n t_ppc : array-like, default []\n Time points for posterior predictive check. If empty, then same time points as measurements are taken.\n return_samples : boolean, default False\n If True, the arviz object is returned.\n \n Returns\n -------\n med_growth_rate : float\n Median of inferred growth rates.\n med_K : float\n Median of inferred carrying capacities.\n samples : arviz object\n Returned if return_samples is given as True.\n \n '
if (t_ppc == []):
t_ppc = t
data = {'t': t, 'y': y, 'N': len(t), 'y_0_sigma': y_0_sigma, 'K_sigma': K_sigma, 'sigma_sigma': sigma_sigma, 'log_lambda_sigma': log_lambda_sigma, 'log_lambda_mu': log_lambda_mu, 'N_ppc': len(t_ppc), 't_ppc': t_ppc}
sm = cmdstanpy.CmdStanModel(stan_file=f'{standir}/logistic_growth_model.stan')
samples = sm.sample(data=data, chains=chains, iter_warmup=iter_warmup, iter_sampling=iter_sampling, show_progress=False)
samples = az.from_cmdstanpy(posterior=samples)
med_K = np.median(samples.posterior['K'])
med_growth_rate = np.median(samples.posterior['lambda'])
if return_samples:
return (med_growth_rate, med_K, samples)
else:
return (med_growth_rate, med_K) | Fit a logistic growth model to a single growth curve.
Parameters
----------
t : array-like
Time measurements.
y : array-like
Population measurements, e.g., OD or cell count.
y_0_sigma : float, default 0.01
Standard Deviation of half normal prior for initial measurement at t=0. Default is chosen for OD measurements.
K_sigma : float, default 5
Standard Deviation of half normal prior for carrying capacity. Default is chosen for OD measurements.
sigma_sigma : float, default 0.01
Standard Deviation of half normal prior for standard deviation of the normal likelihood.
log_lambda_mu : float, default -1.5
Mean of Normal prior on the log of the growth rate.
sigma_lambda_mu : float, default 0.75
Standard Deviation of Normal prior on the log of the growth rate.
chains : int, default 4
        Number of chains used in Stan.
iter_sampling : int, default 1000
        Number of sampling steps in Stan.
iter_warmup : int, default 1000
Number of warmup steps in Stan.
t_ppc : array-like, default []
Time points for posterior predictive check. If empty, then same time points as measurements are taken.
return_samples : boolean, default False
If True, the arviz object is returned.
Returns
-------
med_growth_rate : float
Median of inferred growth rates.
med_K : float
Median of inferred carrying capacities.
samples : arviz object
Returned if return_samples is given as True. | fit_seq/growth_models.py | logistic_growth_fit | RPGroup-PBoC/fit_seq | 1 | python | def logistic_growth_fit(t, y, y_0_sigma=0.01, K_sigma=1, sigma_sigma=0.001, log_lambda_mu=(- 1.5), log_lambda_sigma=0.75, chains=4, iter_sampling=1000, iter_warmup=1000, t_ppc=[], return_samples=False):
'\n Fit a logistic growth model to a single growth curve.\n\n Parameters\n ----------\n t : array-like\n Time measurements.\n y : array-like\n Population measurements, e.g., OD or cell count.\n y_0_sigma : float, default 0.01\n Standard Deviation of half normal prior for initial measurement at t=0. Default is chosen for OD measurements.\n K_sigma : float, default 5\n Standard Deviation of half normal prior for carrying capacity. Default is chosen for OD measurements.\n sigma_sigma : float, default 0.01\n Standard Deviation of half normal prior for standard deviation of the normal likelihood.\n log_lambda_mu : float, default -1.5\n Mean of Normal prior on the log of the growth rate.\n sigma_lambda_mu : float, default 0.75\n Standard Deviation of Normal prior on the log of the growth rate.\n chains : int, default 4\n Number chains used in Stan.\n iter_sampling : int, default 1000\n Number if sampling steps in Stan.\n iter_warmup : int, default 1000\n Number of warmup steps in Stan.\n t_ppc : array-like, default []\n Time points for posterior predictive check. If empty, then same time points as measurements are taken.\n return_samples : boolean, default False\n If True, the arviz object is returned.\n \n Returns\n -------\n med_growth_rate : float\n Median of inferred growth rates.\n med_K : float\n Median of inferred carrying capacities.\n samples : arviz object\n Returned if return_samples is given as True.\n \n '
if (t_ppc == []):
t_ppc = t
data = {'t': t, 'y': y, 'N': len(t), 'y_0_sigma': y_0_sigma, 'K_sigma': K_sigma, 'sigma_sigma': sigma_sigma, 'log_lambda_sigma': log_lambda_sigma, 'log_lambda_mu': log_lambda_mu, 'N_ppc': len(t_ppc), 't_ppc': t_ppc}
sm = cmdstanpy.CmdStanModel(stan_file=f'{standir}/logistic_growth_model.stan')
samples = sm.sample(data=data, chains=chains, iter_warmup=iter_warmup, iter_sampling=iter_sampling, show_progress=False)
samples = az.from_cmdstanpy(posterior=samples)
med_K = np.median(samples.posterior['K'])
med_growth_rate = np.median(samples.posterior['lambda'])
if return_samples:
return (med_growth_rate, med_K, samples)
else:
return (med_growth_rate, med_K) | def logistic_growth_fit(t, y, y_0_sigma=0.01, K_sigma=1, sigma_sigma=0.001, log_lambda_mu=(- 1.5), log_lambda_sigma=0.75, chains=4, iter_sampling=1000, iter_warmup=1000, t_ppc=[], return_samples=False):
'\n Fit a logistic growth model to a single growth curve.\n\n Parameters\n ----------\n t : array-like\n Time measurements.\n y : array-like\n Population measurements, e.g., OD or cell count.\n y_0_sigma : float, default 0.01\n Standard Deviation of half normal prior for initial measurement at t=0. Default is chosen for OD measurements.\n K_sigma : float, default 5\n Standard Deviation of half normal prior for carrying capacity. Default is chosen for OD measurements.\n sigma_sigma : float, default 0.01\n Standard Deviation of half normal prior for standard deviation of the normal likelihood.\n log_lambda_mu : float, default -1.5\n Mean of Normal prior on the log of the growth rate.\n sigma_lambda_mu : float, default 0.75\n Standard Deviation of Normal prior on the log of the growth rate.\n chains : int, default 4\n Number chains used in Stan.\n iter_sampling : int, default 1000\n Number if sampling steps in Stan.\n iter_warmup : int, default 1000\n Number of warmup steps in Stan.\n t_ppc : array-like, default []\n Time points for posterior predictive check. If empty, then same time points as measurements are taken.\n return_samples : boolean, default False\n If True, the arviz object is returned.\n \n Returns\n -------\n med_growth_rate : float\n Median of inferred growth rates.\n med_K : float\n Median of inferred carrying capacities.\n samples : arviz object\n Returned if return_samples is given as True.\n \n '
if (t_ppc == []):
t_ppc = t
data = {'t': t, 'y': y, 'N': len(t), 'y_0_sigma': y_0_sigma, 'K_sigma': K_sigma, 'sigma_sigma': sigma_sigma, 'log_lambda_sigma': log_lambda_sigma, 'log_lambda_mu': log_lambda_mu, 'N_ppc': len(t_ppc), 't_ppc': t_ppc}
sm = cmdstanpy.CmdStanModel(stan_file=f'{standir}/logistic_growth_model.stan')
samples = sm.sample(data=data, chains=chains, iter_warmup=iter_warmup, iter_sampling=iter_sampling, show_progress=False)
samples = az.from_cmdstanpy(posterior=samples)
med_K = np.median(samples.posterior['K'])
med_growth_rate = np.median(samples.posterior['lambda'])
if return_samples:
return (med_growth_rate, med_K, samples)
else:
return (med_growth_rate, med_K)<|docstring|>Fit a logistic growth model to a single growth curve.
Parameters
----------
t : array-like
Time measurements.
y : array-like
Population measurements, e.g., OD or cell count.
y_0_sigma : float, default 0.01
Standard Deviation of half normal prior for initial measurement at t=0. Default is chosen for OD measurements.
K_sigma : float, default 5
Standard Deviation of half normal prior for carrying capacity. Default is chosen for OD measurements.
sigma_sigma : float, default 0.01
Standard Deviation of half normal prior for standard deviation of the normal likelihood.
log_lambda_mu : float, default -1.5
Mean of Normal prior on the log of the growth rate.
sigma_lambda_mu : float, default 0.75
Standard Deviation of Normal prior on the log of the growth rate.
chains : int, default 4
        Number of chains used in Stan.
iter_sampling : int, default 1000
        Number of sampling steps in Stan.
iter_warmup : int, default 1000
Number of warmup steps in Stan.
t_ppc : array-like, default []
Time points for posterior predictive check. If empty, then same time points as measurements are taken.
return_samples : boolean, default False
If True, the arviz object is returned.
Returns
-------
med_growth_rate : float
Median of inferred growth rates.
med_K : float
Median of inferred carrying capacities.
samples : arviz object
Returned if return_samples is given as True.<|endoftext|> |
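A possible call pattern for the fit above, using synthetic OD-like data. It assumes the fit_seq package plus cmdstanpy and a working CmdStan toolchain are installed so the bundled logistic_growth_model.stan can compile; the import path is inferred from the path column.

```python
import numpy as np
from fit_seq.growth_models import logistic_growth_fit  # import path inferred from fit_seq/growth_models.py

t = np.linspace(0, 12, 60)                       # hours
K_true, lam_true, y0 = 1.0, 0.35, 0.01           # carrying capacity, growth rate, inoculum
y = K_true / (1 + (K_true / y0 - 1) * np.exp(-lam_true * t))
y = y + np.random.normal(0, 0.002, size=t.size)  # measurement noise

growth_rate, carrying_capacity = logistic_growth_fit(t, y)
print(growth_rate, carrying_capacity)            # posterior medians
```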
a4f56333db5e5773888fcfbcc1cce16726169b3098f1607c3297ff62556db70f | def ClusterLocs(self, points, crlb, dist, cb=None):
'\n points: [numspots, numdims]\n crlb: [numspots, numdims]\n '
numdims = points.shape[1]
newpts = np.ascontiguousarray((points * 1), dtype=np.float32)
newcrlb = np.ascontiguousarray((crlb * 1), dtype=np.float32)
mapping = np.zeros(len(points), dtype=np.int32)
if np.isscalar(dist):
dist = (np.ones(numdims) * dist).astype(np.float32)
else:
dist = np.ascontiguousarray(dist, dtype=np.float32)
def callback_(mappingPtr, centerPtr):
mapping = ctl.as_array(mappingPtr, (len(points),))
nclust = (np.max(mapping) + 1)
centers = ctl.as_array(centerPtr, (nclust, numdims))
if (cb is not None):
r = cb(mapping, centers)
if (r is None):
return 1
return r
return 1
newcount = self._ClusterLocs(numdims, newpts, mapping, dist, len(points), self._ClusterLocsCallback(callback_))
if (newcount >= 0):
return (newpts[:newcount], newcrlb[:newcount], mapping)
raise ValueError('Something went wrong in ClusterLocs') | points: [numspots, numdims]
crlb: [numspots, numdims] | photonpy/cpp/postprocess.py | ClusterLocs | qnano/photonpy | 5 | python | def ClusterLocs(self, points, crlb, dist, cb=None):
'\n points: [numspots, numdims]\n crlb: [numspots, numdims]\n '
numdims = points.shape[1]
newpts = np.ascontiguousarray((points * 1), dtype=np.float32)
newcrlb = np.ascontiguousarray((crlb * 1), dtype=np.float32)
mapping = np.zeros(len(points), dtype=np.int32)
if np.isscalar(dist):
dist = (np.ones(numdims) * dist).astype(np.float32)
else:
dist = np.ascontiguousarray(dist, dtype=np.float32)
def callback_(mappingPtr, centerPtr):
mapping = ctl.as_array(mappingPtr, (len(points),))
nclust = (np.max(mapping) + 1)
centers = ctl.as_array(centerPtr, (nclust, numdims))
if (cb is not None):
r = cb(mapping, centers)
if (r is None):
return 1
return r
return 1
newcount = self._ClusterLocs(numdims, newpts, mapping, dist, len(points), self._ClusterLocsCallback(callback_))
if (newcount >= 0):
return (newpts[:newcount], newcrlb[:newcount], mapping)
raise ValueError('Something went wrong in ClusterLocs') | def ClusterLocs(self, points, crlb, dist, cb=None):
'\n points: [numspots, numdims]\n crlb: [numspots, numdims]\n '
numdims = points.shape[1]
newpts = np.ascontiguousarray((points * 1), dtype=np.float32)
newcrlb = np.ascontiguousarray((crlb * 1), dtype=np.float32)
mapping = np.zeros(len(points), dtype=np.int32)
if np.isscalar(dist):
dist = (np.ones(numdims) * dist).astype(np.float32)
else:
dist = np.ascontiguousarray(dist, dtype=np.float32)
def callback_(mappingPtr, centerPtr):
mapping = ctl.as_array(mappingPtr, (len(points),))
nclust = (np.max(mapping) + 1)
centers = ctl.as_array(centerPtr, (nclust, numdims))
if (cb is not None):
r = cb(mapping, centers)
if (r is None):
return 1
return r
return 1
newcount = self._ClusterLocs(numdims, newpts, mapping, dist, len(points), self._ClusterLocsCallback(callback_))
if (newcount >= 0):
return (newpts[:newcount], newcrlb[:newcount], mapping)
raise ValueError('Something went wrong in ClusterLocs')<|docstring|>points: [numspots, numdims]
crlb: [numspots, numdims]<|endoftext|> |
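Input-shape sketch for ClusterLocs: points and crlb are float32 arrays of shape [numspots, numdims] and dist is a scalar or a per-dimension merge radius. The photonpy object that owns the method is not shown in this row, so `pp` below is only a stand-in:

```python
import numpy as np

points = np.random.uniform(0, 50, size=(1000, 2)).astype(np.float32)  # x, y in pixels
crlb = np.full_like(points, 0.15)                                     # per-spot localization precision
dist = np.array([0.5, 0.5], dtype=np.float32)                         # merge radius per dimension

# Assuming pp exposes the method shown above:
# new_pts, new_crlb, mapping = pp.ClusterLocs(points, crlb, dist)
# mapping[i] is the cluster index assigned to input spot i; new_pts holds the merged localizations.
```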
993c12f6abb7de6daabbbb825ee294c8e187ed9280cb41a06a17e7d68a7083df | def LinkLocalizations(self, xyI, crlbXYI, framenum, maxdist, maxIntensityDist, frameskip):
'\n linked: int [numspots], all spots that are linked will have the same index in linked array.\n '
xyI = np.ascontiguousarray(xyI, dtype=np.float32)
crlbXYI = np.ascontiguousarray(crlbXYI, dtype=np.float32)
framenum = np.ascontiguousarray(framenum, dtype=np.int32)
linked = np.zeros(len(xyI), dtype=np.int32)
framecounts = np.zeros(len(xyI), dtype=np.int32)
startframes = np.zeros(len(xyI), dtype=np.int32)
resultXYI = np.zeros(xyI.shape, dtype=np.float32)
resultCRLBXYI = np.zeros(crlbXYI.shape, dtype=np.float32)
assert (crlbXYI.shape[1] == 3)
assert (xyI.shape[1] == 3)
assert (len(xyI) == len(crlbXYI))
nlinked = self._LinkLocalizations(len(xyI), framenum, xyI, crlbXYI, maxdist, maxIntensityDist, frameskip, linked, startframes, framecounts, resultXYI, resultCRLBXYI)
startframes = startframes[:nlinked]
framecounts = framecounts[:nlinked]
resultXYI = resultXYI[:nlinked]
resultCRLBXYI = resultCRLBXYI[:nlinked]
return (linked, framecounts, startframes, resultXYI, resultCRLBXYI) | linked: int [numspots], all spots that are linked will have the same index in linked array. | photonpy/cpp/postprocess.py | LinkLocalizations | qnano/photonpy | 5 | python | def LinkLocalizations(self, xyI, crlbXYI, framenum, maxdist, maxIntensityDist, frameskip):
'\n \n '
xyI = np.ascontiguousarray(xyI, dtype=np.float32)
crlbXYI = np.ascontiguousarray(crlbXYI, dtype=np.float32)
framenum = np.ascontiguousarray(framenum, dtype=np.int32)
linked = np.zeros(len(xyI), dtype=np.int32)
framecounts = np.zeros(len(xyI), dtype=np.int32)
startframes = np.zeros(len(xyI), dtype=np.int32)
resultXYI = np.zeros(xyI.shape, dtype=np.float32)
resultCRLBXYI = np.zeros(crlbXYI.shape, dtype=np.float32)
assert (crlbXYI.shape[1] == 3)
assert (xyI.shape[1] == 3)
assert (len(xyI) == len(crlbXYI))
nlinked = self._LinkLocalizations(len(xyI), framenum, xyI, crlbXYI, maxdist, maxIntensityDist, frameskip, linked, startframes, framecounts, resultXYI, resultCRLBXYI)
startframes = startframes[:nlinked]
framecounts = framecounts[:nlinked]
resultXYI = resultXYI[:nlinked]
resultCRLBXYI = resultCRLBXYI[:nlinked]
return (linked, framecounts, startframes, resultXYI, resultCRLBXYI) | def LinkLocalizations(self, xyI, crlbXYI, framenum, maxdist, maxIntensityDist, frameskip):
'\n \n '
xyI = np.ascontiguousarray(xyI, dtype=np.float32)
crlbXYI = np.ascontiguousarray(crlbXYI, dtype=np.float32)
framenum = np.ascontiguousarray(framenum, dtype=np.int32)
linked = np.zeros(len(xyI), dtype=np.int32)
framecounts = np.zeros(len(xyI), dtype=np.int32)
startframes = np.zeros(len(xyI), dtype=np.int32)
resultXYI = np.zeros(xyI.shape, dtype=np.float32)
resultCRLBXYI = np.zeros(crlbXYI.shape, dtype=np.float32)
assert (crlbXYI.shape[1] == 3)
assert (xyI.shape[1] == 3)
assert (len(xyI) == len(crlbXYI))
nlinked = self._LinkLocalizations(len(xyI), framenum, xyI, crlbXYI, maxdist, maxIntensityDist, frameskip, linked, startframes, framecounts, resultXYI, resultCRLBXYI)
startframes = startframes[:nlinked]
framecounts = framecounts[:nlinked]
resultXYI = resultXYI[:nlinked]
resultCRLBXYI = resultCRLBXYI[:nlinked]
return (linked, framecounts, startframes, resultXYI, resultCRLBXYI)<|docstring|>linked: int [numspots], all spots that are linked will have the same index in linked array.<|endoftext|> |
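Input layout for LinkLocalizations: xyI and crlbXYI are [numspots, 3] float32 arrays (x, y, intensity) and framenum holds the frame index of each spot. As above, `pp` stands in for the photonpy object that owns the call:

```python
import numpy as np

xyI = np.array([[10.1, 20.3, 500.0],
                [10.2, 20.2, 480.0],    # plausibly the same emitter one frame later
                [35.0, 12.0, 650.0]], dtype=np.float32)
crlbXYI = np.full_like(xyI, 0.1)
framenum = np.array([0, 1, 1], dtype=np.int32)

# linked, framecounts, startframes, resultXYI, resultCRLBXYI = pp.LinkLocalizations(
#     xyI, crlbXYI, framenum, maxdist=0.5, maxIntensityDist=200.0, frameskip=1)
# Spots that share an index in linked were joined into a single track.
```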
22e4bebcba56f31a04f0fbc4d87f6005ac336fea4a3ce5a20d5fb9c589b527c7 | def __call__(self, adversary, **kwargs):
'\n Generate the adversarial sample.\n\n Args:\n adversary(object): The adversary object.\n **kwargs: Other named arguments.\n '
self._preprocess(adversary)
return self._apply(adversary, **kwargs) | Generate the adversarial sample.
Args:
adversary(object): The adversary object.
**kwargs: Other named arguments. | adversarialbox/attacks/base.py | __call__ | StijnMatsHendriks/adversarial_attack_demo | 819 | python | def __call__(self, adversary, **kwargs):
'\n Generate the adversarial sample.\n\n Args:\n adversary(object): The adversary object.\n **kwargs: Other named arguments.\n '
self._preprocess(adversary)
return self._apply(adversary, **kwargs) | def __call__(self, adversary, **kwargs):
'\n Generate the adversarial sample.\n\n Args:\n adversary(object): The adversary object.\n **kwargs: Other named arguments.\n '
self._preprocess(adversary)
return self._apply(adversary, **kwargs)<|docstring|>Generate the adversarial sample.
Args:
adversary(object): The adversary object.
**kwargs: Other named arguments.<|endoftext|> |
62081379271a4876cb4255ba1341c68c497d7fc997322563c3d443ac0e5575f3 | @abstractmethod
def _apply(self, adversary, **kwargs):
'\n Search an adversarial example.\n\n Args:\n adversary(object): The adversary object.\n **kwargs: Other named arguments.\n '
raise NotImplementedError | Search an adversarial example.
Args:
adversary(object): The adversary object.
**kwargs: Other named arguments. | adversarialbox/attacks/base.py | _apply | StijnMatsHendriks/adversarial_attack_demo | 819 | python | @abstractmethod
def _apply(self, adversary, **kwargs):
'\n Search an adversarial example.\n\n Args:\n adversary(object): The adversary object.\n **kwargs: Other named arguments.\n '
raise NotImplementedError | @abstractmethod
def _apply(self, adversary, **kwargs):
'\n Search an adversarial example.\n\n Args:\n adversary(object): The adversary object.\n **kwargs: Other named arguments.\n '
raise NotImplementedError<|docstring|>Search an adversarial example.
Args:
adversary(object): The adversary object.
**kwargs: Other named arguments.<|endoftext|> |
453b7ddfdc7eef9947d34b97bbc5f0f051243c93527159ef860b0ce58968c5d9 | def _preprocess(self, adversary):
'\n Preprocess the adversary object.\n\n :param adversary: adversary\n :return: None\n '
if (adversary.original_label is None):
adversary.original_label = np.argmax(self.model.predict(adversary.original))
if (adversary.is_targeted_attack and (adversary.target_label is None)):
if (adversary.target is None):
raise ValueError('When adversary.is_targeted_attack is true, adversary.target_label or adversary.target must be set.')
else:
adversary.target_label = np.argmax(self.model.predict(adversary.target))
logging.info('adversary:\n original_label: {}\n target_label: {}\n is_targeted_attack: {}'.format(adversary.original_label, adversary.target_label, adversary.is_targeted_attack)) | Preprocess the adversary object.
:param adversary: adversary
:return: None | adversarialbox/attacks/base.py | _preprocess | StijnMatsHendriks/adversarial_attack_demo | 819 | python | def _preprocess(self, adversary):
'\n Preprocess the adversary object.\n\n :param adversary: adversary\n :return: None\n '
if (adversary.original_label is None):
adversary.original_label = np.argmax(self.model.predict(adversary.original))
if (adversary.is_targeted_attack and (adversary.target_label is None)):
if (adversary.target is None):
raise ValueError('When adversary.is_targeted_attack is true, adversary.target_label or adversary.target must be set.')
else:
adversary.target_label = np.argmax(self.model.predict(adversary.target))
logging.info('adversary:\n original_label: {}\n target_label: {}\n is_targeted_attack: {}'.format(adversary.original_label, adversary.target_label, adversary.is_targeted_attack)) | def _preprocess(self, adversary):
'\n Preprocess the adversary object.\n\n :param adversary: adversary\n :return: None\n '
if (adversary.original_label is None):
adversary.original_label = np.argmax(self.model.predict(adversary.original))
if (adversary.is_targeted_attack and (adversary.target_label is None)):
if (adversary.target is None):
raise ValueError('When adversary.is_targeted_attack is true, adversary.target_label or adversary.target must be set.')
else:
adversary.target_label = np.argmax(self.model.predict(adversary.target))
logging.info('adversary:\n original_label: {}\n target_label: {}\n is_targeted_attack: {}'.format(adversary.original_label, adversary.target_label, adversary.is_targeted_attack))<|docstring|>Preprocess the adversary object.
:param adversary: adversary
:return: None<|endoftext|> |
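The three rows above sketch an attack base class: __call__ runs _preprocess and then delegates to the abstract _apply. A hypothetical subclass might look like the following; the base-class name, its constructor, and the self.model interface are assumptions based only on what these rows show:

```python
import numpy as np
# from adversarialbox.attacks.base import Attack   # assumed import path and class name

class RandomNoiseAttack(Attack):
    def _apply(self, adversary, epsilon=0.1):
        # Naive illustration: perturb the original sample with uniform noise.
        noise = np.random.uniform(-epsilon, epsilon, adversary.original.shape)
        perturbed = adversary.original + noise
        print('label after perturbation:', np.argmax(self.model.predict(perturbed)))
        return adversary

# attack = RandomNoiseAttack(model)        # constructor signature not shown in the rows
# attack(adversary, epsilon=0.05)
```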
a6637d67677e9285dbbbd72cad41f1e842b8137558a4978200160da55c8f9593 | def quick_clip(file_path, start_time, end_time, output_path=''):
'\n Clips file between start_time and end_time. Copies all stream in file between time codes.\n :param file_path: string, path of file to clip\n :param start_time: string, timecode of when to start clip\n :param end_time: string, timecode of when to stop clip\n :param output_path: string, optional, path of output file\n :return:\n '
media = MediaObject(file_path)
cvt = MediaConverter(media, output_path)
for videoIndex in media.videoStreams:
cvt.createVideoStream('copy', 'copy', 0, videoStream=videoIndex)
for audioIndex in media.audioStreams:
cvt.createAudioStream(audioEncoder='copy', audioStream=audioIndex)
cvt.createSubtitleStreams(media.subtitleStreams)
cvt.clip(start_time, end_time) | Clips file between start_time and end_time. Copies all stream in file between time codes.
:param file_path: string, path of file to clip
:param start_time: string, timecode of when to start clip
:param end_time: string, timecode of when to stop clip
:param output_path: string, optional, path of output file
    :return: | tympeg/tools.py | quick_clip | taishengy/py-mpeg | 1 | python | def quick_clip(file_path, start_time, end_time, output_path=''):
'\n Clips file between start_time and end_time. Copies all stream in file between time codes.\n :param file_path: string, path of file to clip\n :param start_time: string, timecode of when to start clip\n :param end_time: string, timecode of when to stop clip\n :param output_path: string, optional, path of output file\n :return:\n '
media = MediaObject(file_path)
cvt = MediaConverter(media, output_path)
for videoIndex in media.videoStreams:
cvt.createVideoStream('copy', 'copy', 0, videoStream=videoIndex)
for audioIndex in media.audioStreams:
cvt.createAudioStream(audioEncoder='copy', audioStream=audioIndex)
cvt.createSubtitleStreams(media.subtitleStreams)
    cvt.clip(start_time, end_time) | def quick_clip(file_path, start_time, end_time, output_path=''):
'\n Clips file between start_time and end_time. Copies all stream in file between time codes.\n :param file_path: string, path of file to clip\n :param start_time: string, timecode of when to start clip\n :param end_time: string, timecode of when to stop clip\n :param output_path: string, optional, path of output file\n :return:\n '
media = MediaObject(file_path)
cvt = MediaConverter(media, output_path)
for videoIndex in media.videoStreams:
cvt.createVideoStream('copy', 'copy', 0, videoStream=videoIndex)
for audioIndex in media.audioStreams:
cvt.createAudioStream(audioEncoder='copy', audioStream=audioIndex)
cvt.createSubtitleStreams(media.subtitleStreams)
cvt.clip(start_time, end_time)<|docstring|>Clips file between start_time and end_time. Copies all stream in file between time codes.
:param file_path: string, path of file to clip
:param start_time: string, timecode of when to start clip
:param end_time: string, timecode of when to stop clip
:param output_path: string, optional, path of output file
:return:<|endoftext|> |
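A possible call of quick_clip from the row above. It assumes tympeg is installed and ffmpeg is available on PATH, since MediaConverter drives ffmpeg; the import path is inferred from the path column.

```python
from tympeg.tools import quick_clip  # assumed import path

# Copy every stream between the two timecodes into a new file.
quick_clip('lecture.mkv', '00:01:30', '00:02:45', output_path='lecture_clip.mkv')
```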
a3ce4c04b1774463f45176c40c4738c39eda2332bc50ff2a9269de4159016410 | def convert_files_in_dir_to_vcodec(input_folder, video_codec, video_encoder, rate_control_method, video_rate, speed, audio_encoder, audio_bitrate, channels):
" Searches directory for videos NOT encoded with video_codec, moves them to a separate file and encodes them\n to the selected codec, saving the encodes in the original directory. Retains all streams.\n\n :param input_folder: string, the folder to be searched and converted\n :param video_codec: string, video Codec to search for 'hevc' for h265 video\n :param video_encoder: string, video encoder ffmpeg should use ('x265', 'x264', 'vp8', 'vp9')\n :param rate_control_method: string, rate control method ('crf', 'cbr', 'vbr)\n :param video_rate: int, rate of video. Either quality factor or bitrate\n :param speed: string, speed of x26X family encoders\n :param audio_encoder: string, audio encoder ('opus', 'aac', 'fdk', etc...)\n :param audio_bitrate: int, bitrate of audio\n :param channels: string, channel layout of audio ('mono', 'stereo')\n :return:\n "
original_files_dir = path.join(input_folder, 'original_files/')
sorting_media_array = makeMediaObjectsInDirectory(input_folder)
if (len(sorting_media_array) < 1):
return
nothing_to_convert = True
for media in sorting_media_array:
if (media.videoCodec != str(video_codec)):
nothing_to_convert = False
if (not path.isdir(original_files_dir)):
mkdir(original_files_dir)
rename(path.join(input_folder, str(media.fileName)), path.join(original_files_dir, str(media.fileName)))
if nothing_to_convert:
return
converting_media_array = makeMediaObjectsInDirectory(original_files_dir)
total_files = str(len(converting_media_array))
print((('\n\nConverting ' + total_files) + ' files...\n\n'))
count = 0
input_file_size = 0
output_file_size = 0
time_start = time.time()
total_input_size = (get_dir_size(original_files_dir) / 1000000)
for media in converting_media_array:
(name, ext) = path.splitext(media.fileName)
output_file_path = path.join(input_folder, (name + '.mkv'))
cvt = MediaConverter(media, output_file_path)
cvt.createVideoStream(video_encoder, rate_control_method, video_rate, speed)
for audioStream in range(0, len(media.audioStreams)):
cvt.createAudioStream(media.audioStreams[audioStream], audio_encoder, audio_bitrate, audioChannels=channels)
cvt.createSubtitleStreams(media.subtitleStreams)
count += 1
print((((('Converting file ' + str(count)) + ' of ') + total_files) + ':'))
print('\t{0} ({1:,.2f} MB)\n'.format(media.filePath, (path.getsize((original_files_dir + media.fileName)) / 1000000)))
start = time.time()
cvt.convert()
end = time.time()
output_file_size += (path.getsize(output_file_path) / 1000000)
input_file_size += (path.getsize((original_files_dir + media.fileName)) / 1000000)
minutes = ((end - start) / 60)
input_rate = ((path.getsize((original_files_dir + media.fileName)) / 1000000) / minutes)
avg_rate = (input_file_size / ((end - time_start) / 60))
(eta_hours, eta_mins) = divmod(round(((total_input_size - input_file_size) / avg_rate), 0), 60)
print('\nCompleted file {0} of {1} in {2:,.2f} min'.format(count, total_files, minutes))
print('Completed file at input rate of: {0:,.2f} MB/min'.format(input_rate))
print('Average rate of: {0:,.2f} MB/min'.format(avg_rate))
print('ETA: {0}:{1}'.format(int(eta_hours), int(eta_mins)))
print('Total input converted: {0:,.2f} MB of {1:,.2f} MB'.format(input_file_size, total_input_size))
print('Total output size: {0:,.2f} MB'.format(output_file_size))
print('Output/Input ratio: {0:,.3f}'.format((output_file_size / input_file_size)))
print('\n\n')
time_end = time.time()
total_seconds = (time_end - time_start)
(m, s) = divmod(total_seconds, 60)
if (m == 0):
minutes = 1
else:
minutes = m
(h, m) = divmod(m, 60)
print(('Total operation completed in: %d:%02d:%02d' % (h, m, s)))
print((((('Total size of files converted: ' + str(input_file_size)) + ' MB => ') + str(output_file_size)) + ' MB'))
print((('Average rate of input converted: ' + str((input_file_size / minutes))) + ' MB/min')) | Searches directory for videos NOT encoded with video_codec, moves them to a separate file and encodes them
to the selected codec, saving the encodes in the original directory. Retains all streams.
:param input_folder: string, the folder to be searched and converted
:param video_codec: string, video Codec to search for 'hevc' for h265 video
:param video_encoder: string, video encoder ffmpeg should use ('x265', 'x264', 'vp8', 'vp9')
:param rate_control_method: string, rate control method ('crf', 'cbr', 'vbr)
:param video_rate: int, rate of video. Either quality factor or bitrate
:param speed: string, speed of x26X family encoders
:param audio_encoder: string, audio encoder ('opus', 'aac', 'fdk', etc...)
:param audio_bitrate: int, bitrate of audio
:param channels: string, channel layout of audio ('mono', 'stereo')
:return: | tympeg/tools.py | convert_files_in_dir_to_vcodec | taishengy/py-mpeg | 1 | python | def convert_files_in_dir_to_vcodec(input_folder, video_codec, video_encoder, rate_control_method, video_rate, speed, audio_encoder, audio_bitrate, channels):
" Searches directory for videos NOT encoded with video_codec, moves them to a separate file and encodes them\n to the selected codec, saving the encodes in the original directory. Retains all streams.\n\n :param input_folder: string, the folder to be searched and converted\n :param video_codec: string, video Codec to search for 'hevc' for h265 video\n :param video_encoder: string, video encoder ffmpeg should use ('x265', 'x264', 'vp8', 'vp9')\n :param rate_control_method: string, rate control method ('crf', 'cbr', 'vbr)\n :param video_rate: int, rate of video. Either quality factor or bitrate\n :param speed: string, speed of x26X family encoders\n :param audio_encoder: string, audio encoder ('opus', 'aac', 'fdk', etc...)\n :param audio_bitrate: int, bitrate of audio\n :param channels: string, channel layout of audio ('mono', 'stereo')\n :return:\n "
original_files_dir = path.join(input_folder, 'original_files/')
sorting_media_array = makeMediaObjectsInDirectory(input_folder)
if (len(sorting_media_array) < 1):
return
nothing_to_convert = True
for media in sorting_media_array:
if (media.videoCodec != str(video_codec)):
nothing_to_convert = False
if (not path.isdir(original_files_dir)):
mkdir(original_files_dir)
rename(path.join(input_folder, str(media.fileName)), path.join(original_files_dir, str(media.fileName)))
if nothing_to_convert:
return
converting_media_array = makeMediaObjectsInDirectory(original_files_dir)
total_files = str(len(converting_media_array))
print((('\n\nConverting ' + total_files) + ' files...\n\n'))
count = 0
input_file_size = 0
output_file_size = 0
time_start = time.time()
total_input_size = (get_dir_size(original_files_dir) / 1000000)
for media in converting_media_array:
(name, ext) = path.splitext(media.fileName)
output_file_path = path.join(input_folder, (name + '.mkv'))
cvt = MediaConverter(media, output_file_path)
cvt.createVideoStream(video_encoder, rate_control_method, video_rate, speed)
for audioStream in range(0, len(media.audioStreams)):
cvt.createAudioStream(media.audioStreams[audioStream], audio_encoder, audio_bitrate, audioChannels=channels)
cvt.createSubtitleStreams(media.subtitleStreams)
count += 1
print((((('Converting file ' + str(count)) + ' of ') + total_files) + ':'))
print('\t{0} ({1:,.2f} MB)\n'.format(media.filePath, (path.getsize((original_files_dir + media.fileName)) / 1000000)))
start = time.time()
cvt.convert()
end = time.time()
output_file_size += (path.getsize(output_file_path) / 1000000)
input_file_size += (path.getsize((original_files_dir + media.fileName)) / 1000000)
minutes = ((end - start) / 60)
input_rate = ((path.getsize((original_files_dir + media.fileName)) / 1000000) / minutes)
avg_rate = (input_file_size / ((end - time_start) / 60))
(eta_hours, eta_mins) = divmod(round(((total_input_size - input_file_size) / avg_rate), 0), 60)
print('\nCompleted file {0} of {1} in {2:,.2f} min'.format(count, total_files, minutes))
print('Completed file at input rate of: {0:,.2f} MB/min'.format(input_rate))
print('Average rate of: {0:,.2f} MB/min'.format(avg_rate))
print('ETA: {0}:{1}'.format(int(eta_hours), int(eta_mins)))
print('Total input converted: {0:,.2f} MB of {1:,.2f} MB'.format(input_file_size, total_input_size))
print('Total output size: {0:,.2f} MB'.format(output_file_size))
print('Output/Input ratio: {0:,.3f}'.format((output_file_size / input_file_size)))
print('\n\n')
time_end = time.time()
total_seconds = (time_end - time_start)
(m, s) = divmod(total_seconds, 60)
if (m == 0):
minutes = 1
else:
minutes = m
(h, m) = divmod(m, 60)
print(('Total operation completed in: %d:%02d:%02d' % (h, m, s)))
print((((('Total size of files converted: ' + str(input_file_size)) + ' MB => ') + str(output_file_size)) + ' MB'))
print((('Average rate of input converted: ' + str((input_file_size / minutes))) + ' MB/min')) | def convert_files_in_dir_to_vcodec(input_folder, video_codec, video_encoder, rate_control_method, video_rate, speed, audio_encoder, audio_bitrate, channels):
" Searches directory for videos NOT encoded with video_codec, moves them to a separate file and encodes them\n to the selected codec, saving the encodes in the original directory. Retains all streams.\n\n :param input_folder: string, the folder to be searched and converted\n :param video_codec: string, video Codec to search for 'hevc' for h265 video\n :param video_encoder: string, video encoder ffmpeg should use ('x265', 'x264', 'vp8', 'vp9')\n :param rate_control_method: string, rate control method ('crf', 'cbr', 'vbr)\n :param video_rate: int, rate of video. Either quality factor or bitrate\n :param speed: string, speed of x26X family encoders\n :param audio_encoder: string, audio encoder ('opus', 'aac', 'fdk', etc...)\n :param audio_bitrate: int, bitrate of audio\n :param channels: string, channel layout of audio ('mono', 'stereo')\n :return:\n "
original_files_dir = path.join(input_folder, 'original_files/')
sorting_media_array = makeMediaObjectsInDirectory(input_folder)
if (len(sorting_media_array) < 1):
return
nothing_to_convert = True
for media in sorting_media_array:
if (media.videoCodec != str(video_codec)):
nothing_to_convert = False
if (not path.isdir(original_files_dir)):
mkdir(original_files_dir)
rename(path.join(input_folder, str(media.fileName)), path.join(original_files_dir, str(media.fileName)))
if nothing_to_convert:
return
converting_media_array = makeMediaObjectsInDirectory(original_files_dir)
total_files = str(len(converting_media_array))
print((('\n\nConverting ' + total_files) + ' files...\n\n'))
count = 0
input_file_size = 0
output_file_size = 0
time_start = time.time()
total_input_size = (get_dir_size(original_files_dir) / 1000000)
for media in converting_media_array:
(name, ext) = path.splitext(media.fileName)
output_file_path = path.join(input_folder, (name + '.mkv'))
cvt = MediaConverter(media, output_file_path)
cvt.createVideoStream(video_encoder, rate_control_method, video_rate, speed)
for audioStream in range(0, len(media.audioStreams)):
cvt.createAudioStream(media.audioStreams[audioStream], audio_encoder, audio_bitrate, audioChannels=channels)
cvt.createSubtitleStreams(media.subtitleStreams)
count += 1
print((((('Converting file ' + str(count)) + ' of ') + total_files) + ':'))
print('\t{0} ({1:,.2f} MB)\n'.format(media.filePath, (path.getsize((original_files_dir + media.fileName)) / 1000000)))
start = time.time()
cvt.convert()
end = time.time()
output_file_size += (path.getsize(output_file_path) / 1000000)
input_file_size += (path.getsize((original_files_dir + media.fileName)) / 1000000)
minutes = ((end - start) / 60)
input_rate = ((path.getsize((original_files_dir + media.fileName)) / 1000000) / minutes)
avg_rate = (input_file_size / ((end - time_start) / 60))
(eta_hours, eta_mins) = divmod(round(((total_input_size - input_file_size) / avg_rate), 0), 60)
print('\nCompleted file {0} of {1} in {2:,.2f} min'.format(count, total_files, minutes))
print('Completed file at input rate of: {0:,.2f} MB/min'.format(input_rate))
print('Average rate of: {0:,.2f} MB/min'.format(avg_rate))
print('ETA: {0}:{1}'.format(int(eta_hours), int(eta_mins)))
print('Total input converted: {0:,.2f} MB of {1:,.2f} MB'.format(input_file_size, total_input_size))
print('Total output size: {0:,.2f} MB'.format(output_file_size))
print('Output/Input ratio: {0:,.3f}'.format((output_file_size / input_file_size)))
print('\n\n')
time_end = time.time()
total_seconds = (time_end - time_start)
(m, s) = divmod(total_seconds, 60)
if (m == 0):
minutes = 1
else:
minutes = m
(h, m) = divmod(m, 60)
print(('Total operation completed in: %d:%02d:%02d' % (h, m, s)))
print((((('Total size of files converted: ' + str(input_file_size)) + ' MB => ') + str(output_file_size)) + ' MB'))
print((('Average rate of input converted: ' + str((input_file_size / minutes))) + ' MB/min'))<|docstring|>Searches directory for videos NOT encoded with video_codec, moves them to a separate file and encodes them
to the selected codec, saving the encodes in the original directory. Retains all streams.
:param input_folder: string, the folder to be searched and converted
:param video_codec: string, video Codec to search for 'hevc' for h265 video
:param video_encoder: string, video encoder ffmpeg should use ('x265', 'x264', 'vp8', 'vp9')
:param rate_control_method: string, rate control method ('crf', 'cbr', 'vbr)
:param video_rate: int, rate of video. Either quality factor or bitrate
:param speed: string, speed of x26X family encoders
:param audio_encoder: string, audio encoder ('opus', 'aac', 'fdk', etc...)
:param audio_bitrate: int, bitrate of audio
:param channels: string, channel layout of audio ('mono', 'stereo')
:return:<|endoftext|> |
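A sketch of a batch re-encode with the function above. The parameter values are illustrative and the import path is inferred from the path column; files already in HEVC are left alone, everything else is moved into original_files/ and re-encoded back into the input folder.

```python
from tympeg.tools import convert_files_in_dir_to_vcodec  # assumed import path

convert_files_in_dir_to_vcodec(
    input_folder='/media/videos/',
    video_codec='hevc',             # codec to keep as-is
    video_encoder='x265',           # encoder used for everything else
    rate_control_method='crf',
    video_rate=23,
    speed='medium',
    audio_encoder='opus',
    audio_bitrate=96,
    channels='stereo',
)
```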
1883c096fbe657fe0b25329d77e1db8854f6b229caee893592610429d7aa2058 | def save_bits_per_pixel_dist(parent_dir, output_file_path, exclude_codec):
" Calculates the bits/pixel of files in parent_dir and outputs data as a csv. Useful for visualizing\n bits/pixel in excel/calc to define intervals in convert_sub_dirs.py or more introspection.\n\n :param parent_dir: string, Path of directory that contains media files to analyze\n :param output_file_path: string, path of where you want the .csv file\n :param exclude_codec: string, codec of files you want to exclude 'hevc' for 265 and 'avc1' for 264\n :return:\n "
directories = sorted(list_dirs(parent_dir))
with open(output_file_path, 'w', encoding='utf8') as file:
file.write('{},{} bits,{} bits,{} bytes,{}\n'.format('bits/pixel', 'video bitrate', 'audio bitrate', 'file size', 'file path'))
for dirs in directories:
media_array = sorted(makeMediaObjectsInDirectory(dirs), key=(lambda media: media.fileName))
for media in media_array:
if (media.videoCodec != exclude_codec):
bits_pixel = calc_bits_per_pixel(media)
video_bitrate = media.video_bitrate
file.write('{},{},{},{},{}\n'.format(bits_pixel, video_bitrate, media.audio_bitrate, media.file_size, media.filePath))
file.close() | Calculates the bits/pixel of files in parent_dir and outputs data as a csv. Useful for visualizing
bits/pixel in excel/calc to define intervals in convert_sub_dirs.py or more introspection.
:param parent_dir: string, Path of directory that contains media files to analyze
:param output_file_path: string, path of where you want the .csv file
:param exclude_codec: string, codec of files you want to exclude 'hevc' for 265 and 'avc1' for 264
:return: | tympeg/tools.py | save_bits_per_pixel_dist | taishengy/py-mpeg | 1 | python | def save_bits_per_pixel_dist(parent_dir, output_file_path, exclude_codec):
" Calculates the bits/pixel of files in parent_dir and outputs data as a csv. Useful for visualizing\n bits/pixel in excel/calc to define intervals in convert_sub_dirs.py or more introspection.\n\n :param parent_dir: string, Path of directory that contains media files to analyze\n :param output_file_path: string, path of where you want the .csv file\n :param exclude_codec: string, codec of files you want to exclude 'hevc' for 265 and 'avc1' for 264\n :return:\n "
directories = sorted(list_dirs(parent_dir))
with open(output_file_path, 'w', encoding='utf8') as file:
file.write('{},{} bits,{} bits,{} bytes,{}\n'.format('bits/pixel', 'video bitrate', 'audio bitrate', 'file size', 'file path'))
for dirs in directories:
media_array = sorted(makeMediaObjectsInDirectory(dirs), key=(lambda media: media.fileName))
for media in media_array:
if (media.videoCodec != exclude_codec):
bits_pixel = calc_bits_per_pixel(media)
video_bitrate = media.video_bitrate
file.write('{},{},{},{},{}\n'.format(bits_pixel, video_bitrate, media.audio_bitrate, media.file_size, media.filePath))
file.close() | def save_bits_per_pixel_dist(parent_dir, output_file_path, exclude_codec):
" Calculates the bits/pixel of files in parent_dir and outputs data as a csv. Useful for visualizing\n bits/pixel in excel/calc to define intervals in convert_sub_dirs.py or more introspection.\n\n :param parent_dir: string, Path of directory that contains media files to analyze\n :param output_file_path: string, path of where you want the .csv file\n :param exclude_codec: string, codec of files you want to exclude 'hevc' for 265 and 'avc1' for 264\n :return:\n "
directories = sorted(list_dirs(parent_dir))
with open(output_file_path, 'w', encoding='utf8') as file:
file.write('{},{} bits,{} bits,{} bytes,{}\n'.format('bits/pixel', 'video bitrate', 'audio bitrate', 'file size', 'file path'))
for dirs in directories:
media_array = sorted(makeMediaObjectsInDirectory(dirs), key=(lambda media: media.fileName))
for media in media_array:
if (media.videoCodec != exclude_codec):
bits_pixel = calc_bits_per_pixel(media)
video_bitrate = media.video_bitrate
file.write('{},{},{},{},{}\n'.format(bits_pixel, video_bitrate, media.audio_bitrate, media.file_size, media.filePath))
file.close()<|docstring|>Calculates the bits/pixel of files in parent_dir and outputs data as a csv. Useful for visualizing
bits/pixel in excel/calc to define intervals in convert_sub_dirs.py or more introspection.
:param parent_dir: string, Path of directory that contains media files to analyze
:param output_file_path: string, path of where you want the .csv file
:param exclude_codec: string, codec of files you want to exclude 'hevc' for 265 and 'avc1' for 264
:return:<|endoftext|> |
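Calling the helper above needs only a parent directory of media sub-folders and a csv destination; excluding 'hevc' matches the use case its docstring describes.

```python
from tympeg.tools import save_bits_per_pixel_dist  # assumed import path

# Writes one csv row per non-HEVC file found in the sub-directories.
save_bits_per_pixel_dist('/media/videos/', '/tmp/bits_per_pixel.csv', exclude_codec='hevc')
```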
4fd1480cac7101bbf0c5c20785e4bf4d415b2234dd5adc1573298b4dff098b09 | @pytest.mark.asyncio
async def test_no_scopes():
'The credential should raise ValueError when get_token is called with no scopes'
credential = AuthorizationCodeCredential('tenant-id', 'client-id', 'auth-code', 'http://localhost')
with pytest.raises(ValueError):
(await credential.get_token()) | The credential should raise ValueError when get_token is called with no scopes | sdk/identity/azure-identity/tests/test_auth_code_async.py | test_no_scopes | jlautman/azure-sdk-for-python | 1 | python | @pytest.mark.asyncio
async def test_no_scopes():
credential = AuthorizationCodeCredential('tenant-id', 'client-id', 'auth-code', 'http://localhost')
with pytest.raises(ValueError):
(await credential.get_token()) | @pytest.mark.asyncio
async def test_no_scopes():
credential = AuthorizationCodeCredential('tenant-id', 'client-id', 'auth-code', 'http://localhost')
with pytest.raises(ValueError):
(await credential.get_token())<|docstring|>The credential should raise ValueError when get_token is called with no scopes<|endoftext|> |
998e7a43fe16ab059f231185c72de07e5477a4c3e88aa12d4bd779f917f1d737 | def _shorten_mods(modifier_list):
'replace modifier names with their first letters and in a fixed sequence\n '
result = ''
for (values, outval) in ((('Ctrl', 'CTRL', 'CLTR'), 'C'), (('Alt', 'ALT'), 'A'), (('Shift', 'SHIFT'), 'S'), (('WinKey',), 'W')):
for test in values:
if (test in modifier_list):
result += outval
break
return result | replace modifier names with their first letters and in a fixed sequence | plugin_examples/dckeys.py | _shorten_mods | albertvisser/hotkeys | 1 | python | def _shorten_mods(modifier_list):
'\n '
    result = ''
for (values, outval) in ((('Ctrl', 'CTRL', 'CLTR'), 'C'), (('Alt', 'ALT'), 'A'), (('Shift', 'SHIFT'), 'S'), (('WinKey',), 'W')):
for test in values:
if (test in modifier_list):
result += outval
break
return result | def _shorten_mods(modifier_list):
'\n '
    result = ''
for (values, outval) in ((('Ctrl', 'CTRL', 'CLTR'), 'C'), (('Alt', 'ALT'), 'A'), (('Shift', 'SHIFT'), 'S'), (('WinKey',), 'W')):
for test in values:
if (test in modifier_list):
result += outval
break
return result<|docstring|>replace modifier names with their first letters and in a fixed sequence<|endoftext|> |
7d71258d4adb0f67742bbe107824372fb96718942f396d61b04cc3bef6ab3b40 | def _translate_keynames(inp):
'translate cursor keys as shown in html to notation in xml'
convert = {'↑': 'Up', '↓': 'Down', '←': 'Left', '→': 'Right', 'Delete': 'Del', 'С': 'C', 'Pgdown': 'PgDn', 'Pgup': 'PgUp', 'Num *': 'Num*', 'Num +': 'Num+', 'Num -': 'Num-'}
try:
return convert[inp.strip()]
except KeyError:
return inp.strip() | translate cursor keys as shown in html to notation in xml | plugin_examples/dckeys.py | _translate_keynames | albertvisser/hotkeys | 1 | python | def _translate_keynames(inp):
convert = {'↑': 'Up', '↓': 'Down', '←': 'Left', '→': 'Right', 'Delete': 'Del', 'С': 'C', 'Pgdown': 'PgDn', 'Pgup': 'PgUp', 'Num *': 'Num*', 'Num +': 'Num+', 'Num -': 'Num-'}
try:
return convert[inp.strip()]
except KeyError:
return inp.strip() | def _translate_keynames(inp):
convert = {'↑': 'Up', '↓': 'Down', '←': 'Left', '→': 'Right', 'Delete': 'Del', 'С': 'C', 'Pgdown': 'PgDn', 'Pgup': 'PgUp', 'Num *': 'Num*', 'Num +': 'Num+', 'Num -': 'Num-'}
try:
return convert[inp.strip()]
except KeyError:
return inp.strip()<|docstring|>translate cursor keys as shown in html to notation in xml<|endoftext|> |
c800ee541c49c6271f8e433065654df9ebd83612e294b67b14bda9cbd07a26b2 | def parse_keytext(text):
    'derive keynames and modifiers from text\n\n    returns a list of keyname - modifier-list pairs\n    does not yet provide for the , key with or without modifiers\n    '
retval = []
if text.startswith('Esc, Q ('):
return [('Esc', ''), ('Q', '')]
shortcuts = text.split(', ')
for sc in shortcuts:
test = sc.split('+')
keyname = test[(- 1)].title()
modifiers = test[:(- 1)]
if (keyname == ''):
keyname = '+'
if (modifiers[(- 1)] == ''):
modifiers.pop()
elif (modifiers[(- 1)] == 'Num '):
keyname = (modifiers.pop() + keyname)
retval.append((keyname, _shorten_mods(modifiers)))
    return retval | derive keynames and modifiers from text
returns a list of keyname - modifier-list pairs
does not yet provide for the , key with or without modifiers | plugin_examples/dckeys.py | parse_keytext | albertvisser/hotkeys | 1 | python | def parse_keytext(text):
    'derive keynames and modifiers from text\n\n    returns a list of keyname - modifier-list pairs\n    does not yet provide for the , key with or without modifiers\n    '
retval = []
if text.startswith('Esc, Q ('):
        return [('Esc', ''), ('Q', '')]
shortcuts = text.split(', ')
for sc in shortcuts:
test = sc.split('+')
keyname = test[(- 1)].title()
modifiers = test[:(- 1)]
        if (keyname == ''):
keyname = '+'
        if (modifiers[(- 1)] == ''):
modifiers.pop()
elif (modifiers[(- 1)] == 'Num '):
keyname = (modifiers.pop() + keyname)
retval.append((keyname, _shorten_mods(modifiers)))
return retval | def parse_keytext(text):
    'derive keynames and modifiers from text\n\n    returns a list of keyname - modifier-list pairs\n    does not yet provide for the , key with or without modifiers\n    '
retval = []
if text.startswith('Esc, Q ('):
        return [('Esc', ''), ('Q', '')]
shortcuts = text.split(', ')
for sc in shortcuts:
test = sc.split('+')
keyname = test[(- 1)].title()
modifiers = test[:(- 1)]
        if (keyname == ''):
keyname = '+'
        if (modifiers[(- 1)] == ''):
modifiers.pop()
elif (modifiers[(- 1)] == 'Num '):
keyname = (modifiers.pop() + keyname)
retval.append((keyname, _shorten_mods(modifiers)))
return retval<|docstring|>leid keynamen en modifiers op uit tekst
geeft een list terug van keynaam - modifier-list paren
voorziet nog niet in , key al dan niet met modifiers<|endoftext|> |
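A hand-traced sketch of parse_keytext, again assuming the plugin_examples.dckeys import path from the record; the modifier letters come from _shorten_mods shown earlier, and the results are traced by hand from the code above:
>>> from plugin_examples.dckeys import parse_keytext  # assumed import path
>>> parse_keytext('Ctrl+A, Shift+F5')        # comma-separated shortcuts
[('A', 'C'), ('F5', 'S')]
>>> parse_keytext('Ctrl++')                  # a trailing '+' means the key itself is '+'
[('+', 'C')]
>>> parse_keytext('Esc, Q (close viewer)')   # special-cased prefix from the function body
[('Esc', ''), ('Q', '')]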
5ff7fd897a87cf2cedc6e71418ad89b85d25807cd8cbacc97f3382d418d60146 | def analyze_keydefs(root):
'build the data for a specific category (corresponds with a section in the\n html)\n\n input:\n met BeautifulSoup geparsede node data\n resultaat:\n gegevens om toe te voegen aan de output van get_cmddict\n '
(cmddict, cmdparms) = ({}, {})
dflt_assign = collections.defaultdict(set)
command_list = []
for tbl in root.children:
if ((not tbl.name) or (tbl.name != 'table')):
continue
for row in tbl.children:
if ((not row.name) or (row.name != 'tr')):
continue
if (('class' in row.attrs) and (row['class'] in ('rowcategorytitle', 'rowsubtitle'))):
continue
(command, defkey, params, desctable) = ('', '', [], [])
for col in row.children:
if ((not col.name) or (col.name != 'td')):
continue
if ('class' not in col.attrs):
continue
if ('cmdcell' in col['class']):
for item in col.children:
if ((not item.name) or (item.name != 'div')):
continue
if ('cmdname' in item['class']):
command = item.a.text.strip('.')
elif ('longcmdname' in item['class']):
command = item.a.text.strip('.')
elif ('shrtctkey' in item['class']):
defkey = item.text
elif ('cmdhintcell' in col['class']):
desctable = []
for item in col.children:
if (not item.name):
desctable.append(item)
elif (item.name != 'table'):
desctable.append(' '.join([x.strip() for x in item.get_text().split('\n')]))
elif ('innercmddesc' in item['class']):
for line in item.children:
if (line.name != 'tr'):
continue
name = value = desc = ''
for cell in line.children:
if (cell.name != 'td'):
continue
if ('class' not in cell.attrs):
desctable.append(cell.text)
continue
if ('innerdescparamcell' in cell['class']):
name = cell.get_text()
elif ('innerdescvaluecell' in cell['class']):
value = cell.get_text()
elif ('innerdescdesccell' in cell['class']):
desc = cell.get_text()
params.append((name, value, desc))
cmddesc = ' '.join([x.strip() for x in desctable if x.strip()]).strip(' .')
if defkey:
allkeys = parse_keytext(defkey)
for (key, mods) in allkeys:
test = (_translate_keynames(key), mods)
dflt_assign[test].add(command)
if command:
command_list.append(command)
cmddict[command] = cmddesc
if params:
cmdparms[command] = params
return (cmddict, dflt_assign, cmdparms, command_list) | build the data for a specific category (corresponds with a section in the
html)
input:
met BeautifulSoup geparsede node data
resultaat:
gegevens om toe te voegen aan de output van get_cmddict | plugin_examples/dckeys.py | analyze_keydefs | albertvisser/hotkeys | 1 | python | def analyze_keydefs(root):
'build the data for a specific category (corresponds with a section in the\n html)\n\n input:\n met BeautifulSoup geparsede node data\n resultaat:\n gegevens om toe te voegen aan de output van get_cmddict\n '
(cmddict, cmdparms) = ({}, {})
dflt_assign = collections.defaultdict(set)
command_list = []
for tbl in root.children:
if ((not tbl.name) or (tbl.name != 'table')):
continue
for row in tbl.children:
if ((not row.name) or (row.name != 'tr')):
continue
if (('class' in row.attrs) and (row['class'] in ('rowcategorytitle', 'rowsubtitle'))):
continue
(command, defkey, params, desctable) = ('', '', [], [])
for col in row.children:
if ((not col.name) or (col.name != 'td')):
continue
if ('class' not in col.attrs):
continue
if ('cmdcell' in col['class']):
for item in col.children:
if ((not item.name) or (item.name != 'div')):
continue
if ('cmdname' in item['class']):
command = item.a.text.strip('.')
elif ('longcmdname' in item['class']):
command = item.a.text.strip('.')
elif ('shrtctkey' in item['class']):
defkey = item.text
elif ('cmdhintcell' in col['class']):
desctable = []
for item in col.children:
if (not item.name):
desctable.append(item)
elif (item.name != 'table'):
desctable.append(' '.join([x.strip() for x in item.get_text().split('\n')]))
elif ('innercmddesc' in item['class']):
for line in item.children:
if (line.name != 'tr'):
continue
name = value = desc = ''
for cell in line.children:
if (cell.name != 'td'):
continue
if ('class' not in cell.attrs):
desctable.append(cell.text)
continue
if ('innerdescparamcell' in cell['class']):
name = cell.get_text()
elif ('innerdescvaluecell' in cell['class']):
value = cell.get_text()
elif ('innerdescdesccell' in cell['class']):
desc = cell.get_text()
params.append((name, value, desc))
cmddesc = ' '.join([x.strip() for x in desctable if x.strip()]).strip(' .')
if defkey:
allkeys = parse_keytext(defkey)
for (key, mods) in allkeys:
test = (_translate_keynames(key), mods)
dflt_assign[test].add(command)
if command:
command_list.append(command)
cmddict[command] = cmddesc
if params:
cmdparms[command] = params
return (cmddict, dflt_assign, cmdparms, command_list) | def analyze_keydefs(root):
'build the data for a specific category (corresponds with a section in the\n html)\n\n input:\n met BeautifulSoup geparsede node data\n resultaat:\n gegevens om toe te voegen aan de output van get_cmddict\n '
(cmddict, cmdparms) = ({}, {})
dflt_assign = collections.defaultdict(set)
command_list = []
for tbl in root.children:
if ((not tbl.name) or (tbl.name != 'table')):
continue
for row in tbl.children:
if ((not row.name) or (row.name != 'tr')):
continue
if (('class' in row.attrs) and (row['class'] in ('rowcategorytitle', 'rowsubtitle'))):
continue
(command, defkey, params, desctable) = ('', '', [], [])
for col in row.children:
if ((not col.name) or (col.name != 'td')):
continue
if ('class' not in col.attrs):
continue
if ('cmdcell' in col['class']):
for item in col.children:
if ((not item.name) or (item.name != 'div')):
continue
if ('cmdname' in item['class']):
command = item.a.text.strip('.')
elif ('longcmdname' in item['class']):
command = item.a.text.strip('.')
elif ('shrtctkey' in item['class']):
defkey = item.text
elif ('cmdhintcell' in col['class']):
desctable = []
for item in col.children:
if (not item.name):
desctable.append(item)
elif (item.name != 'table'):
desctable.append(' '.join([x.strip() for x in item.get_text().split('\n')]))
elif ('innercmddesc' in item['class']):
for line in item.children:
if (line.name != 'tr'):
continue
name = value = desc = ''
for cell in line.children:
if (cell.name != 'td'):
continue
if ('class' not in cell.attrs):
desctable.append(cell.text)
continue
if ('innerdescparamcell' in cell['class']):
name = cell.get_text()
elif ('innerdescvaluecell' in cell['class']):
value = cell.get_text()
elif ('innerdescdesccell' in cell['class']):
desc = cell.get_text()
params.append((name, value, desc))
cmddesc = ' '.join([x.strip() for x in desctable if x.strip()]).strip(' .')
if defkey:
allkeys = parse_keytext(defkey)
for (key, mods) in allkeys:
test = (_translate_keynames(key), mods)
dflt_assign[test].add(command)
if command:
command_list.append(command)
cmddict[command] = cmddesc
if params:
cmdparms[command] = params
return (cmddict, dflt_assign, cmdparms, command_list)<|docstring|>build the data for a specific category (corresponds with a section in the
html)
input:
met BeautifulSoup geparsede node data
resultaat:
gegevens om toe te voegen aan de output van get_cmddict<|endoftext|> |
e171b9fa9c49557aab3f3f2254638b75625441a5b67239b78aa981ce8705feef | def buildcsv(page, showinfo=True):
'lees de keyboard definities uit het/de settings file(s) van het tool zelf\n en geef ze terug voor schrijven naar het csv bestand\n\n input: het door de plugin gegenereerde scherm en een indicatie of het getoond\n moet worden\n returns: een mapping voor het csv file en een aantal hulptabellen\n '
builder = CsvBuilder(page, showinfo)
(kbfile, dc_keys, dc_cmds, dc_sett, dc_desc) = builder.get_settings_pathnames()
if showinfo:
new_kbfile = builder.check_path_setting(kbfile)
if (new_kbfile and (not kbfile)):
kbfile = page.settings['DC_PATH'] = new_kbfile
builder.get_keydefs(get_data_from_xml(kbfile))
builder.get_stdkeys(get_data_from_html(dc_keys))
builder.get_toolbarcmds(get_data_from_xml(dc_sett))
builder.get_cmddict(get_data_from_html(dc_cmds))
builder.assemble_shortcuts()
desclist = get_data_from_csv(dc_desc)
newdesclist = builder.add_missing_descriptions(desclist)
if (newdesclist != desclist):
save_list_to_csv(newdesclist, dc_desc)
builder.format_shortcuts()
for name in builder.contexts_list:
test = name.split('_')[0].title()
builder.contexts.add(test)
return (builder.shortcuts, {'stdkeys': builder.stdkeys, 'defaults': builder.defaults, 'cmddict': builder.cmddict, 'contexts': list(builder.contexts), 'restrictions': list(builder.controls), 'cmdparms': builder.params, 'catdict': builder.catdict}) | lees de keyboard definities uit het/de settings file(s) van het tool zelf
en geef ze terug voor schrijven naar het csv bestand
input: het door de plugin gegenereerde scherm en een indicatie of het getoond
moet worden
returns: een mapping voor het csv file en een aantal hulptabellen | plugin_examples/dckeys.py | buildcsv | albertvisser/hotkeys | 1 | python | def buildcsv(page, showinfo=True):
'lees de keyboard definities uit het/de settings file(s) van het tool zelf\n en geef ze terug voor schrijven naar het csv bestand\n\n input: het door de plugin gegenereerde scherm en een indicatie of het getoond\n moet worden\n returns: een mapping voor het csv file en een aantal hulptabellen\n '
builder = CsvBuilder(page, showinfo)
(kbfile, dc_keys, dc_cmds, dc_sett, dc_desc) = builder.get_settings_pathnames()
if showinfo:
new_kbfile = builder.check_path_setting(kbfile)
if (new_kbfile and (not kbfile)):
kbfile = page.settings['DC_PATH'] = new_kbfile
builder.get_keydefs(get_data_from_xml(kbfile))
builder.get_stdkeys(get_data_from_html(dc_keys))
builder.get_toolbarcmds(get_data_from_xml(dc_sett))
builder.get_cmddict(get_data_from_html(dc_cmds))
builder.assemble_shortcuts()
desclist = get_data_from_csv(dc_desc)
newdesclist = builder.add_missing_descriptions(desclist)
if (newdesclist != desclist):
save_list_to_csv(newdesclist, dc_desc)
builder.format_shortcuts()
for name in builder.contexts_list:
test = name.split('_')[0].title()
builder.contexts.add(test)
return (builder.shortcuts, {'stdkeys': builder.stdkeys, 'defaults': builder.defaults, 'cmddict': builder.cmddict, 'contexts': list(builder.contexts), 'restrictions': list(builder.controls), 'cmdparms': builder.params, 'catdict': builder.catdict}) | def buildcsv(page, showinfo=True):
'lees de keyboard definities uit het/de settings file(s) van het tool zelf\n en geef ze terug voor schrijven naar het csv bestand\n\n input: het door de plugin gegenereerde scherm en een indicatie of het getoond\n moet worden\n returns: een mapping voor het csv file en een aantal hulptabellen\n '
builder = CsvBuilder(page, showinfo)
(kbfile, dc_keys, dc_cmds, dc_sett, dc_desc) = builder.get_settings_pathnames()
if showinfo:
new_kbfile = builder.check_path_setting(kbfile)
if (new_kbfile and (not kbfile)):
kbfile = page.settings['DC_PATH'] = new_kbfile
builder.get_keydefs(get_data_from_xml(kbfile))
builder.get_stdkeys(get_data_from_html(dc_keys))
builder.get_toolbarcmds(get_data_from_xml(dc_sett))
builder.get_cmddict(get_data_from_html(dc_cmds))
builder.assemble_shortcuts()
desclist = get_data_from_csv(dc_desc)
newdesclist = builder.add_missing_descriptions(desclist)
if (newdesclist != desclist):
save_list_to_csv(newdesclist, dc_desc)
builder.format_shortcuts()
for name in builder.contexts_list:
test = name.split('_')[0].title()
builder.contexts.add(test)
return (builder.shortcuts, {'stdkeys': builder.stdkeys, 'defaults': builder.defaults, 'cmddict': builder.cmddict, 'contexts': list(builder.contexts), 'restrictions': list(builder.controls), 'cmdparms': builder.params, 'catdict': builder.catdict})<|docstring|>lees de keyboard definities uit het/de settings file(s) van het tool zelf
en geef ze terug voor schrijven naar het csv bestand
input: het door de plugin gegenereerde scherm en een indicatie of het getoond
moet worden
returns: een mapping voor het csv file en een aantal hulptabellen<|endoftext|> |
1bdc7ffdd48aeb3a65c051104404014c06072be980cdb4b4cb5433c9bb068b37 | def build_shortcut(key, mods):
'return text for key combo\n '
mod2str = (('C', 'Ctrl+'), ('S', 'Shift+'), ('A', 'Alt+'), ('W', 'WinKey+'))
result = ''
for (x, y) in mod2str:
if (x in mods):
result += y
key = key.capitalize()
return (result + key) | return text for key combo | plugin_examples/dckeys.py | build_shortcut | albertvisser/hotkeys | 1 | python | def build_shortcut(key, mods):
'\n '
mod2str = (('C', 'Ctrl+'), ('S', 'Shift+'), ('A', 'Alt+'), ('W', 'WinKey+'))
result = ''
for (x, y) in mod2str:
if (x in mods):
result += y
key = key.capitalize()
return (result + key) | def build_shortcut(key, mods):
'\n '
mod2str = (('C', 'Ctrl+'), ('S', 'Shift+'), ('A', 'Alt+'), ('W', 'WinKey+'))
result = ''
for (x, y) in mod2str:
if (x in mods):
result += y
key = key.capitalize()
return (result + key)<|docstring|>return text for key combo<|endoftext|> |
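For symmetry with _shorten_mods, a hand-traced sketch of build_shortcut (same assumed plugin_examples.dckeys import path); it expands the single-letter modifier string back into the Shortcut text that savekeys writes to the .scf file:
>>> from plugin_examples.dckeys import build_shortcut  # assumed import path
>>> build_shortcut('a', 'CS')
'Ctrl+Shift+A'
>>> build_shortcut('del', 'A')
'Alt+Del'
>>> build_shortcut('F5', '')
'F5'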
feb8d0cb2b9fc151d9ded0135b034c82c778d3084e1a8db2840ea4ac265a6743 | def savekeys(page):
'schrijf de gegevens terug\n '
ok = show_cancel_message(page, text=how_to_save)
if (not ok):
return
kbfile = get_file_to_save(page, extension='SCF files (*.scf)', start=page.settings['DC_PATH'])
if (not kbfile):
return
root = ET.Element('doublecmd', DCVersion='0.6.6 beta')
head = ET.SubElement(root, 'Hotkeys', Version='20')
oldform = ''
for item in sorted(page.data.values(), key=(lambda x: x[3])):
(key, mods, kind, context, cmnd, parm, ctrl, desc) = item
if (context != oldform):
newform = ET.SubElement(head, 'Form', Name=context)
oldform = context
hotkey = ET.SubElement(newform, 'Hotkey')
shortcut = ET.SubElement(hotkey, 'Shortcut')
shortcut.text = build_shortcut(key, mods)
command = ET.SubElement(hotkey, 'Command')
command.text = cmnd
if parm:
param = ET.SubElement(hotkey, 'Param')
param.text = parm
if ctrl:
control = ET.SubElement(hotkey, 'Control')
control.text = ctrl
shutil.copyfile(kbfile, (kbfile + '.bak'))
ET.ElementTree(root).write(kbfile, encoding='UTF-8', xml_declaration=True) | schrijf de gegevens terug | plugin_examples/dckeys.py | savekeys | albertvisser/hotkeys | 1 | python | def savekeys(page):
'\n '
ok = show_cancel_message(page, text=how_to_save)
if (not ok):
return
kbfile = get_file_to_save(page, extension='SCF files (*.scf)', start=page.settings['DC_PATH'])
if (not kbfile):
return
root = ET.Element('doublecmd', DCVersion='0.6.6 beta')
head = ET.SubElement(root, 'Hotkeys', Version='20')
oldform = ''
for item in sorted(page.data.values(), key=(lambda x: x[3])):
(key, mods, kind, context, cmnd, parm, ctrl, desc) = item
if (context != oldform):
newform = ET.SubElement(head, 'Form', Name=context)
oldform = context
hotkey = ET.SubElement(newform, 'Hotkey')
shortcut = ET.SubElement(hotkey, 'Shortcut')
shortcut.text = build_shortcut(key, mods)
command = ET.SubElement(hotkey, 'Command')
command.text = cmnd
if parm:
param = ET.SubElement(hotkey, 'Param')
param.text = parm
if ctrl:
control = ET.SubElement(hotkey, 'Control')
control.text = ctrl
shutil.copyfile(kbfile, (kbfile + '.bak'))
ET.ElementTree(root).write(kbfile, encoding='UTF-8', xml_declaration=True) | def savekeys(page):
'\n '
ok = show_cancel_message(page, text=how_to_save)
if (not ok):
return
kbfile = get_file_to_save(page, extension='SCF files (*.scf)', start=page.settings['DC_PATH'])
if (not kbfile):
return
root = ET.Element('doublecmd', DCVersion='0.6.6 beta')
head = ET.SubElement(root, 'Hotkeys', Version='20')
oldform = ''
for item in sorted(page.data.values(), key=(lambda x: x[3])):
(key, mods, kind, context, cmnd, parm, ctrl, desc) = item
if (context != oldform):
newform = ET.SubElement(head, 'Form', Name=context)
oldform = context
hotkey = ET.SubElement(newform, 'Hotkey')
shortcut = ET.SubElement(hotkey, 'Shortcut')
shortcut.text = build_shortcut(key, mods)
command = ET.SubElement(hotkey, 'Command')
command.text = cmnd
if parm:
param = ET.SubElement(hotkey, 'Param')
param.text = parm
if ctrl:
control = ET.SubElement(hotkey, 'Control')
control.text = ctrl
shutil.copyfile(kbfile, (kbfile + '.bak'))
ET.ElementTree(root).write(kbfile, encoding='UTF-8', xml_declaration=True)<|docstring|>schrijf de gegevens terug<|endoftext|> |
678f8e6e39a34bb2ba17f32cf3f0b1547136e913134d9d7f818e5b922f55bbcb | def add_extra_attributes(win):
'stuff needed for redefining keyboard combos\n\n key, mods, cmnd, params, controls\n '
win.commandsdict = win.otherstuff['cmddict']
win.commandslist = sorted(win.commandsdict.keys())
win.descriptions = win.commandsdict
win.contextslist = win.otherstuff['contexts']
win.contextactionsdict = {x: win.commandslist for x in win.contextslist}
win.controlslist = win.otherstuff['restrictions'] | stuff needed for redefining keyboard combos
key, mods, cmnd, params, controls | plugin_examples/dckeys.py | add_extra_attributes | albertvisser/hotkeys | 1 | python | def add_extra_attributes(win):
'stuff needed for redefining keyboard combos\n\n key, mods, cmnd, params, controls\n '
win.commandsdict = win.otherstuff['cmddict']
win.commandslist = sorted(win.commandsdict.keys())
win.descriptions = win.commandsdict
win.contextslist = win.otherstuff['contexts']
win.contextactionsdict = {x: win.commandslist for x in win.contextslist}
win.controlslist = win.otherstuff['restrictions'] | def add_extra_attributes(win):
'stuff needed for redefining keyboard combos\n\n key, mods, cmnd, params, controls\n '
win.commandsdict = win.otherstuff['cmddict']
win.commandslist = sorted(win.commandsdict.keys())
win.descriptions = win.commandsdict
win.contextslist = win.otherstuff['contexts']
win.contextactionsdict = {x: win.commandslist for x in win.contextslist}
win.controlslist = win.otherstuff['restrictions']<|docstring|>stuff needed for redefining keyboard combos
key, mods, cmnd, params, controls<|endoftext|> |
7f3d4472c38da7063059ed69ad5774ace434c74ef18da2a0600276f4a121da74 | def get_frameheight():
'return fixed height for extra panel\n '
return 120 | return fixed height for extra panel | plugin_examples/dckeys.py | get_frameheight | albertvisser/hotkeys | 1 | python | def get_frameheight():
'\n '
return 120 | def get_frameheight():
'\n '
return 120<|docstring|>return fixed height for extra panel<|endoftext|> |
0a8798e40ee70038dd2dcb4d2dfd2155545dd6438ee6bfdc6a09d226ee908bd0 | def get_settings_pathnames(self):
'get the paths for the settings files to read\n '
kbfile = self.page.settings.get('DC_PATH', '')
if (not kbfile):
kbfile = self.page.settings['DC_PATH'] = os.path.join(CONFPATH, 'shortcuts.scf')
dc_keys = self.page.settings.get('DC_KEYS', '')
if (not dc_keys):
dc_keys = self.page_settings['DC_KEYS'] = os.path.join(DOCSPATH, 'shortcuts.html')
dc_cmds = self.page.settings.get('DC_CMDS', '')
if (not dc_cmds):
dc_cmds = self.page.settings['DC_CMDS'] = os.path.join(DOCSPATH, 'cmds.html')
if dc_keys.startswith('http'):
if (not os.path.exists('/tmp/dc_files/shortcuts.html')):
import subprocess
subprocess.run(['wget', '-i', dc_keys, '-P', '/tmp/dc_files', '-nc'])
dc_keys = os.path.join('/tmp/dc_files', os.path.basename(dc_keys))
dc_cmds = os.path.join('/tmp/dc_files', os.path.basename(dc_cmds))
dc_sett = self.page.settings.get('DC_SETT', '')
if (not dc_sett):
dc_sett = self.page.settings['DC_SETT'] = os.path.join(CONFPATH, 'doublecmd.xml')
dc_desc = self.page.settings.get('DC_DESC', '')
if (not dc_desc):
dc_desc = self.page.settings['DC_DESC'] = os.path.join(HERE, 'dc_descs.csv')
return (kbfile, dc_keys, dc_cmds, dc_sett, dc_desc) | get the paths for the settings files to read | plugin_examples/dckeys.py | get_settings_pathnames | albertvisser/hotkeys | 1 | python | def get_settings_pathnames(self):
'\n '
kbfile = self.page.settings.get('DC_PATH', '')
if (not kbfile):
kbfile = self.page.settings['DC_PATH'] = os.path.join(CONFPATH, 'shortcuts.scf')
dc_keys = self.page.settings.get('DC_KEYS', '')
if (not dc_keys):
dc_keys = self.page_settings['DC_KEYS'] = os.path.join(DOCSPATH, 'shortcuts.html')
dc_cmds = self.page.settings.get('DC_CMDS', '')
if (not dc_cmds):
dc_cmds = self.page.settings['DC_CMDS'] = os.path.join(DOCSPATH, 'cmds.html')
if dc_keys.startswith('http'):
if (not os.path.exists('/tmp/dc_files/shortcuts.html')):
import subprocess
subprocess.run(['wget', '-i', dc_keys, '-P', '/tmp/dc_files', '-nc'])
dc_keys = os.path.join('/tmp/dc_files', os.path.basename(dc_keys))
dc_cmds = os.path.join('/tmp/dc_files', os.path.basename(dc_cmds))
dc_sett = self.page.settings.get('DC_SETT', '')
if (not dc_sett):
dc_sett = self.page.settings['DC_SETT'] = os.path.join(CONFPATH, 'doublecmd.xml')
dc_desc = self.page.settings.get('DC_DESC', '')
if (not dc_desc):
dc_desc = self.page.settings['DC_DESC'] = os.path.join(HERE, 'dc_descs.csv')
return (kbfile, dc_keys, dc_cmds, dc_sett, dc_desc) | def get_settings_pathnames(self):
'\n '
kbfile = self.page.settings.get('DC_PATH', '')
if (not kbfile):
kbfile = self.page.settings['DC_PATH'] = os.path.join(CONFPATH, 'shortcuts.scf')
dc_keys = self.page.settings.get('DC_KEYS', '')
if (not dc_keys):
dc_keys = self.page_settings['DC_KEYS'] = os.path.join(DOCSPATH, 'shortcuts.html')
dc_cmds = self.page.settings.get('DC_CMDS', '')
if (not dc_cmds):
dc_cmds = self.page.settings['DC_CMDS'] = os.path.join(DOCSPATH, 'cmds.html')
if dc_keys.startswith('http'):
if (not os.path.exists('/tmp/dc_files/shortcuts.html')):
import subprocess
subprocess.run(['wget', '-i', dc_keys, '-P', '/tmp/dc_files', '-nc'])
dc_keys = os.path.join('/tmp/dc_files', os.path.basename(dc_keys))
dc_cmds = os.path.join('/tmp/dc_files', os.path.basename(dc_cmds))
dc_sett = self.page.settings.get('DC_SETT', '')
if (not dc_sett):
dc_sett = self.page.settings['DC_SETT'] = os.path.join(CONFPATH, 'doublecmd.xml')
dc_desc = self.page.settings.get('DC_DESC', '')
if (not dc_desc):
dc_desc = self.page.settings['DC_DESC'] = os.path.join(HERE, 'dc_descs.csv')
return (kbfile, dc_keys, dc_cmds, dc_sett, dc_desc)<|docstring|>get the paths for the settings files to read<|endoftext|> |
35918406e217b6f4e8918edb0da881b41d08063ceef91f1e8586cf1a5c6d5a22 | def check_path_setting(self, initial):
'show instructions and ask for keyboard shortcuts file to use\n '
kbfile = ''
(ok, cancel) = ask_ync_question(self.page.gui, text=instructions)
if ok:
kbfile = get_file_to_open(self.page.gui, extension='SCF files (*.scf)', start=initial)
return kbfile | show instructions and ask for keyboard shortcuts file to use | plugin_examples/dckeys.py | check_path_setting | albertvisser/hotkeys | 1 | python | def check_path_setting(self, initial):
'\n '
kbfile = ''
(ok, cancel) = ask_ync_question(self.page.gui, text=instructions)
if ok:
kbfile = get_file_to_open(self.page.gui, extension='SCF files (*.scf)', start=initial)
return kbfile | def check_path_setting(self, initial):
'\n '
kbfile = ''
(ok, cancel) = ask_ync_question(self.page.gui, text=instructions)
if ok:
kbfile = get_file_to_open(self.page.gui, extension='SCF files (*.scf)', start=initial)
return kbfile<|docstring|>show instructions and ask for keyboard shortcuts file to use<|endoftext|> |
5076c0214b1c0a6b9a80cc7afe94f9044a566674642f86708d7258141c056de4 | def get_keydefs(self, data):
'huidige keydefs afleiden\n\n input: met ElementTree ingelezen data volgens pad zoals opgegeven in DC_PATH\n (shortcuts.scf in de settings directory)\n resultaat: een mapping van een tuple van keyname, modifiers en context op een dictionary met\n command string, parameter en controls\n een lijst van alle gevonden contexten\n een lijst van alle gevonden mogelijke controls\n '
root = data.getroot()
for form in list(root.find('Hotkeys')):
ctx = form.get('Name')
self.contexts.add(ctx)
for hotkey in form:
shortcut = hotkey.find('Shortcut').text
if shortcut.endswith('+'):
parts = shortcut[:(- 1)].split('+')
parts[(- 1)] += '+'
else:
parts = shortcut.split('+')
keyname = parts[(- 1)]
modifiers = _shorten_mods(parts[:(- 1)])
command = hotkey.find('Command').text
test = hotkey.findall('Param')
if (test is None):
parameter = ''
else:
parameter = ';'.join([param.text for param in test])
test = hotkey.findall('Control')
if (test is None):
ctrls = ''
self.controls.add('')
else:
ctrls = []
for control in test:
ctrls.append(control.text)
self.controls.add(control.text)
ctrls = ';'.join(ctrls)
self.definedkeys[(keyname, modifiers, ctx)] = {'cmd': command, 'param': parameter, 'ctrl': ctrls} | huidige keydefs afleiden
input: met ElementTree ingelezen data volgens pad zoals opgegeven in DC_PATH
(shortcuts.scf in de settings directory)
resultaat: een mapping van een tuple van keyname, modifiers en context op een dictionary met
command string, parameter en controls
een lijst van alle gevonden contexten
een lijst van alle gevonden mogelijke controls | plugin_examples/dckeys.py | get_keydefs | albertvisser/hotkeys | 1 | python | def get_keydefs(self, data):
'huidige keydefs afleiden\n\n input: met ElementTree ingelezen data volgens pad zoals opgegeven in DC_PATH\n (shortcuts.scf in de settings directory)\n resultaat: een mapping van een tuple van keyname, modifiers en context op een dictionary met\n command string, parameter en controls\n een lijst van alle gevonden contexten\n een lijst van alle gevonden mogelijke controls\n '
root = data.getroot()
for form in list(root.find('Hotkeys')):
ctx = form.get('Name')
self.contexts.add(ctx)
for hotkey in form:
shortcut = hotkey.find('Shortcut').text
if shortcut.endswith('+'):
parts = shortcut[:(- 1)].split('+')
parts[(- 1)] += '+'
else:
parts = shortcut.split('+')
keyname = parts[(- 1)]
modifiers = _shorten_mods(parts[:(- 1)])
command = hotkey.find('Command').text
test = hotkey.findall('Param')
if (test is None):
parameter = ''
else:
parameter = ';'.join([param.text for param in test])
test = hotkey.findall('Control')
if (test is None):
ctrls = ''
self.controls.add('')
else:
ctrls = []
for control in test:
ctrls.append(control.text)
self.controls.add(control.text)
ctrls = ';'.join(ctrls)
self.definedkeys[(keyname, modifiers, ctx)] = {'cmd': command, 'param': parameter, 'ctrl': ctrls} | def get_keydefs(self, data):
'huidige keydefs afleiden\n\n input: met ElementTree ingelezen data volgens pad zoals opgegeven in DC_PATH\n (shortcuts.scf in de settings directory)\n resultaat: een mapping van een tuple van keyname, modifiers en context op een dictionary met\n command string, parameter en controls\n een lijst van alle gevonden contexten\n een lijst van alle gevonden mogelijke controls\n '
root = data.getroot()
for form in list(root.find('Hotkeys')):
ctx = form.get('Name')
self.contexts.add(ctx)
for hotkey in form:
shortcut = hotkey.find('Shortcut').text
if shortcut.endswith('+'):
parts = shortcut[:(- 1)].split('+')
parts[(- 1)] += '+'
else:
parts = shortcut.split('+')
keyname = parts[(- 1)]
modifiers = _shorten_mods(parts[:(- 1)])
command = hotkey.find('Command').text
test = hotkey.findall('Param')
if (test is None):
parameter = ''
else:
parameter = ';'.join([param.text for param in test])
test = hotkey.findall('Control')
if (test is None):
ctrls = ''
self.controls.add('')
else:
ctrls = []
for control in test:
ctrls.append(control.text)
self.controls.add(control.text)
ctrls = ';'.join(ctrls)
self.definedkeys[(keyname, modifiers, ctx)] = {'cmd': command, 'param': parameter, 'ctrl': ctrls}<|docstring|>huidige keydefs afleiden
input: met ElementTree ingelezen data volgens pad zoals opgegeven in DC_PATH
(shortcuts.scf in de settings directory)
resultaat: een mapping van een tuple van keyname, modifiers en context op een dictionary met
command string, parameter en controls
een lijst van alle gevonden contexten
een lijst van alle gevonden mogelijke controls<|endoftext|> |
f6639aa0c5fa21939d312d08be286c865cdfe591f61574ce1bb53575af9ce996 | def get_stdkeys(self, soup):
'determine standard keys\n\n input: met BeautifulSoup ingelezen data volgens pad zoals opgegeven in DC_KEYS\n (shortcuts.html in de docs directory)\n resultaat: een mapping van een tuple van keyname, modifiers en context op omschrijving\n een list met mogelijke contexts\n '
sections = soup.find_all('div')
for div in sections:
context = div.select('h2 a')
if (not context):
continue
context = context[0]['name']
if (context in ('warning', 'options')):
continue
context = context.replace('_window', '').title()
self.contexts_list.append(context)
tbody = div.select('table tr')
for row in tbody:
if ('class' in row.attrs):
continue
keynames = ()
for col in row.select('td'):
if (('class' in col.attrs) and ('varcell' in col['class'])):
keynames = parse_keytext(col.div.text)
else:
oms = '\n'.join([x.strip() for x in col.get_text().split('\n')])
if keynames:
for (name, mods) in keynames:
self.stdkeys[(_translate_keynames(name), mods, context)] = oms | determine standard keys
input: met BeautifulSoup ingelezen data volgens pad zoals opgegeven in DC_KEYS
(shortcuts.html in de docs directory)
resultaat: een mapping van een tuple van keyname, modifiers en context op omschrijving
een list met mogelijke contexts | plugin_examples/dckeys.py | get_stdkeys | albertvisser/hotkeys | 1 | python | def get_stdkeys(self, soup):
'determine standard keys\n\n input: met BeautifulSoup ingelezen data volgens pad zoals opgegeven in DC_KEYS\n (shortcuts.html in de docs directory)\n resultaat: een mapping van een tuple van keyname, modifiers en context op omschrijving\n een list met mogelijke contexts\n '
sections = soup.find_all('div')
for div in sections:
context = div.select('h2 a')
if (not context):
continue
context = context[0]['name']
if (context in ('warning', 'options')):
continue
context = context.replace('_window', '').title()
self.contexts_list.append(context)
tbody = div.select('table tr')
for row in tbody:
if ('class' in row.attrs):
continue
keynames = ()
for col in row.select('td'):
if (('class' in col.attrs) and ('varcell' in col['class'])):
keynames = parse_keytext(col.div.text)
else:
oms = '\n'.join([x.strip() for x in col.get_text().split('\n')])
if keynames:
for (name, mods) in keynames:
self.stdkeys[(_translate_keynames(name), mods, context)] = oms | def get_stdkeys(self, soup):
'determine standard keys\n\n input: met BeautifulSoup ingelezen data volgens pad zoals opgegeven in DC_KEYS\n (shortcuts.html in de docs directory)\n resultaat: een mapping van een tuple van keyname, modifiers en context op omschrijving\n een list met mogelijke contexts\n '
sections = soup.find_all('div')
for div in sections:
context = div.select('h2 a')
if (not context):
continue
context = context[0]['name']
if (context in ('warning', 'options')):
continue
context = context.replace('_window', '').title()
self.contexts_list.append(context)
tbody = div.select('table tr')
for row in tbody:
if ('class' in row.attrs):
continue
keynames = ()
for col in row.select('td'):
if (('class' in col.attrs) and ('varcell' in col['class'])):
keynames = parse_keytext(col.div.text)
else:
oms = '\n'.join([x.strip() for x in col.get_text().split('\n')])
if keynames:
for (name, mods) in keynames:
self.stdkeys[(_translate_keynames(name), mods, context)] = oms<|docstring|>determine standard keys
input: met BeautifulSoup ingelezen data volgens pad zoals opgegeven in DC_KEYS
(shortcuts.html in de docs directory)
resultaat: een mapping van een tuple van keyname, modifiers en context op omschrijving
een list met mogelijke contexts<|endoftext|> |
ef19056b6fe24f5b183a447bc8694206c330fb1f13251a0cd0bd1272ab0d2adf | def get_cmddict(self, soup):
'build dictionary of commands with descriptions\n\n input: met BeautifulSoup ingelezen data volgens pad zoals opgegeven in DC_CMDS\n (cmds.html in de docs directory)\n resultaat: een mapping met key = commandonaam en value = een tekst (de omschrijving)\n een mapping met key = een tuple van keyname, modifiers en value = een list\n (eigenlijk set) van commandonamen (meestal 1?)\n een mapping met key = commandonaam en value = een list van tuples van\n naam, waardebereik en omschrijving\n een mapping met key = categorie en value = een list van commandonamen\n '
newsoup = soup.select('div[class="dchelpage"]')[0]
for div in newsoup.children:
if ((not div.name) or (div.name != 'div')):
continue
for hx in div.children:
if ((not hx.name) or (hx.name != 'h2')):
continue
for a in hx.children:
if (a.name == 'a'):
cat = a['name']
if cat.startswith('cat'):
cat = cat[3:]
(cd, da, cp, cl) = analyze_keydefs(div)
self.cmddict.update(cd)
self.defaults.update(da)
self.params.update(cp)
self.catdict[cat] = cl | build dictionary of commands with descriptions
input: met BeautifulSoup ingelezen data volgens pad zoals opgegeven in DC_CMDS
(cmds.html in de docs directory)
resultaat: een mapping met key = commandonaam en value = een tekst (de omschrijving)
een mapping met key = een tuple van keyname, modifiers en value = een list
(eigenlijk set) van commandonamen (meestal 1?)
een mapping met key = commandonaam en value = een list van tuples van
naam, waardebereik en omschrijving
een mapping met key = categorie en value = een list van commandonamen | plugin_examples/dckeys.py | get_cmddict | albertvisser/hotkeys | 1 | python | def get_cmddict(self, soup):
'build dictionary of commands with descriptions\n\n input: met BeautifulSoup ingelezen data volgens pad zoals opgegeven in DC_CMDS\n (cmds.html in de docs directory)\n resultaat: een mapping met key = commandonaam en value = een tekst (de omschrijving)\n een mapping met key = een tuple van keyname, modifiers en value = een list\n (eigenlijk set) van commandonamen (meestal 1?)\n een mapping met key = commandonaam en value = een list van tuples van\n naam, waardebereik en omschrijving\n een mapping met key = categorie en value = een list van commandonamen\n '
newsoup = soup.select('div[class="dchelpage"]')[0]
for div in newsoup.children:
if ((not div.name) or (div.name != 'div')):
continue
for hx in div.children:
if ((not hx.name) or (hx.name != 'h2')):
continue
for a in hx.children:
if (a.name == 'a'):
cat = a['name']
if cat.startswith('cat'):
cat = cat[3:]
(cd, da, cp, cl) = analyze_keydefs(div)
self.cmddict.update(cd)
self.defaults.update(da)
self.params.update(cp)
self.catdict[cat] = cl | def get_cmddict(self, soup):
'build dictionary of commands with descriptions\n\n input: met BeautifulSoup ingelezen data volgens pad zoals opgegeven in DC_CMDS\n (cmds.html in de docs directory)\n resultaat: een mapping met key = commandonaam en value = een tekst (de omschrijving)\n een mapping met key = een tuple van keyname, modifiers en value = een list\n (eigenlijk set) van commandonamen (meestal 1?)\n een mapping met key = commandonaam en value = een list van tuples van\n naam, waardebereik en omschrijving\n een mapping met key = categorie en value = een list van commandonamen\n '
newsoup = soup.select('div[class="dchelpage"]')[0]
for div in newsoup.children:
if ((not div.name) or (div.name != 'div')):
continue
for hx in div.children:
if ((not hx.name) or (hx.name != 'h2')):
continue
for a in hx.children:
if (a.name == 'a'):
cat = a['name']
if cat.startswith('cat'):
cat = cat[3:]
(cd, da, cp, cl) = analyze_keydefs(div)
self.cmddict.update(cd)
self.defaults.update(da)
self.params.update(cp)
self.catdict[cat] = cl<|docstring|>build dictionary of commands with descriptions
input: met BeautifulSoup ingelezen data volgens pad zoals opgegeven in DC_CMDS
(cmds.html in de docs directory)
resultaat: een mapping met key = commandonaam en value = een tekst (de omschrijving)
een mapping met key = een tuple van keyname, modifiers en value = een list
(eigenlijk set) van commandonamen (meestal 1?)
een mapping met key = commandonaam en value = een list van tuples van
naam, waardebereik en omschrijving
een mapping met key = categorie en value = een list van commandonamen<|endoftext|> |
46f6bd166dfceb620b65efa9b6e569cd96d5be4f088ae9717dd45433d87f9720 | def get_toolbarcmds(self, data):
'lees de zelfgedefinieerde toolbar items\n\n input:\n met ElementTree ingelezen data volgens pad zoals opgegeven in DC_SETT\n (doublecmd.xml in de settings directory)\n resultaat:\n een mapping met key = de parameter voor cm_ToolBarCmd\n om de details in value te kunnen koppelen aan de betreffende keyboard shortcuts\n '
root = data.getroot()
for toolbar in list(root.find('Toolbars')):
for row in list(toolbar):
for item in row.findall('Program'):
key = item.find('ID').text
desc = item.find('Hint').text
cmd = item.find('Command').text
parm = item.find('Params').text
self.tbcmddict[key] = (desc, cmd, parm) | lees de zelfgedefinieerde toolbar items
input:
met ElementTree ingelezen data volgens pad zoals opgegeven in DC_SETT
(doublecmd.xml in de settings directory)
resultaat:
een mapping met key = de parameter voor cm_ToolBarCmd
om de details in value te kunnen koppelen aan de betreffende keyboard shortcuts | plugin_examples/dckeys.py | get_toolbarcmds | albertvisser/hotkeys | 1 | python | def get_toolbarcmds(self, data):
'lees de zelfgedefinieerde toolbar items\n\n input:\n met ElementTree ingelezen data volgens pad zoals opgegeven in DC_SETT\n (doublecmd.xml in de settings directory)\n resultaat:\n een mapping met key = de parameter voor cm_ToolBarCmd\n om de details in value te kunnen koppelen aan de betreffende keyboard shortcuts\n '
root = data.getroot()
for toolbar in list(root.find('Toolbars')):
for row in list(toolbar):
for item in row.findall('Program'):
key = item.find('ID').text
desc = item.find('Hint').text
cmd = item.find('Command').text
parm = item.find('Params').text
self.tbcmddict[key] = (desc, cmd, parm) | def get_toolbarcmds(self, data):
'lees de zelfgedefinieerde toolbar items\n\n input:\n met ElementTree ingelezen data volgens pad zoals opgegeven in DC_SETT\n (doublecmd.xml in de settings directory)\n resultaat:\n een mapping met key = de parameter voor cm_ToolBarCmd\n om de details in value te kunnen koppelen aan de betreffende keyboard shortcuts\n '
root = data.getroot()
for toolbar in list(root.find('Toolbars')):
for row in list(toolbar):
for item in row.findall('Program'):
key = item.find('ID').text
desc = item.find('Hint').text
cmd = item.find('Command').text
parm = item.find('Params').text
self.tbcmddict[key] = (desc, cmd, parm)<|docstring|>lees de zelfgedefinieerde toolbar items
input:
met ElementTree ingelezen data volgens pad zoals opgegeven in DC_SETT
(doublecmd.xml in de settings directory)
resultaat:
een mapping met key = de parameter voor cm_ToolBarCmd
om de details in value te kunnen koppelen aan de betreffende keyboard shortcuts<|endoftext|> |
7f3f786a0e675c5be8d9e2a19ff9e7dcc055e49a6a5cc16eff82113441312e23 | def add_missing_descriptions(self, desclist):
'update missing descriptions in cmddict\n '
if self.showinfo:
self.page.dialog_data = {'descdict': dict(desclist), 'cmddict': self.cmddict}
if show_dialog(self.page, DcCompleteDialog):
desclist = list(self.page.dialog_data.items())
for (command, description) in desclist:
if ((command not in self.cmddict) or (not self.cmddict[command])):
self.cmddict[command] = description
self.desclist = desclist
return desclist | update missing descriptions in cmddict | plugin_examples/dckeys.py | add_missing_descriptions | albertvisser/hotkeys | 1 | python | def add_missing_descriptions(self, desclist):
'\n '
if self.showinfo:
self.page.dialog_data = {'descdict': dict(desclist), 'cmddict': self.cmddict}
if show_dialog(self.page, DcCompleteDialog):
desclist = list(self.page.dialog_data.items())
for (command, description) in desclist:
if ((command not in self.cmddict) or (not self.cmddict[command])):
self.cmddict[command] = description
self.desclist = desclist
return desclist | def add_missing_descriptions(self, desclist):
'\n '
if self.showinfo:
self.page.dialog_data = {'descdict': dict(desclist), 'cmddict': self.cmddict}
if show_dialog(self.page, DcCompleteDialog):
desclist = list(self.page.dialog_data.items())
for (command, description) in desclist:
if ((command not in self.cmddict) or (not self.cmddict[command])):
self.cmddict[command] = description
self.desclist = desclist
return desclist<|docstring|>update missing descriptions in cmddict<|endoftext|> |
78553593c0dc7be4b1a85fc6122ef8b2718cd26acd0ce06cc7d7da8d3bf8bc98 | def assemble_shortcuts(self):
"automatische mapping van bekende keycombo's op commando's d.m.v. gegevens uit de\n dictionaries met omschrijvingen\n\n bepaal tegelijkertijd of dit een standaard definitie is of een aangepaste\n\n en tenslotte: ombouwen naar een dictionary die in het csv bestand kan worden ingelezen\n "
for (key_in_context, definitions_dict) in self.definedkeys.items():
definitions_dict['standard'] = ''
if (self.defaults.get(key_in_context[:2]) == {definitions_dict['cmd']}):
definitions_dict['standard'] = 'S'
stdkeys_oms = self.stdkeys.get(key_in_context, '')
if stdkeys_oms:
definitions_dict['desc'] = stdkeys_oms
definitions_dict['standard'] = 'S'
if (definitions_dict['cmd'] == 'cm_ExecuteToolbarItem'):
definitions_dict['standard'] = 'U'
itemid = definitions_dict['param'].split('=', 1)[1]
(oms, cmd, parm) = self.tbcmddict[itemid]
definitions_dict['desc'] = '{} ({} {})'.format(oms, cmd, parm)
else:
cmddict_oms = self.cmddict.get(definitions_dict['cmd'], None)
if (cmddict_oms is None):
self.unlisted_cmds.append(definitions_dict['cmd'])
cmddict_oms = ''
if (cmddict_oms and (not stdkeys_oms)):
definitions_dict['desc'] = cmddict_oms
elif (not cmddict_oms):
self.cmddict[definitions_dict['cmd']] = stdkeys_oms
elif (cmddict_oms and (cmddict_oms != stdkeys_oms)):
self.tobematched[key_in_context] = {'stdkeys_oms': stdkeys_oms, 'cmddict_oms': cmddict_oms}
definitions_dict['desc'] = cmddict_oms
self.shortcuts[key_in_context] = definitions_dict
for (stdkey, value) in self.stdkeys.items():
if (stdkey not in self.shortcuts):
self.shortcuts[stdkey] = {'cmd': '', 'param': '', 'ctrl': '', 'standard': 'S', 'desc': value} | automatische mapping van bekende keycombo's op commando's d.m.v. gegevens uit de
dictionaries met omschrijvingen
bepaal tegelijkertijd of dit een standaard definitie is of een aangepaste
en tenslotte: ombouwen naar een dictionary die in het csv bestand kan worden ingelezen | plugin_examples/dckeys.py | assemble_shortcuts | albertvisser/hotkeys | 1 | python | def assemble_shortcuts(self):
"automatische mapping van bekende keycombo's op commando's d.m.v. gegevens uit de\n dictionaries met omschrijvingen\n\n bepaal tegelijkertijd of dit een standaard definitie is of een aangepaste\n\n en tenslotte: ombouwen naar een dictionary die in het csv bestand kan worden ingelezen\n "
for (key_in_context, definitions_dict) in self.definedkeys.items():
definitions_dict['standard'] = ''
if (self.defaults.get(key_in_context[:2]) == {definitions_dict['cmd']}):
definitions_dict['standard'] = 'S'
stdkeys_oms = self.stdkeys.get(key_in_context, '')
if stdkeys_oms:
definitions_dict['desc'] = stdkeys_oms
definitions_dict['standard'] = 'S'
if (definitions_dict['cmd'] == 'cm_ExecuteToolbarItem'):
definitions_dict['standard'] = 'U'
itemid = definitions_dict['param'].split('=', 1)[1]
(oms, cmd, parm) = self.tbcmddict[itemid]
definitions_dict['desc'] = '{} ({} {})'.format(oms, cmd, parm)
else:
cmddict_oms = self.cmddict.get(definitions_dict['cmd'], None)
if (cmddict_oms is None):
self.unlisted_cmds.append(definitions_dict['cmd'])
cmddict_oms = ''
if (cmddict_oms and (not stdkeys_oms)):
definitions_dict['desc'] = cmddict_oms
elif (not cmddict_oms):
self.cmddict[definitions_dict['cmd']] = stdkeys_oms
elif (cmddict_oms and (cmddict_oms != stdkeys_oms)):
self.tobematched[key_in_context] = {'stdkeys_oms': stdkeys_oms, 'cmddict_oms': cmddict_oms}
definitions_dict['desc'] = cmddict_oms
self.shortcuts[key_in_context] = definitions_dict
for (stdkey, value) in self.stdkeys.items():
if (stdkey not in self.shortcuts):
self.shortcuts[stdkey] = {'cmd': '', 'param': '', 'ctrl': '', 'standard': 'S', 'desc': value} | def assemble_shortcuts(self):
"automatische mapping van bekende keycombo's op commando's d.m.v. gegevens uit de\n dictionaries met omschrijvingen\n\n bepaal tegelijkertijd of dit een standaard definitie is of een aangepaste\n\n en tenslotte: ombouwen naar een dictionary die in het csv bestand kan worden ingelezen\n "
for (key_in_context, definitions_dict) in self.definedkeys.items():
definitions_dict['standard'] = ''
if (self.defaults.get(key_in_context[:2]) == {definitions_dict['cmd']}):
definitions_dict['standard'] = 'S'
stdkeys_oms = self.stdkeys.get(key_in_context, '')
if stdkeys_oms:
definitions_dict['desc'] = stdkeys_oms
definitions_dict['standard'] = 'S'
if (definitions_dict['cmd'] == 'cm_ExecuteToolbarItem'):
definitions_dict['standard'] = 'U'
itemid = definitions_dict['param'].split('=', 1)[1]
(oms, cmd, parm) = self.tbcmddict[itemid]
definitions_dict['desc'] = '{} ({} {})'.format(oms, cmd, parm)
else:
cmddict_oms = self.cmddict.get(definitions_dict['cmd'], None)
if (cmddict_oms is None):
self.unlisted_cmds.append(definitions_dict['cmd'])
cmddict_oms = ''
if (cmddict_oms and (not stdkeys_oms)):
definitions_dict['desc'] = cmddict_oms
elif (not cmddict_oms):
self.cmddict[definitions_dict['cmd']] = stdkeys_oms
elif (cmddict_oms and (cmddict_oms != stdkeys_oms)):
self.tobematched[key_in_context] = {'stdkeys_oms': stdkeys_oms, 'cmddict_oms': cmddict_oms}
definitions_dict['desc'] = cmddict_oms
self.shortcuts[key_in_context] = definitions_dict
for (stdkey, value) in self.stdkeys.items():
if (stdkey not in self.shortcuts):
self.shortcuts[stdkey] = {'cmd': '', 'param': '', 'ctrl': '', 'standard': 'S', 'desc': value}<|docstring|>automatische mapping van bekende keycombo's op commando's d.m.v. gegevens uit de
dictionaries met omschrijvingen
bepaal tegelijkertijd of dit een standaard definitie is of een aangepaste
en tenslotte: ombouwen naar een dictionary die in het csv bestand kan worden ingelezen<|endoftext|> |
9caf15c8ca413a2f0abf279918c1b81da8b1c328096b647dd8ce899742dc6d48 | @click.group()
def storage():
'Manage an external storage.' | Manage an external storage. | renku/cli/storage.py | storage | cyberhck/renku-python | 0 | python | @click.group()
def storage():
| @click.group()
def storage():
<|docstring|>Manage an external storage.<|endoftext|> |
d21a914dd3fadd1c0cf409bd50685380d63d58b19f2375ab6fa79250ee2ab6ed | @storage.command()
@click.argument('paths', type=click.Path(exists=True, dir_okay=True), nargs=(- 1), required=True)
@pass_local_client
def pull(client, paths):
'Pull the specified paths from external storage.'
client.pull_paths_from_storage(*paths) | Pull the specified paths from external storage. | renku/cli/storage.py | pull | cyberhck/renku-python | 0 | python | @storage.command()
@click.argument('paths', type=click.Path(exists=True, dir_okay=True), nargs=(- 1), required=True)
@pass_local_client
def pull(client, paths):
client.pull_paths_from_storage(*paths) | @storage.command()
@click.argument('paths', type=click.Path(exists=True, dir_okay=True), nargs=(- 1), required=True)
@pass_local_client
def pull(client, paths):
client.pull_paths_from_storage(*paths)<|docstring|>Pull the specified paths from external storage.<|endoftext|> |
989cea9d4c6f7e6399f7b7560b6163243c65c8a32c9c4bd1ce141a5b7b931203 | def apply_selection(self, index, view, is_selected):
'Applies the selection to the view. This is called internally when\n a view is displayed and it needs to be shown as selected or as not\n selected.\n\n It is called when :meth:`select_node` or :meth:`deselect_node` is\n called or when a view needs to be refreshed. Its function is purely to\n update the view to reflect the selection state. So the function may be\n called multiple times even if the selection state may not have changed.\n\n If the view is a instance of\n :class:`~kivy.uix.recycleview.views.RecycleDataViewBehavior`, its\n :meth:`~kivy.uix.recycleview.views.RecycleDataViewBehavior.apply_selection` method will be called every time the view needs to refresh\n the selection state. Otherwise, the this method is responsible\n for applying the selection.\n\n :Parameters:\n\n `index`: int\n The index of the data item that is associated with the view.\n `view`: widget\n The widget that is the view of this data item.\n `is_selected`: bool\n Whether the item is selected.\n '
viewclass = view.__class__
if (viewclass not in _view_base_cache):
_view_base_cache[viewclass] = isinstance(view, RecycleDataViewBehavior)
if _view_base_cache[viewclass]:
view.apply_selection(self.recycleview, index, is_selected) | Applies the selection to the view. This is called internally when
a view is displayed and it needs to be shown as selected or as not
selected.
It is called when :meth:`select_node` or :meth:`deselect_node` is
called or when a view needs to be refreshed. Its function is purely to
update the view to reflect the selection state. So the function may be
called multiple times even if the selection state may not have changed.
If the view is a instance of
:class:`~kivy.uix.recycleview.views.RecycleDataViewBehavior`, its
:meth:`~kivy.uix.recycleview.views.RecycleDataViewBehavior.apply_selection` method will be called every time the view needs to refresh
the selection state. Otherwise, the this method is responsible
for applying the selection.
:Parameters:
`index`: int
The index of the data item that is associated with the view.
`view`: widget
The widget that is the view of this data item.
`is_selected`: bool
Whether the item is selected. | venv/Lib/site-packages/kivy/uix/recycleview/layout.py | apply_selection | michael-fourie/notebot | 13,889 | python | def apply_selection(self, index, view, is_selected):
'Applies the selection to the view. This is called internally when\n a view is displayed and it needs to be shown as selected or as not\n selected.\n\n It is called when :meth:`select_node` or :meth:`deselect_node` is\n called or when a view needs to be refreshed. Its function is purely to\n update the view to reflect the selection state. So the function may be\n called multiple times even if the selection state may not have changed.\n\n If the view is a instance of\n :class:`~kivy.uix.recycleview.views.RecycleDataViewBehavior`, its\n :meth:`~kivy.uix.recycleview.views.RecycleDataViewBehavior.apply_selection` method will be called every time the view needs to refresh\n the selection state. Otherwise, the this method is responsible\n for applying the selection.\n\n :Parameters:\n\n `index`: int\n The index of the data item that is associated with the view.\n `view`: widget\n The widget that is the view of this data item.\n `is_selected`: bool\n Whether the item is selected.\n '
viewclass = view.__class__
if (viewclass not in _view_base_cache):
_view_base_cache[viewclass] = isinstance(view, RecycleDataViewBehavior)
if _view_base_cache[viewclass]:
view.apply_selection(self.recycleview, index, is_selected) | def apply_selection(self, index, view, is_selected):
'Applies the selection to the view. This is called internally when\n a view is displayed and it needs to be shown as selected or as not\n selected.\n\n It is called when :meth:`select_node` or :meth:`deselect_node` is\n called or when a view needs to be refreshed. Its function is purely to\n update the view to reflect the selection state. So the function may be\n called multiple times even if the selection state may not have changed.\n\n If the view is a instance of\n :class:`~kivy.uix.recycleview.views.RecycleDataViewBehavior`, its\n :meth:`~kivy.uix.recycleview.views.RecycleDataViewBehavior.apply_selection` method will be called every time the view needs to refresh\n the selection state. Otherwise, the this method is responsible\n for applying the selection.\n\n :Parameters:\n\n `index`: int\n The index of the data item that is associated with the view.\n `view`: widget\n The widget that is the view of this data item.\n `is_selected`: bool\n Whether the item is selected.\n '
viewclass = view.__class__
if (viewclass not in _view_base_cache):
_view_base_cache[viewclass] = isinstance(view, RecycleDataViewBehavior)
if _view_base_cache[viewclass]:
view.apply_selection(self.recycleview, index, is_selected)<|docstring|>Applies the selection to the view. This is called internally when
a view is displayed and it needs to be shown as selected or as not
selected.
It is called when :meth:`select_node` or :meth:`deselect_node` is
called or when a view needs to be refreshed. Its function is purely to
update the view to reflect the selection state. So the function may be
called multiple times even if the selection state may not have changed.
If the view is a instance of
:class:`~kivy.uix.recycleview.views.RecycleDataViewBehavior`, its
:meth:`~kivy.uix.recycleview.views.RecycleDataViewBehavior.apply_selection` method will be called every time the view needs to refresh
the selection state. Otherwise, the this method is responsible
for applying the selection.
:Parameters:
`index`: int
The index of the data item that is associated with the view.
`view`: widget
The widget that is the view of this data item.
`is_selected`: bool
Whether the item is selected.<|endoftext|> |
665d1051c6a330f64f39b9fdf4569e94e76e5a0116e12aafc1218f853959f7a0 | def compute_visible_views(self, data, viewport):
'`viewport` is in coordinates of the layout manager.\n '
pass | `viewport` is in coordinates of the layout manager. | venv/Lib/site-packages/kivy/uix/recycleview/layout.py | compute_visible_views | michael-fourie/notebot | 13,889 | python | def compute_visible_views(self, data, viewport):
'\n '
pass | def compute_visible_views(self, data, viewport):
'\n '
pass<|docstring|>`viewport` is in coordinates of the layout manager.<|endoftext|> |
c179be26dd1d79c7a4956d97e846f4d1dc6ecde6bfba7caa4a8c7a09dab7f368 | def set_visible_views(self, indices, data, viewport):
'`viewport` is in coordinates of the layout manager.\n '
pass | `viewport` is in coordinates of the layout manager. | venv/Lib/site-packages/kivy/uix/recycleview/layout.py | set_visible_views | michael-fourie/notebot | 13,889 | python | def set_visible_views(self, indices, data, viewport):
'\n '
pass | def set_visible_views(self, indices, data, viewport):
'\n '
pass<|docstring|>`viewport` is in coordinates of the layout manager.<|endoftext|> |
0b3f8fd968ced70aa43aed0f9703a51589bbbb6dde465d96494ca92a923bdc10 | def refresh_view_layout(self, index, layout, view, viewport):
'`See :meth:`~kivy.uix.recycleview.views.RecycleDataAdapter.refresh_view_layout`.\n '
self.recycleview.view_adapter.refresh_view_layout(index, layout, view, viewport) | `See :meth:`~kivy.uix.recycleview.views.RecycleDataAdapter.refresh_view_layout`. | venv/Lib/site-packages/kivy/uix/recycleview/layout.py | refresh_view_layout | michael-fourie/notebot | 13,889 | python | def refresh_view_layout(self, index, layout, view, viewport):
'\n '
self.recycleview.view_adapter.refresh_view_layout(index, layout, view, viewport) | def refresh_view_layout(self, index, layout, view, viewport):
'\n '
self.recycleview.view_adapter.refresh_view_layout(index, layout, view, viewport)<|docstring|>`See :meth:`~kivy.uix.recycleview.views.RecycleDataAdapter.refresh_view_layout`.<|endoftext|> |
ee4fb98b0e6b37e2b3fbb662cba9384d761754320f8eeefcdd66ec5c66a17dbb | def get_view_index_at(self, pos):
'Return the view `index` on which position, `pos`, falls.\n\n `pos` is in coordinates of the layout manager.\n '
pass | Return the view `index` on which position, `pos`, falls.
`pos` is in coordinates of the layout manager. | venv/Lib/site-packages/kivy/uix/recycleview/layout.py | get_view_index_at | michael-fourie/notebot | 13,889 | python | def get_view_index_at(self, pos):
'Return the view `index` on which position, `pos`, falls.\n\n `pos` is in coordinates of the layout manager.\n '
pass | def get_view_index_at(self, pos):
'Return the view `index` on which position, `pos`, falls.\n\n `pos` is in coordinates of the layout manager.\n '
pass<|docstring|>Return the view `index` on which position, `pos`, falls.
`pos` is in coordinates of the layout manager.<|endoftext|> |
a817f4628c8b4a794ccfac80c2db648e780c8221965e31a26ff70f5a1d5b73bf | def goto_view(self, index):
'Moves the views so that the view corresponding to `index` is\n visible.\n '
pass | Moves the views so that the view corresponding to `index` is
visible. | venv/Lib/site-packages/kivy/uix/recycleview/layout.py | goto_view | michael-fourie/notebot | 13,889 | python | def goto_view(self, index):
'Moves the views so that the view corresponding to `index` is\n visible.\n '
pass | def goto_view(self, index):
'Moves the views so that the view corresponding to `index` is\n visible.\n '
pass<|docstring|>Moves the views so that the view corresponding to `index` is
visible.<|endoftext|> |
95a9b61235d830997f159e7d18874786943c33f460de8d402dd7910bf0cfafb6 | def __init__(self, tags, andor=SEARCH_OR):
'Initializes an EntryMatcherTag\n\n Arguments:\n andor: Determines if ALL conditions must be met or ANY condition must be\n met. Should be SEARCH_AND or SEARCH_OR\n tags: Set of tags to match (can be primary or secondary.)\n '
self.tags = tags
self.andor = andor | Initializes an EntryMatcherTag
Arguments:
andor: Determines if ALL conditions must be met or ANY condition must be
met. Should be SEARCH_AND or SEARCH_OR
tags: Set of tags to match (can be primary or secondary.) | lib/sjb/cs/classes.py | __init__ | sbryant1014/sjb_cheatsheet | 0 | python | def __init__(self, tags, andor=SEARCH_OR):
'Initializes an EntryMatcherTag\n\n Arguments:\n andor: Determines if ALL conditions must be met or ANY condition must be\n met. Should be SEARCH_AND or SEARCH_OR\n tags: Set of tags to match (can be primary or secondary.)\n '
self.tags = tags
self.andor = andor | def __init__(self, tags, andor=SEARCH_OR):
'Initializes an EntryMatcherTag\n\n Arguments:\n andor: Determines if ALL conditions must be met or ANY condition must be\n met. Should be SEARCH_AND or SEARCH_OR\n tags: Set of tags to match (can be primary or secondary.)\n '
self.tags = tags
self.andor = andor<|docstring|>Initializes an EntryMatcherTag
Arguments:
andor: Determines if ALL conditions must be met or ANY condition must be
met. Should be SEARCH_AND or SEARCH_OR
tags: Set of tags to match (can be primary or secondary.)<|endoftext|> |
d9a568e842015892b0c0f9db13d8ec0fceeb1b58de92b505cc71dd6789efacf1 | def matches(self, item):
'Returns true if the item has any/all of the specified tags.'
if (not super().matches(item)):
return False
if (not self.tags):
return True
if (self.andor is SEARCH_AND):
for tag in self.tags:
if ((tag != item.primary) and (tag not in item.tags)):
return False
return True
elif (self.andor is SEARCH_OR):
for tag in self.tags:
if ((tag == item.primary) or (tag in item.tags)):
return True
return False
else:
raise sjb.common.base.IllegalStateError('Entry.matches', ('invalid andor argument ' + str(self.andor))) | Returns true if the item has any/all of the specified tags. | lib/sjb/cs/classes.py | matches | sbryant1014/sjb_cheatsheet | 0 | python | def matches(self, item):
if (not super().matches(item)):
return False
if (not self.tags):
return True
if (self.andor is SEARCH_AND):
for tag in self.tags:
if ((tag != item.primary) and (tag not in item.tags)):
return False
return True
elif (self.andor is SEARCH_OR):
for tag in self.tags:
if ((tag == item.primary) or (tag in item.tags)):
return True
return False
else:
raise sjb.common.base.IllegalStateError('Entry.matches', ('invalid andor argument ' + str(self.andor))) | def matches(self, item):
if (not super().matches(item)):
return False
if (not self.tags):
return True
if (self.andor is SEARCH_AND):
for tag in self.tags:
if ((tag != item.primary) and (tag not in item.tags)):
return False
return True
elif (self.andor is SEARCH_OR):
for tag in self.tags:
if ((tag == item.primary) or (tag in item.tags)):
return True
return False
else:
raise sjb.common.base.IllegalStateError('Entry.matches', ('invalid andor argument ' + str(self.andor)))<|docstring|>Returns true if the item has any/all of the specified tags.<|endoftext|> |
a1f2a8b1b46e3ce642c7d77899937b3d5224cd87b06b105e4289eb62f44d3faf | def __eq__(self, other):
'Returns true if self and other have identical fields.'
if (not super().__eq__(other)):
return False
if (self.clue != other.clue):
return False
if (self.primary != other.primary):
return False
if (self.tags != other.tags):
return False
if (self.answer != other.answer):
return False
return True | Returns true if self and other have identical fields. | lib/sjb/cs/classes.py | __eq__ | sbryant1014/sjb_cheatsheet | 0 | python | def __eq__(self, other):
if (not super().__eq__(other)):
return False
if (self.clue != other.clue):
return False
if (self.primary != other.primary):
return False
if (self.tags != other.tags):
return False
if (self.answer != other.answer):
return False
return True | def __eq__(self, other):
if (not super().__eq__(other)):
return False
if (self.clue != other.clue):
return False
if (self.primary != other.primary):
return False
if (self.tags != other.tags):
return False
if (self.answer != other.answer):
return False
return True<|docstring|>Returns true if self and other have identical fields.<|endoftext|> |
9ecab620a82ae9151777eb3c0b8aa8b92e7aa4949c73cb16e82b4da3ed66dc6c | def validate(self):
'Validates that the values of this item are sensible.\n\n This method should be called twice: The first time at the end of the\n initialization code to make sure the user is not misusing the constructor.\n The second time should be before saving to a database to make sure that\n manipulations made to this item after initialization were valid.\n\n Raises:\n sjb.common.base.ValidationError: If validation fails\n '
super().validate()
if ((not self.clue) or (not isinstance(self.clue, str))):
raise sjb.common.base.ValidationError(('Bad entry clue: ' + str(self.clue)))
if ((not self.primary) or (not isinstance(self.primary, str))):
raise sjb.common.base.ValidationError(('Bad primary: ' + str(self.primary)))
if ((not self.answer) or (not isinstance(self.answer, str))):
raise sjb.common.base.ValidationError(('Bad answer: ' + str(self.answer)))
if (not isinstance(self.tags, set)):
raise sjb.common.base.ValidationError(('Bad tags: ' + str(self.tags)))
if ((self.oid is not None) and (not isinstance(self.oid, int))):
raise sjb.common.base.ValidationError(('Bad oid: ' + str(self.oid))) | Validates that the values of this item are sensible.
This method should be called twice: The first time at the end of the
initialization code to make sure the user is not misusing the constructor.
The second time should be before saving to a database to make sure that
manipulations made to this item after initialization were valid.
Raises:
sjb.common.base.ValidationError: If validation fails | lib/sjb/cs/classes.py | validate | sbryant1014/sjb_cheatsheet | 0 | python | def validate(self):
'Validates that the values of this item are sensible.\n\n This method should be called twice: The first time at the end of the\n initialization code to make sure the user is not misusing the constructor.\n The second time should be before saving to a database to make sure that\n manipulations made to this item after initialization were valid.\n\n Raises:\n sjb.common.base.ValidationError: If validation fails\n '
super().validate()
if ((not self.clue) or (not isinstance(self.clue, str))):
raise sjb.common.base.ValidationError(('Bad entry clue: ' + str(self.clue)))
if ((not self.primary) or (not isinstance(self.primary, str))):
raise sjb.common.base.ValidationError(('Bad primary: ' + str(self.primary)))
if ((not self.answer) or (not isinstance(self.answer, str))):
raise sjb.common.base.ValidationError(('Bad answer: ' + str(self.answer)))
if (not isinstance(self.tags, set)):
raise sjb.common.base.ValidationError(('Bad tags: ' + str(self.tags)))
if ((self.oid is not None) and (not isinstance(self.oid, int))):
raise sjb.common.base.ValidationError(('Bad oid: ' + str(self.oid))) | def validate(self):
'Validates that the values of this item are sensible.\n\n This method should be called twice: The first time at the end of the\n initialization code to make sure the user is not misusing the constructor.\n The second time should be before saving to a database to make sure that\n manipulations made to this item after initialization were valid.\n\n Raises:\n sjb.common.base.ValidationError: If validation fails\n '
super().validate()
if ((not self.clue) or (not isinstance(self.clue, str))):
raise sjb.common.base.ValidationError(('Bad entry clue: ' + str(self.clue)))
if ((not self.primary) or (not isinstance(self.primary, str))):
raise sjb.common.base.ValidationError(('Bad primary: ' + str(self.primary)))
if ((not self.answer) or (not isinstance(self.answer, str))):
raise sjb.common.base.ValidationError(('Bad answer: ' + str(self.answer)))
if (not isinstance(self.tags, set)):
raise sjb.common.base.ValidationError(('Bad tags: ' + str(self.tags)))
if ((self.oid is not None) and (not isinstance(self.oid, int))):
raise sjb.common.base.ValidationError(('Bad oid: ' + str(self.oid)))<|docstring|>Validates that the values of this item are sensible.
This method should be called twice: The first time at the end of the
initialization code to make sure the user is not misusing the constructor.
The second time should be before saving to a database to make sure that
manipulations made to this item after initialization were valid.
Raises:
sjb.common.base.ValidationError: If validation fails<|endoftext|> |
a4fd36a400d5bd6ff93bcb99699d65560d60812406ca5e5e057af013ef97a15c | @property
def tag_set(self):
'set(str): TODO: Set of tags in this CheatSheet.'
return self._tag_set | set(str): TODO: Set of tags in this CheatSheet. | lib/sjb/cs/classes.py | tag_set | sbryant1014/sjb_cheatsheet | 0 | python | @property
def tag_set(self):
return self._tag_set | @property
def tag_set(self):
return self._tag_set<|docstring|>set(str): TODO: Set of tags in this CheatSheet.<|endoftext|> |
b48b28ec9e5b7f9da6925c3ce2044985af51cb95457c683c896bfdacd1721d40 | @property
def primary_map(self):
'TODO: Dict mapping primary tags to lists of entries.'
return self._primary_map | TODO: Dict mapping primary tags to lists of entries. | lib/sjb/cs/classes.py | primary_map | sbryant1014/sjb_cheatsheet | 0 | python | @property
def primary_map(self):
return self._primary_map | @property
def primary_map(self):
return self._primary_map<|docstring|>TODO: Dict mapping primary tags to lists of entries.<|endoftext|> |
a4a042ef2a4b75b4acb30814a2f04f24714ec0f130eeb89ce894d0d39354b4e5 | def add_item(self, item, initial_load=False):
'Adds an entry to this cheatsheet.\n\n Args:\n item: The Entry object to add. It should only have an oid set if it was\n loaded from cheat sheet file.\n initial_load: Indicates that this entry object is from the cheat sheet\n file and is not a new addition to the cheat sheet.\n\n Raises:\n cheatsheet.base_classes.IllegalStateError: If initial_load is False but\n entry has an oid OR if initial_load is True but entry lacks an oid.\n '
super().add_item(item, initial_load=initial_load)
self._update_object_maps(item) | Adds an entry to this cheatsheet.
Args:
item: The Entry object to add. It should only have an oid set if it was
loaded from cheat sheet file.
initial_load: Indicates that this entry object is from the cheat sheet
file and is not a new addition to the cheat sheet.
Raises:
cheatsheet.base_classes.IllegalStateError: If initial_load is False but
entry has an oid OR if initial_load is True but entry lacks an oid. | lib/sjb/cs/classes.py | add_item | sbryant1014/sjb_cheatsheet | 0 | python | def add_item(self, item, initial_load=False):
'Adds an entry to this cheatsheet.\n\n Args:\n item: The Entry object to add. It should only have an oid set if it was\n loaded from cheat sheet file.\n initial_load: Indicates that this entry object is from the cheat sheet\n file and is not a new addition to the cheat sheet.\n\n Raises:\n cheatsheet.base_classes.IllegalStateError: If initial_load is False but\n entry has an oid OR if initial_load is True but entry lacks an oid.\n '
super().add_item(item, initial_load=initial_load)
self._update_object_maps(item) | def add_item(self, item, initial_load=False):
'Adds an entry to this cheatsheet.\n\n Args:\n item: The Entry object to add. It should only have an oid set if it was\n loaded from cheat sheet file.\n initial_load: Indicates that this entry object is from the cheat sheet\n file and is not a new addition to the cheat sheet.\n\n Raises:\n cheatsheet.base_classes.IllegalStateError: If initial_load is False but\n entry has an oid OR if initial_load is True but entry lacks an oid.\n '
super().add_item(item, initial_load=initial_load)
self._update_object_maps(item)<|docstring|>Adds an entry to this cheatsheet.
Args:
item: The Entry object to add. It should only have an oid set if it was
loaded from cheat sheet file.
initial_load: Indicates that this entry object is from the cheat sheet
file and is not a new addition to the cheat sheet.
Raises:
cheatsheet.base_classes.IllegalStateError: If initial_load is False but
entry has an oid OR if initial_load is True but entry lacks an oid.<|endoftext|> |
89390792f915f42c3a19c28c1bb63ccf14b7c26509e5d6c64a3ea4e736bac402 | def remove_item(self, oid):
'Removes the entry with the specified oid and updates meta data.\n\n Returns:\n Entry: The removed entry object.\n\n Raises:\n sjb.common.base.InvalidIDError: If no item has a matching oid.\n '
removed = super().remove_item(oid)
self._recompute_object_maps()
return removed | Removes the entry with the specified oid and updates meta data.
Returns:
Entry: The removed entry object.
Raises:
sjb.common.base.InvalidIDError: If no item has a matching oid. | lib/sjb/cs/classes.py | remove_item | sbryant1014/sjb_cheatsheet | 0 | python | def remove_item(self, oid):
'Removes the entry with the specified oid and updates meta data.\n\n Returns:\n Entry: The removed entry object.\n\n Raises:\n sjb.common.base.InvalidIDError: If no item has a matching oid.\n '
removed = super().remove_item(oid)
self._recompute_object_maps()
return removed | def remove_item(self, oid):
'Removes the entry with the specified oid and updates meta data.\n\n Returns:\n Entry: The removed entry object.\n\n Raises:\n sjb.common.base.InvalidIDError: If no item has a matching oid.\n '
removed = super().remove_item(oid)
self._recompute_object_maps()
return removed<|docstring|>Removes the entry with the specified oid and updates meta data.
Returns:
Entry: The removed entry object.
Raises:
sjb.common.base.InvalidIDError: If no item has a matching oid.<|endoftext|> |
1665634e1b60e987dc4ccc748ae543bd11d77ce835a16aafdce051e0e18bc821 | def update_item(self, oid, clue=None, answer=None, primary=None, tags=None):
'Updates entry given by oid and returns the result.\n\n Only arguments that are not None will be updated. If no entry is found at\n that oid, an Error is raised. The meta objects are updated to reflect the\n new contents of the entry.\n\n Returns:\n Entry: The newly updated entry object.\n\n Raises:\n sjb.common.base.InvalidIDError: If no item has a matching oid.\n '
item = self.get_item(oid)
original_item = copy.deepcopy(item)
item.primary = (primary if (primary is not None) else item.primary)
item.clue = (clue if (clue is not None) else item.clue)
item.answer = (answer if (answer is not None) else item.answer)
item.tags = (tags if (tags is not None) else item.tags)
if (original_item != item):
self._mark_modified()
self._recompute_object_maps()
return item | Updates entry given by oid and returns the result.
Only arguments that are not None will be updated. If no entry is found at
that oid, an Error is raised. The meta objects are updated to reflect the
new contents of the entry.
Returns:
Entry: The newly updated entry object.
Raises:
sjb.common.base.InvalidIDError: If no item has a matching oid. | lib/sjb/cs/classes.py | update_item | sbryant1014/sjb_cheatsheet | 0 | python | def update_item(self, oid, clue=None, answer=None, primary=None, tags=None):
'Updates entry given by oid and returns the result.\n\n Only arguments that are not None will be updated. If no entry is found at\n that oid, an Error is raised. The meta objects are updated to reflect the\n new contents of the entry.\n\n Returns:\n Entry: The newly updated entry object.\n\n Raises:\n sjb.common.base.InvalidIDError: If no item has a matching oid.\n '
item = self.get_item(oid)
original_item = copy.deepcopy(item)
item.primary = (primary if (primary is not None) else item.primary)
item.clue = (clue if (clue is not None) else item.clue)
item.answer = (answer if (answer is not None) else item.answer)
item.tags = (tags if (tags is not None) else item.tags)
if (original_item != item):
self._mark_modified()
self._recompute_object_maps()
return item | def update_item(self, oid, clue=None, answer=None, primary=None, tags=None):
'Updates entry given by oid and returns the result.\n\n Only arguments that are not None will be updated. If no entry is found at\n that oid, an Error is raised. The meta objects are updated to reflect the\n new contents of the entry.\n\n Returns:\n Entry: The newly updated entry object.\n\n Raises:\n sjb.common.base.InvalidIDError: If no item has a matching oid.\n '
item = self.get_item(oid)
original_item = copy.deepcopy(item)
item.primary = (primary if (primary is not None) else item.primary)
item.clue = (clue if (clue is not None) else item.clue)
item.answer = (answer if (answer is not None) else item.answer)
item.tags = (tags if (tags is not None) else item.tags)
if (original_item != item):
self._mark_modified()
self._recompute_object_maps()
return item<|docstring|>Updates entry given by oid and returns the result.
Only arguments that are not None will be updated. If no entry is found at
that oid, an Error is raised. The meta objects are updated to reflect the
new contents of the entry.
Returns:
Entry: The newly updated entry object.
Raises:
sjb.common.base.InvalidIDError: If no item has a matching oid.<|endoftext|> |
bc28f9a571880df30ea0ae4ecc5fbdebc587892fb78749c86690c2ce12261073 | def get_new_tags(self, primary, tags):
'Computes set of primary and tags that are not in database.\n\n Arguments:\n primary: str primary tag to check if present in primary set.\n tags: set(str) of tags to check if present in tag set.\n\n Returns:\n # TODO: of primary + all tags that are new to database.\n '
new_elts = (tags - self._tag_set)
if (primary not in self._primary_map):
new_elts.add(primary)
return new_elts | Computes set of primary and tags that are not in database.
Arguments:
primary: str primary tag to check if present in primary set.
tags: set(str) of tags to check if present in tag set.
Returns:
# TODO: of primary + all tags that are new to database. | lib/sjb/cs/classes.py | get_new_tags | sbryant1014/sjb_cheatsheet | 0 | python | def get_new_tags(self, primary, tags):
'Computes set of primary and tags that are not in database.\n\n Arguments:\n primary: str primary tag to check if present in primary set.\n tags: set(str) of tags to check if present in tag set.\n\n Returns:\n # TODO: of primary + all tags that are new to database.\n '
new_elts = (tags - self._tag_set)
if (primary not in self._primary_map):
new_elts.add(primary)
return new_elts | def get_new_tags(self, primary, tags):
'Computes set of primary and tags that are not in database.\n\n Arguments:\n primary: str primary tag to check if present in primary set.\n tags: set(str) of tags to check if present in tag set.\n\n Returns:\n # TODO: of primary + all tags that are new to database.\n '
new_elts = (tags - self._tag_set)
if (primary not in self._primary_map):
new_elts.add(primary)
return new_elts<|docstring|>Computes set of primary and tags that are not in database.
Arguments:
primary: str primary tag to check if present in primary set.
tags: set(str) of tags to check if present in tag set.
Returns:
# TODO: of primary + all tags that are new to database.<|endoftext|> |
e3e0a3ad982162159a82e08b4f6b299529ef1e451ddbbcf66347b10363870b81 | def _update_object_maps(self, item):
'Updates meta objects to reflect the contents of item.'
if (item.primary not in self._primary_map.keys()):
self._primary_map[item.primary] = []
self._primary_map[item.primary].append(item)
for tag in item.tags:
self._tag_set.add(tag)
self._tag_set.add(item.primary) | Updates meta objects to reflect the contents of item. | lib/sjb/cs/classes.py | _update_object_maps | sbryant1014/sjb_cheatsheet | 0 | python | def _update_object_maps(self, item):
if (item.primary not in self._primary_map.keys()):
self._primary_map[item.primary] = []
self._primary_map[item.primary].append(item)
for tag in item.tags:
self._tag_set.add(tag)
self._tag_set.add(item.primary) | def _update_object_maps(self, item):
if (item.primary not in self._primary_map.keys()):
self._primary_map[item.primary] = []
self._primary_map[item.primary].append(item)
for tag in item.tags:
self._tag_set.add(tag)
self._tag_set.add(item.primary)<|docstring|>Updates meta objects to reflect the contents of item.<|endoftext|> |
c7bf236964b8383af94556a8d747662babbfda1353e0bc4f0b12229da01ad962 | def _recompute_object_maps(self):
'Recomputes all meta object maps like tag_set, primary_to_entries, etc.\n\n This should be used after making a non-trivial change to the list like\n modifying an elements tags or removing an element.\n '
self._primary_map = {}
self._tag_set = set()
for item in self._items:
self._update_object_maps(item) | Recomputes all meta object maps like tag_set, primary_to_entries, etc.
This should be used after making a non-trivial change to the list like
modifying an elements tags or removing an element. | lib/sjb/cs/classes.py | _recompute_object_maps | sbryant1014/sjb_cheatsheet | 0 | python | def _recompute_object_maps(self):
'Recomputes all meta object maps like tag_set, primary_to_entries, etc.\n\n This should be used after making a non-trivial change to the list like\n modifying an elements tags or removing an element.\n '
self._primary_map = {}
self._tag_set = set()
for item in self._items:
self._update_object_maps(item) | def _recompute_object_maps(self):
'Recomputes all meta object maps like tag_set, primary_to_entries, etc.\n\n This should be used after making a non-trivial change to the list like\n modifying an elements tags or removing an element.\n '
self._primary_map = {}
self._tag_set = set()
for item in self._items:
self._update_object_maps(item)<|docstring|>Recomputes all meta object maps like tag_set, primary_to_entries, etc.
This should be used after making a non-trivial change to the list like
modifying an elements tags or removing an element.<|endoftext|> |
0d09477d15be51e18a935ef95749dd7dd3c31751dc55a30d6b3e6a1795540a54 | def __init__(self, dims=None, nda=None, shape=None, dtype=None, attrs={}):
" \n Parameters\n ----------\n dims : tuple of ints (optional)\n specifies the dimensions required for building the\n ArrayOfEqualSizedArrays' datatype attribute\n\n See Array.__init__ for optional args \n "
self.dims = dims
super().__init__(nda, shape, dtype, attrs) | Parameters
----------
dims : tuple of ints (optional)
specifies the dimensions required for building the
ArrayOfEqualSizedArrays' datatype attribute
See Array.__init__ for optional args | pygama/lh5/arrayofequalsizedarrays.py | __init__ | tcald/pygama | 13 | python | def __init__(self, dims=None, nda=None, shape=None, dtype=None, attrs={}):
" \n Parameters\n ----------\n dims : tuple of ints (optional)\n specifies the dimensions required for building the\n ArrayOfEqualSizedArrays' datatype attribute\n\n See Array.__init__ for optional args \n "
self.dims = dims
super().__init__(nda, shape, dtype, attrs) | def __init__(self, dims=None, nda=None, shape=None, dtype=None, attrs={}):
" \n Parameters\n ----------\n dims : tuple of ints (optional)\n specifies the dimensions required for building the\n ArrayOfEqualSizedArrays' datatype attribute\n\n See Array.__init__ for optional args \n "
self.dims = dims
super().__init__(nda, shape, dtype, attrs)<|docstring|>Parameters
----------
dims : tuple of ints (optional)
specifies the dimensions required for building the
ArrayOfEqualSizedArrays' datatype attribute
See Array.__init__ for optional args<|endoftext|> |
fbcd4278f2c40bbd5f4539e10a99e91130a2c17656e35cb4bb022118fdd212c7 | def dataype_name(self):
"The name for this object's lh5 datatype attribute"
return 'array_of_equalsized_arrays' | The name for this object's lh5 datatype attribute | pygama/lh5/arrayofequalsizedarrays.py | dataype_name | tcald/pygama | 13 | python | def dataype_name(self):
return 'array_of_equalsized_arrays' | def dataype_name(self):
return 'array_of_equalsized_arrays'<|docstring|>The name for this object's lh5 datatype attribute<|endoftext|> |
7d82bbe7f7ccc17ce53899e23c5b1220a0f702a182b1e350de82fabbe6dabb9f | def form_datatype(self):
"Return this object's lh5 datatype attribute string"
dt = self.dataype_name()
nD = str(len(self.nda.shape))
if (self.dims is not None):
nD = ','.join([str(i) for i in self.dims])
et = get_lh5_element_type(self)
return (((((dt + '<') + nD) + '>{') + et) + '}') | Return this object's lh5 datatype attribute string | pygama/lh5/arrayofequalsizedarrays.py | form_datatype | tcald/pygama | 13 | python | def form_datatype(self):
dt = self.dataype_name()
nD = str(len(self.nda.shape))
if (self.dims is not None):
nD = ','.join([str(i) for i in self.dims])
et = get_lh5_element_type(self)
return (((((dt + '<') + nD) + '>{') + et) + '}') | def form_datatype(self):
dt = self.dataype_name()
nD = str(len(self.nda.shape))
if (self.dims is not None):
nD = ','.join([str(i) for i in self.dims])
et = get_lh5_element_type(self)
return (((((dt + '<') + nD) + '>{') + et) + '}')<|docstring|>Return this object's lh5 datatype attribute string<|endoftext|> |
168cbbe83a419ea0145393bedfde52c0d4f888bc8cb636d37365492acd6ac6a8 | def __len__(self):
'Provides __len__ for this array-like class'
return len(self.nda) | Provides __len__ for this array-like class | pygama/lh5/arrayofequalsizedarrays.py | __len__ | tcald/pygama | 13 | python | def __len__(self):
return len(self.nda) | def __len__(self):
return len(self.nda)<|docstring|>Provides __len__ for this array-like class<|endoftext|> |
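As a rough illustration of the datatype string built by form_datatype above, the snippet below assembles it for a plain numpy array; the element_type helper is a simplified stand-in for pygama's get_lh5_element_type, and the dims value is an arbitrary example.

import numpy as np

def element_type(nda):
    # Simplified stand-in: report a "realNN" tag for float arrays, else the dtype name.
    return f"real{8 * nda.dtype.itemsize}" if nda.dtype.kind == "f" else str(nda.dtype)

nda = np.zeros((100, 256))          # e.g. 100 equal-sized arrays of 256 samples each
dims = (1, 1)                       # dims attribute as the constructor would store it
nD = ",".join(str(i) for i in dims)
datatype = f"array_of_equalsized_arrays<{nD}>{{{element_type(nda)}}}"
print(datatype)                     # array_of_equalsized_arrays<1,1>{real64}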
b48c795f47e056dfcead69025fbb6066e390f0a209a8a875cc35c5a27f389ba6 | def check_ncbi_for_prev_experiment(sample_name):
'Use a combination of NCBI E-Utils to detect past MiSeq experiments for this sample, so that new runs can be attached'
if ('SAMN' not in sample_name):
raise ValueError('{} not a valid NCBI BioSample ID.'.format(sample_name))
query = urllib.urlencode({'db': 'sra', 'term': sample_name, 'field': 'BSPL'})
r1 = xml.fromstring(urllib.urlopen('http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi', data=query).read())
for sra in [e.text for e in r1.findall('.//Id')]:
query = urllib.urlencode({'db': 'SRA', 'id': sra})
r2 = xml.fromstring(urllib.urlopen('http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi', data=query).read())
for sra_exp in r2.findall('.//EXPERIMENT'):
if ('Illumina MiSeq' in sra_exp.find('.//INSTRUMENT_MODEL').text):
return sra_exp.attrib['accession']
return False | Use a combination of NCBI E-Utils to detect past MiSeq experiments for this sample, so that new runs can be attached | sra_quick_submit_package/sra_quick_submit.py | check_ncbi_for_prev_experiment | GenomeTrakrUnofficial/sra-quick-subm | 2 | python | def check_ncbi_for_prev_experiment(sample_name):
if ('SAMN' not in sample_name):
raise ValueError('{} not a valid NCBI BioSample ID.'.format(sample_name))
query = urllib.urlencode({'db': 'sra', 'term': sample_name, 'field': 'BSPL'})
r1 = xml.fromstring(urllib.urlopen('http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi', data=query).read())
for sra in [e.text for e in r1.findall('.//Id')]:
query = urllib.urlencode({'db': 'SRA', 'id': sra})
r2 = xml.fromstring(urllib.urlopen('http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi', data=query).read())
for sra_exp in r2.findall('.//EXPERIMENT'):
if ('Illumina MiSeq' in sra_exp.find('.//INSTRUMENT_MODEL').text):
return sra_exp.attrib['accession']
return False | def check_ncbi_for_prev_experiment(sample_name):
if ('SAMN' not in sample_name):
raise ValueError('{} not a valid NCBI BioSample ID.'.format(sample_name))
query = urllib.urlencode({'db': 'sra', 'term': sample_name, 'field': 'BSPL'})
r1 = xml.fromstring(urllib.urlopen('http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi', data=query).read())
for sra in [e.text for e in r1.findall('.//Id')]:
query = urllib.urlencode({'db': 'SRA', 'id': sra})
r2 = xml.fromstring(urllib.urlopen('http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi', data=query).read())
for sra_exp in r2.findall('.//EXPERIMENT'):
if ('Illumina MiSeq' in sra_exp.find('.//INSTRUMENT_MODEL').text):
return sra_exp.attrib['accession']
return False<|docstring|>Use a combination of NCBI E-Utils to detect past MiSeq experiments for this sample, so that new runs can be attached<|endoftext|> |
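A sketch of the first esearch step above using the requests library instead of urllib (the endpoint and query fields follow the function; the accession in the commented call is a placeholder):

import requests

EUTILS = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils"

def sra_search_for_biosample(biosample):
    # Mirrors step 1 of the function above: esearch SRA restricted to the BioSample field.
    if "SAMN" not in biosample:
        raise ValueError(f"{biosample} not a valid NCBI BioSample ID.")
    r = requests.get(f"{EUTILS}/esearch.fcgi",
                     params={"db": "sra", "term": biosample, "field": "BSPL"},
                     timeout=30)
    r.raise_for_status()
    return r.text   # XML; the original parses the <Id> elements and follows up with efetch

# print(sra_search_for_biosample("SAMN02947772"))   # placeholder accession; needs network access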
6d092d3a0cb3f6e1cfe2339bf024fc6a5f10dbc651f835c212fe5ad68153bcca | def make_submission(path, entry, project, hold, merge=False, **kwargs):
'Produce three submission files - run, experiment, submission - and then tar them up.'
exp_acc = None
entry.update(kwargs)
if (not exists(path)):
os.makedirs(path)
print('Made {}.'.format(path))
if (merge and ((entry['Sample Name'] in merge.split(' ')) or ('all' in merge))):
entry['num'] = ''
if (entry['Sample Name'] in sample_cache):
r = xml.fromstring(run.format(**entry))
run_set = sample_cache[entry['Sample Name']]
run_set.append(r)
else:
sample_cache[entry['Sample Name']] = run_set = xml.Element('RUN_SET')
run_set.append(xml.fromstring(run.format(**entry)))
try:
exp_acc = check_ncbi_for_prev_experiment(entry['Biosample Accession'])
except ValueError:
print(locals())
if exp_acc:
print('Found a previous experiment: {}'.format(exp_acc))
sub = xml.fromstring(submission_no_exp.format(date=hold, **entry))
for ref in run_set.findall('.//EXPERIMENT_REF'):
ref.attrib['accession'] = exp_acc
else:
print('No experiment found. Creating a new one...')
exp = xml.fromstring(experiment.format(project=project, **entry))
xml.ElementTree(exp).write(join(path, '{Sample Name}.experiment.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
sub = xml.fromstring(submission.format(date=hold, **entry))
xml.ElementTree(run_set).write(join(path, '{Sample Name}.run.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
xml.ElementTree(sub).write(join(path, '{Sample Name}.submission.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
with tarfile.open(join(path, '{Sample Name}.submission_archive.tar'.format(**entry)), 'w', format=tarfile.GNU_FORMAT) as tarball:
tarball.add(join(path, '{Sample Name}.run.xml'.format(**entry)), arcname='{Sample Name}.run.xml'.format(**entry))
tarball.add(join(path, '{Sample Name}.submission.xml'.format(**entry)), arcname='{Sample Name}.submission.xml'.format(**entry))
try:
tarball.add(join(path, '{Sample Name}.experiment.xml'.format(**entry)), arcname='{Sample Name}.experiment.xml'.format(**entry))
print('Produced run, experiment, and submission files for {}'.format(entry['Sample Name']))
except (OSError, IOError):
print('Produced run and submission files for {}'.format(entry['Sample Name']))
else:
if (entry['Sample Name'] in sample_names):
entry['num'] = '.{:02}'.format((sample_names.count(entry['Sample Name']) + 1))
else:
entry['num'] = ''
try:
exp = xml.fromstring(experiment.format(project=project, **entry))
xml.ElementTree(exp).write(join(path, '{Sample Name}{num}.experiment.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
r = xml.fromstring(run.format(**entry))
r_set = xml.Element('RUN_SET')
r_set.append(r)
xml.ElementTree(r).write(join(path, '{Sample Name}{num}.run.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
sub = xml.fromstring(submission.format(date=hold, **entry))
xml.ElementTree(sub).write(join(path, '{Sample Name}{num}.submission.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
except xml.ParseError as e:
print('XML internal parsing failed. Tried to parse:')
print(experiment.format(project=project, **entry))
print(run.format(**entry))
print(submission.format(date=date, **entry))
raise e
tarball = tarfile.open(join(path, '{Sample Name}{num}.submission_archive.tar'.format(**entry)), 'w', format=tarfile.GNU_FORMAT)
tarball.add(join(path, '{Sample Name}{num}.experiment.xml'.format(**entry)), arcname='{Sample Name}{num}.experiment.xml'.format(**entry))
tarball.add(join(path, '{Sample Name}{num}.run.xml'.format(**entry)), arcname='{Sample Name}{num}.run.xml'.format(**entry))
tarball.add(join(path, '{Sample Name}{num}.submission.xml'.format(**entry)), arcname='{Sample Name}{num}.submission.xml'.format(**entry))
tarball.close()
sample_names.append(entry['Sample Name']) | Produce three submission files - run, experiment, submission - and then tar them up. | sra_quick_submit_package/sra_quick_submit.py | make_submission | GenomeTrakrUnofficial/sra-quick-subm | 2 | python | def make_submission(path, entry, project, hold, merge=False, **kwargs):
exp_acc = None
entry.update(kwargs)
if (not exists(path)):
os.makedirs(path)
print('Made {}.'.format(path))
if (merge and ((entry['Sample Name'] in merge.split(' ')) or ('all' in merge))):
entry['num'] = ''
if (entry['Sample Name'] in sample_cache):
r = xml.fromstring(run.format(**entry))
run_set = sample_cache[entry['Sample Name']]
run_set.append(r)
else:
sample_cache[entry['Sample Name']] = run_set = xml.Element('RUN_SET')
run_set.append(xml.fromstring(run.format(**entry)))
try:
exp_acc = check_ncbi_for_prev_experiment(entry['Biosample Accession'])
except ValueError:
print(locals())
if exp_acc:
print('Found a previous experiment: {}'.format(exp_acc))
sub = xml.fromstring(submission_no_exp.format(date=hold, **entry))
for ref in run_set.findall('.//EXPERIMENT_REF'):
ref.attrib['accession'] = exp_acc
else:
print('No experiment found. Creating a new one...')
exp = xml.fromstring(experiment.format(project=project, **entry))
xml.ElementTree(exp).write(join(path, '{Sample Name}.experiment.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
sub = xml.fromstring(submission.format(date=hold, **entry))
xml.ElementTree(run_set).write(join(path, '{Sample Name}.run.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
xml.ElementTree(sub).write(join(path, '{Sample Name}.submission.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
with tarfile.open(join(path, '{Sample Name}.submission_archive.tar'.format(**entry)), 'w', format=tarfile.GNU_FORMAT) as tarball:
tarball.add(join(path, '{Sample Name}.run.xml'.format(**entry)), arcname='{Sample Name}.run.xml'.format(**entry))
tarball.add(join(path, '{Sample Name}.submission.xml'.format(**entry)), arcname='{Sample Name}.submission.xml'.format(**entry))
try:
tarball.add(join(path, '{Sample Name}.experiment.xml'.format(**entry)), arcname='{Sample Name}.experiment.xml'.format(**entry))
print('Produced run, experiment, and submission files for {}'.format(entry['Sample Name']))
except (OSError, IOError):
print('Produced run and submission files for {}'.format(entry['Sample Name']))
else:
if (entry['Sample Name'] in sample_names):
entry['num'] = '.{:02}'.format((sample_names.count(entry['Sample Name']) + 1))
else:
entry['num'] = ''
try:
exp = xml.fromstring(experiment.format(project=project, **entry))
xml.ElementTree(exp).write(join(path, '{Sample Name}{num}.experiment.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
r = xml.fromstring(run.format(**entry))
r_set = xml.Element('RUN_SET')
r_set.append(r)
xml.ElementTree(r).write(join(path, '{Sample Name}{num}.run.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
sub = xml.fromstring(submission.format(date=hold, **entry))
xml.ElementTree(sub).write(join(path, '{Sample Name}{num}.submission.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
except xml.ParseError as e:
print('XML internal parsing failed. Tried to parse:')
print(experiment.format(project=project, **entry))
print(run.format(**entry))
print(submission.format(date=date, **entry))
raise e
tarball = tarfile.open(join(path, '{Sample Name}{num}.submission_archive.tar'.format(**entry)), 'w', format=tarfile.GNU_FORMAT)
tarball.add(join(path, '{Sample Name}{num}.experiment.xml'.format(**entry)), arcname='{Sample Name}{num}.experiment.xml'.format(**entry))
tarball.add(join(path, '{Sample Name}{num}.run.xml'.format(**entry)), arcname='{Sample Name}{num}.run.xml'.format(**entry))
tarball.add(join(path, '{Sample Name}{num}.submission.xml'.format(**entry)), arcname='{Sample Name}{num}.submission.xml'.format(**entry))
tarball.close()
sample_names.append(entry['Sample Name']) | def make_submission(path, entry, project, hold, merge=False, **kwargs):
exp_acc = None
entry.update(kwargs)
if (not exists(path)):
os.makedirs(path)
print('Made {}.'.format(path))
if (merge and ((entry['Sample Name'] in merge.split(' ')) or ('all' in merge))):
entry['num'] = ''
if (entry['Sample Name'] in sample_cache):
r = xml.fromstring(run.format(**entry))
run_set = sample_cache[entry['Sample Name']]
run_set.append(r)
else:
sample_cache[entry['Sample Name']] = run_set = xml.Element('RUN_SET')
run_set.append(xml.fromstring(run.format(**entry)))
try:
exp_acc = check_ncbi_for_prev_experiment(entry['Biosample Accession'])
except ValueError:
print(locals())
if exp_acc:
print('Found a previous experiment: {}'.format(exp_acc))
sub = xml.fromstring(submission_no_exp.format(date=hold, **entry))
for ref in run_set.findall('.//EXPERIMENT_REF'):
ref.attrib['accession'] = exp_acc
else:
print('No experiment found. Creating a new one...')
exp = xml.fromstring(experiment.format(project=project, **entry))
xml.ElementTree(exp).write(join(path, '{Sample Name}.experiment.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
sub = xml.fromstring(submission.format(date=hold, **entry))
xml.ElementTree(run_set).write(join(path, '{Sample Name}.run.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
xml.ElementTree(sub).write(join(path, '{Sample Name}.submission.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
with tarfile.open(join(path, '{Sample Name}.submission_archive.tar'.format(**entry)), 'w', format=tarfile.GNU_FORMAT) as tarball:
tarball.add(join(path, '{Sample Name}.run.xml'.format(**entry)), arcname='{Sample Name}.run.xml'.format(**entry))
tarball.add(join(path, '{Sample Name}.submission.xml'.format(**entry)), arcname='{Sample Name}.submission.xml'.format(**entry))
try:
tarball.add(join(path, '{Sample Name}.experiment.xml'.format(**entry)), arcname='{Sample Name}.experiment.xml'.format(**entry))
print('Produced run, experiment, and submission files for {}'.format(entry['Sample Name']))
except (OSError, IOError):
print('Produced run and submission files for {}'.format(entry['Sample Name']))
else:
if (entry['Sample Name'] in sample_names):
entry['num'] = '.{:02}'.format((sample_names.count(entry['Sample Name']) + 1))
else:
entry['num'] = ''
try:
exp = xml.fromstring(experiment.format(project=project, **entry))
xml.ElementTree(exp).write(join(path, '{Sample Name}{num}.experiment.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
r = xml.fromstring(run.format(**entry))
r_set = xml.Element('RUN_SET')
r_set.append(r)
xml.ElementTree(r).write(join(path, '{Sample Name}{num}.run.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
sub = xml.fromstring(submission.format(date=hold, **entry))
xml.ElementTree(sub).write(join(path, '{Sample Name}{num}.submission.xml'.format(**entry)), encoding='UTF-8', xml_declaration=True)
except xml.ParseError as e:
print('XML internal parsing failed. Tried to parse:')
print(experiment.format(project=project, **entry))
print(run.format(**entry))
print(submission.format(date=date, **entry))
raise e
tarball = tarfile.open(join(path, '{Sample Name}{num}.submission_archive.tar'.format(**entry)), 'w', format=tarfile.GNU_FORMAT)
tarball.add(join(path, '{Sample Name}{num}.experiment.xml'.format(**entry)), arcname='{Sample Name}{num}.experiment.xml'.format(**entry))
tarball.add(join(path, '{Sample Name}{num}.run.xml'.format(**entry)), arcname='{Sample Name}{num}.run.xml'.format(**entry))
tarball.add(join(path, '{Sample Name}{num}.submission.xml'.format(**entry)), arcname='{Sample Name}{num}.submission.xml'.format(**entry))
tarball.close()
sample_names.append(entry['Sample Name'])<|docstring|>Produce three submission files - run, experiment, submission - and then tar them up.<|endoftext|> |
05dd146abb49474802ad2d6383792a4034733fb6cb0ebfc3c72bdeb162a9ef3c | def get_desmond_energy_from_file(energy_file):
'Parses the desmond energy file. '
with open(energy_file, 'r') as f:
data = []
types = []
line = f.readline()
if line.startswith('time=0.000000'):
terms = line.split()
terms = terms[1:(- 2)]
for term in terms:
(key, value) = term.split('=')
types.append(key)
data.append(float(value))
for line in f:
if ('(0.000000)' in line):
words = line.split()
if (words[(- 1)] == 'total'):
continue
key = words[0]
if key:
types.append(key)
data.append(words[(- 1)])
data = [(float(value) * units.kilocalories_per_mole) for value in data]
e_out = OrderedDict(zip(types, data))
return e_out | Parses the desmond energy file. | intermol/desmond/__init__.py | get_desmond_energy_from_file | JoshuaSBrown/InterMol | 109 | python | def get_desmond_energy_from_file(energy_file):
' '
with open(energy_file, 'r') as f:
data = []
types = []
line = f.readline()
if line.startswith('time=0.000000'):
terms = line.split()
terms = terms[1:(- 2)]
for term in terms:
(key, value) = term.split('=')
types.append(key)
data.append(float(value))
for line in f:
if ('(0.000000)' in line):
words = line.split()
if (words[(- 1)] == 'total'):
continue
key = words[0]
if key:
types.append(key)
data.append(words[(- 1)])
data = [(float(value) * units.kilocalories_per_mole) for value in data]
e_out = OrderedDict(zip(types, data))
return e_out | def get_desmond_energy_from_file(energy_file):
' '
with open(energy_file, 'r') as f:
data = []
types = []
line = f.readline()
if line.startswith('time=0.000000'):
terms = line.split()
terms = terms[1:(- 2)]
for term in terms:
(key, value) = term.split('=')
types.append(key)
data.append(float(value))
for line in f:
if ('(0.000000)' in line):
words = line.split()
if (words[(- 1)] == 'total'):
continue
key = words[0]
if key:
types.append(key)
data.append(words[(- 1)])
data = [(float(value) * units.kilocalories_per_mole) for value in data]
e_out = OrderedDict(zip(types, data))
return e_out<|docstring|>Parses the desmond energy file.<|endoftext|> |
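The parser in the record above keys on a time=0.000000 header and on lines containing (0.000000); the snippet below walks the same logic over a small input whose contents are invented for illustration (the real enegrp file has more terms).

# Invented example of the two kinds of lines the parser expects.
sample = """time=0.000000 E=-1234.5 P=300.0 V=8000.0 E_p=100.0 E_k=50.0
stretch (0.000000) 12.5
angle (0.000000) 3.2
"""

energies = {}
lines = sample.splitlines()
if lines[0].startswith("time=0.000000"):
    for term in lines[0].split()[1:-2]:        # the last two header terms are dropped
        key, value = term.split("=")
        energies[key] = float(value)
for line in lines[1:]:
    if "(0.000000)" in line:
        words = line.split()
        if words[-1] != "total":
            energies[words[0]] = float(words[-1])
print(energies)   # {'E': -1234.5, 'P': 300.0, 'V': 8000.0, 'stretch': 12.5, 'angle': 3.2}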
cf8ad26c9fd45e59070823e11e8d985b57293a547642ba4efe36e2bce6b39566 | def energies(cms, cfg, des_path):
'Evaluates energies of DESMOND files\n\n Args:\n cms (str): Path to .cms file.\n cfg (str): Path to .cfg file.\n des_path (str): Path to DESMOND binaries.\n Returns:\n tot_energy:\n energy_file:\n\n '
logger.info('Evaluating energy of {0}'.format(cms))
cms = os.path.abspath(cms)
cfg = os.path.abspath(cfg)
(direc, cms_filename) = os.path.split(cms)
cwd = os.getcwd()
name = os.path.splitext(cms_filename)[0]
energy_file = ('%s/%s.enegrp.dat' % (direc, name))
if (des_path and (not (des_path == ''))):
desmond_bin = os.path.join(des_path, 'desmond')
elif os.environ.get('SCHRODINGER'):
desmond_bin = os.path.join(os.environ.get('SCHRODINGER'), 'desmond')
else:
raise Exception('Desmond binary not found')
os.chdir(direc)
if os.path.exists('trj'):
shutil.rmtree('trj')
cmd = [desmond_bin, '-WAIT', '-P', '1', '-in', cms, '-JOBNAME', name, '-c', cfg]
logger.debug(('Running DESMOND with command:\n %s' % ' '.join(cmd)))
with open('desmond_stdout.txt', 'w') as out, open('desmond_stderr.txt', 'w') as err:
exit = subprocess.call(cmd, stdout=out, stderr=err)
if exit:
logger.error(('Energy evaluation failed. See %s/desmond_stderr.txt' % direc))
os.chdir(cwd)
raise Exception('Energy evaluation failed for {0}'.format(cms))
tot_energy = get_desmond_energy_from_file(energy_file)
outcms = ((cms[:(- 4)] + '-out') + cms[(- 4):])
os.remove(outcms)
os.chdir(cwd)
return (tot_energy, energy_file) | Evaluates energies of DESMOND files
Args:
cms (str): Path to .cms file.
cfg (str): Path to .cfg file.
des_path (str): Path to DESMOND binaries.
Returns:
tot_energy:
energy_file: | intermol/desmond/__init__.py | energies | JoshuaSBrown/InterMol | 109 | python | def energies(cms, cfg, des_path):
'Evalutes energies of DESMOND files\n\n Args:\n cms (str): Path to .cms file.\n cfg (str): Path to .cfg file.\n des_path (str): Path to DESMOND binaries.\n Returns:\n tot_energy:\n energy_file:\n\n '
logger.info('Evaluating energy of {0}'.format(cms))
cms = os.path.abspath(cms)
cfg = os.path.abspath(cfg)
(direc, cms_filename) = os.path.split(cms)
cwd = os.getcwd()
name = os.path.splitext(cms_filename)[0]
energy_file = ('%s/%s.enegrp.dat' % (direc, name))
if (des_path and (not (des_path == ''))):
desmond_bin = os.path.join(des_path, 'desmond')
elif os.environ.get('SCHRODINGER'):
desmond_bin = os.path.join(os.environ.get('SCHRODINGER'), 'desmond')
else:
raise Exception('Desmond binary not found')
os.chdir(direc)
if os.path.exists('trj'):
shutil.rmtree('trj')
cmd = [desmond_bin, '-WAIT', '-P', '1', '-in', cms, '-JOBNAME', name, '-c', cfg]
logger.debug(('Running DESMOND with command:\n %s' % ' '.join(cmd)))
with open('desmond_stdout.txt', 'w') as out, open('desmond_stderr.txt', 'w') as err:
exit = subprocess.call(cmd, stdout=out, stderr=err)
if exit:
logger.error(('Energy evaluation failed. See %s/desmond_stderr.txt' % direc))
os.chdir(cwd)
raise Exception('Energy evaluation failed for {0}'.format(cms))
tot_energy = get_desmond_energy_from_file(energy_file)
outcms = ((cms[:(- 4)] + '-out') + cms[(- 4):])
os.remove(outcms)
os.chdir(cwd)
return (tot_energy, energy_file) | def energies(cms, cfg, des_path):
'Evalutes energies of DESMOND files\n\n Args:\n cms (str): Path to .cms file.\n cfg (str): Path to .cfg file.\n des_path (str): Path to DESMOND binaries.\n Returns:\n tot_energy:\n energy_file:\n\n '
logger.info('Evaluating energy of {0}'.format(cms))
cms = os.path.abspath(cms)
cfg = os.path.abspath(cfg)
(direc, cms_filename) = os.path.split(cms)
cwd = os.getcwd()
name = os.path.splitext(cms_filename)[0]
energy_file = ('%s/%s.enegrp.dat' % (direc, name))
if (des_path and (not (des_path == ''))):
desmond_bin = os.path.join(des_path, 'desmond')
elif os.environ.get('SCHRODINGER'):
desmond_bin = os.path.join(os.environ.get('SCHRODINGER'), 'desmond')
else:
raise Exception('Desmond binary not found')
os.chdir(direc)
if os.path.exists('trj'):
shutil.rmtree('trj')
cmd = [desmond_bin, '-WAIT', '-P', '1', '-in', cms, '-JOBNAME', name, '-c', cfg]
logger.debug(('Running DESMOND with command:\n %s' % ' '.join(cmd)))
with open('desmond_stdout.txt', 'w') as out, open('desmond_stderr.txt', 'w') as err:
exit = subprocess.call(cmd, stdout=out, stderr=err)
if exit:
logger.error(('Energy evaluation failed. See %s/desmond_stderr.txt' % direc))
os.chdir(cwd)
raise Exception('Energy evaluation failed for {0}'.format(cms))
tot_energy = get_desmond_energy_from_file(energy_file)
outcms = ((cms[:(- 4)] + '-out') + cms[(- 4):])
os.remove(outcms)
os.chdir(cwd)
return (tot_energy, energy_file)<|docstring|>Evaluates energies of DESMOND files
Args:
cms (str): Path to .cms file.
cfg (str): Path to .cfg file.
des_path (str): Path to DESMOND binaries.
Returns:
tot_energy:
energy_file:<|endoftext|> |
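A usage sketch for the energies() driver above, assuming InterMol and Desmond are installed; every file name and path below is a placeholder rather than a shipped example.

# Illustrative driver only: file names and install path are placeholders.
from intermol.desmond import energies

cms_file = "system.cms"            # Desmond structure/topology file
cfg_file = "onepoint.cfg"          # single-point evaluation config
desmond_path = "/opt/schrodinger"  # directory that contains the desmond binary

tot_energy, energy_file = energies(cms_file, cfg_file, desmond_path)
for term, value in tot_energy.items():
    print(term, value)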
199f3fdd3e0d53ac3856ce76b53f784d082602826d0906c5dde21f4dd45818e5 | def as_email():
'Return the post as an email message\n\n:returns: The post as an instance of the standard core email message class\n:rtype: :class:`email.message.Message`' | Return the post as an email message
:returns: The post as an instance of the standard core email message class
:rtype: :class:`email.message.Message` | gs/group/list/email/base/interfaces.py | as_email | groupserver/gs.group.list.email.base | 0 | python | def as_email():
'Return the post as an email message\n\n:returns: The post as an instance of the standard core email message class\n:rtype: :class:`email.message.Message`' | def as_email():
'Return the post as an email message\n\n:returns: The post as an instance of the standard core email message class\n:rtype: :class:`email.message.Message`'<|docstring|>Return the post as an email message
:returns: The post as an instance of the standard core email message class
:rtype: :class:`email.message.Message`<|endoftext|> |
0d19283df45ea3dfb180e19d84239e1b33e7624e835ef7c99a530a10b4855265 | def __init__(self, n_epochs=1, verbosity=2, cost_freq=2, init_delta=0.1, eta_plus=1.2, eta_minus=0.5, delta_min=1e-06, delta_max=50.0, manhattan=True, n_threads=1, chunk_size=100):
'\n rprop\n '
super().__init__()
self.n_epochs = n_epochs
self.verbosity = verbosity
self.cost_freq = cost_freq
self.eta_plus = eta_plus
self.eta_minus = eta_minus
self.init_delta = init_delta
self.delta_max = delta_max
self.delta_min = delta_min
self.prev_dc_db = None
self.prev_dc_dq = None
self.prev_dc_dr = None
self.prev_dc_dt = None
self.dc_db = None
self.dc_dq = None
self.dc_dr = None
self.dc_dt = None
self.delta_b = None
self.delta_q = None
self.delta_rx = None
self.delta_ry = None
self.delta_rz = None
self.delta_t = None
self.manhattan = manhattan
self.n_threads = n_threads
self.chunk_size = chunk_size
self.pool = None | rprop | src/calrissian/optimizers/particle_rprop.py | __init__ | awlange/brainsparks | 3 | python | def __init__(self, n_epochs=1, verbosity=2, cost_freq=2, init_delta=0.1, eta_plus=1.2, eta_minus=0.5, delta_min=1e-06, delta_max=50.0, manhattan=True, n_threads=1, chunk_size=100):
'\n \n '
super().__init__()
self.n_epochs = n_epochs
self.verbosity = verbosity
self.cost_freq = cost_freq
self.eta_plus = eta_plus
self.eta_minus = eta_minus
self.init_delta = init_delta
self.delta_max = delta_max
self.delta_min = delta_min
self.prev_dc_db = None
self.prev_dc_dq = None
self.prev_dc_dr = None
self.prev_dc_dt = None
self.dc_db = None
self.dc_dq = None
self.dc_dr = None
self.dc_dt = None
self.delta_b = None
self.delta_q = None
self.delta_rx = None
self.delta_ry = None
self.delta_rz = None
self.delta_t = None
self.manhattan = manhattan
self.n_threads = n_threads
self.chunk_size = chunk_size
self.pool = None | def __init__(self, n_epochs=1, verbosity=2, cost_freq=2, init_delta=0.1, eta_plus=1.2, eta_minus=0.5, delta_min=1e-06, delta_max=50.0, manhattan=True, n_threads=1, chunk_size=100):
'\n \n '
super().__init__()
self.n_epochs = n_epochs
self.verbosity = verbosity
self.cost_freq = cost_freq
self.eta_plus = eta_plus
self.eta_minus = eta_minus
self.init_delta = init_delta
self.delta_max = delta_max
self.delta_min = delta_min
self.prev_dc_db = None
self.prev_dc_dq = None
self.prev_dc_dr = None
self.prev_dc_dt = None
self.dc_db = None
self.dc_dq = None
self.dc_dr = None
self.dc_dt = None
self.delta_b = None
self.delta_q = None
self.delta_rx = None
self.delta_ry = None
self.delta_rz = None
self.delta_t = None
self.manhattan = manhattan
self.n_threads = n_threads
self.chunk_size = chunk_size
self.pool = None<|docstring|>rprop<|endoftext|> |
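The constructor above exposes the usual RPROP controls (eta_plus, eta_minus, delta_min, delta_max, init_delta); below is a self-contained sketch of the sign-based step-size rule they drive, shown for a single weight and independent of the particle-network code.

import numpy as np

def rprop_step(w, grad, prev_grad, delta,
               eta_plus=1.2, eta_minus=0.5, delta_min=1e-6, delta_max=50.0):
    # Sign-based step-size adaptation: grow delta while the gradient keeps its
    # sign, shrink it (and skip the move) when the sign flips.
    sign = grad * prev_grad
    if sign > 0:
        delta = min(delta * eta_plus, delta_max)
    elif sign < 0:
        delta = max(delta * eta_minus, delta_min)
        grad = 0.0
    w -= np.sign(grad) * delta
    return w, grad, delta

# Toy run on f(w) = w**2 from w = 3 with an initial delta of 0.1.
w, prev_grad, delta = 3.0, 0.0, 0.1
for _ in range(30):
    w, prev_grad, delta = rprop_step(w, 2.0 * w, prev_grad, delta)
print(abs(w) < 0.1)   # True: the step sizes adapt and w settles near the minimum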
538a6758e4f45e8d43817a2d929684e32af7cabfba94de32bec2ee28ec79ac77 | def optimize(self, network, data_X, data_Y):
'\n :return: optimized network\n '
optimize_start_time = time.time()
if (self.verbosity > 0):
c = network.cost(data_X, data_Y)
print('Cost before epochs: {}'.format(c))
for epoch in range(self.n_epochs):
epoch_start_time = time.time()
if (self.n_threads > 1):
self.cost_gradient_parallel(network, data_X, data_Y)
else:
(self.dc_db, self.dc_dq, self.dc_dr, self.dc_dt) = network.cost_gradient(data_X, data_Y)
self.weight_update(network)
if (self.verbosity > 0):
c = network.cost(data_X, data_Y)
print('Cost after epoch {}: {:g}'.format(epoch, c))
print('Epoch time: {:g} s'.format((time.time() - epoch_start_time)))
if (self.verbosity > 0):
c = network.cost(data_X, data_Y)
print('\n\nCost after optimize run: {:g}'.format(c))
print('Optimize run time: {:g} s'.format((time.time() - optimize_start_time)))
return network | :return: optimized network | src/calrissian/optimizers/particle_rprop.py | optimize | awlange/brainsparks | 3 | python | def optimize(self, network, data_X, data_Y):
'\n \n '
optimize_start_time = time.time()
if (self.verbosity > 0):
c = network.cost(data_X, data_Y)
print('Cost before epochs: {}'.format(c))
for epoch in range(self.n_epochs):
epoch_start_time = time.time()
if (self.n_threads > 1):
self.cost_gradient_parallel(network, data_X, data_Y)
else:
(self.dc_db, self.dc_dq, self.dc_dr, self.dc_dt) = network.cost_gradient(data_X, data_Y)
self.weight_update(network)
if (self.verbosity > 0):
c = network.cost(data_X, data_Y)
print('Cost after epoch {}: {:g}'.format(epoch, c))
print('Epoch time: {:g} s'.format((time.time() - epoch_start_time)))
if (self.verbosity > 0):
c = network.cost(data_X, data_Y)
print('\n\nCost after optimize run: {:g}'.format(c))
print('Optimize run time: {:g} s'.format((time.time() - optimize_start_time)))
return network | def optimize(self, network, data_X, data_Y):
'\n \n '
optimize_start_time = time.time()
if (self.verbosity > 0):
c = network.cost(data_X, data_Y)
print('Cost before epochs: {}'.format(c))
for epoch in range(self.n_epochs):
epoch_start_time = time.time()
if (self.n_threads > 1):
self.cost_gradient_parallel(network, data_X, data_Y)
else:
(self.dc_db, self.dc_dq, self.dc_dr, self.dc_dt) = network.cost_gradient(data_X, data_Y)
self.weight_update(network)
if (self.verbosity > 0):
c = network.cost(data_X, data_Y)
print('Cost after epoch {}: {:g}'.format(epoch, c))
print('Epoch time: {:g} s'.format((time.time() - epoch_start_time)))
if (self.verbosity > 0):
c = network.cost(data_X, data_Y)
print('\n\nCost after optimize run: {:g}'.format(c))
print('Optimize run time: {:g} s'.format((time.time() - optimize_start_time)))
return network<|docstring|>:return: optimized network<|endoftext|> |
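The optimize() above is an epoch loop that reports cost and delegates the update to weight_update(); here is a runnable toy with the same shape, where a plain gradient step and a stand-in network replace the particle classes.

import time
import numpy as np

class ToyNetwork:
    # Minimal stand-in exposing the cost()/cost_gradient() interface the optimizer expects.
    def __init__(self):
        self.w = np.array([3.0])
    def cost(self, X, Y):
        return float(np.sum(self.w ** 2))
    def cost_gradient(self, X, Y):
        return 2.0 * self.w

def run_epochs(network, X, Y, n_epochs=5, lr=0.1):
    # Same epoch/report pattern as optimize() above, with a plain gradient
    # step standing in for the RPROP weight update.
    print("Cost before epochs:", network.cost(X, Y))
    for epoch in range(n_epochs):
        start = time.time()
        network.w -= lr * network.cost_gradient(X, Y)
        print("Cost after epoch {}: {:g}".format(epoch, network.cost(X, Y)))
        print("Epoch time: {:g} s".format(time.time() - start))
    return network

run_epochs(ToyNetwork(), None, None)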
5012eff1bced6a98ac2699edb78bc24be53f344f0739c92b9aed0af025fe7827 | def weight_update(self, network):
'\n Update weights and biases according to RPROP\n\n TODO: Oof... this code needs to be improved!\n '
if ((self.delta_b is None) or (self.delta_q is None) or (self.delta_rx is None) or (self.delta_ry is None) or (self.delta_rz is None)):
id = self.init_delta
self.delta_b = []
self.delta_q = []
self.delta_rx = [np.random.uniform(0, id, network.particle_input.output_size)]
self.delta_ry = [np.random.uniform(0, id, network.particle_input.output_size)]
self.delta_rz = [np.random.uniform(0, id, network.particle_input.output_size)]
self.delta_t = [np.random.uniform(0, id, network.particle_input.output_size)]
self.prev_dc_db = []
self.prev_dc_dq = []
self.prev_dc_dr = [[np.zeros(network.particle_input.output_size)] for _ in range(3)]
self.prev_dc_dt = [np.zeros(network.particle_input.output_size)]
for (l, layer) in enumerate(network.layers):
self.delta_b.append(np.random.uniform(0, id, layer.b.shape))
self.delta_q.append(np.random.uniform(0, id, layer.q.shape))
self.delta_rx.append(np.random.uniform(0, id, layer.output_size))
self.delta_ry.append(np.random.uniform(0, id, layer.output_size))
self.delta_rz.append(np.random.uniform(0, id, layer.output_size))
self.delta_t.append(np.random.uniform(0, id, layer.theta.shape))
self.prev_dc_db.append(np.zeros_like(self.dc_db[l]))
self.prev_dc_dq.append(np.zeros_like(self.dc_dq[l]))
self.prev_dc_dr[0].append(np.zeros_like(self.dc_dr[0][(l + 1)]))
self.prev_dc_dr[1].append(np.zeros_like(self.dc_dr[1][(l + 1)]))
self.prev_dc_dr[2].append(np.zeros_like(self.dc_dr[2][(l + 1)]))
self.prev_dc_dt.append(np.zeros_like(self.dc_dt[(l + 1)]))
for (l, layer) in enumerate(network.layers):
prod = (self.prev_dc_db[l] * self.dc_db[l])
for (i, b) in enumerate(layer.b[0]):
(self.delta_b[l][0][i], self.dc_db[l][0][i]) = self.get_delta(prod[0][i], self.delta_b[l][0][i], self.dc_db[l][0][i])
layer.b[0][i] -= (np.sign(self.dc_db[l][0][i]) * self.delta_b[l][0][i])
self.prev_dc_db[l][0][i] = self.dc_db[l][0][i]
prod = (self.prev_dc_dq[l] * self.dc_dq[l])
for (i, q) in enumerate(layer.q):
(self.delta_q[l][i], self.dc_dq[l][i]) = self.get_delta(prod[i], self.delta_q[l][i], self.dc_dq[l][i])
layer.q[i] -= (np.sign(self.dc_dq[l][i]) * self.delta_q[l][i])
self.prev_dc_dq[l][i] = self.dc_dq[l][i]
prod = (self.prev_dc_dt[(l + 1)] * self.dc_dt[(l + 1)])
for (i, t) in enumerate(layer.theta):
(self.delta_t[(l + 1)][i], self.dc_dt[(l + 1)][i]) = self.get_delta(prod[i], self.delta_t[(l + 1)][i], self.dc_dt[(l + 1)][i])
layer.theta[i] -= (np.sign(self.dc_dt[(l + 1)][i]) * self.delta_t[(l + 1)][i])
self.prev_dc_dt[(l + 1)][i] = self.dc_dt[(l + 1)][i]
if self.manhattan:
prod = (self.prev_dc_dr[0][(l + 1)] * self.dc_dr[0][(l + 1)])
for (i, rx) in enumerate(layer.rx):
(self.delta_rx[(l + 1)][i], self.dc_dr[0][(l + 1)][i]) = self.get_delta(prod[i], self.delta_rx[(l + 1)][i], self.dc_dr[0][(l + 1)][i])
layer.rx[i] -= (np.sign(self.dc_dr[0][(l + 1)][i]) * self.delta_rx[(l + 1)][i])
self.prev_dc_dr[0][(l + 1)][i] = self.dc_dr[0][(l + 1)][i]
prod = (self.prev_dc_dr[1][(l + 1)] * self.dc_dr[1][(l + 1)])
for (i, ry) in enumerate(layer.ry):
(self.delta_ry[(l + 1)][i], self.dc_dr[1][(l + 1)][i]) = self.get_delta(prod[i], self.delta_ry[(l + 1)][i], self.dc_dr[1][(l + 1)][i])
layer.ry[i] -= (np.sign(self.dc_dr[1][(l + 1)][i]) * self.delta_ry[(l + 1)][i])
self.prev_dc_dr[1][(l + 1)][i] = self.dc_dr[1][(l + 1)][i]
prod = (self.prev_dc_dr[2][(l + 1)] * self.dc_dr[2][(l + 1)])
for (i, rz) in enumerate(layer.rz):
(self.delta_rz[(l + 1)][i], self.dc_dr[2][(l + 1)][i]) = self.get_delta(prod[i], self.delta_rz[(l + 1)][i], self.dc_dr[2][(l + 1)][i])
layer.rz[i] -= (np.sign(self.dc_dr[2][(l + 1)][i]) * self.delta_rz[(l + 1)][i])
self.prev_dc_dr[2][(l + 1)][i] = self.dc_dr[2][(l + 1)][i]
else:
prod = (((self.prev_dc_dr[0][(l + 1)] * self.dc_dr[0][(l + 1)]) + (self.prev_dc_dr[1][(l + 1)] * self.dc_dr[1][(l + 1)])) + (self.prev_dc_dr[2][(l + 1)] * self.dc_dr[2][(l + 1)]))
for (i, rx) in enumerate(layer.rx):
(delta, dc) = self.get_delta(prod[i], self.delta_rx[(l + 1)][i], 1.0)
self.dc_dr[0][(l + 1)][i] *= dc
self.dc_dr[1][(l + 1)][i] *= dc
self.dc_dr[2][(l + 1)][i] *= dc
self.delta_rx[(l + 1)][i] = delta
self.delta_ry[(l + 1)][i] = delta
self.delta_rz[(l + 1)][i] = delta
layer.rx[i] -= (np.sign(self.dc_dr[0][(l + 1)][i]) * self.delta_rx[(l + 1)][i])
layer.ry[i] -= (np.sign(self.dc_dr[1][(l + 1)][i]) * self.delta_ry[(l + 1)][i])
layer.rz[i] -= (np.sign(self.dc_dr[2][(l + 1)][i]) * self.delta_rz[(l + 1)][i])
self.prev_dc_dr[0][(l + 1)][i] = self.dc_dr[0][(l + 1)][i]
self.prev_dc_dr[1][(l + 1)][i] = self.dc_dr[1][(l + 1)][i]
self.prev_dc_dr[2][(l + 1)][i] = self.dc_dr[2][(l + 1)][i]
prod = (self.prev_dc_dt[0] * self.dc_dt[0])
for (i, t) in enumerate(layer.theta):
(self.delta_t[0][i], self.dc_dt[0][i]) = self.get_delta(prod[i], self.delta_t[0][i], self.dc_dt[0][i])
layer.theta[i] -= (np.sign(self.dc_dt[0][i]) * self.delta_t[0][i])
self.prev_dc_dt[0][i] = self.dc_dt[0][i]
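            # Position updates for the input layer (network.particle_input), driven by the index-0 gradients.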
if self.manhattan:
prod = (self.prev_dc_dr[0][0] * self.dc_dr[0][0])
for (i, rx) in enumerate(network.particle_input.rx):
(self.delta_rx[0][i], self.dc_dr[0][0][i]) = self.get_delta(prod[i], self.delta_rx[0][i], self.dc_dr[0][0][i])
network.particle_input.rx[i] -= (np.sign(self.dc_dr[0][0][i]) * self.delta_rx[0][i])
self.prev_dc_dr[0][0][i] = self.dc_dr[0][0][i]
prod = (self.prev_dc_dr[1][0] * self.dc_dr[1][0])
for (i, ry) in enumerate(network.particle_input.ry):
(self.delta_ry[0][i], self.dc_dr[1][0][i]) = self.get_delta(prod[i], self.delta_ry[0][i], self.dc_dr[1][0][i])
network.particle_input.ry[i] -= (np.sign(self.dc_dr[1][0][i]) * self.delta_ry[0][i])
self.prev_dc_dr[1][0][i] = self.dc_dr[1][0][i]
prod = (self.prev_dc_dr[2][0] * self.dc_dr[2][0])
for (i, rz) in enumerate(network.particle_input.rz):
(self.delta_rz[0][i], self.dc_dr[2][0][i]) = self.get_delta(prod[i], self.delta_rz[0][i], self.dc_dr[2][0][i])
network.particle_input.rz[i] -= (np.sign(self.dc_dr[2][0][i]) * self.delta_rz[0][i])
self.prev_dc_dr[2][0][i] = self.dc_dr[2][0][i]
else:
prod = (((self.prev_dc_dr[0][0] * self.dc_dr[0][0]) + (self.prev_dc_dr[1][0] * self.dc_dr[1][0])) + (self.prev_dc_dr[2][0] * self.dc_dr[2][0]))
for (i, rx) in enumerate(network.particle_input.rx):
(delta, dc) = self.get_delta(prod[i], self.delta_rx[0][i], 1.0)
self.dc_dr[0][0][i] *= dc
self.dc_dr[1][0][i] *= dc
self.dc_dr[2][0][i] *= dc
self.delta_rx[0][i] = delta
self.delta_ry[0][i] = delta
self.delta_rz[0][i] = delta
network.particle_input.rx[i] -= (np.sign(self.dc_dr[0][0][i]) * self.delta_rx[0][i])
network.particle_input.ry[i] -= (np.sign(self.dc_dr[1][0][i]) * self.delta_ry[0][i])
network.particle_input.rz[i] -= (np.sign(self.dc_dr[2][0][i]) * self.delta_rz[0][i])
self.prev_dc_dr[0][0][i] = self.dc_dr[0][0][i]
self.prev_dc_dr[1][0][i] = self.dc_dr[1][0][i]
self.prev_dc_dr[2][0][i] = self.dc_dr[2][0][i] | Update weights and biases according to RPROP
TODO: Oof... this code needs to be improved! | src/calrissian/optimizers/particle_rprop.py | weight_update | awlange/brainsparks | 3 | python | def weight_update(self, network):
'\n Update weights and biases according to RPROP\n\n TODO: Oof... this code needs to be improved!\n '
if ((self.delta_b is None) or (self.delta_q is None) or (self.delta_rx is None) or (self.delta_ry is None) or (self.delta_rz is None)):
id = self.init_delta
self.delta_b = []
self.delta_q = []
self.delta_rx = [np.random.uniform(0, id, network.particle_input.output_size)]
self.delta_ry = [np.random.uniform(0, id, network.particle_input.output_size)]
self.delta_rz = [np.random.uniform(0, id, network.particle_input.output_size)]
self.delta_t = [np.random.uniform(0, id, network.particle_input.output_size)]
self.prev_dc_db = []
self.prev_dc_dq = []
self.prev_dc_dr = [[np.zeros(network.particle_input.output_size)] for _ in range(3)]
self.prev_dc_dt = [np.zeros(network.particle_input.output_size)]
for (l, layer) in enumerate(network.layers):
self.delta_b.append(np.random.uniform(0, id, layer.b.shape))
self.delta_q.append(np.random.uniform(0, id, layer.q.shape))
self.delta_rx.append(np.random.uniform(0, id, layer.output_size))
self.delta_ry.append(np.random.uniform(0, id, layer.output_size))
self.delta_rz.append(np.random.uniform(0, id, layer.output_size))
self.delta_t.append(np.random.uniform(0, id, layer.theta.shape))
self.prev_dc_db.append(np.zeros_like(self.dc_db[l]))
self.prev_dc_dq.append(np.zeros_like(self.dc_dq[l]))
self.prev_dc_dr[0].append(np.zeros_like(self.dc_dr[0][(l + 1)]))
self.prev_dc_dr[1].append(np.zeros_like(self.dc_dr[1][(l + 1)]))
self.prev_dc_dr[2].append(np.zeros_like(self.dc_dr[2][(l + 1)]))
self.prev_dc_dt.append(np.zeros_like(self.dc_dt[(l + 1)]))
for (l, layer) in enumerate(network.layers):
prod = (self.prev_dc_db[l] * self.dc_db[l])
for (i, b) in enumerate(layer.b[0]):
(self.delta_b[l][0][i], self.dc_db[l][0][i]) = self.get_delta(prod[0][i], self.delta_b[l][0][i], self.dc_db[l][0][i])
layer.b[0][i] -= (np.sign(self.dc_db[l][0][i]) * self.delta_b[l][0][i])
self.prev_dc_db[l][0][i] = self.dc_db[l][0][i]
prod = (self.prev_dc_dq[l] * self.dc_dq[l])
for (i, q) in enumerate(layer.q):
(self.delta_q[l][i], self.dc_dq[l][i]) = self.get_delta(prod[i], self.delta_q[l][i], self.dc_dq[l][i])
layer.q[i] -= (np.sign(self.dc_dq[l][i]) * self.delta_q[l][i])
self.prev_dc_dq[l][i] = self.dc_dq[l][i]
prod = (self.prev_dc_dt[(l + 1)] * self.dc_dt[(l + 1)])
for (i, t) in enumerate(layer.theta):
(self.delta_t[(l + 1)][i], self.dc_dt[(l + 1)][i]) = self.get_delta(prod[i], self.delta_t[(l + 1)][i], self.dc_dt[(l + 1)][i])
layer.theta[i] -= (np.sign(self.dc_dt[(l + 1)][i]) * self.delta_t[(l + 1)][i])
self.prev_dc_dt[(l + 1)][i] = self.dc_dt[(l + 1)][i]
if self.manhattan:
prod = (self.prev_dc_dr[0][(l + 1)] * self.dc_dr[0][(l + 1)])
for (i, rx) in enumerate(layer.rx):
(self.delta_rx[(l + 1)][i], self.dc_dr[0][(l + 1)][i]) = self.get_delta(prod[i], self.delta_rx[(l + 1)][i], self.dc_dr[0][(l + 1)][i])
layer.rx[i] -= (np.sign(self.dc_dr[0][(l + 1)][i]) * self.delta_rx[(l + 1)][i])
self.prev_dc_dr[0][(l + 1)][i] = self.dc_dr[0][(l + 1)][i]
prod = (self.prev_dc_dr[1][(l + 1)] * self.dc_dr[1][(l + 1)])
for (i, ry) in enumerate(layer.ry):
(self.delta_ry[(l + 1)][i], self.dc_dr[1][(l + 1)][i]) = self.get_delta(prod[i], self.delta_ry[(l + 1)][i], self.dc_dr[1][(l + 1)][i])
layer.ry[i] -= (np.sign(self.dc_dr[1][(l + 1)][i]) * self.delta_ry[(l + 1)][i])
self.prev_dc_dr[1][(l + 1)][i] = self.dc_dr[1][(l + 1)][i]
prod = (self.prev_dc_dr[2][(l + 1)] * self.dc_dr[2][(l + 1)])
for (i, rz) in enumerate(layer.rz):
(self.delta_rz[(l + 1)][i], self.dc_dr[2][(l + 1)][i]) = self.get_delta(prod[i], self.delta_rz[(l + 1)][i], self.dc_dr[2][(l + 1)][i])
layer.rz[i] -= (np.sign(self.dc_dr[2][(l + 1)][i]) * self.delta_rz[(l + 1)][i])
self.prev_dc_dr[2][(l + 1)][i] = self.dc_dr[2][(l + 1)][i]
else:
prod = (((self.prev_dc_dr[0][(l + 1)] * self.dc_dr[0][(l + 1)]) + (self.prev_dc_dr[1][(l + 1)] * self.dc_dr[1][(l + 1)])) + (self.prev_dc_dr[2][(l + 1)] * self.dc_dr[2][(l + 1)]))
for (i, rx) in enumerate(layer.rx):
(delta, dc) = self.get_delta(prod[i], self.delta_rx[(l + 1)][i], 1.0)
self.dc_dr[0][(l + 1)][i] *= dc
self.dc_dr[1][(l + 1)][i] *= dc
self.dc_dr[2][(l + 1)][i] *= dc
self.delta_rx[(l + 1)][i] = delta
self.delta_ry[(l + 1)][i] = delta
self.delta_rz[(l + 1)][i] = delta
layer.rx[i] -= (np.sign(self.dc_dr[0][(l + 1)][i]) * self.delta_rx[(l + 1)][i])
layer.ry[i] -= (np.sign(self.dc_dr[1][(l + 1)][i]) * self.delta_ry[(l + 1)][i])
layer.rz[i] -= (np.sign(self.dc_dr[2][(l + 1)][i]) * self.delta_rz[(l + 1)][i])
self.prev_dc_dr[0][(l + 1)][i] = self.dc_dr[0][(l + 1)][i]
self.prev_dc_dr[1][(l + 1)][i] = self.dc_dr[1][(l + 1)][i]
self.prev_dc_dr[2][(l + 1)][i] = self.dc_dr[2][(l + 1)][i]
prod = (self.prev_dc_dt[0] * self.dc_dt[0])
for (i, t) in enumerate(layer.theta):
(self.delta_t[0][i], self.dc_dt[0][i]) = self.get_delta(prod[i], self.delta_t[0][i], self.dc_dt[0][i])
layer.theta[i] -= (np.sign(self.dc_dt[0][i]) * self.delta_t[0][i])
self.prev_dc_dt[0][i] = self.dc_dt[0][i]
if self.manhattan:
prod = (self.prev_dc_dr[0][0] * self.dc_dr[0][0])
for (i, rx) in enumerate(network.particle_input.rx):
(self.delta_rx[0][i], self.dc_dr[0][0][i]) = self.get_delta(prod[i], self.delta_rx[0][i], self.dc_dr[0][0][i])
network.particle_input.rx[i] -= (np.sign(self.dc_dr[0][0][i]) * self.delta_rx[0][i])
self.prev_dc_dr[0][0][i] = self.dc_dr[0][0][i]
prod = (self.prev_dc_dr[1][0] * self.dc_dr[1][0])
for (i, ry) in enumerate(network.particle_input.ry):
(self.delta_ry[0][i], self.dc_dr[1][0][i]) = self.get_delta(prod[i], self.delta_ry[0][i], self.dc_dr[1][0][i])
network.particle_input.ry[i] -= (np.sign(self.dc_dr[1][0][i]) * self.delta_ry[0][i])
self.prev_dc_dr[1][0][i] = self.dc_dr[1][0][i]
prod = (self.prev_dc_dr[2][0] * self.dc_dr[2][0])
for (i, rz) in enumerate(network.particle_input.rz):
(self.delta_rz[0][i], self.dc_dr[2][0][i]) = self.get_delta(prod[i], self.delta_rz[0][i], self.dc_dr[2][0][i])
network.particle_input.rz[i] -= (np.sign(self.dc_dr[2][0][i]) * self.delta_rz[0][i])
self.prev_dc_dr[2][0][i] = self.dc_dr[2][0][i]
else:
prod = (((self.prev_dc_dr[0][0] * self.dc_dr[0][0]) + (self.prev_dc_dr[1][0] * self.dc_dr[1][0])) + (self.prev_dc_dr[2][0] * self.dc_dr[2][0]))
for (i, rx) in enumerate(network.particle_input.rx):
(delta, dc) = self.get_delta(prod[i], self.delta_rx[0][i], 1.0)
self.dc_dr[0][0][i] *= dc
self.dc_dr[1][0][i] *= dc
self.dc_dr[2][0][i] *= dc
self.delta_rx[0][i] = delta
self.delta_ry[0][i] = delta
self.delta_rz[0][i] = delta
network.particle_input.rx[i] -= (np.sign(self.dc_dr[0][0][i]) * self.delta_rx[0][i])
network.particle_input.ry[i] -= (np.sign(self.dc_dr[1][0][i]) * self.delta_ry[0][i])
network.particle_input.rz[i] -= (np.sign(self.dc_dr[2][0][i]) * self.delta_rz[0][i])
self.prev_dc_dr[0][0][i] = self.dc_dr[0][0][i]
self.prev_dc_dr[1][0][i] = self.dc_dr[1][0][i]
self.prev_dc_dr[2][0][i] = self.dc_dr[2][0][i] | def weight_update(self, network):
'\n Update weights and biases according to RPROP\n\n TODO: Oof... this code needs to be improved!\n '
if ((self.delta_b is None) or (self.delta_q is None) or (self.delta_rx is None) or (self.delta_ry is None) or (self.delta_rz is None)):
id = self.init_delta
self.delta_b = []
self.delta_q = []
self.delta_rx = [np.random.uniform(0, id, network.particle_input.output_size)]
self.delta_ry = [np.random.uniform(0, id, network.particle_input.output_size)]
self.delta_rz = [np.random.uniform(0, id, network.particle_input.output_size)]
self.delta_t = [np.random.uniform(0, id, network.particle_input.output_size)]
self.prev_dc_db = []
self.prev_dc_dq = []
self.prev_dc_dr = [[np.zeros(network.particle_input.output_size)] for _ in range(3)]
self.prev_dc_dt = [np.zeros(network.particle_input.output_size)]
for (l, layer) in enumerate(network.layers):
self.delta_b.append(np.random.uniform(0, id, layer.b.shape))
self.delta_q.append(np.random.uniform(0, id, layer.q.shape))
self.delta_rx.append(np.random.uniform(0, id, layer.output_size))
self.delta_ry.append(np.random.uniform(0, id, layer.output_size))
self.delta_rz.append(np.random.uniform(0, id, layer.output_size))
self.delta_t.append(np.random.uniform(0, id, layer.theta.shape))
self.prev_dc_db.append(np.zeros_like(self.dc_db[l]))
self.prev_dc_dq.append(np.zeros_like(self.dc_dq[l]))
self.prev_dc_dr[0].append(np.zeros_like(self.dc_dr[0][(l + 1)]))
self.prev_dc_dr[1].append(np.zeros_like(self.dc_dr[1][(l + 1)]))
self.prev_dc_dr[2].append(np.zeros_like(self.dc_dr[2][(l + 1)]))
self.prev_dc_dt.append(np.zeros_like(self.dc_dt[(l + 1)]))
for (l, layer) in enumerate(network.layers):
prod = (self.prev_dc_db[l] * self.dc_db[l])
for (i, b) in enumerate(layer.b[0]):
(self.delta_b[l][0][i], self.dc_db[l][0][i]) = self.get_delta(prod[0][i], self.delta_b[l][0][i], self.dc_db[l][0][i])
layer.b[0][i] -= (np.sign(self.dc_db[l][0][i]) * self.delta_b[l][0][i])
self.prev_dc_db[l][0][i] = self.dc_db[l][0][i]
prod = (self.prev_dc_dq[l] * self.dc_dq[l])
for (i, q) in enumerate(layer.q):
(self.delta_q[l][i], self.dc_dq[l][i]) = self.get_delta(prod[i], self.delta_q[l][i], self.dc_dq[l][i])
layer.q[i] -= (np.sign(self.dc_dq[l][i]) * self.delta_q[l][i])
self.prev_dc_dq[l][i] = self.dc_dq[l][i]
prod = (self.prev_dc_dt[(l + 1)] * self.dc_dt[(l + 1)])
for (i, t) in enumerate(layer.theta):
(self.delta_t[(l + 1)][i], self.dc_dt[(l + 1)][i]) = self.get_delta(prod[i], self.delta_t[(l + 1)][i], self.dc_dt[(l + 1)][i])
layer.theta[i] -= (np.sign(self.dc_dt[(l + 1)][i]) * self.delta_t[(l + 1)][i])
self.prev_dc_dt[(l + 1)][i] = self.dc_dt[(l + 1)][i]
if self.manhattan:
prod = (self.prev_dc_dr[0][(l + 1)] * self.dc_dr[0][(l + 1)])
for (i, rx) in enumerate(layer.rx):
(self.delta_rx[(l + 1)][i], self.dc_dr[0][(l + 1)][i]) = self.get_delta(prod[i], self.delta_rx[(l + 1)][i], self.dc_dr[0][(l + 1)][i])
layer.rx[i] -= (np.sign(self.dc_dr[0][(l + 1)][i]) * self.delta_rx[(l + 1)][i])
self.prev_dc_dr[0][(l + 1)][i] = self.dc_dr[0][(l + 1)][i]
prod = (self.prev_dc_dr[1][(l + 1)] * self.dc_dr[1][(l + 1)])
for (i, ry) in enumerate(layer.ry):
(self.delta_ry[(l + 1)][i], self.dc_dr[1][(l + 1)][i]) = self.get_delta(prod[i], self.delta_ry[(l + 1)][i], self.dc_dr[1][(l + 1)][i])
layer.ry[i] -= (np.sign(self.dc_dr[1][(l + 1)][i]) * self.delta_ry[(l + 1)][i])
self.prev_dc_dr[1][(l + 1)][i] = self.dc_dr[1][(l + 1)][i]
prod = (self.prev_dc_dr[2][(l + 1)] * self.dc_dr[2][(l + 1)])
for (i, rz) in enumerate(layer.rz):
(self.delta_rz[(l + 1)][i], self.dc_dr[2][(l + 1)][i]) = self.get_delta(prod[i], self.delta_rz[(l + 1)][i], self.dc_dr[2][(l + 1)][i])
layer.rz[i] -= (np.sign(self.dc_dr[2][(l + 1)][i]) * self.delta_rz[(l + 1)][i])
self.prev_dc_dr[2][(l + 1)][i] = self.dc_dr[2][(l + 1)][i]
else:
prod = (((self.prev_dc_dr[0][(l + 1)] * self.dc_dr[0][(l + 1)]) + (self.prev_dc_dr[1][(l + 1)] * self.dc_dr[1][(l + 1)])) + (self.prev_dc_dr[2][(l + 1)] * self.dc_dr[2][(l + 1)]))
for (i, rx) in enumerate(layer.rx):
(delta, dc) = self.get_delta(prod[i], self.delta_rx[(l + 1)][i], 1.0)
self.dc_dr[0][(l + 1)][i] *= dc
self.dc_dr[1][(l + 1)][i] *= dc
self.dc_dr[2][(l + 1)][i] *= dc
self.delta_rx[(l + 1)][i] = delta
self.delta_ry[(l + 1)][i] = delta
self.delta_rz[(l + 1)][i] = delta
layer.rx[i] -= (np.sign(self.dc_dr[0][(l + 1)][i]) * self.delta_rx[(l + 1)][i])
layer.ry[i] -= (np.sign(self.dc_dr[1][(l + 1)][i]) * self.delta_ry[(l + 1)][i])
layer.rz[i] -= (np.sign(self.dc_dr[2][(l + 1)][i]) * self.delta_rz[(l + 1)][i])
self.prev_dc_dr[0][(l + 1)][i] = self.dc_dr[0][(l + 1)][i]
self.prev_dc_dr[1][(l + 1)][i] = self.dc_dr[1][(l + 1)][i]
self.prev_dc_dr[2][(l + 1)][i] = self.dc_dr[2][(l + 1)][i]
prod = (self.prev_dc_dt[0] * self.dc_dt[0])
for (i, t) in enumerate(layer.theta):
(self.delta_t[0][i], self.dc_dt[0][i]) = self.get_delta(prod[i], self.delta_t[0][i], self.dc_dt[0][i])
layer.theta[i] -= (np.sign(self.dc_dt[0][i]) * self.delta_t[0][i])
self.prev_dc_dt[0][i] = self.dc_dt[0][i]
if self.manhattan:
prod = (self.prev_dc_dr[0][0] * self.dc_dr[0][0])
for (i, rx) in enumerate(network.particle_input.rx):
(self.delta_rx[0][i], self.dc_dr[0][0][i]) = self.get_delta(prod[i], self.delta_rx[0][i], self.dc_dr[0][0][i])
network.particle_input.rx[i] -= (np.sign(self.dc_dr[0][0][i]) * self.delta_rx[0][i])
self.prev_dc_dr[0][0][i] = self.dc_dr[0][0][i]
prod = (self.prev_dc_dr[1][0] * self.dc_dr[1][0])
for (i, ry) in enumerate(network.particle_input.ry):
(self.delta_ry[0][i], self.dc_dr[1][0][i]) = self.get_delta(prod[i], self.delta_ry[0][i], self.dc_dr[1][0][i])
network.particle_input.ry[i] -= (np.sign(self.dc_dr[1][0][i]) * self.delta_ry[0][i])
self.prev_dc_dr[1][0][i] = self.dc_dr[1][0][i]
prod = (self.prev_dc_dr[2][0] * self.dc_dr[2][0])
for (i, rz) in enumerate(network.particle_input.rz):
(self.delta_rz[0][i], self.dc_dr[2][0][i]) = self.get_delta(prod[i], self.delta_rz[0][i], self.dc_dr[2][0][i])
network.particle_input.rz[i] -= (np.sign(self.dc_dr[2][0][i]) * self.delta_rz[0][i])
self.prev_dc_dr[2][0][i] = self.dc_dr[2][0][i]
else:
prod = (((self.prev_dc_dr[0][0] * self.dc_dr[0][0]) + (self.prev_dc_dr[1][0] * self.dc_dr[1][0])) + (self.prev_dc_dr[2][0] * self.dc_dr[2][0]))
for (i, rx) in enumerate(network.particle_input.rx):
(delta, dc) = self.get_delta(prod[i], self.delta_rx[0][i], 1.0)
self.dc_dr[0][0][i] *= dc
self.dc_dr[1][0][i] *= dc
self.dc_dr[2][0][i] *= dc
self.delta_rx[0][i] = delta
self.delta_ry[0][i] = delta
self.delta_rz[0][i] = delta
network.particle_input.rx[i] -= (np.sign(self.dc_dr[0][0][i]) * self.delta_rx[0][i])
network.particle_input.ry[i] -= (np.sign(self.dc_dr[1][0][i]) * self.delta_ry[0][i])
network.particle_input.rz[i] -= (np.sign(self.dc_dr[2][0][i]) * self.delta_rz[0][i])
self.prev_dc_dr[0][0][i] = self.dc_dr[0][0][i]
self.prev_dc_dr[1][0][i] = self.dc_dr[1][0][i]
self.prev_dc_dr[2][0][i] = self.dc_dr[2][0][i]<|docstring|>Update weights and biases according to RPROP
TODO: Oof... this code needs to be improved!<|endoftext|> |
b80e0378b0cc0fd2185181995d5e48c79f090876e3f621759a64b65318b1c966 | @staticmethod
def main():
"\n Displays menu and processes user's input.\n It calls the proper method based on the user's selection.\n "
dummy = displayMenu(menuItems, True)
while True:
choice = displayMenu(menuItems, False)
if (choice == 1):
print('\n**** Types Operations ****')
print('\n---Get number types---')
n = numbers()
n.getType(1.2)
n.getType(1000)
n.getComplexType((1 + 2j))
elif (choice == 2):
print('\n*** String Operations ***')
print('\n---Get a string---')
strings.useString()
print('\n---Get a substring---')
strings.subString()
print('\n---Strip white spaces---')
strings.stripWhiteSpaces()
print('\n---Lower case string---')
strings.lowerString()
print('\n---Upper case string---')
strings.upperString()
print('\n---Split string---')
strings.splitString()
elif (choice == 3):
print('\n *** List Operations ***')
print('\n---Index lists---')
lists.listIndex()
print('\n---Slice lists---')
lists.listSlice()
print('\n---Change lists---')
lists.listChange()
print('\n---Add lists---')
lists.listAdd()
elif (choice == 4):
print('\n*** Dictionary Operations ***')
fiboTriangle(5)
elif (choice == 5):
print('\n*** Tuple Operations ***')
print('\n---Create tuples ---')
tuples.createTuples()
elif (choice == 6):
print('\n*** Plot Operations ***')
plotting()
elif (choice == (len(menuItems) - 1)):
dummy = displayMenu(menuItems, True)
elif (choice == len(menuItems)):
break | Displays menu and processes user's input.
It calls the proper method based on the user's selection. | language-types/main.py | main | milexm/pythonExamples | 0 | python | @staticmethod
def main():
"\n Displays menu and processes user's input.\n It calls the proper method based on the user's selection.\n "
dummy = displayMenu(menuItems, True)
while True:
choice = displayMenu(menuItems, False)
if (choice == 1):
print('\n**** Types Operations ****')
print('\n---Get number types---')
n = numbers()
n.getType(1.2)
n.getType(1000)
n.getComplexType((1 + 2j))
elif (choice == 2):
print('\n*** String Operations ***')
print('\n---Get a string---')
strings.useString()
print('\n---Get a substring---')
strings.subString()
print('\n---Strip white spaces---')
strings.stripWhiteSpaces()
print('\n---Lower case string---')
strings.lowerString()
print('\n---Upper case string---')
strings.upperString()
print('\n---Split string---')
strings.splitString()
elif (choice == 3):
print('\n *** List Operations ***')
print('\n---Index lists---')
lists.listIndex()
print('\n---Slice lists---')
lists.listSlice()
print('\n---Change lists---')
lists.listChange()
print('\n---Add lists---')
lists.listAdd()
elif (choice == 4):
print('\n*** Dictionary Operations ***')
fiboTriangle(5)
elif (choice == 5):
print('\n*** Tuple Operations ***')
print('\n---Create tuples ---')
tuples.createTuples()
elif (choice == 6):
print('\n*** Plot Operations ***')
plotting()
elif (choice == (len(menuItems) - 1)):
dummy = displayMenu(menuItems, True)
elif (choice == len(menuItems)):
break | @staticmethod
def main():
"\n Displays menu and processes user's input.\n It calls the proper method based on the user's selection.\n "
dummy = displayMenu(menuItems, True)
while True:
choice = displayMenu(menuItems, False)
if (choice == 1):
print('\n**** Types Operations ****')
print('\n---Get number types---')
n = numbers()
n.getType(1.2)
n.getType(1000)
n.getComplexType((1 + 2j))
elif (choice == 2):
print('\n*** String Operations ***')
print('\n---Get a string---')
strings.useString()
print('\n---Get a substring---')
strings.subString()
print('\n---Strip white spaces---')
strings.stripWhiteSpaces()
print('\n---Lower case string---')
strings.lowerString()
print('\n---Upper case string---')
strings.upperString()
print('\n---Split string---')
strings.splitString()
elif (choice == 3):
print('\n *** List Operations ***')
print('\n---Index lists---')
lists.listIndex()
print('\n---Slice lists---')
lists.listSlice()
print('\n---Change lists---')
lists.listChange()
print('\n---Add lists---')
lists.listAdd()
elif (choice == 4):
print('\n*** Dictionary Operations ***')
fiboTriangle(5)
elif (choice == 5):
print('\n*** Tuple Operations ***')
print('\n---Create tuples ---')
tuples.createTuples()
elif (choice == 6):
print('\n*** Plot Operations ***')
plotting()
elif (choice == (len(menuItems) - 1)):
dummy = displayMenu(menuItems, True)
elif (choice == len(menuItems)):
break<|docstring|>Displays menu and processes user's input.
It calls the proper method based on the user's selection.<|endoftext|> |
f056f81fe768ccd7b9648db562957104a70c459c84d4bdb3aaabf05f47d64059 | def parse_response(self, response: requests.Response, stream_state: Mapping[(str, Any)], **kwargs) -> Iterable[Mapping]:
'\n :return an iterable containing each record in the response\n '
(yield from response.json().get(self.data_field, [])) | :return an iterable containing each record in the response | airbyte-integrations/connectors/source-amazon-seller-partner/source_amazon_seller_partner/streams.py | parse_response | jakemccloskey/airbyte | 0 | python | def parse_response(self, response: requests.Response, stream_state: Mapping[(str, Any)], **kwargs) -> Iterable[Mapping]:
'\n \n '
(yield from response.json().get(self.data_field, [])) | def parse_response(self, response: requests.Response, stream_state: Mapping[(str, Any)], **kwargs) -> Iterable[Mapping]:
'\n \n '
(yield from response.json().get(self.data_field, []))<|docstring|>:return an iterable containing each record in the response<|endoftext|> |
07b594042f11d0f8f69125b7353932d3b2f976d0d84254e52620be41b09ca1ba | def get_updated_state(self, current_stream_state: MutableMapping[(str, Any)], latest_record: Mapping[(str, Any)]) -> Mapping[(str, Any)]:
"\n Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object\n and returning an updated state object.\n "
latest_benchmark = latest_record[self.cursor_field]
if current_stream_state.get(self.cursor_field):
return {self.cursor_field: max(latest_benchmark, current_stream_state[self.cursor_field])}
return {self.cursor_field: latest_benchmark} | Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object
and returning an updated state object. | airbyte-integrations/connectors/source-amazon-seller-partner/source_amazon_seller_partner/streams.py | get_updated_state | jakemccloskey/airbyte | 0 | python | def get_updated_state(self, current_stream_state: MutableMapping[(str, Any)], latest_record: Mapping[(str, Any)]) -> Mapping[(str, Any)]:
"\n Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object\n and returning an updated state object.\n "
latest_benchmark = latest_record[self.cursor_field]
if current_stream_state.get(self.cursor_field):
return {self.cursor_field: max(latest_benchmark, current_stream_state[self.cursor_field])}
return {self.cursor_field: latest_benchmark} | def get_updated_state(self, current_stream_state: MutableMapping[(str, Any)], latest_record: Mapping[(str, Any)]) -> Mapping[(str, Any)]:
"\n Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object\n and returning an updated state object.\n "
latest_benchmark = latest_record[self.cursor_field]
if current_stream_state.get(self.cursor_field):
return {self.cursor_field: max(latest_benchmark, current_stream_state[self.cursor_field])}
return {self.cursor_field: latest_benchmark}<|docstring|>Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object
and returning an updated state object.<|endoftext|> |
738f97525dbcefba6d033d368039eafe48a17badb04fce6e3d53648d817023d3 | def _create_prepared_request(self, path: str, http_method: str='GET', headers: Mapping=None, params: Mapping=None, json: Any=None, data: Any=None) -> requests.PreparedRequest:
'\n Override to make http_method configurable per method call\n '
args = {'method': http_method, 'url': (self.url_base + path), 'headers': headers, 'params': params}
if (http_method.upper() in BODY_REQUEST_METHODS):
if (json and data):
raise RequestBodyException("At the same time only one of the 'request_body_data' and 'request_body_json' functions can return data")
elif json:
args['json'] = json
elif data:
args['data'] = data
return self._session.prepare_request(requests.Request(**args)) | Override to make http_method configurable per method call | airbyte-integrations/connectors/source-amazon-seller-partner/source_amazon_seller_partner/streams.py | _create_prepared_request | jakemccloskey/airbyte | 0 | python | def _create_prepared_request(self, path: str, http_method: str='GET', headers: Mapping=None, params: Mapping=None, json: Any=None, data: Any=None) -> requests.PreparedRequest:
'\n \n '
args = {'method': http_method, 'url': (self.url_base + path), 'headers': headers, 'params': params}
if (http_method.upper() in BODY_REQUEST_METHODS):
if (json and data):
raise RequestBodyException("At the same time only one of the 'request_body_data' and 'request_body_json' functions can return data")
elif json:
args['json'] = json
elif data:
args['data'] = data
return self._session.prepare_request(requests.Request(**args)) | def _create_prepared_request(self, path: str, http_method: str='GET', headers: Mapping=None, params: Mapping=None, json: Any=None, data: Any=None) -> requests.PreparedRequest:
'\n \n '
args = {'method': http_method, 'url': (self.url_base + path), 'headers': headers, 'params': params}
if (http_method.upper() in BODY_REQUEST_METHODS):
if (json and data):
raise RequestBodyException("At the same time only one of the 'request_body_data' and 'request_body_json' functions can return data")
elif json:
args['json'] = json
elif data:
args['data'] = data
return self._session.prepare_request(requests.Request(**args))<|docstring|>Override to make http_method configurable per method call<|endoftext|> |
d7c615c28e4f8b257181b4f6430b1df792b5429bc2374f98fce16128ecf17ab4 | def decrypt_report_document(self, url, initialization_vector, key, encryption_standard, payload):
'\n Decrypts and unpacks a report document, currently AES encryption is implemented\n '
if (encryption_standard == 'AES'):
decrypted = self.decrypt_aes(requests.get(url).content, key, initialization_vector)
if ('compressionAlgorithm' in payload):
return zlib.decompress(bytearray(decrypted), (15 + 32)).decode('iso-8859-1')
return decrypted.decode('iso-8859-1')
raise Exception([{'message': 'Only AES decryption is implemented.'}]) | Decrypts and unpacks a report document, currently AES encryption is implemented | airbyte-integrations/connectors/source-amazon-seller-partner/source_amazon_seller_partner/streams.py | decrypt_report_document | jakemccloskey/airbyte | 0 | python | def decrypt_report_document(self, url, initialization_vector, key, encryption_standard, payload):
'\n \n '
if (encryption_standard == 'AES'):
decrypted = self.decrypt_aes(requests.get(url).content, key, initialization_vector)
if ('compressionAlgorithm' in payload):
return zlib.decompress(bytearray(decrypted), (15 + 32)).decode('iso-8859-1')
return decrypted.decode('iso-8859-1')
raise Exception([{'message': 'Only AES decryption is implemented.'}]) | def decrypt_report_document(self, url, initialization_vector, key, encryption_standard, payload):
'\n \n '
if (encryption_standard == 'AES'):
decrypted = self.decrypt_aes(requests.get(url).content, key, initialization_vector)
if ('compressionAlgorithm' in payload):
return zlib.decompress(bytearray(decrypted), (15 + 32)).decode('iso-8859-1')
return decrypted.decode('iso-8859-1')
raise Exception([{'message': 'Only AES decryption is implemented.'}])<|docstring|>Decrypts and unpacks a report document, currently AES encryption is implemented<|endoftext|> |
e71865673464d70cc45f5ce1fe3b96d272fa0674c18c98766b4ba695ca9fbc26 | def read_records(self, *args, **kwargs) -> Iterable[Mapping[(str, Any)]]:
'\n Create and retrieve the report.\n Decrypt and parse the report is its fully proceed, then yield the report document records.\n '
report_payload = {}
is_processed = False
is_done = False
start_time = pendulum.now('utc')
seconds_waited = 0
report_id = self._create_report()['reportId']
while ((not is_processed) and (seconds_waited < REPORTS_MAX_WAIT_SECONDS)):
report_payload = self._retrieve_report(report_id=report_id)
seconds_waited = (pendulum.now('utc') - start_time).seconds
is_processed = (report_payload.get('processingStatus') not in ['IN_QUEUE', 'IN_PROGRESS'])
is_done = (report_payload.get('processingStatus') == 'DONE')
time.sleep(self.sleep_seconds)
if is_done:
document_id = report_payload['reportDocumentId']
request_headers = self.request_headers()
request = self._create_prepared_request(path=self.path(document_id=document_id), headers=dict(request_headers, **self.authenticator.get_auth_header()), params=self.request_params())
response = self._send_request(request)
(yield from self.parse_response(response))
else:
logger.warn(f'There are no report document related in stream `{self.name}`. Report body {report_payload}') | Create and retrieve the report.
Decrypt and parse the report is its fully proceed, then yield the report document records. | airbyte-integrations/connectors/source-amazon-seller-partner/source_amazon_seller_partner/streams.py | read_records | jakemccloskey/airbyte | 0 | python | def read_records(self, *args, **kwargs) -> Iterable[Mapping[(str, Any)]]:
'\n Create and retrieve the report.\n Decrypt and parse the report is its fully proceed, then yield the report document records.\n '
report_payload = {}
is_processed = False
is_done = False
start_time = pendulum.now('utc')
seconds_waited = 0
report_id = self._create_report()['reportId']
while ((not is_processed) and (seconds_waited < REPORTS_MAX_WAIT_SECONDS)):
report_payload = self._retrieve_report(report_id=report_id)
seconds_waited = (pendulum.now('utc') - start_time).seconds
is_processed = (report_payload.get('processingStatus') not in ['IN_QUEUE', 'IN_PROGRESS'])
is_done = (report_payload.get('processingStatus') == 'DONE')
time.sleep(self.sleep_seconds)
if is_done:
document_id = report_payload['reportDocumentId']
request_headers = self.request_headers()
request = self._create_prepared_request(path=self.path(document_id=document_id), headers=dict(request_headers, **self.authenticator.get_auth_header()), params=self.request_params())
response = self._send_request(request)
(yield from self.parse_response(response))
else:
logger.warn(f'There are no report document related in stream `{self.name}`. Report body {report_payload}') | def read_records(self, *args, **kwargs) -> Iterable[Mapping[(str, Any)]]:
'\n Create and retrieve the report.\n Decrypt and parse the report is its fully proceed, then yield the report document records.\n '
report_payload = {}
is_processed = False
is_done = False
start_time = pendulum.now('utc')
seconds_waited = 0
report_id = self._create_report()['reportId']
while ((not is_processed) and (seconds_waited < REPORTS_MAX_WAIT_SECONDS)):
report_payload = self._retrieve_report(report_id=report_id)
seconds_waited = (pendulum.now('utc') - start_time).seconds
is_processed = (report_payload.get('processingStatus') not in ['IN_QUEUE', 'IN_PROGRESS'])
is_done = (report_payload.get('processingStatus') == 'DONE')
time.sleep(self.sleep_seconds)
if is_done:
document_id = report_payload['reportDocumentId']
request_headers = self.request_headers()
request = self._create_prepared_request(path=self.path(document_id=document_id), headers=dict(request_headers, **self.authenticator.get_auth_header()), params=self.request_params())
response = self._send_request(request)
(yield from self.parse_response(response))
else:
logger.warn(f'There are no report document related in stream `{self.name}`. Report body {report_payload}')<|docstring|>Create and retrieve the report.
Decrypt and parse the report is its fully proceed, then yield the report document records.<|endoftext|> |
cc1e5f3385aea3e678be8f2d26c7985775d05104ffbf1b8a4cba3993871ed092 | def _label_encoding(one_hot_labels):
' Constructs artificial 1d labels from incoming array of one_hot encoded label data\n Artificial label of a one_hot encoded label is the dim number where the label had a 1\n Args: \n one_hot_labels (np.ndarray): label data\n Return:\n encoded_labels (float): label data\n '
encoded_labels = np.argmax(one_hot_labels, axis=1)
return encoded_labels | Constructs artificial 1d labels from incoming array of one_hot encoded label data
Artificial label of a one_hot encoded label is the dim number where the label had a 1
Args:
one_hot_labels (np.ndarray): label data
Return:
encoded_labels (float): label data | niteshade/defence.py | _label_encoding | oskarfernlund/data-poisoning-attacks | 0 | python | def _label_encoding(one_hot_labels):
' Constructs artificial 1d labels from incoming array of one_hot encoded label data\n Artificial label of a one_hot encoded label is the dim number where the label had a 1\n Args: \n one_hot_labels (np.ndarray): label data\n Return:\n encoded_labels (float): label data\n '
encoded_labels = np.argmax(one_hot_labels, axis=1)
return encoded_labels | def _label_encoding(one_hot_labels):
' Constructs artificial 1d labels from incoming array of one_hot encoded label data\n Artificial label of a one_hot encoded label is the dim number where the label had a 1\n Args: \n one_hot_labels (np.ndarray): label data\n Return:\n encoded_labels (float): label data\n '
encoded_labels = np.argmax(one_hot_labels, axis=1)
return encoded_labels<|docstring|>Constructs artificial 1d labels from incoming array of one_hot encoded label data
Artificial label of a one_hot encoded label is the dim number where the label had a 1
Args:
one_hot_labels (np.ndarray): label data
Return:
encoded_labels (float): label data<|endoftext|> |
cf6bf7e772644993948f0b3e48beef896e89c5eb0fe3805af7b10ad1ffc564b1 | def _input_validation(defender):
' Input validation for various defenders or Defendergroup\n Args: \n defender (Defender, DefenderGroup): label data\n '
if isinstance(defender, DefenderGroup):
if (not isinstance(defender.defender_list, list)):
raise TypeError('The defender_list is not a list object.')
if (not isinstance(defender.ensemble_accept_rate, float)):
raise TypeError('The ensemble_rate needs to be a float.')
for defender in defender.defender_list:
if (not isinstance(defender, Defender)):
raise TypeError('All of the defenders in the defender_list need to be Defender objects.')
elif isinstance(defender, KNN_Defender):
if (not isinstance(defender.confidence_threshold, float)):
raise TypeError('The confidence_threshold is not a float')
if (not isinstance(defender.nearest_neighbours, int)):
raise TypeError('The nearest_neighbours is not a float')
if (not isinstance(defender.one_hot, boolean)):
raise TypeError('The one_hot flat is not a boolean')
elif isinstance(defender, SoftmaxDefender):
if (not isinstance(defender.threshold, float)):
raise TypeError('The threshold input for the SoftmaxDefender needs to be a float')
if (not isinstance(defender.one_hot, boolean)):
raise TypeError('The one_hot flat is not a boolean')
if (not isinstance(defender.delay, int)):
raise TypeError('The delay parameter is not an integer')
elif isinstance(defender, KNN_Defender):
if (not (isinstance(defender.threshold, float) or isinstance(defender.threshold, int))):
raise TypeError('The threshold input for the FeasibleSetDefender needs to be either float or a integer type.')
if (not isinstance(defender.one_hot, boolean)):
raise TypeError('The one_hot flat is not a boolean') | Input validation for various defenders or Defendergroup
Args:
defender (Defender, DefenderGroup): label data | niteshade/defence.py | _input_validation | oskarfernlund/data-poisoning-attacks | 0 | python | def _input_validation(defender):
' Input validation for various defenders or Defendergroup\n Args: \n defender (Defender, DefenderGroup): label data\n '
if isinstance(defender, DefenderGroup):
if (not isinstance(defender.defender_list, list)):
raise TypeError('The defender_list is not a list object.')
if (not isinstance(defender.ensemble_accept_rate, float)):
raise TypeError('The ensemble_rate needs to be a float.')
for defender in defender.defender_list:
if (not isinstance(defender, Defender)):
raise TypeError('All of the defenders in the defender_list need to be Defender objects.')
elif isinstance(defender, KNN_Defender):
if (not isinstance(defender.confidence_threshold, float)):
raise TypeError('The confidence_threshold is not a float')
if (not isinstance(defender.nearest_neighbours, int)):
raise TypeError('The nearest_neighbours is not a float')
if (not isinstance(defender.one_hot, boolean)):
raise TypeError('The one_hot flat is not a boolean')
elif isinstance(defender, SoftmaxDefender):
if (not isinstance(defender.threshold, float)):
raise TypeError('The threshold input for the SoftmaxDefender needs to be a float')
if (not isinstance(defender.one_hot, boolean)):
raise TypeError('The one_hot flat is not a boolean')
if (not isinstance(defender.delay, int)):
raise TypeError('The delay parameter is not an integer')
elif isinstance(defender, KNN_Defender):
if (not (isinstance(defender.threshold, float) or isinstance(defender.threshold, int))):
raise TypeError('The threshold input for the FeasibleSetDefender needs to be either float or a integer type.')
if (not isinstance(defender.one_hot, boolean)):
raise TypeError('The one_hot flat is not a boolean') | def _input_validation(defender):
' Input validation for various defenders or Defendergroup\n Args: \n defender (Defender, DefenderGroup): label data\n '
if isinstance(defender, DefenderGroup):
if (not isinstance(defender.defender_list, list)):
raise TypeError('The defender_list is not a list object.')
if (not isinstance(defender.ensemble_accept_rate, float)):
raise TypeError('The ensemble_rate needs to be a float.')
for defender in defender.defender_list:
if (not isinstance(defender, Defender)):
raise TypeError('All of the defenders in the defender_list need to be Defender objects.')
elif isinstance(defender, KNN_Defender):
if (not isinstance(defender.confidence_threshold, float)):
raise TypeError('The confidence_threshold is not a float')
if (not isinstance(defender.nearest_neighbours, int)):
raise TypeError('The nearest_neighbours is not a float')
if (not isinstance(defender.one_hot, boolean)):
raise TypeError('The one_hot flat is not a boolean')
elif isinstance(defender, SoftmaxDefender):
if (not isinstance(defender.threshold, float)):
raise TypeError('The threshold input for the SoftmaxDefender needs to be a float')
if (not isinstance(defender.one_hot, boolean)):
raise TypeError('The one_hot flat is not a boolean')
if (not isinstance(defender.delay, int)):
raise TypeError('The delay parameter is not an integer')
elif isinstance(defender, KNN_Defender):
if (not (isinstance(defender.threshold, float) or isinstance(defender.threshold, int))):
raise TypeError('The threshold input for the FeasibleSetDefender needs to be either float or a integer type.')
if (not isinstance(defender.one_hot, boolean)):
raise TypeError('The one_hot flat is not a boolean')<|docstring|>Input validation for various defenders or Defendergroup
Args:
defender (Defender, DefenderGroup): label data<|endoftext|> |
53fe71c912d0245a63232295f22d1dd02c2dc3b6032e87c9b1909945914ce7dd | def __init__(self, defender_list: list, ensemble_accept_rate=0.0) -> None:
'Constructor method of DefenderGroup class.\n '
self.defender_list = defender_list
self.ensemble_accept_rate = ensemble_accept_rate
_input_validation(self) | Constructor method of DefenderGroup class. | niteshade/defence.py | __init__ | oskarfernlund/data-poisoning-attacks | 0 | python | def __init__(self, defender_list: list, ensemble_accept_rate=0.0) -> None:
'\n '
self.defender_list = defender_list
self.ensemble_accept_rate = ensemble_accept_rate
_input_validation(self) | def __init__(self, defender_list: list, ensemble_accept_rate=0.0) -> None:
'\n '
self.defender_list = defender_list
self.ensemble_accept_rate = ensemble_accept_rate
_input_validation(self)<|docstring|>Constructor method of DefenderGroup class.<|endoftext|> |
ab334e8dc67c608a2bf73d8313a5f95c7381099c69fa3061f9e1758b3374e66d | def defend(self, X, y, **input_kwargs):
' Group defend method, where each of the .defend method of each defender in defender_list is called. \n The exact defence depends on whether ensemble decisionmaking has been used.\n Args: \n X (np.ndarray, torch.Tensor) : point data (shape (batch_size, data dimensionality)).\n y (np.ndarray, torch.Tensor) : label data (shape (batch_size,)).\n \n Return:\n tuple (output_x, output_y):\n output_x (np.ndarray, torch.Tensor) : point data (shape (batch_size, data dimensionality)),\n output_y (np.ndarray, torch.Tensor) : label data (shape (batch_size,)) .\n '
if (self.ensemble_accept_rate > 0):
(output_x, output_y) = self._ensemble_defence(X, y, **input_kwargs)
else:
(output_x, output_y) = self._sequential_defence(X, y, **input_kwargs)
return (output_x, output_y) | Group defend method, where each of the .defend method of each defender in defender_list is called.
The exact defence depends on whether ensemble decisionmaking has been used.
Args:
X (np.ndarray, torch.Tensor) : point data (shape (batch_size, data dimensionality)).
y (np.ndarray, torch.Tensor) : label data (shape (batch_size,)).
Return:
tuple (output_x, output_y):
output_x (np.ndarray, torch.Tensor) : point data (shape (batch_size, data dimensionality)),
output_y (np.ndarray, torch.Tensor) : label data (shape (batch_size,)) . | niteshade/defence.py | defend | oskarfernlund/data-poisoning-attacks | 0 | python | def defend(self, X, y, **input_kwargs):
' Group defend method, where each of the .defend method of each defender in defender_list is called. \n The exact defence depends on whether ensemble decisionmaking has been used.\n Args: \n X (np.ndarray, torch.Tensor) : point data (shape (batch_size, data dimensionality)).\n y (np.ndarray, torch.Tensor) : label data (shape (batch_size,)).\n \n Return:\n tuple (output_x, output_y):\n output_x (np.ndarray, torch.Tensor) : point data (shape (batch_size, data dimensionality)),\n output_y (np.ndarray, torch.Tensor) : label data (shape (batch_size,)) .\n '
if (self.ensemble_accept_rate > 0):
(output_x, output_y) = self._ensemble_defence(X, y, **input_kwargs)
else:
(output_x, output_y) = self._sequential_defence(X, y, **input_kwargs)
return (output_x, output_y) | def defend(self, X, y, **input_kwargs):
' Group defend method, where each of the .defend method of each defender in defender_list is called. \n The exact defence depends on whether ensemble decisionmaking has been used.\n Args: \n X (np.ndarray, torch.Tensor) : point data (shape (batch_size, data dimensionality)).\n y (np.ndarray, torch.Tensor) : label data (shape (batch_size,)).\n \n Return:\n tuple (output_x, output_y):\n output_x (np.ndarray, torch.Tensor) : point data (shape (batch_size, data dimensionality)),\n output_y (np.ndarray, torch.Tensor) : label data (shape (batch_size,)) .\n '
if (self.ensemble_accept_rate > 0):
(output_x, output_y) = self._ensemble_defence(X, y, **input_kwargs)
else:
(output_x, output_y) = self._sequential_defence(X, y, **input_kwargs)
return (output_x, output_y)<|docstring|>Group defend method, where each of the .defend method of each defender in defender_list is called.
The exact defence depends on whether ensemble decisionmaking has been used.
Args:
X (np.ndarray, torch.Tensor) : point data (shape (batch_size, data dimensionality)).
y (np.ndarray, torch.Tensor) : label data (shape (batch_size,)).
Return:
tuple (output_x, output_y):
output_x (np.ndarray, torch.Tensor) : point data (shape (batch_size, data dimensionality)),
output_y (np.ndarray, torch.Tensor) : label data (shape (batch_size,)) .<|endoftext|> |
a7eeaedff6fbd8b03c662f3f01a182f2983849258707fa9abd6df782c7e3bdfe | def _ensemble_defence(self, X, y, **input_kwargs):
'Group defend method, where each defender in the list will defend input points - \n the .defend method of each defender will be called for all points and their decisions will be recorded in a dictionary\n Points will be rejected based on the proportion of defenders rejecting each individual point\n Args: \n X (np.ndarray, torch.Tensor) : point data (shape (batch_size, data dimensionality)).\n y (np.ndarray, torch.Tensor) : label data (shape (batch_size,)).\n \n Return:\n tuple (output_x, output_y):\n output_x (np.ndarray, torch.Tensor) : point data.\n output_y (np.ndarray, torch.Tensor) : label data.\n '
input_datapoints = X.copy()
input_labels = y.copy()
accept_counts = self._initiate_dict(X, y)
for defender in self.defender_list:
(X, y) = defender.defend(X, y, **input_kwargs)
accept_counts = self._update_dict(accept_counts, X, y)
X = input_datapoints.copy()
y = input_labels.copy()
(output_x, output_y) = self._get_final_points(accept_counts)
return (output_x, output_y) | Group defend method, where each defender in the list will defend input points -
the .defend method of each defender will be called for all points and their decisions will be recorded in a dictionary
Points will be rejected based on the proportion of defenders rejecting each individual point
Args:
X (np.ndarray, torch.Tensor) : point data (shape (batch_size, data dimensionality)).
y (np.ndarray, torch.Tensor) : label data (shape (batch_size,)).
Return:
tuple (output_x, output_y):
output_x (np.ndarray, torch.Tensor) : point data.
output_y (np.ndarray, torch.Tensor) : label data. | niteshade/defence.py | _ensemble_defence | oskarfernlund/data-poisoning-attacks | 0 | python | def _ensemble_defence(self, X, y, **input_kwargs):
'Group defend method, where each defender in the list will defend input points - \n the .defend method of each defender will be called for all points and their decisions will be recorded in a dictionary\n Points will be rejected based on the proportion of defenders rejecting each individual point\n Args: \n X (np.ndarray, torch.Tensor) : point data (shape (batch_size, data dimensionality)).\n y (np.ndarray, torch.Tensor) : label data (shape (batch_size,)).\n \n Return:\n tuple (output_x, output_y):\n output_x (np.ndarray, torch.Tensor) : point data.\n output_y (np.ndarray, torch.Tensor) : label data.\n '
input_datapoints = X.copy()
input_labels = y.copy()
accept_counts = self._initiate_dict(X, y)
for defender in self.defender_list:
(X, y) = defender.defend(X, y, **input_kwargs)
accept_counts = self._update_dict(accept_counts, X, y)
X = input_datapoints.copy()
y = input_labels.copy()
(output_x, output_y) = self._get_final_points(accept_counts)
return (output_x, output_y) | def _ensemble_defence(self, X, y, **input_kwargs):
'Group defend method, where each defender in the list will defend input points - \n the .defend method of each defender will be called for all points and their decisions will be recorded in a dictionary\n Points will be rejected based on the proportion of defenders rejecting each individual point\n Args: \n X (np.ndarray, torch.Tensor) : point data (shape (batch_size, data dimensionality)).\n y (np.ndarray, torch.Tensor) : label data (shape (batch_size,)).\n \n Return:\n tuple (output_x, output_y):\n output_x (np.ndarray, torch.Tensor) : point data.\n output_y (np.ndarray, torch.Tensor) : label data.\n '
input_datapoints = X.copy()
input_labels = y.copy()
accept_counts = self._initiate_dict(X, y)
for defender in self.defender_list:
(X, y) = defender.defend(X, y, **input_kwargs)
accept_counts = self._update_dict(accept_counts, X, y)
X = input_datapoints.copy()
y = input_labels.copy()
(output_x, output_y) = self._get_final_points(accept_counts)
return (output_x, output_y)<|docstring|>Group defend method, where each defender in the list will defend input points -
the .defend method of each defender will be called for all points and their decisions will be recorded in a dictionary
Points will be rejected based on the proportion of defenders rejecting each individual point
Args:
X (np.ndarray, torch.Tensor) : point data (shape (batch_size, data dimensionality)).
y (np.ndarray, torch.Tensor) : label data (shape (batch_size,)).
Return:
tuple (output_x, output_y):
output_x (np.ndarray, torch.Tensor) : point data.
output_y (np.ndarray, torch.Tensor) : label data.<|endoftext|> |
f42c583d88952dcebe74e4c277b88d0a1af98f04e28c29f8c930453562ca14ba | def _sequential_defence(self, X, y, **input_kwargs):
'Group defend method, where each defender in the list will defend input points - \n the .defend method of each defender will be called for all points\n if one defender rejects a point, that point will be rejected and not sent forward\n Args: \n X (np.ndarray, torch.Tensor): point data.\n y (np.ndarray, torch.Tensor): label data.\n \n Return:\n tuple (output_x, output_y) where:\n output_x (np.ndarray, torch.Tensor): point data.\n output_y (np.ndarray, torch.Tensor): label data.\n '
for defender in self.defender_list:
if (len(X) > 0):
(output_x, output_y) = defender.defend(X, y, **input_kwargs)
return (output_x, output_y) | Group defend method, where each defender in the list will defend input points -
the .defend method of each defender will be called for all points
if one defender rejects a point, that point will be rejected and not sent forward
Args:
X (np.ndarray, torch.Tensor): point data.
y (np.ndarray, torch.Tensor): label data.
Return:
tuple (output_x, output_y) where:
output_x (np.ndarray, torch.Tensor): point data.
output_y (np.ndarray, torch.Tensor): label data. | niteshade/defence.py | _sequential_defence | oskarfernlund/data-poisoning-attacks | 0 | python | def _sequential_defence(self, X, y, **input_kwargs):
'Group defend method, where each defender in the list will defend input points - \n the .defend method of each defender will be called for all points\n if one defender rejects a point, that point will be rejected and not sent forward\n Args: \n X (np.ndarray, torch.Tensor): point data.\n y (np.ndarray, torch.Tensor): label data.\n \n Return:\n tuple (output_x, output_y) where:\n output_x (np.ndarray, torch.Tensor): point data.\n output_y (np.ndarray, torch.Tensor): label data.\n '
for defender in self.defender_list:
if (len(X) > 0):
(output_x, output_y) = defender.defend(X, y, **input_kwargs)
return (output_x, output_y) | def _sequential_defence(self, X, y, **input_kwargs):
'Group defend method, where each defender in the list will defend input points - \n the .defend method of each defender will be called for all points\n if one defender rejects a point, that point will be rejected and not sent forward\n Args: \n X (np.ndarray, torch.Tensor): point data.\n y (np.ndarray, torch.Tensor): label data.\n \n Return:\n tuple (output_x, output_y) where:\n output_x (np.ndarray, torch.Tensor): point data.\n output_y (np.ndarray, torch.Tensor): label data.\n '
for defender in self.defender_list:
if (len(X) > 0):
(output_x, output_y) = defender.defend(X, y, **input_kwargs)
return (output_x, output_y)<|docstring|>Group defend method, where each defender in the list will defend input points -
the .defend method of each defender will be called for all points
if one defender rejects a point, that point will be rejected and not sent forward
Args:
X (np.ndarray, torch.Tensor): point data.
y (np.ndarray, torch.Tensor): label data.
Return:
tuple (output_x, output_y) where:
output_x (np.ndarray, torch.Tensor): point data.
output_y (np.ndarray, torch.Tensor): label data.<|endoftext|> |
040a30b99b78092787de3c7c3b487fa8220335c4987e467149a6e5055d39a748 | def _initiate_dict(self, X, y):
' Initiate 3 dictionaries for ensemble decisionmaking\n one for original points and labels (value) to indeces (key)\n second for str(points) and str(labels) (value) to indeces (key)\n third for indeces (keys) to accept_counts (values)\n Args: \n X (np.ndarray, torch.Tensor): point data.\n y (np.ndarray, torch.Tensor): label data.\n \n Return:\n accept_counts (dictionary) - A dictionary with point indeces (keys) and accept counts (values)\n '
accept_counts = {}
self.__idx_point_mapping = {}
self.__idx_str_mapping = {}
for (idx, points) in enumerate(X):
self.__idx_point_mapping[idx] = {'point': points, 'target': y[idx]}
self.__idx_str_mapping[idx] = {'point': str(points), 'target': str(y[idx])}
accept_counts[idx] = 0
return accept_counts | Initiate 3 dictionaries for ensemble decisionmaking
one for original points and labels (value) to indeces (key)
second for str(points) and str(labels) (value) to indeces (key)
third for indeces (keys) to accept_counts (values)
Args:
X (np.ndarray, torch.Tensor): point data.
y (np.ndarray, torch.Tensor): label data.
Return:
accept_counts (dictionary) - A dictionary with point indeces (keys) and accept counts (values) | niteshade/defence.py | _initiate_dict | oskarfernlund/data-poisoning-attacks | 0 | python | def _initiate_dict(self, X, y):
' Initiate 3 dictionaries for ensemble decisionmaking\n one for original points and labels (value) to indeces (key)\n second for str(points) and str(labels) (value) to indeces (key)\n third for indeces (keys) to accept_counts (values)\n Args: \n X (np.ndarray, torch.Tensor): point data.\n y (np.ndarray, torch.Tensor): label data.\n \n Return:\n accept_counts (dictionary) - A dictionary with point indeces (keys) and accept counts (values)\n '
accept_counts = {}
self.__idx_point_mapping = {}
self.__idx_str_mapping = {}
for (idx, points) in enumerate(X):
self.__idx_point_mapping[idx] = {'point': points, 'target': y[idx]}
self.__idx_str_mapping[idx] = {'point': str(points), 'target': str(y[idx])}
accept_counts[idx] = 0
return accept_counts | def _initiate_dict(self, X, y):
' Initiate 3 dictionaries for ensemble decisionmaking\n one for original points and labels (value) to indeces (key)\n second for str(points) and str(labels) (value) to indeces (key)\n third for indeces (keys) to accept_counts (values)\n Args: \n X (np.ndarray, torch.Tensor): point data.\n y (np.ndarray, torch.Tensor): label data.\n \n Return:\n accept_counts (dictionary) - A dictionary with point indeces (keys) and accept counts (values)\n '
accept_counts = {}
self.__idx_point_mapping = {}
self.__idx_str_mapping = {}
for (idx, points) in enumerate(X):
self.__idx_point_mapping[idx] = {'point': points, 'target': y[idx]}
self.__idx_str_mapping[idx] = {'point': str(points), 'target': str(y[idx])}
accept_counts[idx] = 0
return accept_counts<|docstring|>Initiate 3 dictionaries for ensemble decisionmaking
one for original points and labels (value) to indeces (key)
second for str(points) and str(labels) (value) to indeces (key)
third for indeces (keys) to accept_counts (values)
Args:
X (np.ndarray, torch.Tensor): point data.
y (np.ndarray, torch.Tensor): label data.
Return:
accept_counts (dictionary) - A dictionary with point indeces (keys) and accept counts (values)<|endoftext|> |
f864364bf91a25a2647bafbd5d70eac7d2286a2422ff79d1fb8d8fd310a47f07 | def _update_dict(self, point_dict, X, y):
' Update the accept count dictionary using incoming points and index dictionary\n for all incoming points, add 1 to the accept count of that point\n To get the indeces of input points, the string points and labels to indeces dict is used\n Args: \n X (np.ndarray, torch.Tensor): point data.\n y (np.ndarray, torch.Tensor): label data.\n point_dict (dictionary): dictionary of accept counts\n Return:\n point_dict (dictionary) - A dictionary with updated point indeces (keys) and accept counts (values)\n '
key_list = list(self.__idx_str_mapping.keys())
value_list = list(self.__idx_str_mapping.values())
for (index, points) in enumerate(X):
idx_map_value = {'point': str(points), 'target': str(y[index])}
position = value_list.index(idx_map_value)
index_point = key_list[position]
point_dict[index_point] += 1
return point_dict | Update the accept count dictionary using incoming points and index dictionary
for all incoming points, add 1 to the accept count of that point
To get the indeces of input points, the string points and labels to indeces dict is used
Args:
X (np.ndarray, torch.Tensor): point data.
y (np.ndarray, torch.Tensor): label data.
point_dict (dictionary): dictionary of accept counts
Return:
point_dict (dictionary) - A dictionary with updated point indeces (keys) and accept counts (values) | niteshade/defence.py | _update_dict | oskarfernlund/data-poisoning-attacks | 0 | python | def _update_dict(self, point_dict, X, y):
' Update the accept count dictionary using incoming points and index dictionary\n for all incoming points, add 1 to the accept count of that point\n To get the indeces of input points, the string points and labels to indeces dict is used\n Args: \n X (np.ndarray, torch.Tensor): point data.\n y (np.ndarray, torch.Tensor): label data.\n point_dict (dictionary): dictionary of accept counts\n Return:\n point_dict (dictionary) - A dictionary with updated point indeces (keys) and accept counts (values)\n '
key_list = list(self.__idx_str_mapping.keys())
value_list = list(self.__idx_str_mapping.values())
for (index, points) in enumerate(X):
idx_map_value = {'point': str(points), 'target': str(y[index])}
position = value_list.index(idx_map_value)
index_point = key_list[position]
point_dict[index_point] += 1
return point_dict | def _update_dict(self, point_dict, X, y):
' Update the accept count dictionary using incoming points and index dictionary\n for all incoming points, add 1 to the accept count of that point\n To get the indeces of input points, the string points and labels to indeces dict is used\n Args: \n X (np.ndarray, torch.Tensor): point data.\n y (np.ndarray, torch.Tensor): label data.\n point_dict (dictionary): dictionary of accept counts\n Return:\n point_dict (dictionary) - A dictionary with updated point indeces (keys) and accept counts (values)\n '
key_list = list(self.__idx_str_mapping.keys())
value_list = list(self.__idx_str_mapping.values())
for (index, points) in enumerate(X):
idx_map_value = {'point': str(points), 'target': str(y[index])}
position = value_list.index(idx_map_value)
index_point = key_list[position]
point_dict[index_point] += 1
return point_dict<|docstring|>Update the accept count dictionary using incoming points and index dictionary
for all incoming points, add 1 to the accept count of that point
To get the indices of the input points, the dictionary mapping str(points) and str(labels) to indices is used
Args:
X (np.ndarray, torch.Tensor): point data.
y (np.ndarray, torch.Tensor): label data.
point_dict (dictionary): dictionary of accept counts
Return:
point_dict (dictionary) - A dictionary with updated point indices (keys) and accept counts (values)<|endoftext|>
e2b7e6862593db05f95560222596833b203b47c619d029de383e7ad6a750c0a9 | def _get_final_points(self, point_dict):
' Using the accept count dictionary, obtain the final points that are accepted\n points accepted if accept_count/nr_of_defenders > ensemble_accept_rate\n Args: \n point_dict (dictionary): dictionary of accept counts\n Return:\n tuple (np.array(accepted_X), np.array(accepted_Y))\n '
accepted_X = []
accepted_Y = []
for (key, values) in point_dict.items():
if ((values / len(self.defender_list)) > self.ensemble_accept_rate):
accepted_X.append(self.__idx_point_mapping[key]['point'])
accepted_Y.append(self.__idx_point_mapping[key]['target'])
return (np.array(accepted_X), np.array(accepted_Y)) | Using the accept count dictionary, obtain the final points that are accepted
points accepted if accept_count/nr_of_defenders > ensemble_accept_rate
Args:
point_dict (dictionary): dictionary of accept counts
Return:
tuple (np.array(accepted_X), np.array(accepted_Y)) | niteshade/defence.py | _get_final_points | oskarfernlund/data-poisoning-attacks | 0 | python | def _get_final_points(self, point_dict):
' Using the accept count dictionary, obtain the final points that are accepted\n points accepted if accept_count/nr_of_defenders > ensemble_accept_rate\n Args: \n point_dict (dictionary): dictionary of accept counts\n Return:\n tuple (np.array(accepted_X), np.array(accepted_Y))\n '
accepted_X = []
accepted_Y = []
for (key, values) in point_dict.items():
if ((values / len(self.defender_list)) > self.ensemble_accept_rate):
accepted_X.append(self.__idx_point_mapping[key]['point'])
accepted_Y.append(self.__idx_point_mapping[key]['target'])
return (np.array(accepted_X), np.array(accepted_Y)) | def _get_final_points(self, point_dict):
' Using the accept count dictionary, obtain the final points that are accepted\n points accepted if accept_count/nr_of_defenders > ensemble_accept_rate\n Args: \n point_dict (dictionary): dictionary of accept counts\n Return:\n tuple (np.array(accepted_X), np.array(accepted_Y))\n '
accepted_X = []
accepted_Y = []
for (key, values) in point_dict.items():
if ((values / len(self.defender_list)) > self.ensemble_accept_rate):
accepted_X.append(self.__idx_point_mapping[key]['point'])
accepted_Y.append(self.__idx_point_mapping[key]['target'])
return (np.array(accepted_X), np.array(accepted_Y))<|docstring|>Using the accept count dictionary, obtain the final points that are accepted
points accepted if accept_count/nr_of_defenders > ensemble_accept_rate
Args:
point_dict (dictionary): dictionary of accept counts
Return:
tuple (np.array(accepted_X), np.array(accepted_Y))<|endoftext|> |
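Taken together, the three private helpers above implement a simple voting rule for an ensemble of defenders: every defender that accepts a point increments that point's accept count, and the point survives only if accept_count / nr_of_defenders exceeds ensemble_accept_rate. A minimal plain-Python sketch of that rule (the counts and rate below are made-up illustrative values, not library code):

# Hypothetical accept counts after every defender in a 4-member ensemble has voted.
accept_counts = {0: 3, 1: 2, 2: 0}      # point index -> number of defenders that accepted it
n_defenders = 4
ensemble_accept_rate = 0.5

kept = [idx for idx, count in accept_counts.items()
        if count / n_defenders > ensemble_accept_rate]
print(kept)  # [0]; point 1 is dropped because 2/4 is not strictly greater than 0.5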
9a1e6d75bc53675bbb96e1a5fcd4c2e382b90e8d0ba627036c14ff9106a78015 | @abstractmethod
def defend(self):
'Checks if the .defend method is implemented. '
raise NotImplementedError('Defend method needs to be implemented for a defender') | Checks if the .defend method is implemented. | niteshade/defence.py | defend | oskarfernlund/data-poisoning-attacks | 0 | python | @abstractmethod
def defend(self):
' '
raise NotImplementedError('Defend method needs to be implemented for a defender') | @abstractmethod
def defend(self):
' '
raise NotImplementedError('Defend method needs to be implemented for a defender')<|docstring|>Checks if the .defend method is implemented.<|endoftext|> |
5f5c77baf39d89ba102ec531e04a7107ab829e559686b2195eab854ccdbf7d62 | def __init__(self, initial_dataset_x, initial_dataset_y) -> None:
' Initialise the OutlierDefender class using a initial dataset.\n '
super().__init__()
self._type_check(initial_dataset_x, initial_dataset_y)
if (self._datatype == 1):
self._init_x = initial_dataset_x
self._init_y = initial_dataset_y
else:
self._init_x = initial_dataset_x.cpu().detach().numpy()
self._init_y = initial_dataset_y.cpu().detach().numpy() | Initialise the OutlierDefender class using an initial dataset. | niteshade/defence.py | __init__ | oskarfernlund/data-poisoning-attacks | 0 | python | def __init__(self, initial_dataset_x, initial_dataset_y) -> None:
' \n '
super().__init__()
self._type_check(initial_dataset_x, initial_dataset_y)
if (self._datatype == 1):
self._init_x = initial_dataset_x
self._init_y = initial_dataset_y
else:
self._init_x = initial_dataset_x.cpu().detach().numpy()
self._init_y = initial_dataset_y.cpu().detach().numpy() | def __init__(self, initial_dataset_x, initial_dataset_y) -> None:
' \n '
super().__init__()
self._type_check(initial_dataset_x, initial_dataset_y)
if (self._datatype == 1):
self._init_x = initial_dataset_x
self._init_y = initial_dataset_y
else:
self._init_x = initial_dataset_x.cpu().detach().numpy()
self._init_y = initial_dataset_y.cpu().detach().numpy()<|docstring|>Initialise the OutlierDefender class using an initial dataset.<|endoftext|>
833285b163bd7a0191e7ee213f8275977805ccb386c4234a0b3c1464c826a91e | def __init__(self, init_x, init_y, nearest_neighbours: int, confidence_threshold: float, one_hot=False) -> None:
' Constructor method of KNN_Defender class.\n If the inputs are one-hot encoded, artificial integer labels are constructed\n to use the SKlearn classifier.\n '
super().__init__()
self._type_check(init_x, init_y)
self.nearest_neighbours = nearest_neighbours
self.confidence_threshold = confidence_threshold
self.one_hot = one_hot
_input_validation(self)
if (self._datatype == 0):
init_x = init_x.cpu().detach().numpy()
init_y = init_y.cpu().detach().numpy()
nr_of_datapoints = init_x.shape[0]
self.training_dataset_x = init_x.reshape((nr_of_datapoints, (- 1)))
if self.one_hot:
self.training_dataset_y = _label_encoding(init_y)
else:
self.training_dataset_y = init_y.reshape((nr_of_datapoints,)) | Constructor method of KNN_Defender class.
If the inputs are one-hot encoded, artificial integer labels are constructed
to use the SKlearn classifier. | niteshade/defence.py | __init__ | oskarfernlund/data-poisoning-attacks | 0 | python | def __init__(self, init_x, init_y, nearest_neighbours: int, confidence_threshold: float, one_hot=False) -> None:
' Constructor method of KNN_Defender class.\n If the inputs are one-hot encoded, artificial integer labels are constructed\n to use the SKlearn classifier.\n '
super().__init__()
self._type_check(init_x, init_y)
self.nearest_neighbours = nearest_neighbours
self.confidence_threshold = confidence_threshold
self.one_hot = one_hot
_input_validation(self)
if (self._datatype == 0):
init_x = init_x.cpu().detach().numpy()
init_y = init_y.cpu().detach().numpy()
nr_of_datapoints = init_x.shape[0]
self.training_dataset_x = init_x.reshape((nr_of_datapoints, (- 1)))
if self.one_hot:
self.training_dataset_y = _label_encoding(init_y)
else:
self.training_dataset_y = init_y.reshape((nr_of_datapoints,)) | def __init__(self, init_x, init_y, nearest_neighbours: int, confidence_threshold: float, one_hot=False) -> None:
' Constructor method of KNN_Defender class.\n If the inputs are one-hot encoded, artificial integer labels are constructed\n to use the SKlearn classifier.\n '
super().__init__()
self._type_check(init_x, init_y)
self.nearest_neighbours = nearest_neighbours
self.confidence_threshold = confidence_threshold
self.one_hot = one_hot
_input_validation(self)
if (self._datatype == 0):
init_x = init_x.cpu().detach().numpy()
init_y = init_y.cpu().detach().numpy()
nr_of_datapoints = init_x.shape[0]
self.training_dataset_x = init_x.reshape((nr_of_datapoints, (- 1)))
if self.one_hot:
self.training_dataset_y = _label_encoding(init_y)
else:
self.training_dataset_y = init_y.reshape((nr_of_datapoints,))<|docstring|>Constructor method of KNN_Defender class.
If the inputs are one-hot encoded, artificial integer labels are constructed
to use the SKlearn classifier.<|endoftext|> |
ac2a44a3704923317505b630127bd118b132463ea20a680e857b8e8283c9d3e5 | def defend(self, datapoints, input_labels, **kwargs):
' The defend method for the KNN_defender.\n For each incoming point, closest neighbours and their labels are found.\n If the proportion of the most frequent label in closest neighbours is higher than a threshold,\n then the label of the point is flipped to be the most frequent label of closest neighbours.\n Args: \n datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)).\n input_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).\n Return:\n tuple (datapoints, flipped_labels) :\n datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)),\n flipped_labels (np.ndarray, torch.Tensor): modified label data (shape (batch_size,)).\n '
self._type_check(datapoints, input_labels)
if (self._datatype == 0):
datapoints = datapoints.cpu().detach().numpy()
input_labels = input_labels.cpu().detach().numpy()
nr_of_datapoints = datapoints.shape[0]
datapoints_reshaped = datapoints.copy().reshape((nr_of_datapoints, (- 1)))
if self.one_hot:
one_hot_length = len(input_labels[0])
input_labels = np.argmax(input_labels, axis=1)
KNN_classifier = KNeighborsClassifier(self.nearest_neighbours)
KNN_classifier.fit(self.training_dataset_x, self.training_dataset_y)
nearest_indeces = KNN_classifier.kneighbors(datapoints_reshaped, return_distance=False)
confidence_list = self._get_confidence_labels(nearest_indeces)
flipped_labels = self._confidence_flip(input_labels, confidence_list)
self.training_dataset_x = np.append(self.training_dataset_x, datapoints_reshaped, axis=0)
self.training_dataset_y = np.append(self.training_dataset_y, flipped_labels.reshape((nr_of_datapoints,)), axis=0)
if self.one_hot:
flipped_labels = self._one_hot_decoding(one_hot_length, flipped_labels)
if (self._datatype == 0):
datapoints = torch.tensor(datapoints)
flipped_labels = torch.tensor(flipped_labels)
return (datapoints, flipped_labels) | The defend method for the KNN_defender.
For each incoming point, closest neighbours and their labels are found.
If the proportion of the most frequent label in closest neighbours is higher than a threshold,
then the label of the point is flipped to be the most frequent label of closest neighbours.
Args:
datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)).
input_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).
Return:
tuple (datapoints, flipped_labels) :
datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)),
flipped_labels (np.ndarray, torch.Tensor): modified label data (shape (batch_size,)). | niteshade/defence.py | defend | oskarfernlund/data-poisoning-attacks | 0 | python | def defend(self, datapoints, input_labels, **kwargs):
' The defend method for the KNN_defender.\n For each incoming point, closest neighbours and their labels are found.\n If the proportion of the most frequent label in closest neighbours is higher than a threshold,\n then the label of the point is flipped to be the most frequent label of closest neighbours.\n Args: \n datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)).\n input_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).\n Return:\n tuple (datapoints, flipped_labels) :\n datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)),\n flipped_labels (np.ndarray, torch.Tensor): modified label data (shape (batch_size,)).\n '
self._type_check(datapoints, input_labels)
if (self._datatype == 0):
datapoints = datapoints.cpu().detach().numpy()
input_labels = input_labels.cpu().detach().numpy()
nr_of_datapoints = datapoints.shape[0]
datapoints_reshaped = datapoints.copy().reshape((nr_of_datapoints, (- 1)))
if self.one_hot:
one_hot_length = len(input_labels[0])
input_labels = np.argmax(input_labels, axis=1)
KNN_classifier = KNeighborsClassifier(self.nearest_neighbours)
KNN_classifier.fit(self.training_dataset_x, self.training_dataset_y)
nearest_indeces = KNN_classifier.kneighbors(datapoints_reshaped, return_distance=False)
confidence_list = self._get_confidence_labels(nearest_indeces)
flipped_labels = self._confidence_flip(input_labels, confidence_list)
self.training_dataset_x = np.append(self.training_dataset_x, datapoints_reshaped, axis=0)
self.training_dataset_y = np.append(self.training_dataset_y, flipped_labels.reshape((nr_of_datapoints,)), axis=0)
if self.one_hot:
flipped_labels = self._one_hot_decoding(one_hot_length, flipped_labels)
if (self._datatype == 0):
datapoints = torch.tensor(datapoints)
flipped_labels = torch.tensor(flipped_labels)
return (datapoints, flipped_labels) | def defend(self, datapoints, input_labels, **kwargs):
' The defend method for the KNN_defender.\n For each incoming point, closest neighbours and their labels are found.\n If the proportion of the most frequent label in closest neighbours is higher than a threshold,\n then the label of the point is flipped to be the most frequent label of closest neighbours.\n Args: \n datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)).\n input_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).\n Return:\n tuple (datapoints, flipped_labels) :\n datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)),\n flipped_labels (np.ndarray, torch.Tensor): modified label data (shape (batch_size,)).\n '
self._type_check(datapoints, input_labels)
if (self._datatype == 0):
datapoints = datapoints.cpu().detach().numpy()
input_labels = input_labels.cpu().detach().numpy()
nr_of_datapoints = datapoints.shape[0]
datapoints_reshaped = datapoints.copy().reshape((nr_of_datapoints, (- 1)))
if self.one_hot:
one_hot_length = len(input_labels[0])
input_labels = np.argmax(input_labels, axis=1)
KNN_classifier = KNeighborsClassifier(self.nearest_neighbours)
KNN_classifier.fit(self.training_dataset_x, self.training_dataset_y)
nearest_indeces = KNN_classifier.kneighbors(datapoints_reshaped, return_distance=False)
confidence_list = self._get_confidence_labels(nearest_indeces)
flipped_labels = self._confidence_flip(input_labels, confidence_list)
self.training_dataset_x = np.append(self.training_dataset_x, datapoints_reshaped, axis=0)
self.training_dataset_y = np.append(self.training_dataset_y, flipped_labels.reshape((nr_of_datapoints,)), axis=0)
if self.one_hot:
flipped_labels = self._one_hot_decoding(one_hot_length, flipped_labels)
if (self._datatype == 0):
datapoints = torch.tensor(datapoints)
flipped_labels = torch.tensor(flipped_labels)
return (datapoints, flipped_labels)<|docstring|>The defend method for the KNN_defender.
For each incoming point, closest neighbours and their labels are found.
If the proportion of the most frequent label in closest neighbours is higher than a threshold,
then the label of the point is flipped to be the most frequent label of closest neighbours.
Args:
datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)).
input_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).
Return:
tuple (datapoints, flipped_labels) :
datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)),
flipped_labels (np.ndarray, torch.Tensor): modified label data (shape (batch_size,)).<|endoftext|> |
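A minimal usage sketch for the defender documented above. The import path niteshade.defence is inferred from the path field in the record, and the toy arrays are made up for illustration:

import numpy as np
from niteshade.defence import KNN_Defender  # import path assumed from the metadata above

# Tiny two-class dataset used to seed the defender's internal KNN training set.
init_x = np.array([[0.0, 0.0], [0.1, 0.0], [1.0, 1.0], [0.9, 1.1]])
init_y = np.array([0, 0, 1, 1])
defender = KNN_Defender(init_x, init_y, nearest_neighbours=3, confidence_threshold=0.6)

# Incoming batch: the point near (0, 0) arrives with a suspicious label of 1.
X_batch = np.array([[0.05, 0.05], [0.95, 1.0]])
y_batch = np.array([1, 1])

X_out, y_out = defender.defend(X_batch, y_batch)
# Expect the first label to be flipped back to 0: its three nearest neighbours
# are dominated by class 0 with confidence 2/3 > 0.6.
print(y_out)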
5122ae4ccca7a85dab91dd2497e55c784ef0c73667c03f3c9da0c39aa3ca921b | def _one_hot_decoding(self, one_hot_length, flipped_labels):
' Construct one_hot outputs from int input labels\n Args: \n one_hot_length (int): Dimensionality of one_hot_encoded outputs\n flipped_labels (np.ndarray): label data.\n Return:\n output_labels (np.ndarray): one_hot_encoded label data\n '
output_labels = np.zeros((flipped_labels.shape[0], one_hot_length))
for (id, label) in enumerate(flipped_labels):
output_labels[id][label] = 1
return output_labels | Construct one_hot outputs from int input labels
Args:
one_hot_length (int): Dimensionality of one_hot_encoded outputs
flipped_labels (np.ndarray): label data.
Return:
output_labels (np.ndarray): one_hot_encoded label data | niteshade/defence.py | _one_hot_decoding | oskarfernlund/data-poisoning-attacks | 0 | python | def _one_hot_decoding(self, one_hot_length, flipped_labels):
' Construct one_hot outputs from int input labels\n Args: \n one_hot_length (int): Dimensionality of one_hot_encoded outputs\n flipped_labels (np.ndarray): label data.\n Return:\n output_labels (np.ndarray): one_hot_encoded label data\n '
output_labels = np.zeros((flipped_labels.shape[0], one_hot_length))
for (id, label) in enumerate(flipped_labels):
output_labels[id][label] = 1
return output_labels | def _one_hot_decoding(self, one_hot_length, flipped_labels):
' Construct one_hot outputs from int input labels\n Args: \n one_hot_length (int): Dimensionality of one_hot_encoded outputs\n flipped_labels (np.ndarray): label data.\n Return:\n output_labels (np.ndarray): one_hot_encoded label data\n '
output_labels = np.zeros((flipped_labels.shape[0], one_hot_length))
for (id, label) in enumerate(flipped_labels):
output_labels[id][label] = 1
return output_labels<|docstring|>Construct one_hot outputs from int input labels
Args:
one_hot_length (int): Dimensionality of one_hot_encoded outputs
flipped_labels (np.ndarray): label data.
Return:
output_labels (np.ndarray): one_hot_encoded label data<|endoftext|> |
ddae5d0ae6893ab6621d343c6be95fba5baf031df49c886cd957798e5f859955 | def _get_confidence_labels(self, indeces):
' Find the most frequent label from the nearest neighbour indeces\n and get its confidence (label_count / nr_of_nghbs)\n Args: \n indeces (list): list of lists, inner list contains indeces for the nearest neighbours for datapoints\n input_labels (np.ndarray): label data.\n Return:\n confidence array (np.ndarray): array of tuples where tuple[0]: most frequent label, tuple[1]: confidence of label\n '
confidence_list = []
for nghbs in indeces:
label_list = []
for index in nghbs:
label_list.append(self.training_dataset_y[index])
confidence_tuple = self._calculate_confidence(label_list)
confidence_list.append(confidence_tuple)
return np.array(confidence_list) | Find the most frequent label from the nearest neighbour indeces
and get its confidence (label_count / nr_of_nghbs)
Args:
indeces (list): list of lists, inner list contains indeces for the nearest neighbours for datapoints
input_labels (np.ndarray): label data.
Return:
confidence array (np.ndarray): array of tuples where tuple[0]: most frequent label, tuple[1]: confidence of label | niteshade/defence.py | _get_confidence_labels | oskarfernlund/data-poisoning-attacks | 0 | python | def _get_confidence_labels(self, indeces):
' Find the most frequent label from the nearest neighbour indeces\n and get its confidence (label_count / nr_of_nghbs)\n Args: \n indeces (list): list of lists, inner list contains indeces for the nearest neighbours for datapoints\n input_labels (np.ndarray): label data.\n Return:\n confidence array (np.ndarray): array of tuples where tuple[0]: most frequent label, tuple[1]: confidence of label\n '
confidence_list = []
for nghbs in indeces:
label_list = []
for index in nghbs:
label_list.append(self.training_dataset_y[index])
confidence_tuple = self._calculate_confidence(label_list)
confidence_list.append(confidence_tuple)
return np.array(confidence_list) | def _get_confidence_labels(self, indeces):
' Find the most frequent label from the nearest neighbour indeces\n and get its confidence (label_count / nr_of_nghbs)\n Args: \n indeces (list): list of lists, inner list contains indeces for the nearest neighbours for datapoints\n input_labels (np.ndarray): label data.\n Return:\n confidence array (np.ndarray): array of tuples where tuple[0]: most frequent label, tuple[1]: confidence of label\n '
confidence_list = []
for nghbs in indeces:
label_list = []
for index in nghbs:
label_list.append(self.training_dataset_y[index])
confidence_tuple = self._calculate_confidence(label_list)
confidence_list.append(confidence_tuple)
return np.array(confidence_list)<|docstring|>Find the most frequent label from the nearest neighbour indeces
and get its confidence (label_count / nr_of_nghbs)
Args:
indeces (list): list of lists, inner list contains indeces for the nearest neighbours for datapoints
input_labels (np.ndarray): label data.
Return:
confidence array (np.ndarray): array of tuples where tuple[0]: most frequent label, tuple[1]: confidence of label<|endoftext|> |
96d416f9125ae8af5940374cf3a75a15cb39063ed1592d2d4044a0d9f1589c55 | def _calculate_confidence(self, labels):
' Find the most frequent label from a incoming list of labels\n and get its confidence (label_count / len(label_list))\n Args: \n labels (list): list containing labels for the nearest neighbours\n Return:\n tuple(max_label, confidence)\n max_label (int): Most frequent label\n confidence (float): most frequent_label_count/len(label_list)\n '
unique_labels = list(set(labels))
max_count = 0
max_label = (- 1)
for label in unique_labels:
if (list(labels).count(label) > max_count):
max_count = list(labels).count(label)
max_label = label
return (max_label, (max_count / len(labels))) | Find the most frequent label from an incoming list of labels
and get its confidence (label_count / len(label_list))
Args:
labels (list): list containing labels for the nearest neighbours
Return:
tuple(max_label, confidence)
max_label (int): Most frequent label
confidence (float): most frequent_label_count/len(label_list) | niteshade/defence.py | _calculate_confidence | oskarfernlund/data-poisoning-attacks | 0 | python | def _calculate_confidence(self, labels):
' Find the most frequent label from a incoming list of labels\n and get its confidence (label_count / len(label_list))\n Args: \n labels (list): list containing labels for the nearest neighbours\n Return:\n tuple(max_label, confidence)\n max_label (int): Most frequent label\n confidence (float): most frequent_label_count/len(label_list)\n '
unique_labels = list(set(labels))
max_count = 0
max_label = (- 1)
for label in unique_labels:
if (list(labels).count(label) > max_count):
max_count = list(labels).count(label)
max_label = label
return (max_label, (max_count / len(labels))) | def _calculate_confidence(self, labels):
' Find the most frequent label from a incoming list of labels\n and get its confidence (label_count / len(label_list))\n Args: \n labels (list): list containing labels for the nearest neighbours\n Return:\n tuple(max_label, confidence)\n max_label (int): Most frequent label\n confidence (float): most frequent_label_count/len(label_list)\n '
unique_labels = list(set(labels))
max_count = 0
max_label = (- 1)
for label in unique_labels:
if (list(labels).count(label) > max_count):
max_count = list(labels).count(label)
max_label = label
return (max_label, (max_count / len(labels)))<|docstring|>Find the most frequent label from an incoming list of labels
and get its confidence (label_count / len(label_list))
Args:
labels (list): list containing labels for the nearest neighbours
Return:
tuple(max_label, confidence)
max_label (int): Most frequent label
confidence (float): most frequent_label_count/len(label_list)<|endoftext|> |
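A quick worked example of the majority-label rule described above (plain Python, not library code):

labels = [1, 1, 0, 1, 2]                 # labels of a point's five nearest neighbours
max_label = max(set(labels), key=labels.count)
confidence = labels.count(max_label) / len(labels)
print(max_label, confidence)             # 1 0.6 -- the tuple _calculate_confidence would return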
ce59237e7dde0e7e59cece6df3b65c7646d583f7b53a91e5b74154073fcc5b1b | def _confidence_flip(self, labels, confidence_list):
' Flip incoming input labels if the confidence of the most frequent label of their nearest nghbs\n is over a threshold\n Args: \n labels (list): list containing input labels\n labels (list): list containing a tuple for each input label with most frequent nearest nghb label and its confidence\n Return:\n labels (list): List of modified input labels\n '
for (idx, _) in enumerate(labels):
if (confidence_list[idx][1] > self.confidence_threshold):
labels[idx] = confidence_list[idx][0]
return labels | Flip incoming input labels if the confidence of the most frequent label of their nearest neighbours
is over a threshold
Args:
labels (list): list containing input labels
confidence_list (list): list containing a tuple for each input label with the most frequent nearest-neighbour label and its confidence
Return:
labels (list): List of modified input labels | niteshade/defence.py | _confidence_flip | oskarfernlund/data-poisoning-attacks | 0 | python | def _confidence_flip(self, labels, confidence_list):
' Flip incoming input labels if the confidence of the most frequent label of their nearest nghbs\n is over a threshold\n Args: \n labels (list): list containing input labels\n labels (list): list containing a tuple for each input label with most frequent nearest nghb label and its confidence\n Return:\n labels (list): List of modified input labels\n '
for (idx, _) in enumerate(labels):
if (confidence_list[idx][1] > self.confidence_threshold):
labels[idx] = confidence_list[idx][0]
return labels | def _confidence_flip(self, labels, confidence_list):
' Flip incoming input labels if the confidence of the most frequent label of their nearest nghbs\n is over a threshold\n Args: \n labels (list): list containing input labels\n labels (list): list containing a tuple for each input label with most frequent nearest nghb label and its confidence\n Return:\n labels (list): List of modified input labels\n '
for (idx, _) in enumerate(labels):
if (confidence_list[idx][1] > self.confidence_threshold):
labels[idx] = confidence_list[idx][0]
return labels<|docstring|>Flip incoming input labels if the confidence of the most frequent label of their nearest neighbours
is over a threshold
Args:
labels (list): list containing input labels
confidence_list (list): list containing a tuple for each input label with the most frequent nearest-neighbour label and its confidence
Return:
labels (list): List of modified input labels<|endoftext|> |
6aff00ef3aa31cb548918fbcdde382b529e01e07f6533165366c171e4eb71654 | def __init__(self, threshold=0.05, delay=0, one_hot=True) -> None:
'Constructor method of SoftmaxDefender class.\n '
super().__init__()
self.threshold = threshold
self.one_hot = one_hot
self.delay = delay
_input_validation(self)
self.defend_counter = 0 | Constructor method of SoftmaxDefender class. | niteshade/defence.py | __init__ | oskarfernlund/data-poisoning-attacks | 0 | python | def __init__(self, threshold=0.05, delay=0, one_hot=True) -> None:
'\n '
super().__init__()
self.threshold = threshold
self.one_hot = one_hot
self.delay = delay
_input_validation(self)
self.defend_counter = 0 | def __init__(self, threshold=0.05, delay=0, one_hot=True) -> None:
'\n '
super().__init__()
self.threshold = threshold
self.one_hot = one_hot
self.delay = delay
_input_validation(self)
self.defend_counter = 0<|docstring|>Constructor method of SoftmaxDefender class.<|endoftext|> |
6fc34ee680f4a02607c18809188b2930cfed520c3cc3f0817a215c44c82ab69f | def defend(self, datapoints, labels, model, **input_kwargs):
' The defend method for the SoftMaxDefender.\n Defender starts defending if defend call counter (self.defend_counter) is larger than delay attribute.\n For each incoming point, a forward pass is done to get the softmax output values for the point.\n If the output value of the true label is below the threshold, the points are rejected.\n If one_hot encoded, artificial labels are created.\n Args: \n datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)).\n input_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).\n model (torch.nn.model): The updated current model that is used for online learning\n Return:\n tuple (datapoints, labels):\n datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)),\n labels (np.ndarray, torch.Tensor): modified label data (shape (batch_size,)).\n '
self._type_check(datapoints, labels)
self.defend_counter += 1
if (self.defend_counter > self.delay):
labels = labels.reshape((- 1), 1)
if (self._datatype == 1):
X_batch = torch.tensor(datapoints)
labels = torch.tensor(labels)
else:
X_batch = datapoints
labels = labels
if self.one_hot:
class_labels = torch.argmax(labels, axis=1).reshape((- 1), 1)
else:
class_labels = labels
model.eval()
with torch.no_grad():
outputs = model.forward(X_batch.float())
confidence = torch.gather(outputs.cpu(), 1, class_labels.cpu())
mask = (confidence > self.threshold).squeeze(1)
X_output = X_batch[mask]
y_output = labels[mask]
if (self._datatype == 1):
X_output = X_output.cpu().detach().numpy()
y_output = y_output.cpu().detach().numpy()
return (X_output, y_output.reshape((- 1)))
else:
return (datapoints, labels) | The defend method for the SoftMaxDefender.
Defender starts defending if defend call counter (self.defend_counter) is larger than delay attribute.
For each incoming point, a forward pass is done to get the softmax output values for the point.
If the output value of the true label is below the threshold, the points are rejected.
If one_hot encoded, artificial labels are created.
Args:
datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)).
input_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).
model (torch.nn.model): The updated current model that is used for online learning
Return:
tuple (datapoints, labels):
datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)),
labels (np.ndarray, torch.Tensor): modified label data (shape (batch_size,)). | niteshade/defence.py | defend | oskarfernlund/data-poisoning-attacks | 0 | python | def defend(self, datapoints, labels, model, **input_kwargs):
' The defend method for the SoftMaxDefender.\n Defender starts defending if defend call counter (self.defend_counter) is larger than delay attribute.\n For each incoming point, a forward pass is done to get the softmax output values for the point.\n If the output value of the true label is below the threshold, the points are rejected.\n If one_hot encoded, artificial labels are created.\n Args: \n datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)).\n input_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).\n model (torch.nn.model): The updated current model that is used for online learning\n Return:\n tuple (datapoints, labels):\n datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)),\n labels (np.ndarray, torch.Tensor): modified label data (shape (batch_size,)).\n '
self._type_check(datapoints, labels)
self.defend_counter += 1
if (self.defend_counter > self.delay):
labels = labels.reshape((- 1), 1)
if (self._datatype == 1):
X_batch = torch.tensor(datapoints)
labels = torch.tensor(labels)
else:
X_batch = datapoints
labels = labels
if self.one_hot:
class_labels = torch.argmax(labels, axis=1).reshape((- 1), 1)
else:
class_labels = labels
model.eval()
with torch.no_grad():
outputs = model.forward(X_batch.float())
confidence = torch.gather(outputs.cpu(), 1, class_labels.cpu())
mask = (confidence > self.threshold).squeeze(1)
X_output = X_batch[mask]
y_output = labels[mask]
if (self._datatype == 1):
X_output = X_output.cpu().detach().numpy()
y_output = y_output.cpu().detach().numpy()
return (X_output, y_output.reshape((- 1)))
else:
return (datapoints, labels) | def defend(self, datapoints, labels, model, **input_kwargs):
' The defend method for the SoftMaxDefender.\n Defender starts defending if defend call counter (self.defend_counter) is larger than delay attribute.\n For each incoming point, a forward pass is done to get the softmax output values for the point.\n If the output value of the true label is below the threshold, the points are rejected.\n If one_hot encoded, artificial labels are created.\n Args: \n datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)).\n input_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).\n model (torch.nn.model): The updated current model that is used for online learning\n Return:\n tuple (datapoints, labels):\n datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)),\n labels (np.ndarray, torch.Tensor): modified label data (shape (batch_size,)).\n '
self._type_check(datapoints, labels)
self.defend_counter += 1
if (self.defend_counter > self.delay):
labels = labels.reshape((- 1), 1)
if (self._datatype == 1):
X_batch = torch.tensor(datapoints)
labels = torch.tensor(labels)
else:
X_batch = datapoints
labels = labels
if self.one_hot:
class_labels = torch.argmax(labels, axis=1).reshape((- 1), 1)
else:
class_labels = labels
model.eval()
with torch.no_grad():
outputs = model.forward(X_batch.float())
confidence = torch.gather(outputs.cpu(), 1, class_labels.cpu())
mask = (confidence > self.threshold).squeeze(1)
X_output = X_batch[mask]
y_output = labels[mask]
if (self._datatype == 1):
X_output = X_output.cpu().detach().numpy()
y_output = y_output.cpu().detach().numpy()
return (X_output, y_output.reshape((- 1)))
else:
return (datapoints, labels)<|docstring|>The defend method for the SoftMaxDefender.
Defender starts defending if defend call counter (self.defend_counter) is larger than delay attribute.
For each incoming point, a forward pass is done to get the softmax output values for the point.
If the output value of the true label is below the threshold, the points are rejected.
If one_hot encoded, artificial labels are created.
Args:
datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)).
input_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).
model (torch.nn.model): The updated current model that is used for online learning
Return:
tuple (datapoints, labels):
datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)),
labels (np.ndarray, torch.Tensor): modified label data (shape (batch_size,)).<|endoftext|> |
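A minimal sketch of driving the defend method above, assuming the niteshade.defence import path. The model here is a hypothetical PyTorch classifier whose forward pass returns per-class probabilities, and integer labels are used so one_hot is switched off:

import numpy as np
import torch.nn as nn
from niteshade.defence import SoftmaxDefender  # import path assumed from the metadata above

model = nn.Sequential(nn.Linear(4, 3), nn.Softmax(dim=1))   # hypothetical 4-feature, 3-class model
defender = SoftmaxDefender(threshold=0.05, delay=0, one_hot=False)

X_batch = np.random.randn(8, 4)
y_batch = np.random.randint(0, 3, size=8)

# Per the docstring above, points whose predicted probability for their own
# label does not exceed the threshold are dropped from the returned batch.
X_kept, y_kept = defender.defend(X_batch, y_batch, model)
print(X_kept.shape, y_kept.shape)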
291cd44d8165d242ee1a3d40354be833757683bb522110875d6750bdd87ba208 | def __init__(self, initial_dataset_x, initial_dataset_y, threshold, one_hot=False, dist_metric=None) -> None:
' Constructor method of FeasibleSetDefender class.\n Within the init, a feasible set is constructed and\n depending on the input a respective distance metric is constructed for calculating point distances from label centroids.\n '
super().__init__(initial_dataset_x, initial_dataset_y)
self.one_hot = one_hot
self._threshold = threshold
if self.one_hot:
self._init_y = _label_encoding(initial_dataset_y)
else:
initial_dataset_y = initial_dataset_y.reshape((- 1))
self._feasible_set_construction()
self.distance_metric = dist_metric
_input_validation(self) | Constructor method of FeasibleSetDefender class.
Within the init, a feasible set is constructed and
depending on the input a respective distance metric is constructed for calculating point distances from label centroids. | niteshade/defence.py | __init__ | oskarfernlund/data-poisoning-attacks | 0 | python | def __init__(self, initial_dataset_x, initial_dataset_y, threshold, one_hot=False, dist_metric=None) -> None:
' Constructor method of FeasibleSetDefender class.\n Within the init, a feasible set is constructed and\n depending on the input a respective distance metric is constructed for calculating point distances from label centroids.\n '
super().__init__(initial_dataset_x, initial_dataset_y)
self.one_hot = one_hot
self._threshold = threshold
if self.one_hot:
self._init_y = _label_encoding(initial_dataset_y)
else:
initial_dataset_y = initial_dataset_y.reshape((- 1))
self._feasible_set_construction()
self.distance_metric = dist_metric
_input_validation(self) | def __init__(self, initial_dataset_x, initial_dataset_y, threshold, one_hot=False, dist_metric=None) -> None:
' Constructor method of FeasibleSetDefender class.\n Within the init, a feasible set is constructed and\n depending on the input a respective distance metric is constructed for calculating point distances from label centroids.\n '
super().__init__(initial_dataset_x, initial_dataset_y)
self.one_hot = one_hot
self._threshold = threshold
if self.one_hot:
self._init_y = _label_encoding(initial_dataset_y)
else:
initial_dataset_y = initial_dataset_y.reshape((- 1))
self._feasible_set_construction()
self.distance_metric = dist_metric
_input_validation(self)<|docstring|>Constructor method of FeasibleSetDefender class.
Within the init, a feasible set is constructed and
depending on the input a respective distance metric is constructed for calculating point distances from label centroids.<|endoftext|> |
94b9b7774964c0f67c5949bb58cfd51ecd6163fc4caa4e959ce5689a1a00879a | def _feasible_set_construction(self):
' Constructs the initial feasible set for the defender\n Currently feasible set centroid is constructed by just taking the mean of the points per dimension for a label\n Also implements label counts for the running centroid updating during .defend\n '
labels = np.unique(self._init_y)
feasible_set = {}
label_counts = {}
for label in labels:
label_rows = (self._init_y == label)
label_counts[label] = np.sum(label_rows)
feasible_set[label] = np.mean(self._init_x[label_rows], 0)
self.feasible_set = feasible_set
self._label_counts = label_counts | Constructs the initial feasible set for the defender
Currently feasible set centroid is constructed by just taking the mean of the points per dimension for a label
Also implements label counts for the running centroid updating during .defend | niteshade/defence.py | _feasible_set_construction | oskarfernlund/data-poisoning-attacks | 0 | python | def _feasible_set_construction(self):
' Constructs the initial feasible set for the defender\n Currently feasible set centroid is constructed by just taking the mean of the points per dimension for a label\n Also implements label counts for the running centroid updating during .defend\n '
labels = np.unique(self._init_y)
feasible_set = {}
label_counts = {}
for label in labels:
label_rows = (self._init_y == label)
label_counts[label] = np.sum(label_rows)
feasible_set[label] = np.mean(self._init_x[label_rows], 0)
self.feasible_set = feasible_set
self._label_counts = label_counts | def _feasible_set_construction(self):
' Constructs the initial feasible set for the defender\n Currently feasible set centroid is constructed by just taking the mean of the points per dimension for a label\n Also implements label counts for the running centroid updating during .defend\n '
labels = np.unique(self._init_y)
feasible_set = {}
label_counts = {}
for label in labels:
label_rows = (self._init_y == label)
label_counts[label] = np.sum(label_rows)
feasible_set[label] = np.mean(self._init_x[label_rows], 0)
self.feasible_set = feasible_set
self._label_counts = label_counts<|docstring|>Constructs the initial feasible set for the defender
Currently feasible set centroid is constructed by just taking the mean of the points per dimension for a label
Also implements label counts for the running centroid updating during .defend<|endoftext|> |
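The centroid construction described above amounts to a per-label mean; a small self-contained NumPy illustration (not library code):

import numpy as np

X = np.array([[0.0, 0.0], [0.2, 0.0], [1.0, 1.0], [0.8, 1.2]])
y = np.array([0, 0, 1, 1])
feasible_set = {label: X[y == label].mean(axis=0) for label in np.unique(y)}
# label 0 -> [0.1, 0.0], label 1 -> [0.9, 1.1]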
fc5d9a748d4e0c4b3421e92395cec8d4e145b29a74755994844fa240fbc03905 | def _feasible_set_adjustment(self, datapoint, label):
' Adjust running means of feasible set (the centroid locations)\n using label counts, input new datapoint and label\n Args: \n datapoint (np.ndarray, torch.Tensor): point data.\n label (np.ndarray, torch.Tensor): label data.\n '
label_mean = self.feasible_set[label]
self._label_counts[label] += 1
new_mean = (label_mean + ((datapoint - label_mean) / self._label_counts[label]))
self.feasible_set[label] = new_mean | Adjust running means of feasible set (the centroid locations)
using label counts, input new datapoint and label
Args:
datapoint (np.ndarray, torch.Tensor): point data.
label (np.ndarray, torch.Tensor): label data. | niteshade/defence.py | _feasible_set_adjustment | oskarfernlund/data-poisoning-attacks | 0 | python | def _feasible_set_adjustment(self, datapoint, label):
' Adjust running means of feasible set (the centroid locations)\n using label counts, input new datapoint and label\n Args: \n datapoint (np.ndarray, torch.Tensor): point data.\n label (np.ndarray, torch.Tensor): label data.\n '
label_mean = self.feasible_set[label]
self._label_counts[label] += 1
new_mean = (label_mean + ((datapoint - label_mean) / self._label_counts[label]))
self.feasible_set[label] = new_mean | def _feasible_set_adjustment(self, datapoint, label):
' Adjust running means of feasible set (the centroid locations)\n using label counts, input new datapoint and label\n Args: \n datapoint (np.ndarray, torch.Tensor): point data.\n label (np.ndarray, torch.Tensor): label data.\n '
label_mean = self.feasible_set[label]
self._label_counts[label] += 1
new_mean = (label_mean + ((datapoint - label_mean) / self._label_counts[label]))
self.feasible_set[label] = new_mean<|docstring|>Adjust running means of feasible set (the centroid locations)
using label counts, input new datapoint and label
Args:
datapoint (np.ndarray, torch.Tensor): point data.
label (np.ndarray, torch.Tensor): label data.<|endoftext|> |
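The adjustment above is the standard incremental-mean update new_mean = mean + (x - mean) / count, applied per label; a tiny numeric check with illustrative values:

mean, count = 0.1, 2      # centroid of a label built from the points 0.0 and 0.2
x = 0.4                   # a newly accepted point for the same label
count += 1
mean = mean + (x - mean) / count
print(mean)               # ~0.2, the mean of 0.0, 0.2 and 0.4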
9833fab7a967e6fc0c4a98670f80acc6426fd0f78c2e054721cc9bce8a36992c | def _distance_metric_calculator(self, datapoint, label):
' Calculate the distance metric for the datapoint from the feasible set mean of that datapoints label\n Args: \n datapoint (np.ndarray, torch.Tensor): point data.\n label (np.ndarray, torch.Tensor): label data.\n Return:\n distance (float): distance of the point calculated from the centroid of the label\n '
label_mean = self.feasible_set[label]
distance = self.__distance_metric.distance(datapoint, label_mean)
return distance | Calculate the distance metric for the datapoint from the feasible set mean of that datapoint's label
Args:
datapoint (np.ndarray, torch.Tensor): point data.
label (np.ndarray, torch.Tensor): label data.
Return:
distance (float): distance of the point calculated from the centroid of the label | niteshade/defence.py | _distance_metric_calculator | oskarfernlund/data-poisoning-attacks | 0 | python | def _distance_metric_calculator(self, datapoint, label):
' Calculate the distance metric for the datapoint from the feasible set mean of that datapoints label\n Args: \n datapoint (np.ndarray, torch.Tensor): point data.\n label (np.ndarray, torch.Tensor): label data.\n Return:\n distance (float): distance of the point calculated from the centroid of the label\n '
label_mean = self.feasible_set[label]
distance = self.__distance_metric.distance(datapoint, label_mean)
return distance | def _distance_metric_calculator(self, datapoint, label):
' Calculate the distance metric for the datapoint from the feasible set mean of that datapoints label\n Args: \n datapoint (np.ndarray, torch.Tensor): point data.\n label (np.ndarray, torch.Tensor): label data.\n Return:\n distance (float): distance of the point calculated from the centroid of the label\n '
label_mean = self.feasible_set[label]
distance = self.__distance_metric.distance(datapoint, label_mean)
return distance<|docstring|>Calculate the distance metric for the datapoint from the feasible set mean of that datapoint's label
Args:
datapoint (np.ndarray, torch.Tensor): point data.
label (np.ndarray, torch.Tensor): label data.
Return:
distance (float): distance of the point calculated from the centroid of the label<|endoftext|> |
c5798b64684f43f06ae46ec8bdf456212026a1cabadf829d6092d6ae645275a6 | def defend(self, datapoints, labels, **input_kwargs):
' The defend method for the FeasibleSetDefender.\n For each incoming point, a distance from the feasible set centroid of that label is calculated.\n If the distance is higher than the threshold, the points are rejected.\n If all points are rejceted, empty arrays are returned.\n If one_hot encoded, artificial labels are created.\n\n Args: \n datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)).\n input_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).\n Return:\n tuple (output_datapoints, output_labels) :\n output_datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)),\n output_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).\n '
self._type_check(datapoints, labels)
if (self._datatype == 0):
datapoints = datapoints.cpu().detach().numpy()
labels = labels.cpu().detach().numpy()
if self.one_hot:
one_hot_length = len(labels[0])
labels = np.argmax(labels, axis=1)
else:
labels = labels.reshape((- 1))
cleared_datapoints = []
cleared_labels = []
for (id, datapoint) in enumerate(datapoints):
data_label = labels[id]
distance = self._distance_metric_calculator(datapoint, data_label)
if (distance < self._threshold):
self._feasible_set_adjustment(datapoint, data_label)
cleared_datapoints.append(datapoint)
cleared_labels.append(data_label)
if (len(cleared_labels) == 0):
if (self._datatype == 0):
output_empty_array = torch.tensor([])
else:
output_empty_array = np.array([])
return (output_empty_array, output_empty_array)
cleared_labels_stack = np.stack(cleared_labels)
if self.one_hot:
cleared_labels_stack = self._one_hot_decoding(cleared_labels_stack, one_hot_length)
output_datapoints = np.stack(cleared_datapoints)
output_labels = cleared_labels_stack
if (self._datatype == 0):
output_datapoints = torch.tensor(output_datapoints)
output_labels = torch.tensor(output_labels)
return (output_datapoints, output_labels) | The defend method for the FeasibleSetDefender.
For each incoming point, a distance from the feasible set centroid of that label is calculated.
If the distance is higher than the threshold, the points are rejected.
If all points are rejected, empty arrays are returned.
If one_hot encoded, artificial labels are created.
Args:
datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)).
input_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).
Return:
tuple (output_datapoints, output_labels) :
output_datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)),
output_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)). | niteshade/defence.py | defend | oskarfernlund/data-poisoning-attacks | 0 | python | def defend(self, datapoints, labels, **input_kwargs):
' The defend method for the FeasibleSetDefender.\n For each incoming point, a distance from the feasible set centroid of that label is calculated.\n If the distance is higher than the threshold, the points are rejected.\n If all points are rejceted, empty arrays are returned.\n If one_hot encoded, artificial labels are created.\n\n Args: \n datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)).\n input_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).\n Return:\n tuple (output_datapoints, output_labels) :\n output_datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)),\n output_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).\n '
self._type_check(datapoints, labels)
if (self._datatype == 0):
datapoints = datapoints.cpu().detach().numpy()
labels = labels.cpu().detach().numpy()
if self.one_hot:
one_hot_length = len(labels[0])
labels = np.argmax(labels, axis=1)
else:
labels = labels.reshape((- 1))
cleared_datapoints = []
cleared_labels = []
for (id, datapoint) in enumerate(datapoints):
data_label = labels[id]
distance = self._distance_metric_calculator(datapoint, data_label)
if (distance < self._threshold):
self._feasible_set_adjustment(datapoint, data_label)
cleared_datapoints.append(datapoint)
cleared_labels.append(data_label)
if (len(cleared_labels) == 0):
if (self._datatype == 0):
output_empty_array = torch.tensor([])
else:
output_empty_array = np.array([])
return (output_empty_array, output_empty_array)
cleared_labels_stack = np.stack(cleared_labels)
if self.one_hot:
cleared_labels_stack = self._one_hot_decoding(cleared_labels_stack, one_hot_length)
output_datapoints = np.stack(cleared_datapoints)
output_labels = cleared_labels_stack
if (self._datatype == 0):
output_datapoints = torch.tensor(output_datapoints)
output_labels = torch.tensor(output_labels)
return (output_datapoints, output_labels) | def defend(self, datapoints, labels, **input_kwargs):
' The defend method for the FeasibleSetDefender.\n For each incoming point, a distance from the feasible set centroid of that label is calculated.\n If the distance is higher than the threshold, the points are rejected.\n If all points are rejceted, empty arrays are returned.\n If one_hot encoded, artificial labels are created.\n\n Args: \n datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)).\n input_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).\n Return:\n tuple (output_datapoints, output_labels) :\n output_datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)),\n output_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).\n '
self._type_check(datapoints, labels)
if (self._datatype == 0):
datapoints = datapoints.cpu().detach().numpy()
labels = labels.cpu().detach().numpy()
if self.one_hot:
one_hot_length = len(labels[0])
labels = np.argmax(labels, axis=1)
else:
labels = labels.reshape((- 1))
cleared_datapoints = []
cleared_labels = []
for (id, datapoint) in enumerate(datapoints):
data_label = labels[id]
distance = self._distance_metric_calculator(datapoint, data_label)
if (distance < self._threshold):
self._feasible_set_adjustment(datapoint, data_label)
cleared_datapoints.append(datapoint)
cleared_labels.append(data_label)
if (len(cleared_labels) == 0):
if (self._datatype == 0):
output_empty_array = torch.tensor([])
else:
output_empty_array = np.array([])
return (output_empty_array, output_empty_array)
cleared_labels_stack = np.stack(cleared_labels)
if self.one_hot:
cleared_labels_stack = self._one_hot_decoding(cleared_labels_stack, one_hot_length)
output_datapoints = np.stack(cleared_datapoints)
output_labels = cleared_labels_stack
if (self._datatype == 0):
output_datapoints = torch.tensor(output_datapoints)
output_labels = torch.tensor(output_labels)
return (output_datapoints, output_labels)<|docstring|>The defend method for the FeasibleSetDefender.
For each incoming point, a distance from the feasible set centroid of that label is calculated.
If the distance is higher than the threshold, the points are rejected.
If all points are rejected, empty arrays are returned.
If one_hot encoded, artificial labels are created.
Args:
datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)).
input_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).
Return:
tuple (output_datapoints, output_labels) :
output_datapoints (np.ndarray, torch.Tensor): point data (shape (batch_size, data dimensionality)),
output_labels (np.ndarray, torch.Tensor): label data (shape (batch_size,)).<|endoftext|> |
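A minimal usage sketch for the defender documented above, again assuming the niteshade.defence import path and relying on the default distance metric; the threshold and data values are made up:

import numpy as np
from niteshade.defence import FeasibleSetDefender  # import path assumed from the metadata above

init_x = np.array([[0.0, 0.0], [0.2, 0.0], [1.0, 1.0], [0.8, 1.2]])
init_y = np.array([0, 0, 1, 1])
defender = FeasibleSetDefender(init_x, init_y, threshold=0.5)

# The second point is far from the centroid of its claimed label and should be rejected.
X_batch = np.array([[0.1, 0.1], [5.0, 5.0]])
y_batch = np.array([0, 1])

X_kept, y_kept = defender.defend(X_batch, y_batch)
print(X_kept, y_kept)   # only the first point is expected to survive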
ac77fa67b4340b03fb9856c4ce267ec890513c29f254f70f34b830a6b002977c | def _one_hot_decoding(self, cleared_labels_stack, one_hot_length):
' Construct one_hot outputs from int input labels\n Args: \n one_hot_length (int): Dimensionality of one_hot_encoded outputs\n cleared_labels_stack (list): list of labels.\n Return:\n output_labels (np.ndarray): one_hot_encoded label data\n '
output_labels = np.zeros((len(cleared_labels_stack), one_hot_length))
for (id, label) in enumerate(cleared_labels_stack):
output_labels[id][label] = 1
return output_labels | Construct one_hot outputs from int input labels
Args:
one_hot_length (int): Dimensionality of one_hot_encoded outputs
cleared_labels_stack (list): list of labels.
Return:
output_labels (np.ndarray): one_hot_encoded label data | niteshade/defence.py | _one_hot_decoding | oskarfernlund/data-poisoning-attacks | 0 | python | def _one_hot_decoding(self, cleared_labels_stack, one_hot_length):
' Construct one_hot outputs from int input labels\n Args: \n one_hot_length (int): Dimensionality of one_hot_encoded outputs\n cleared_labels_stack (list): list of labels.\n Return:\n output_labels (np.ndarray): one_hot_encoded label data\n '
output_labels = np.zeros((len(cleared_labels_stack), one_hot_length))
for (id, label) in enumerate(cleared_labels_stack):
output_labels[id][label] = 1
return output_labels | def _one_hot_decoding(self, cleared_labels_stack, one_hot_length):
' Construct one_hot outputs from int input labels\n Args: \n one_hot_length (int): Dimensionality of one_hot_encoded outputs\n cleared_labels_stack (list): list of labels.\n Return:\n output_labels (np.ndarray): one_hot_encoded label data\n '
output_labels = np.zeros((len(cleared_labels_stack), one_hot_length))
for (id, label) in enumerate(cleared_labels_stack):
output_labels[id][label] = 1
return output_labels<|docstring|>Construct one_hot outputs from int input labels
Args:
one_hot_length (int): Dimensionality of one_hot_encoded outputs
cleared_labels_stack (list): list of labels.
Return:
output_labels (np.ndarray): one_hot_encoded label data<|endoftext|> |
75d85822fbf5b20c40a669e7c7cdb40a3e222541079cd8626a088351d56dd803 | def __init__(self, type='Eucleidian') -> None:
' Constructor method of FeasibleSetDefender class.\n Default Distance_metric is Eucleidian distance.\n '
self._type = type | Constructor method of FeasibleSetDefender class.
Default Distance_metric is Eucleidian distance. | niteshade/defence.py | __init__ | oskarfernlund/data-poisoning-attacks | 0 | python | def __init__(self, type='Eucleidian') -> None:
' Constructor method of FeasibleSetDefender class.\n Default Distance_metric is Eucleidian distance.\n '
self._type = type | def __init__(self, type='Eucleidian') -> None:
' Constructor method of FeasibleSetDefender class.\n Default Distance_metric is Eucleidian distance.\n '
self._type = type<|docstring|>Constructor method of FeasibleSetDefender class.
Default Distance_metric is Eucleidian distance.<|endoftext|> |
269b53ba9aaed698b646655fdf8bbf965949ee1309c0573d2f4c355a2e938bfa | def distance(self, input_1, Input_2):
' Calculates the distance between 2 input points\n Currently only Eucleidian (l2 norm) distance metric is implemented off-the-shelf\n Args: \n input_1 (np.ndarray) : point_1 data (shape data dimensionality).\n input_2 (np.ndarray) : point_2 data (shape data dimensionality).\n Return:\n distance (float) : distance between the 2 input points.\n '
if (self._type == 'Eucleidian'):
return np.sqrt(np.sum(((input_1 - Input_2) ** 2)))
else:
raise NotImplementedError('This distance metric type has not been implemented') | Calculates the distance between 2 input points
Currently only Eucleidian (l2 norm) distance metric is implemented off-the-shelf
Args:
input_1 (np.ndarray) : point_1 data (shape data dimensionality).
input_2 (np.ndarray) : point_2 data (shape data dimensionality).
Return:
distance (float) : distance between the 2 input points. | niteshade/defence.py | distance | oskarfernlund/data-poisoning-attacks | 0 | python | def distance(self, input_1, Input_2):
' Calculates the distance between 2 input points\n Currently only Eucleidian (l2 norm) distance metric is implemented off-the-shelf\n Args: \n input_1 (np.ndarray) : point_1 data (shape data dimensionality).\n input_2 (np.ndarray) : point_2 data (shape data dimensionality).\n Return:\n distance (float) : distance between the 2 input points.\n '
if (self._type == 'Eucleidian'):
return np.sqrt(np.sum(((input_1 - Input_2) ** 2)))
else:
raise NotImplementedError('This distance metric type has not been implemented') | def distance(self, input_1, Input_2):
' Calculates the distance between 2 input points\n Currently only Eucleidian (l2 norm) distance metric is implemented off-the-shelf\n Args: \n input_1 (np.ndarray) : point_1 data (shape data dimensionality).\n input_2 (np.ndarray) : point_2 data (shape data dimensionality).\n Return:\n distance (float) : distance between the 2 input points.\n '
if (self._type == 'Eucleidian'):
return np.sqrt(np.sum(((input_1 - Input_2) ** 2)))
else:
raise NotImplementedError('This distance metric type has not been implemented')<|docstring|>Calculates the distance between 2 input points
Currently only Eucleidian (l2 norm) distance metric is implemented off-the-shelf
Args:
input_1 (np.ndarray) : point_1 data (shape data dimensionality).
input_2 (np.ndarray) : point_2 data (shape data dimensionality).
Return:
distance (float) : distance between the 2 input points.<|endoftext|> |
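As a quick check of the l2 metric in this record, a self-contained sketch assuming two numpy arrays of equal shape (names are illustrative):

import numpy as np

def euclidean(p1, p2):
    # l2 norm of the elementwise difference between two points.
    return np.sqrt(np.sum((p1 - p2) ** 2))

# euclidean(np.array([0.0, 0.0]), np.array([3.0, 4.0])) evaluates to 5.0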
e96d92f4b5b4e60717cfe3db6a14d84ee3332a5209c508a40fd3b6121f4b3ea5 | def get_args():
'Get command-line arguments'
parser = argparse.ArgumentParser(description='Have a Picnic', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('items', metavar='str', nargs='+', help='items to bring')
parser.add_argument('-s', '--sorted', help='sort the items', action='store_true')
return parser.parse_args() | Get command-line arguments | examples/03_picnic/picnic.py | get_args | hbuell6/be434-fall-2021 | 0 | python | def get_args():
parser = argparse.ArgumentParser(description='Have a Picnic', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('items', metavar='str', nargs='+', help='items to bring')
parser.add_argument('-s', '--sorted', help='sort the items', action='store_true')
return parser.parse_args() | def get_args():
parser = argparse.ArgumentParser(description='Have a Picnic', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('items', metavar='str', nargs='+', help='items to bring')
parser.add_argument('-s', '--sorted', help='sort the items', action='store_true')
return parser.parse_args()<|docstring|>Get command-line arguments<|endoftext|> |
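The parser in this record reads sys.argv when the script runs; for illustration, the same argparse setup can be exercised with an explicit argument list (the sample arguments are hypothetical):

import argparse

parser = argparse.ArgumentParser(description='Have a Picnic')
parser.add_argument('items', metavar='str', nargs='+', help='items to bring')
parser.add_argument('-s', '--sorted', help='sort the items', action='store_true')

args = parser.parse_args(['-s', 'soda', 'chips', 'apples'])
# args.items == ['soda', 'chips', 'apples'] and args.sorted is True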
963166ce833da6503fb81f6903491b8e4ab3ea59542f00c19295b99b8b37e919 | def main():
'Make a jazz noise here'
args = get_args()
items = args.items
if args.sorted:
items.sort()
if (len(items) == 1):
print((('You are bringing ' + items[0]) + '.'))
elif (len(items) == 2):
print((((('You are bringing ' + items[0]) + ' and ') + items[1]) + '.'))
else:
print((((('You are bringing ' + ', '.join(items[:(- 1)])) + ', and ') + items[(- 1)]) + '.')) | Make a jazz noise here | examples/03_picnic/picnic.py | main | hbuell6/be434-fall-2021 | 0 | python | def main():
args = get_args()
items = args.items
if args.sorted:
items.sort()
if (len(items) == 1):
print((('You are bringing ' + items[0]) + '.'))
elif (len(items) == 2):
print((((('You are bringing ' + items[0]) + ' and ') + items[1]) + '.'))
else:
print((((('You are bringing ' + ', '.join(items[:(- 1)])) + ', and ') + items[(- 1)]) + '.')) | def main():
args = get_args()
items = args.items
if args.sorted:
items.sort()
if (len(items) == 1):
print((('You are bringing ' + items[0]) + '.'))
elif (len(items) == 2):
print((((('You are bringing ' + items[0]) + ' and ') + items[1]) + '.'))
else:
print((((('You are bringing ' + ', '.join(items[:(- 1)])) + ', and ') + items[(- 1)]) + '.'))<|docstring|>Make a jazz noise here<|endoftext|> |
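The one/two/many branching in this record is the usual Oxford-comma pattern; a small standalone sketch of the many-items branch, assuming a plain Python list (item names are made up):

items = ['apples', 'potato chips', 'soda']
sentence = 'You are bringing ' + ', '.join(items[:-1]) + ', and ' + items[-1] + '.'
# sentence == 'You are bringing apples, potato chips, and soda.'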
bc9bb8e059a681a7fce0ea921316a8528e6a23ec00d54fc796fe3af994700897 | @staticmethod
def get_dlq_queue_arn_and_resources(cw_event_source, source_arn, attributes):
'returns dlq queue arn and dlq_resources, assuming cw_event_source.DeadLetterConfig has been validated'
dlq_queue_arn = cw_event_source.DeadLetterConfig.get('Arn')
if (dlq_queue_arn is not None):
return (dlq_queue_arn, [])
queue_logical_id = cw_event_source.DeadLetterConfig.get('QueueLogicalId')
dlq_resources = EventBridgeRuleUtils.create_dead_letter_queue_with_policy(cw_event_source.logical_id, source_arn, queue_logical_id, attributes)
dlq_queue_arn = dlq_resources[0].get_runtime_attr('arn')
return (dlq_queue_arn, dlq_resources) | returns dlq queue arn and dlq_resources, assuming cw_event_source.DeadLetterConfig has been validated | samtranslator/model/eventbridge_utils.py | get_dlq_queue_arn_and_resources | aaythapa/serverless-application-model | 1,279 | python | @staticmethod
def get_dlq_queue_arn_and_resources(cw_event_source, source_arn, attributes):
dlq_queue_arn = cw_event_source.DeadLetterConfig.get('Arn')
if (dlq_queue_arn is not None):
return (dlq_queue_arn, [])
queue_logical_id = cw_event_source.DeadLetterConfig.get('QueueLogicalId')
dlq_resources = EventBridgeRuleUtils.create_dead_letter_queue_with_policy(cw_event_source.logical_id, source_arn, queue_logical_id, attributes)
dlq_queue_arn = dlq_resources[0].get_runtime_attr('arn')
return (dlq_queue_arn, dlq_resources) | @staticmethod
def get_dlq_queue_arn_and_resources(cw_event_source, source_arn, attributes):
dlq_queue_arn = cw_event_source.DeadLetterConfig.get('Arn')
if (dlq_queue_arn is not None):
return (dlq_queue_arn, [])
queue_logical_id = cw_event_source.DeadLetterConfig.get('QueueLogicalId')
dlq_resources = EventBridgeRuleUtils.create_dead_letter_queue_with_policy(cw_event_source.logical_id, source_arn, queue_logical_id, attributes)
dlq_queue_arn = dlq_resources[0].get_runtime_attr('arn')
return (dlq_queue_arn, dlq_resources)<|docstring|>returns dlq queue arn and dlq_resources, assuming cw_event_source.DeadLetterConfig has been validated<|endoftext|> |
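Stripped of the SAM-specific resource types, the control flow in this record is "use the caller-supplied queue ARN if present, otherwise create a queue plus policy and return the new queue's ARN"; a hedged sketch in which create_queue_resources is a hypothetical stand-in for the samtranslator helper, not a real API:

def resolve_dlq_arn(dead_letter_config, source_arn, create_queue_resources):
    # A pre-declared queue wins: return its ARN and create no new resources.
    arn = dead_letter_config.get('Arn')
    if arn is not None:
        return arn, []
    # Otherwise synthesize the queue (and its access policy) and return the new ARN.
    queue_logical_id = dead_letter_config.get('QueueLogicalId')
    queue_arn, resources = create_queue_resources(source_arn, queue_logical_id)
    return queue_arn, resources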