_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (3 classes) | text (string, 31-13.1k chars) | language (1 class) | meta_information (dict) |
---|---|---|---|---|---|
q275700
|
D.filter_threshold
|
test
|
def filter_threshold(self, analyte, threshold):
"""
Apply threshold filter.
Generates threshold filters for the given analytes above and below
the specified threshold.
Two filters are created with prefixes '_above' and '_below'.
'_above' keeps all the data above the threshold.
'_below' keeps all the data below the threshold.
i.e. to select data below the threshold value, you should turn the
'_above' filter off.
Parameters
----------
analyte : str
Name of the analyte that the threshold applies to.
threshold : float
The threshold value.
Returns
-------
None
"""
params = locals()
del(params['self'])
# generate filter
below, above = filters.threshold(self.focus[analyte], threshold)
|
python
|
{
"resource": ""
}
|
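For readers skimming this row: the truncated body hinges on `filters.threshold` returning a pair of complementary boolean masks. A minimal sketch of that idea in plain numpy (the helper below is hypothetical and ignores how latools treats NaNs or values exactly at the threshold):

```python
import numpy as np

def threshold_masks(values, threshold):
    """Return (below, above) boolean masks for a simple threshold filter."""
    values = np.asarray(values, dtype=float)
    below = values < threshold   # data kept by the '_below' filter
    above = values > threshold   # data kept by the '_above' filter
    return below, above

# To work only with data below the threshold, keep 'below' and ignore 'above',
# which mirrors "turn the '_above' filter off" in the docstring above.
below, above = threshold_masks([0.1, 0.5, 0.9], 0.4)  # below -> [True, False, False]
```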
q275701
|
D.filter_gradient_threshold
|
test
|
def filter_gradient_threshold(self, analyte, win, threshold, recalc=True):
"""
Apply gradient threshold filter.
Generates threshold filters for the given analytes above and below
the specified threshold.
Two filters are created with prefixes '_above' and '_below'.
'_above' keeps all the data above the threshold.
'_below' keeps all the data below the threshold.
i.e. to select data below the threshold value, you should turn the
'_above' filter off.
Parameters
----------
analyte : str
Name of the analyte that the gradient threshold applies to.
threshold : float
The gradient threshold value.
win : int
Window used to calculate gradients (n points)
recalc : bool
Whether or not to re-calculate the gradients.
Returns
-------
None
"""
params = locals()
del(params['self'])
# calculate absolute gradient
if recalc or not self.grads_calced:
|
python
|
{
"resource": ""
}
|
q275702
|
D.calc_correlation
|
test
|
def calc_correlation(self, x_analyte, y_analyte, window=15, filt=True, recalc=True):
"""
Calculate local correlation between two analytes.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
None
"""
label = '{:}_{:}_{:.0f}'.format(x_analyte, y_analyte, window)
if label in self.correlations and not recalc:
return
# make window odd
if window % 2 != 1:
window += 1
# get
|
python
|
{
"resource": ""
}
|
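The `calc_correlation` row above is cut off before the correlation itself is computed. As a rough illustration of a centred rolling Pearson correlation over `window` points (using pandas rather than whatever latools does internally; the p-value step is omitted):

```python
import numpy as np
import pandas as pd

def rolling_pearson(x, y, window=15):
    """Centred rolling Pearson correlation between two equal-length traces."""
    if window % 2 != 1:
        window += 1  # an odd window centres cleanly on each point
    r = pd.Series(x).rolling(window, center=True).corr(pd.Series(y))
    return r.to_numpy()

x = np.linspace(0, 10, 100)
r = rolling_pearson(np.sin(x), np.cos(x), window=15)  # NaN-padded at the edges
```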
q275703
|
D.filter_correlation
|
test
|
def filter_correlation(self, x_analyte, y_analyte, window=15,
r_threshold=0.9, p_threshold=0.05, filt=True, recalc=False):
"""
Calculate correlation filter.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
r_threshold : float
The correlation index above which to exclude data.
Note: the absolute pearson R value is considered, so
negative correlations below -`r_threshold` will also
be excluded.
p_threshold : float
The significance level below which data are excluded.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
None
"""
# make window odd
if window % 2
|
python
|
{
"resource": ""
}
|
q275704
|
D.filter_new
|
test
|
def filter_new(self, name, filt_str):
"""
Make new filter from combination of other filters.
Parameters
----------
name : str
The name of the new filter. Should be unique.
filt_str : str
A logical combination of partial strings which will create
the new filter. For example, 'Albelow & Mnbelow' will combine
|
python
|
{
"resource": ""
}
|
q275705
|
D.get_params
|
test
|
def get_params(self):
"""
Returns parameters used to process data.
Returns
-------
dict
dict of analysis parameters
"""
outputs = ['sample',
'ratio_params',
'despike_params',
'autorange_params',
'bkgcorrect_params']
out = {}
|
python
|
{
"resource": ""
}
|
q275706
|
histograms
|
test
|
def histograms(dat, keys=None, bins=25, logy=False, cmap=None, ncol=4):
"""
Plot histograms of all items in dat.
Parameters
----------
dat : dict
Data in {key: array} pairs.
keys : array-like
The keys in dat that you want to plot. If None,
all are plotted.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
cmap : dict
The colours that the different items should be. If None,
all are grey.
Returns
-------
fig, axes
"""
if keys is None:
keys = dat.keys()
ncol = int(ncol)
nrow = calc_nrow(len(keys), ncol)
fig, axs = plt.subplots(nrow, ncol, figsize=[ncol * 2, nrow * 2])
pn = 0
for k, ax in zip(keys, axs.flat):
tmp = nominal_values(dat[k])
x = tmp[~np.isnan(tmp)]
if cmap is not None:
c = cmap[k]
else:
c = (0, 0, 0, 0.5)
|
python
|
{
"resource": ""
}
|
q275707
|
summary_stats
|
test
|
def summary_stats(x, y, nm=None):
"""
Compute summary statistics for paired x, y data.
Tests
-----
Parameters
----------
x, y : array-like
Data to compare
nm : str (optional)
Index value of created dataframe.
Returns
-------
pandas dataframe of statistics.
"""
# create dataframe for results
if isinstance(nm, str):
nm = [nm]
# cols = pd.MultiIndex.from_arrays([['', 'Pairwise', 'Pairwise', cat, cat, cat, cat],
# ['N', 'W', 'p', 'Median', 'IQR', 'W', 'p']])
# cols = ['Median', 'IQR', 'CI95', 'L95', 'LQ', 'UQ', 'U95', 'N',
# 'Wilcoxon_stat', 'Wilcoxon_p',
# 'KS_stat', 'KS_p',
# 'LR_slope', 'LR_intercept', 'LR_slope_tvalue', 'LR_intercept_tvalue', 'LR_slope_p', 'LR_intercept_p', 'LR_R2adj']
# out = pd.DataFrame(index=nm, columns=cols)
cols = pd.MultiIndex.from_tuples([('Residual Summary', 'N'),
('Residual Summary', 'Median'),
('Residual Summary', 'LQ'),
('Residual Summary', 'IQR'),
('Residual Summary', 'UQ'),
('Residual Regression', 'Slope'),
('Residual Regression', 'Slope t'),
('Residual Regression', 'Slope p'),
('Residual Regression', 'Intercept'),
('Residual Regression', 'Intercept t'),
('Residual Regression', 'Intercept p'),
('Residual
|
python
|
{
"resource": ""
}
|
q275708
|
load_reference_data
|
test
|
def load_reference_data(name=None):
"""
Fetch LAtools reference data from online repository.
Parameters
----------
name : str
Which data to download. Can be one of 'culture_reference',
'culture_test', 'downcore_reference', 'downcore_test', 'iolite_reference'
or 'zircon_reference'.
If None, all are downloaded and returned as a dict.
Returns
-------
pandas.DataFrame or dict.
"""
base_url = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vQJfCeuqrtFFMAeSpA9rguzLAo9OVuw50AHhAULuqjMJzbd3h46PK1KjF69YiJAeNAAjjMDkJK7wMpG/pub?gid={:}&single=true&output=csv'
gids = {'culture_reference': '0',
|
python
|
{
"resource": ""
}
|
q275709
|
AllInstances.lookup
|
test
|
def lookup(self, TC: type, G: type) -> Optional[TypeClass]:
''' Find an instance of the type class `TC` for type `G`.
Iterates `G`'s parent classes, looking up instances for each,
checking whether the instance is a subclass of the target type
class `TC`.
'''
if isinstance(G, str):
raise ImplicitNotFound(TC, G, f'{G} is a string annotation')
if not isinstance(G, (type, TypeVar, _GenericAlias)):
raise ImplicitNotFound(TC, G, f'{G} is neither type, _GenericAlias nor TypeVar:
|
python
|
{
"resource": ""
}
|
q275710
|
elements
|
test
|
def elements(all_isotopes=True):
"""
Loads a DataFrame of all elements and isotopes.
Scraped from https://www.webelements.com/
Returns
-------
pandas DataFrame with columns (element, atomic_number, isotope, atomic_weight, percent)
"""
el = pd.read_pickle(pkgrs.resource_filename('latools', 'resources/elements.pkl'))
if all_isotopes:
return el.set_index('element')
else:
|
python
|
{
"resource": ""
}
|
q275711
|
calc_M
|
test
|
def calc_M(molecule):
"""
Returns molecular weight of molecule.
Where molecule is in standard chemical notation,
e.g. 'CO2', 'HCO3' or B(OH)4
Returns
-------
molecular_weight : float
"""
# load periodic table
els = elements()
# define regexs
parens = re.compile('\(([A-z0-9]+)\)([0-9]+)?')
stoich = re.compile('([A-Z][a-z]?)([0-9]+)?')
ps = parens.findall(molecule) # find subgroups in parentheses
rem = parens.sub('', molecule) # get remainder
m = 0
# deal with sub-groups
if len(ps) > 0:
for sub, ns in ps:
ms = 0
for e, n in stoich.findall(sub):
me = (els.loc[e, 'atomic_weight'] *
els.loc[e, 'percent'] / 100).sum()
if n == '':
|
python
|
{
"resource": ""
}
|
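To make the two regular expressions in `calc_M` easier to follow, here is what they extract from a formula such as 'B(OH)4'. Raw-string patterns and `[A-Za-z]` are used below in place of the original's plain literals and `[A-z]` range; this is an illustration, not the library's code:

```python
import re

parens = re.compile(r'\(([A-Za-z0-9]+)\)([0-9]+)?')   # parenthesised sub-group + multiplier
stoich = re.compile(r'([A-Z][a-z]?)([0-9]+)?')        # element symbol + optional count

molecule = 'B(OH)4'
print(parens.findall(molecule))    # [('OH', '4')]  -> the (OH) group occurs 4 times
print(parens.sub('', molecule))    # 'B'            -> remainder handled by `stoich`
print(stoich.findall('HCO3'))      # [('H', ''), ('C', ''), ('O', '3')]
```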
q275712
|
gen_keywords
|
test
|
def gen_keywords(*args: Union[ANSIColors, ANSIStyles], **kwargs: Union[ANSIColors, ANSIStyles]) -> tuple:
'''generate single escape sequence mapping.'''
fields: tuple = tuple()
values: tuple = tuple()
for tpl in args:
fields += tpl._fields
values += tpl
|
python
|
{
"resource": ""
}
|
q275713
|
annihilate
|
test
|
def annihilate(predicate: tuple, stack: tuple) -> tuple:
'''Squash and reduce the input stack.
Removes the elements of input that match predicate and only keeps the
|
python
|
{
"resource": ""
}
|
q275714
|
dedup
|
test
|
def dedup(stack: tuple) -> tuple:
'''Remove duplicates from the stack in first-seen order.'''
# Initializes with an accumulator and then reduces the stack with first match
# deduplication.
|
python
|
{
"resource": ""
}
|
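The `dedup` body above is truncated before the reduce. A plain-loop sketch of the documented behaviour (first-seen order preserved); the original's accumulator/reduce formulation may differ in detail:

```python
def dedup_simple(stack: tuple) -> tuple:
    """Remove duplicates from a tuple, keeping the first occurrence of each item."""
    seen = set()
    out = ()
    for item in stack:
        if item not in seen:
            seen.add(item)
            out += (item,)
    return out

assert dedup_simple((1, 2, 1, 3, 2)) == (1, 2, 3)
```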
q275715
|
gauss_weighted_stats
|
test
|
def gauss_weighted_stats(x, yarray, x_new, fwhm):
"""
Calculate gaussian weighted moving mean, SD and SE.
Parameters
----------
x : array-like
The independent variable
yarray : (n,m) array
Where n = x.size, and m is the number of
dependent variables to smooth.
x_new : array-like
The new x-scale to interpolate the data
fwhm : int
FWHM of the gaussian kernel.
Returns
-------
(mean, std, se) : tuple
"""
sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))
# create empty mask array
mask = np.zeros((x.size, yarray.shape[1], x_new.size))
# fill mask
for i, xni in enumerate(x_new):
mask[:, :, i] = gauss(x[:, np.newaxis], 1, xni, sigma)
# normalise mask
nmask = mask / mask.sum(0) # sum of each gaussian = 1
|
python
|
{
"resource": ""
}
|
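As a compact restatement of what `gauss_weighted_stats` is doing with its mask array: each output point is a mean weighted by a gaussian centred on the corresponding `x_new` value, with sigma derived from the FWHM. A minimal, mean-only sketch (the SD/SE parts of the original are omitted):

```python
import numpy as np

def gauss_weighted_mean(x, y, x_new, fwhm):
    """Gaussian-weighted moving mean of y (sampled at x) evaluated at x_new."""
    x, y, x_new = (np.asarray(a, dtype=float) for a in (x, y, x_new))
    sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))      # FWHM -> standard deviation
    w = np.exp(-0.5 * ((x[:, None] - x_new[None, :]) / sigma) ** 2)
    w /= w.sum(axis=0)                               # each column of weights sums to 1
    return w.T @ y                                   # one weighted mean per x_new point

m = gauss_weighted_mean(np.arange(10.), np.arange(10.) ** 2, np.array([2.5, 7.5]), fwhm=3)
```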
q275716
|
gauss
|
test
|
def gauss(x, *p):
""" Gaussian function.
Parameters
----------
x : array_like
Independent variable.
*p : parameters unpacked to A, mu, sigma
A = amplitude, mu = centre, sigma = width
Returns
-------
|
python
|
{
"resource": ""
}
|
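The truncated Returns section of `gauss` presumably documents the evaluated curve. For reference, the standard form with the parameters named in the docstring, written with explicit arguments rather than unpacking `*p`:

```python
import numpy as np

def gauss_explicit(x, A, mu, sigma):
    """Gaussian: A * exp(-(x - mu)**2 / (2 * sigma**2))."""
    x = np.asarray(x, dtype=float)
    return A * np.exp(-((x - mu) ** 2) / (2 * sigma ** 2))
```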
q275717
|
stderr
|
test
|
def stderr(a):
"""
Calculate the standard error of a.
"""
|
python
|
{
"resource": ""
}
|
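The `stderr` body is cut off entirely. One plausible, NaN-aware reading of "standard error of a" (the real latools version may differ, e.g. in its degrees-of-freedom convention):

```python
import numpy as np

def stderr_sketch(a):
    """Standard error of the mean: std / sqrt(number of non-NaN points)."""
    a = np.asarray(a, dtype=float)
    n = np.sum(~np.isnan(a))
    return np.nanstd(a) / np.sqrt(n)
```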
q275718
|
analyse._get_samples
|
test
|
def _get_samples(self, subset=None):
"""
Helper function to get sample names from subset.
Parameters
----------
subset : str
Subset name. If None, returns all samples.
Returns
-------
List of sample names
"""
if subset is None:
samples = self.subsets['All_Samples']
else:
try:
|
python
|
{
"resource": ""
}
|
q275719
|
analyse.despike
|
test
|
def despike(self, expdecay_despiker=False, exponent=None,
noise_despiker=True, win=3, nlim=12., exponentplot=False,
maxiter=4, autorange_kwargs={}, focus_stage='rawdata'):
"""
Despikes data with exponential decay and noise filters.
Parameters
----------
expdecay_despiker : bool
Whether or not to apply the exponential decay filter.
exponent : None or float
The exponent for the exponential decay filter. If None,
it is determined automatically using `find_expocoef`.
tstep : None or float
The time interval between measurements. If None, it is
determined automatically from the Time variable.
noise_despiker : bool
Whether or not to apply the standard deviation spike filter.
win : int
The rolling window over which the spike filter calculates
the trace statistics.
nlim : float
The number of standard deviations above the rolling mean
beyond which data are excluded.
exponentplot : bool
Whether or not to show a plot of the automatically
|
python
|
{
"resource": ""
}
|
q275720
|
analyse.bkg_calc_weightedmean
|
test
|
def bkg_calc_weightedmean(self, analytes=None, weight_fwhm=None,
n_min=20, n_max=None, cstep=None,
bkg_filter=False, f_win=7, f_n_lim=3, focus_stage='despiked'):
"""
Background calculation using a gaussian weighted mean.
Parameters
----------
analytes : str or iterable
Which analyte or analytes to calculate.
weight_fwhm : float
The full-width-at-half-maximum of the gaussian used
to calculate the weighted average.
n_min : int
Background regions with fewer than n_min points
will not be included in the fit.
cstep : float or None
The interval between calculated background points.
bkg_filter : bool
If true, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. If True, two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
"""
if analytes is None:
analytes = self.analytes
self.bkg = Bunch()
elif isinstance(analytes, str):
analytes = [analytes]
if weight_fwhm is None:
weight_fwhm = 600 # 10 minute default window
self.get_background(n_min=n_min, n_max=n_max,
|
python
|
{
"resource": ""
}
|
q275721
|
analyse.bkg_calc_interp1d
|
test
|
def bkg_calc_interp1d(self, analytes=None, kind=1, n_min=10, n_max=None, cstep=None,
bkg_filter=False, f_win=7, f_n_lim=3, focus_stage='despiked'):
"""
Background calculation using a 1D interpolation.
scipy.interpolate.interp1D is used for interpolation.
Parameters
----------
analytes : str or iterable
Which analyte or analytes to calculate.
kind : str or int
Integer specifying the order of the spline interpolation
used, or string specifying a type of interpolation.
Passed to `scipy.interpolate.interp1D`
n_min : int
Background regions with fewer than n_min points
will not be included in the fit.
cstep : float or None
The interval between calculated background points.
bkg_filter : bool
If true, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. If True, two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
"""
if analytes is None:
analytes = self.analytes
self.bkg = Bunch()
elif isinstance(analytes, str):
analytes = [analytes]
self.get_background(n_min=n_min, n_max=n_max,
|
python
|
{
"resource": ""
}
|
q275722
|
analyse.bkg_subtract
|
test
|
def bkg_subtract(self, analytes=None, errtype='stderr', focus_stage='despiked'):
"""
Subtract calculated background from data.
Must run bkg_calc first!
Parameters
----------
analytes : str or iterable
Which analyte(s) to subtract.
errtype : str
Which type of error to propagate. default is 'stderr'.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
if focus_stage == 'despiked':
if 'despiked' not in self.stages_complete:
|
python
|
{
"resource": ""
}
|
q275723
|
analyse.ratio
|
test
|
def ratio(self, internal_standard=None):
"""
Calculates the ratio of all analytes to a single analyte.
Parameters
----------
internal_standard : str
The name of the analyte to divide all other analytes
by.
Returns
-------
None
"""
if 'bkgsub' not in self.stages_complete:
|
python
|
{
"resource": ""
}
|
q275724
|
analyse.make_subset
|
test
|
def make_subset(self, samples=None, name=None):
"""
Creates a subset of samples, which can be treated independently.
Parameters
----------
samples : str or array-like
Name of sample, or list of sample names.
name : (optional) str or number
The name of the sample group. Defaults to n + 1, where n is
the highest existing group number
"""
# Check if a subset containing the same samples already exists.
for k, v in self.subsets.items():
if set(v) == set(samples) and k != 'not_in_set':
return k
if isinstance(samples, str):
|
python
|
{
"resource": ""
}
|
q275725
|
analyse.filter_gradient_threshold_percentile
|
test
|
def filter_gradient_threshold_percentile(self, analyte, percentiles, level='population', win=15, filt=False,
samples=None, subset=None):
"""
Apply a gradient threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
win : int
The window over which to calculate the moving gradient
percentiles : float or iterable of len=2
The percentile values.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
params = locals()
del(params['self'])
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
self.minimal_analytes.update([analyte])
# Calculate gradients of all samples
self.get_gradients(analytes=[analyte], win=win, filt=filt, subset=subset)
grad = self.gradients[analyte][~np.isnan(self.gradients[analyte])]
if isinstance(percentiles, (int, float)):
percentiles = [percentiles]
if level == 'population':
# calculate filter limits
lims = np.percentile(grad, percentiles)
# Calculate filter for individual samples
with self.pbar.set(total=len(samples), desc='Percentile Threshold Filter') as prog:
for s in samples:
d = self.data[s]
setn = d.filt.maxset + 1
g = calc_grads(d.Time, d.focus, [analyte], win)[analyte]
if level == 'individual':
gt = nominal_values(g)
lims = np.percentile(gt[~np.isnan(gt)], percentiles)
if len(lims) == 1:
above =
|
python
|
{
"resource": ""
}
|
q275726
|
analyse.fit_classifier
|
test
|
def fit_classifier(self, name, analytes, method, samples=None,
subset=None, filt=True, sort_by=0, **kwargs):
"""
Create a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier.
analytes : str or iterable
Which analytes the clustering algorithm should consider.
method : str
Which clustering algorithm to use. Can be:
'meanshift'
The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
'kmeans'
The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
|
python
|
{
"resource": ""
}
|
q275727
|
analyse.apply_classifier
|
test
|
def apply_classifier(self, name, samples=None, subset=None):
"""
Apply a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier to apply.
subset : str
The subset of samples to apply the classifier to.
Returns
-------
name : str
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
c = self.classifiers[name]
labs = c.classifier.ulabels_
with self.pbar.set(total=len(samples), desc='Applying ' + name + ' classifier') as prog:
for s in samples:
d = self.data[s]
try:
f = c.predict(d.focus)
except ValueError:
|
python
|
{
"resource": ""
}
|
q275728
|
analyse.filter_correlation
|
test
|
def filter_correlation(self, x_analyte, y_analyte, window=None,
r_threshold=0.9, p_threshold=0.05, filt=True,
samples=None, subset=None):
"""
Applies a correlation filter to the data.
Calculates a rolling correlation between every `window` points of
two analytes, and excludes data where their Pearson's R value is
above `r_threshold` and statistically significant.
Data will be excluded where their absolute R value is greater than
`r_threshold` AND the p-value associated with the correlation is
less than `p_threshold`, i.e. only correlations that are statistically
significant are considered.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
r_threshold : float
The correlation index above which to exclude data.
Note: the absolute pearson R value is considered, so
negative correlations below -`r_threshold` will also
be excluded.
p_threshold : float
The significance level below which data are
|
python
|
{
"resource": ""
}
|
q275729
|
analyse.filter_on
|
test
|
def filter_on(self, filt=None, analyte=None, samples=None, subset=None, show_status=False):
"""
Turns data filters on for particular analytes and samples.
Parameters
----------
filt : optional, str or array_like
Name, partial name or list of names of filters. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name are activated.
Defaults to all filters.
|
python
|
{
"resource": ""
}
|
q275730
|
analyse.filter_off
|
test
|
def filter_off(self, filt=None, analyte=None, samples=None, subset=None, show_status=False):
"""
Turns data filters off for particular analytes and samples.
Parameters
----------
filt : optional, str or array_like
Name, partial name or list of names of filters. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name are deactivated.
Defaults to all filters.
|
python
|
{
"resource": ""
}
|
q275731
|
analyse.filter_status
|
test
|
def filter_status(self, sample=None, subset=None, stds=False):
"""
Prints the current status of filters for specified samples.
Parameters
----------
sample : str
Which sample to print.
subset : str
Specify a subset
stds : bool
Whether or not to include standards.
"""
s = ''
if sample is None and subset is None:
if not self._has_subsets:
s += 'Subset: All Samples\n\n'
s += self.data[self.subsets['All_Samples'][0]].filt.__repr__()
else:
for n in sorted(str(sn) for sn in self._subset_names):
if n in self.subsets:
pass
elif int(n) in self.subsets:
n = int(n)
pass
s += 'Subset: ' + str(n) + '\n'
s += 'Samples: ' + ', '.join(self.subsets[n]) + '\n\n'
s += self.data[self.subsets[n][0]].filt.__repr__()
|
python
|
{
"resource": ""
}
|
q275732
|
analyse.filter_defragment
|
test
|
def filter_defragment(self, threshold, mode='include', filt=True, samples=None, subset=None):
"""
Remove 'fragments' from the calculated filter
Parameters
----------
threshold : int
Contiguous data regions that contain this number
or fewer points are considered 'fragments'
mode : str
Specifies whether to 'include' or 'exclude' the identified
fragments.
filt : bool or filt string
Which filter to apply the defragmenter to. Defaults to True
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
if samples is
|
python
|
{
"resource": ""
}
|
q275733
|
analyse.filter_nremoved
|
test
|
def filter_nremoved(self, filt=True, quiet=False):
"""
Report how many data are removed by the active filters.
"""
rminfo = {}
for n in self.subsets['All_Samples']:
s = self.data[n]
rminfo[n] = s.filt_nremoved(filt)
if not quiet:
maxL = max([len(s) for s in rminfo.keys()])
print('{string:{number}s}'.format(string='Sample ', number=maxL + 3) +
'{total:4s}'.format(total='tot') +
'{removed:4s}'.format(removed='flt') +
'{percent:4s}'.format(percent='%rm'))
|
python
|
{
"resource": ""
}
|
q275734
|
analyse.gradient_histogram
|
test
|
def gradient_histogram(self, analytes=None, win=15, filt=False, bins=None, samples=None, subset=None, recalc=True, ncol=4):
"""
Plot a histogram of the gradients in all samples.
Parameters
----------
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
bins : None or array-like
The bins to use in the histogram
samples : str or list
which samples to get
subset : str or int
which subset to get
recalc : bool
Whether to re-calculate the gradients, or use existing gradients.
Returns
-------
fig, ax
"""
if analytes is None:
analytes = [a for a in self.analytes if self.internal_standard not in a]
if not hasattr(self, 'gradients'):
self.gradients = Bunch()
ncol = int(ncol)
n = len(analytes)
nrow = plot.calc_nrow(n, ncol)
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
self.get_gradients(analytes=analytes, win=win, filt=filt, subset=subset, recalc=recalc)
fig, axs = plt.subplots(nrow, ncol, figsize=[3. * ncol, 2.5 * nrow])
|
python
|
{
"resource": ""
}
|
q275735
|
analyse.gradient_crossplot
|
test
|
def gradient_crossplot(self, analytes=None, win=15, lognorm=True,
bins=25, filt=False, samples=None,
subset=None, figsize=(12, 12), save=False,
colourful=True, mode='hist2d', recalc=True, **kwargs):
"""
Plot analyte gradients against each other.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
lognorm : bool
Whether or not to log normalise the colour scale
of the 2D histogram.
bins : int
The number of bins in the 2D histogram.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
figsize : tuple
Figure size (width, height) in inches.
save : bool or str
If True, plot is saved as 'crossplot.png'; if str, plot is
|
python
|
{
"resource": ""
}
|
q275736
|
analyse.histograms
|
test
|
def histograms(self, analytes=None, bins=25, logy=False,
filt=False, colourful=True):
"""
Plot histograms of analytes.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
colourful : bool
If True, histograms are colourful :)
Returns
-------
(fig, axes)
"""
if analytes is None:
analytes = self.analytes
|
python
|
{
"resource": ""
}
|
q275737
|
analyse.trace_plots
|
test
|
def trace_plots(self, analytes=None, samples=None, ranges=False,
focus=None, outdir=None, filt=None, scale='log',
figsize=[10, 4], stats=False, stat='nanmean',
err='nanstd', subset='All_Analyses'):
"""
Plot analytes as a function of time.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
samples: optional, array_like or str
The sample(s) to plot. Defaults to all samples.
ranges : bool
Whether or not to show the signal/background regions
identified by 'autorange'.
focus : str
The focus 'stage' of the analysis to plot. Can be
'rawdata', 'despiked', 'signal', 'background',
'bkgsub', 'ratios' or 'calibrated'.
outdir : str
Path to a directory where you'd like the plots to be
saved. Defaults to 'reports/[focus]' in your data directory.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
scale : str
If 'log', plots the data on a log scale.
figsize : array_like
Array of length 2 specifying figure [width, height] in
inches.
stats : bool
Whether or not to overlay the mean and standard deviations
for each trace.
stat, err: str
The names of the statistic and error components to plot.
Defaults to 'nanmean' and 'nanstd'.
Returns
-------
None
"""
if focus is None:
focus = self.focus_stage
if outdir is None:
outdir = self.report_dir + '/' + focus
if not os.path.isdir(outdir):
os.mkdir(outdir)
|
python
|
{
"resource": ""
}
|
q275738
|
analyse.gradient_plots
|
test
|
def gradient_plots(self, analytes=None, win=15, samples=None, ranges=False,
focus=None, outdir=None,
figsize=[10, 4], subset='All_Analyses'):
"""
Plot analyte gradients as a function of time.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
samples: optional, array_like or str
The sample(s) to plot. Defaults to all samples.
ranges : bool
Whether or not to show the signal/background regions
identified by 'autorange'.
focus : str
The focus 'stage' of the analysis to plot. Can be
'rawdata', 'despiked', 'signal', 'background',
'bkgsub', 'ratios' or 'calibrated'.
outdir : str
Path to a directory where you'd like the plots to be
saved. Defaults to 'reports/[focus]' in your data directory.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
scale : str
If 'log', plots the data on a log scale.
figsize : array_like
Array of length 2 specifying figure [width, height] in
inches.
stats : bool
Whether or not to overlay the mean and standard deviations
for each trace.
stat, err: str
The names of the statistic and error components to plot.
Defaults to 'nanmean' and 'nanstd'.
Returns
-------
None
"""
if focus is None:
focus = self.focus_stage
if outdir is None:
outdir = self.report_dir + '/' + focus + '_gradient'
if not os.path.isdir(outdir):
|
python
|
{
"resource": ""
}
|
q275739
|
analyse.filter_reports
|
test
|
def filter_reports(self, analytes, filt_str='all', nbin=5, samples=None,
outdir=None, subset='All_Samples'):
"""
Plot filter reports for all filters that contain ``filt_str``
in the name.
"""
if outdir is None:
outdir = self.report_dir + '/filters/' + filt_str
if not os.path.isdir(self.report_dir + '/filters'):
os.mkdir(self.report_dir + '/filters')
if not os.path.isdir(outdir):
os.mkdir(outdir)
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
|
python
|
{
"resource": ""
}
|
q275740
|
analyse.sample_stats
|
test
|
def sample_stats(self, analytes=None, filt=True,
stats=['mean', 'std'],
eachtrace=True, csf_dict={}):
"""
Calculate sample statistics.
Returns samples, analytes, and arrays of statistics
of shape (samples, analytes). Statistics are calculated
from the 'focus' data variable, so output depends on how
the data have been processed.
Included stat functions:
* :func:`~latools.stat_fns.mean`: arithmetic mean
* :func:`~latools.stat_fns.std`: arithmetic standard deviation
* :func:`~latools.stat_fns.se`: arithmetic standard error
* :func:`~latools.stat_fns.H15_mean`: Huber mean (outlier removal)
* :func:`~latools.stat_fns.H15_std`: Huber standard deviation (outlier removal)
* :func:`~latools.stat_fns.H15_se`: Huber standard error (outlier removal)
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to calculate statistics for. Defaults to
all analytes.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
stats : array_like
List of functions or names (see above) of functions that
take a single array_like input and return a single statistic.
Functions should be able to cope with NaN values.
eachtrace : bool
Whether to calculate the statistics for each analysis
spot individually, or to produce per-sample means.
Default is True.
Returns
-------
None
Adds dict to analyse object containing samples, analytes and
functions and data.
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
self.stats = Bunch()
self.stats_calced = []
stat_fns = Bunch()
stat_dict = {'mean': np.nanmean,
'std': np.nanstd,
|
python
|
{
"resource": ""
}
|
q275741
|
analyse.getstats
|
test
|
def getstats(self, save=True, filename=None, samples=None, subset=None, ablation_time=False):
"""
Return pandas dataframe of all sample statistics.
"""
slst = []
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in self.stats_calced:
for nm in [n for n in samples if self.srm_identifier
not in n]:
if self.stats[nm][s].ndim == 2:
# make multi - index
reps = np.arange(self.stats[nm][s].shape[-1])
ss = np.array([s] * reps.size)
nms = np.array([nm] * reps.size)
# make sub - dataframe
stdf = pd.DataFrame(self.stats[nm][s].T,
columns=self.stats[nm]['analytes'],
index=[ss, nms, reps])
stdf.index.set_names(['statistic', 'sample', 'rep'],
inplace=True)
else:
stdf = pd.DataFrame(self.stats[nm][s],
index=self.stats[nm]['analytes'],
columns=[[s], [nm]]).T
stdf.index.set_names(['statistic', 'sample'],
|
python
|
{
"resource": ""
}
|
q275742
|
analyse._minimal_export_traces
|
test
|
def _minimal_export_traces(self, outdir=None, analytes=None,
samples=None, subset='All_Analyses'):
"""
Used for exporting minimal dataset. DON'T USE.
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
focus_stage = 'rawdata'
# ud = 'counts'
if not os.path.isdir(outdir):
os.mkdir(outdir)
for s in samples:
d = self.data[s].data[focus_stage]
out = Bunch()
for a in analytes:
out[a] = d[a]
out = pd.DataFrame(out, index=self.data[s].Time)
out.index.name = 'Time'
d = dateutil.parser.parse(self.data[s].meta['date'])
header = ['# Minimal Reproduction Dataset Exported from LATOOLS on %s' %
|
python
|
{
"resource": ""
}
|
q275743
|
analyse.export_traces
|
test
|
def export_traces(self, outdir=None, focus_stage=None, analytes=None,
samples=None, subset='All_Analyses', filt=False, zip_archive=False):
"""
Function to export raw data.
Parameters
----------
outdir : str
directory to save the traces. Defaults to 'main-dir-name_export'.
focus_stage : str
The name of the analysis stage to export.
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Defaults to the most recent stage of analysis.
analytes : str or array-like
Either a single analyte, or list of analytes to export.
Defaults to all analytes.
samples : str or array-like
Either a single sample name, or list of samples to export.
Defaults to all samples.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
if focus_stage is None:
focus_stage = self.focus_stage
if focus_stage in ['ratios', 'calibrated']:
analytes = [a for a in analytes if a != self.internal_standard]
if outdir is None:
outdir = os.path.join(self.export_dir, 'trace_export')
ud = {'rawdata': 'counts',
'despiked': 'counts',
'bkgsub': 'background corrected counts',
'ratios': 'counts/count {:s}',
'calibrated': 'mol/mol
|
python
|
{
"resource": ""
}
|
q275744
|
analyse.save_log
|
test
|
def save_log(self, directory=None, logname=None, header=None):
"""
Save analysis.lalog in specified location
"""
if directory is None:
directory = self.export_dir
if not os.path.isdir(directory):
|
python
|
{
"resource": ""
}
|
q275745
|
analyse.minimal_export
|
test
|
def minimal_export(self, target_analytes=None, path=None):
"""
Exports analysis parameters, standard info and a minimal dataset,
which can be imported by another user.
Parameters
----------
target_analytes : str or iterable
Which analytes to include in the export. If specified, the export
will contain these analytes, and all other analytes used during
data processing (e.g. during filtering). If not specified,
all analytes are exported.
path : str
Where to save the minimal export.
If it ends with .zip, a zip file is created.
If it's a folder, all data are exported to a folder.
"""
if target_analytes is None:
target_analytes = self.analytes
if isinstance(target_analytes, str):
target_analytes = [target_analytes]
self.minimal_analytes.update(target_analytes)
zip_archive = False
# set up data path
if path is None:
path = self.export_dir + '/minimal_export.zip'
if path.endswith('.zip'):
path = path.replace('.zip', '')
zip_archive = True
if not os.path.isdir(path):
os.mkdir(path)
# export data
self._minimal_export_traces(path + '/data', analytes=self.minimal_analytes)
# define analysis_log header
log_header = ['# Minimal Reproduction Dataset Exported from LATOOLS on %s' %
(time.strftime('%Y:%m:%d %H:%M:%S')),
'data_folder :: ./data/']
if hasattr(self, 'srmdat'):
log_header.append('srm_table :: ./srm.table')
# export srm table
els = np.unique([re.sub('[0-9]', '', a) for a in self.minimal_analytes])
srmdat = []
for e in els:
srmdat.append(self.srmdat.loc[self.srmdat.element == e, :])
|
python
|
{
"resource": ""
}
|
q275746
|
by_regex
|
test
|
def by_regex(file, outdir=None, split_pattern=None, global_header_rows=0, fname_pattern=None, trim_tail_lines=0, trim_head_lines=0):
"""
Split one long analysis file into multiple smaller ones.
Parameters
----------
file : str
The path to the file you want to split.
outdir : str
The directory to save the split files to.
If None, files are saved to a new directory
called 'split', which is created inside the
data directory.
split_pattern : regex string
A regular expression that will match lines in the
file that mark the start of a new section. Does
not have to match the whole line, but must provide
a positive match to the lines containing the pattern.
global_header_rows : int
How many rows at the start of the file to include
in each new sub-file.
fname_pattern : regex string
A regular expression that identifies a new file name
in the lines identified by split_pattern. If none,
files will be called 'noname_N'. The extension of the
main file will be used for all sub-files.
trim_head_lines : int
If greater than zero, this many lines are removed from the start of each segment
trim_tail_lines : int
If greater than zero, this many lines are removed from the end of each segment
Returns
-------
Path to new directory : str
"""
# create output directory
if outdir is None:
outdir = os.path.join(os.path.dirname(file), 'split')
if not os.path.exists(outdir):
os.mkdir(outdir)
# read input file
|
python
|
{
"resource": ""
}
|
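Because the `by_regex` row stops right before the file is read, here is a stripped-down sketch of the splitting logic the docstring describes: sections start at lines matching `split_pattern`, and each section is prefixed with the global header rows. File naming, trimming and writing to `outdir` are left out, and the function below is purely illustrative:

```python
import re

def split_sections(text, split_pattern, global_header_rows=0):
    """Split text into sections at lines matching split_pattern."""
    lines = text.splitlines(keepends=True)
    header, body = lines[:global_header_rows], lines[global_header_rows:]
    sections = []
    for line in body:
        if re.search(split_pattern, line):
            sections.append(list(header))   # new section, seeded with the global header
        if sections:
            sections[-1].append(line)
    return sections

parts = split_sections("hdr\nSAMPLE A\n1\nSAMPLE B\n2\n", r'^SAMPLE', global_header_rows=1)
# parts[0] -> ['hdr\n', 'SAMPLE A\n', '1\n']; parts[1] -> ['hdr\n', 'SAMPLE B\n', '2\n']
```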
q275747
|
Foldable.fold_map
|
test
|
def fold_map(self, fa: F[A], z: B, f: Callable[[A], B], g: Callable[[Z, B], Z]=operator.add) -> Z:
''' map `f` over the traversable, then fold over the result
|
python
|
{
"resource": ""
}
|
q275748
|
pca_plot
|
test
|
def pca_plot(pca, dt, xlabs=None, mode='scatter', lognorm=True):
"""
Plot a fitted PCA, and all components.
"""
nc = pca.n_components
f = np.arange(pca.n_features_)
cs = list(itertools.combinations(range(nc), 2))
ind = ~np.apply_along_axis(any, 1, np.isnan(dt))
cylim = (pca.components_.min(), pca.components_.max())
yd = cylim[1] - cylim[0]
# Make figure
fig, axs = plt.subplots(nc, nc, figsize=[3 * nc, nc * 3], tight_layout=True)
for x, y in zip(*np.triu_indices(nc)):
if x == y:
tax = axs[x, y]
tax.bar(f, pca.components_[x], 0.8)
tax.set_xticks([])
tax.axhline(0, zorder=-1, c=(0,0,0,0.6))
# labels
tax.set_ylim(cylim[0] - 0.2 * yd,
cylim[1] + 0.2 * yd)
for xi, yi, lab in zip(f, pca.components_[x], xlabs):
if yi > 0:
yo = yd * 0.03
va = 'bottom'
else:
yo = yd * -0.02
va = 'top'
tax.text(xi, yi + yo, lab, ha='center', va=va, rotation=90, fontsize=8)
else:
xv = dt[ind, x]
yv = dt[ind, y]
if mode == 'scatter':
|
python
|
{
"resource": ""
}
|
q275749
|
bayes_scale
|
test
|
def bayes_scale(s):
"""
Remove mean and divide by standard deviation, using bayes_mvs statistics.
"""
if sum(~np.isnan(s)) > 1:
bm, bv, bs = bayes_mvs(s[~np.isnan(s)])
|
python
|
{
"resource": ""
}
|
q275750
|
median_scaler
|
test
|
def median_scaler(s):
"""
Remove median, divide by IQR.
"""
if sum(~np.isnan(s)) > 2:
ss = s[~np.isnan(s)]
median = np.median(ss)
|
python
|
{
"resource": ""
}
|
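The `median_scaler` row cuts off right after the median is computed. A plausible completion of the documented behaviour ("remove median, divide by IQR"), written as a separate sketch rather than a claim about the original code:

```python
import numpy as np

def median_iqr_scale(s):
    """Robust scaling: (s - median) / IQR, leaving NaNs in place."""
    s = np.asarray(s, dtype=float)
    out = np.full_like(s, np.nan)
    good = ~np.isnan(s)
    if good.sum() > 2:
        ss = s[good]
        median = np.median(ss)
        iqr = np.percentile(ss, 75) - np.percentile(ss, 25)
        out[good] = (ss - median) / iqr
    return out
```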
q275751
|
noise_despike
|
test
|
def noise_despike(sig, win=3, nlim=24., maxiter=4):
"""
Apply standard deviation filter to remove anomalous values.
Parameters
----------
win : int
The window used to calculate rolling statistics.
nlim : float
The number of standard deviations above the rolling
mean beyond which data are considered outliers.
Returns
-------
None
"""
if win % 2 != 1:
win += 1 # win must be odd
kernel = np.ones(win) / win # make convolution kernel
over = np.ones(len(sig), dtype=bool) # initialize bool array
# pad edges to avoid edge-effects
npad = int((win - 1) / 2)
over[:npad] = False
over[-npad:] = False
# set up monitoring
nloops = 0
# do the despiking
while any(over) and (nloops < maxiter):
rmean = np.convolve(sig, kernel, 'valid') # mean by convolution
rstd = rmean**0.5 # std = sqrt(signal), because count statistics
# identify where signal > mean + std * nlim (OR signal < mean - std *
# nlim)
# | (sig[npad:-npad] < rmean - nlim * rstd)
|
python
|
{
"resource": ""
}
|
q275752
|
expdecay_despike
|
test
|
def expdecay_despike(sig, expdecay_coef, tstep, maxiter=3):
"""
Apply exponential decay filter to remove physically impossible data based on instrumental washout.
The filter is re-applied until no more points are removed, or maxiter is reached.
Parameters
----------
expdecay_coef : float
Exponent (decay coefficient) used in the filter.
tstep : float
The time increment between data points.
maxiter : int
The maximum number of times the filter should be applied.
Returns
-------
None
"""
# determine rms noise of data
noise = np.std(sig[:5]) # initially, calculated based on first 5 points
# expand the selection up to 50 points, unless it dramatically increases
# the std (i.e. catches the 'laser on' region)
for i in [10, 20, 30, 50]:
inoise = np.std(sig[:i])
if inoise < 1.5 * noise:
noise = inoise
rms_noise3 = 3 * noise
i = 0
f = True
while (i < maxiter) and f:
# calculate low and high possibles values based on exponential decay
siglo = np.roll(sig * np.exp(tstep * expdecay_coef), 1)
|
python
|
{
"resource": ""
}
|
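To unpack the washout idea behind `expdecay_despike`: with a negative decay coefficient and a time step, each point can fall no faster than the previous point times exp(coef * tstep). A sketch of the bounds implied by that reading; the symmetric upper bound and how the "3 x rms noise" tolerance is applied are assumptions about the truncated remainder:

```python
import numpy as np

def expdecay_bounds(sig, expdecay_coef, tstep):
    """Lower/upper limits allowed by exponential washout between adjacent points."""
    decay = np.exp(tstep * expdecay_coef)     # expdecay_coef < 0, so 0 < decay < 1
    siglo = np.roll(sig * decay, 1)           # slowest allowed fall from the previous point
    sighi = np.roll(sig / decay, -1)          # fastest allowed rise towards the next point
    return siglo, sighi
```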
q275753
|
filt.add
|
test
|
def add(self, name, filt, info='', params=(), setn=None):
"""
Add filter.
Parameters
----------
name : str
filter name
filt : array_like
boolean filter array
info : str
informative description of the filter
params : tuple
parameters used to make the filter
Returns
-------
None
"""
iname = '{:.0f}_'.format(self.n) + name
self.index[self.n] = iname
if setn is None:
setn = self.maxset + 1
|
python
|
{
"resource": ""
}
|
q275754
|
filt.remove
|
test
|
def remove(self, name=None, setn=None):
"""
Remove filter.
Parameters
----------
name : str
name of the filter to remove
setn : int or True
int: number of set to remove
True: remove all filters in set that 'name' belongs to
Returns
-------
None
"""
if isinstance(name, int):
name = self.index[name]
if setn is not None:
name = self.sets[setn]
del self.sets[setn]
elif isinstance(name, (int, str)):
name = [name]
if setn is True:
for n in name:
for k, v in self.sets.items():
if n in v:
|
python
|
{
"resource": ""
}
|
q275755
|
filt.clear
|
test
|
def clear(self):
"""
Clear all filters.
"""
self.components = {}
self.info = {}
self.params = {}
self.switches = {}
self.keys = {}
self.index = {}
|
python
|
{
"resource": ""
}
|
q275756
|
filt.clean
|
test
|
def clean(self):
"""
Remove unused filters.
"""
for f in sorted(self.components.keys()):
|
python
|
{
"resource": ""
}
|
q275757
|
filt.fuzzmatch
|
test
|
def fuzzmatch(self, fuzzkey, multi=False):
"""
Identify a filter by fuzzy string matching.
Partial ('fuzzy') matching performed by `fuzzywuzzy.fuzzy.ratio`
Parameters
----------
fuzzkey : str
A string that partially matches one filter name more than the others.
Returns
-------
The name of the most closely matched filter. : str
"""
keys, ratios = np.array([(f, seqm(None, fuzzkey, f).ratio()) for f in self.components.keys()]).T
mratio = max(ratios)
if multi:
return keys[ratios == mratio]
|
python
|
{
"resource": ""
}
|
q275758
|
filt.make_fromkey
|
test
|
def make_fromkey(self, key):
"""
Make filter from logical expression.
Takes a logical expression as an input, and returns a filter. Used for advanced
filtering, where combinations of nested and/or filters are desired. Filter names must
exactly match the names listed by print(filt).
Example: ``key = '(Filter_1 | Filter_2) & Filter_3'``
is equivalent to:
``(Filter_1 OR Filter_2) AND Filter_3``
statements in parentheses are evaluated first.
Parameters
----------
key : str
logical expression describing filter
|
python
|
{
"resource": ""
}
|
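The `make_fromkey` docstring describes evaluating a logical expression over named boolean filters. A minimal, self-contained sketch of that idea using plain substitution and numpy's element-wise operators (the real method resolves names via its index and exact matching, which is not reproduced here):

```python
import numpy as np

def combine_filters(key, components):
    """Evaluate e.g. '(Filter_1 | Filter_2) & Filter_3' against boolean arrays."""
    expr = key
    for name in sorted(components, key=len, reverse=True):  # longest names first
        expr = expr.replace(name, 'components[{!r}]'.format(name))
    return eval(expr, {'__builtins__': {}}, {'components': components})

components = {'Filter_1': np.array([True, False, True]),
              'Filter_2': np.array([False, False, True]),
              'Filter_3': np.array([True, True, False])}
mask = combine_filters('(Filter_1 | Filter_2) & Filter_3', components)
# mask -> array([ True, False, False])
```

Using `eval` keeps the sketch short; a production version would parse the expression instead.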
q275759
|
filt.grab_filt
|
test
|
def grab_filt(self, filt, analyte=None):
"""
Flexible access to specific filter using any key format.
Parameters
----------
filt : str, dict or bool
either logical filter expression, dict of expressions,
or a boolean
analyte : str
name of analyte the filter is for.
Returns
-------
array_like
boolean filter
"""
if isinstance(filt, str):
if filt in self.components:
if analyte is None:
return self.components[filt]
else:
if self.switches[analyte][filt]:
return self.components[filt]
else:
try:
ind = self.make_fromkey(filt)
except KeyError:
|
python
|
{
"resource": ""
}
|
q275760
|
filt.get_info
|
test
|
def get_info(self):
"""
Get info for all filters.
"""
out = ''
for k in sorted(self.components.keys()):
|
python
|
{
"resource": ""
}
|
q275761
|
_log
|
test
|
def _log(func):
"""
Function for logging method calls and parameters
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
a = func(self, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
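The `_log` wrapper above is cut off right after the wrapped call. A guess at the general shape of such a logging decorator, recording the call on the instance; the attribute name `self.log` and the record format are assumptions, not taken from latools:

```python
from functools import wraps

def log_calls(func):
    """Append a record of each method call (name, args, kwargs) to self.log."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        if hasattr(self, 'log'):
            self.log.append('{} :: args={} kwargs={}'.format(func.__name__, args, kwargs))
        return result
    return wrapper
```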
q275762
|
write_logfile
|
test
|
def write_logfile(log, header, file_name):
"""
Write an analysis log to a file.
Parameters
----------
log : list
latools.analyse analysis log
header : list
File header lines.
file_name : str
Destination file. If no file extension
specified, uses '.lalog'
Returns
|
python
|
{
"resource": ""
}
|
q275763
|
read_logfile
|
test
|
def read_logfile(log_file):
"""
Reads a latools analysis.log file, and returns dicts of arguments.
Parameters
----------
log_file : str
Path to an analysis.log file produced by latools.
Returns
-------
runargs, paths : tuple
Two dictionaries. runargs contains all the arguments required to run each step
of analysis in the form (function_name, {'args': (), 'kwargs': {}}). paths contains
the locations of the data directory and the SRM database used for analysis.
"""
dirname = os.path.dirname(log_file) + '/'
with open(log_file, 'r') as f:
rlog = f.readlines()
hashind = [i for i, n in enumerate(rlog) if '#' in n]
pathread = re.compile('(.*) :: (.*)\n')
paths = (pathread.match(l).groups() for l in rlog[hashind[0] + 1:hashind[-1]] if pathread.match(l))
paths = {k: os.path.join(dirname, v) for k, v in paths}
# paths = {k: os.path.abspath(v) for k, v in paths}
|
python
|
{
"resource": ""
}
|
q275764
|
autologin
|
test
|
def autologin(function, timeout=TIMEOUT):
"""Decorator that will try to login and redo an action before failing."""
@wraps(function)
async def wrapper(self, *args, **kwargs):
"""Wrap a function with timeout."""
try:
async with async_timeout.timeout(timeout):
return await function(self, *args, **kwargs)
except (asyncio.TimeoutError, ClientError, Error):
pass
_LOGGER.debug("autologin")
try:
|
python
|
{
"resource": ""
}
|
q275765
|
get_information
|
test
|
async def get_information():
"""Example of printing the inbox."""
jar = aiohttp.CookieJar(unsafe=True)
websession = aiohttp.ClientSession(cookie_jar=jar)
modem =
|
python
|
{
"resource": ""
}
|
q275766
|
send_message
|
test
|
async def send_message():
"""Example of sending a message."""
jar = aiohttp.CookieJar(unsafe=True)
websession = aiohttp.ClientSession(cookie_jar=jar)
modem = eternalegypt.Modem(hostname=sys.argv[1], websession=websession)
|
python
|
{
"resource": ""
}
|
q275767
|
parse
|
test
|
def parse(file_or_string):
"""Parse a file-like object or string.
Args:
file_or_string (file, str): File-like object or string.
Returns:
ParseResults: instance of pyparsing parse results.
"""
from mysqlparse.grammar.sql_file import sql_file_syntax
if hasattr(file_or_string,
|
python
|
{
"resource": ""
}
|
q275768
|
nbviewer_link
|
test
|
def nbviewer_link(url):
"""Return the link to the Jupyter nbviewer for the given notebook url"""
if six.PY2:
from urlparse import urlparse as urlsplit
|
python
|
{
"resource": ""
}
|
q275769
|
NotebookProcessor.thumbnail_div
|
test
|
def thumbnail_div(self):
"""The string for creating the thumbnail of this example"""
return self.THUMBNAIL_TEMPLATE.format(
|
python
|
{
"resource": ""
}
|
q275770
|
NotebookProcessor.code_div
|
test
|
def code_div(self):
"""The string for creating a code example for the gallery"""
code_example = self.code_example
if code_example is None:
return None
return self.CODE_TEMPLATE.format(
|
python
|
{
"resource": ""
}
|
q275771
|
NotebookProcessor.code_example
|
test
|
def code_example(self):
"""The code example out of the notebook metadata"""
if self._code_example is not None:
|
python
|
{
"resource": ""
}
|
q275772
|
NotebookProcessor.url
|
test
|
def url(self):
"""The url on jupyter nbviewer for this notebook or None if unknown"""
if self._url is not None:
url = self._url
else:
|
python
|
{
"resource": ""
}
|
q275773
|
NotebookProcessor.get_out_file
|
test
|
def get_out_file(self, ending='rst'):
"""get the output file with the specified `ending`"""
|
python
|
{
"resource": ""
}
|
q275774
|
NotebookProcessor.process_notebook
|
test
|
def process_notebook(self, disable_warnings=True):
"""Process the notebook and create all the pictures and files
This method runs the notebook using the :mod:`nbconvert` and
:mod:`nbformat` modules. It creates the :attr:`outfile` notebook,
a python and a rst file"""
infile = self.infile
outfile = self.outfile
in_dir = os.path.dirname(infile) + os.path.sep
odir = os.path.dirname(outfile) + os.path.sep
create_dirs(os.path.join(odir, 'images'))
ep = nbconvert.preprocessors.ExecutePreprocessor(
timeout=300)
cp = nbconvert.preprocessors.ClearOutputPreprocessor(
timeout=300)
self.nb = nb = nbformat.read(infile, nbformat.current_nbformat)
# disable warnings in the rst file
if disable_warnings:
for i, cell in enumerate(nb.cells):
if cell['cell_type'] == 'code':
cell = cell.copy()
break
cell = cell.copy()
cell.source = """
import logging
logging.captureWarnings(True)
logging.getLogger('py.warnings').setLevel(logging.ERROR)
"""
nb.cells.insert(i, cell)
# write and process rst_file
if self.preprocess:
t = dt.datetime.now()
logger.info('Processing %s', self.infile)
try:
|
python
|
{
"resource": ""
}
|
q275775
|
NotebookProcessor.create_py
|
test
|
def create_py(self, nb, force=False):
"""Create the python script from the notebook node"""
# Although we would love to simply use ``nbconvert.export_python(nb)``
# this causes troubles in other cells processed by the ipython
# directive. Instead of getting something like ``Out [5]:``, we get
# some weird like '[0;31mOut[[1;31m5[0;31m]: [0m' which look like
# color information if we allow the call of nbconvert.export_python
if list(map(int, re.findall('\d+', nbconvert.__version__))) >= [4, 2]:
py_file = os.path.basename(self.py_file)
else:
py_file = self.py_file
try:
level = logger.logger.level
|
python
|
{
"resource": ""
}
|
q275776
|
NotebookProcessor.data_download
|
test
|
def data_download(self, files):
"""Create the rst string to download supplementary data"""
if len(files) > 1:
return self.DATA_DOWNLOAD % (
('\n\n' + ' '*8) + ('\n' + ' '*8).join(
|
python
|
{
"resource": ""
}
|
q275777
|
NotebookProcessor.create_thumb
|
test
|
def create_thumb(self):
"""Create the thumbnail for html output"""
thumbnail_figure = self.copy_thumbnail_figure()
if thumbnail_figure is not None:
if isinstance(thumbnail_figure, six.string_types):
|
python
|
{
"resource": ""
}
|
q275778
|
NotebookProcessor.get_description
|
test
|
def get_description(self):
"""Get summary and description of this notebook"""
def split_header(s, get_header=True):
s = s.lstrip().rstrip()
parts = s.splitlines()
if parts[0].startswith('#'):
if get_header:
header = re.sub('#+\s*', '', parts.pop(0))
if not parts:
return header, ''
else:
header = ''
rest = '\n'.join(parts).lstrip().split('\n\n')
desc = rest[0].replace('\n', ' ')
return header, desc
else:
if get_header:
|
python
|
{
"resource": ""
}
|
q275779
|
NotebookProcessor.scale_image
|
test
|
def scale_image(self, in_fname, out_fname, max_width, max_height):
"""Scales an image with the same aspect ratio centered in an
image with a given max_width and max_height
if in_fname == out_fname the image can only be scaled down
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
|
python
|
{
"resource": ""
}
|
q275780
|
NotebookProcessor.save_thumbnail
|
test
|
def save_thumbnail(self, image_path):
"""Save the thumbnail image"""
thumb_dir = os.path.join(os.path.dirname(image_path), 'thumb')
create_dirs(thumb_dir)
thumb_file = os.path.join(thumb_dir,
'%s_thumb.png' %
|
python
|
{
"resource": ""
}
|
q275781
|
NotebookProcessor.copy_thumbnail_figure
|
test
|
def copy_thumbnail_figure(self):
"""The integer of the thumbnail figure"""
ret = None
if self._thumbnail_figure is not None:
if not isstring(self._thumbnail_figure):
ret = self._thumbnail_figure
else:
ret = osp.join(osp.dirname(self.outfile),
osp.basename(self._thumbnail_figure))
copyfile(self._thumbnail_figure, ret)
return ret
elif hasattr(self.nb.metadata, 'thumbnail_figure'):
if not isstring(self.nb.metadata.thumbnail_figure):
ret =
|
python
|
{
"resource": ""
}
|
q275782
|
Gallery.get_url
|
test
|
def get_url(self, nbfile):
"""Return the url corresponding to the given notebook file
Parameters
----------
nbfile: str
The path of the notebook relative to the corresponding
:attr:``in_dir``
Returns
-------
str or None
The url or None if no url has been specified
"""
urls = self.urls
|
python
|
{
"resource": ""
}
|
q275783
|
Command.get_db_change_languages
|
test
|
def get_db_change_languages(self, field_name, db_table_fields):
""" get only db changes fields """
for lang_code, lang_name in get_languages():
if get_real_fieldname(field_name, lang_code) not in db_table_fields:
yield lang_code
for db_table_field in db_table_fields:
|
python
|
{
"resource": ""
}
|
q275784
|
default_value
|
test
|
def default_value(field):
'''
When accessing to the name of the field itself, the value
in the current language will be returned. Unless it's set,
the value in the default language will be returned.
'''
def default_value_func(self):
attname = lambda x: get_real_fieldname(field, x)
if getattr(self, attname(get_language()), None):
result = getattr(self, attname(get_language()))
elif getattr(self, attname(get_language()[:2]), None):
result = getattr(self, attname(get_language()[:2]))
else:
default_language = fallback_language()
|
python
|
{
"resource": ""
}
|
q275785
|
process
|
test
|
def process(thumbnail_file, size, **kwargs):
"""
Post processors are functions that receive file objects,
perform necessary operations and return the results as
|
python
|
{
"resource": ""
}
|
q275786
|
ImageField.pre_save
|
test
|
def pre_save(self, model_instance, add):
"""
Process the source image through the defined processors.
"""
file = getattr(model_instance, self.attname)
if file and not file._committed:
image_file = file
|
python
|
{
"resource": ""
}
|
q275787
|
ThumbnailManager._refresh_cache
|
test
|
def _refresh_cache(self):
"""Populate self._thumbnails."""
self._thumbnails = {}
metadatas = self.metadata_backend.get_thumbnails(self.source_image.name)
|
python
|
{
"resource": ""
}
|
q275788
|
ThumbnailManager.all
|
test
|
def all(self):
"""
Return all thumbnails in a dict format.
"""
if self._thumbnails is not None:
|
python
|
{
"resource": ""
}
|
q275789
|
ThumbnailManager.create
|
test
|
def create(self, size):
"""
Creates and return a thumbnail of a given size.
"""
thumbnail = images.create(self.source_image.name, size,
|
python
|
{
"resource": ""
}
|
q275790
|
ThumbnailManager.delete
|
test
|
def delete(self, size):
"""
Deletes a thumbnail of a given size
"""
images.delete(self.source_image.name, size,
|
python
|
{
"resource": ""
}
|
q275791
|
create
|
test
|
def create(source_name, size, metadata_backend=None, storage_backend=None):
"""
Creates a thumbnail file and its relevant metadata. Returns a
Thumbnail instance.
"""
if storage_backend is None:
storage_backend = backends.storage.get_backend()
if metadata_backend is None:
|
python
|
{
"resource": ""
}
|
q275792
|
get
|
test
|
def get(source_name, size, metadata_backend=None, storage_backend=None):
"""
Returns a Thumbnail instance, or None if thumbnail does not yet exist.
"""
if storage_backend is None:
storage_backend = backends.storage.get_backend()
if metadata_backend is None:
|
python
|
{
"resource": ""
}
|
q275793
|
delete
|
test
|
def delete(source_name, size, metadata_backend=None, storage_backend=None):
"""
Deletes a thumbnail file and its relevant metadata.
"""
if storage_backend is None:
storage_backend = backends.storage.get_backend()
if metadata_backend is None:
|
python
|
{
"resource": ""
}
|
q275794
|
LoopbackProvider.received
|
test
|
def received(self, src, body):
""" Simulate an incoming message
:type src: str
:param src: Message source
:type body: str | unicode
:param body: Message body
:rtype: IncomingMessage
"""
# Create the message
|
python
|
{
"resource": ""
}
|
q275795
|
LoopbackProvider.subscribe
|
test
|
def subscribe(self, number, callback):
""" Register a virtual subscriber which receives messages to the matching number.
:type number: str
:param number: Subscriber phone number
:type callback: callable
:param callback: A callback(OutgoingMessage) which handles the messages directed to the subscriber.
|
python
|
{
"resource": ""
}
|
q275796
|
MessageStatus.states
|
test
|
def states(self):
""" Get the set of states. Mostly used for pretty printing
:rtype: set
:returns: Set of 'accepted', 'delivered', 'expired', 'error'
|
python
|
{
"resource": ""
}
|
q275797
|
Gateway.add_provider
|
test
|
def add_provider(self, name, Provider, **config):
""" Register a provider on the gateway
The first provider defined becomes the default one: used in case the routing function has no better idea.
:type name: str
:param name: Provider name that will be used to uniquely identify it
:type Provider: type
:param Provider: Provider class that inherits from `smsframework.IProvider`
:param config: Provider configuration. Please refer to the Provider documentation.
:rtype: IProvider
:returns: The created provider
"""
assert issubclass(Provider, IProvider), 'Provider does not implement IProvider'
assert
|
python
|
{
"resource": ""
}
|
q275798
|
Gateway.send
|
test
|
def send(self, message):
""" Send a message object
:type message: data.OutgoingMessage
:param message: The message to send
:rtype: data.OutgoingMessage
:returns: The sent message with populated fields
:raises AssertionError: wrong provider name encountered (returned by the router, or provided to OutgoingMessage)
:raises MessageSendError: generic errors
:raises AuthError: provider authentication failed
:raises LimitsError: sending limits exceeded
:raises CreditError: not enough money on the account
"""
# Which provider to use?
provider_name = self._default_provider # default
if message.provider is not None:
assert message.provider in self._providers, \
'Unknown provider specified in OutgoingMessage.provider: {}'.format(message.provider)
provider = self.get_provider(message.provider)
else:
# Apply routing
if message.routing_values is not None: # Use the default provider when no routing values are given
# Routing values are present
|
python
|
{
"resource": ""
}
|
q275799
|
Gateway.receiver_blueprint_for
|
test
|
def receiver_blueprint_for(self, name):
""" Get a Flask blueprint for the named provider that handles incoming messages & status reports
Note: this requires Flask microframework.
:rtype: flask.blueprints.Blueprint
:returns: Flask Blueprint, fully functional
:raises KeyError: provider not found
:raises NotImplementedError: Provider does not implement a receiver
"""
# Get the provider & blueprint
provider = self.get_provider(name)
bp = provider.make_receiver_blueprint()
# Register a Flask handler that initializes `g.provider`
|
python
|
{
"resource": ""
}
|