text_prompt | code_prompt
---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def optimize_order(self, data, min_p=1, max_p=None):
"""Determine optimal model order by minimizing the mean squared generalization error. Parameters data : array, shape (n_trials, n_channels, n_samples) Epoched data set on which to optimize the model order. At least two trials are required. min_p : int Minimal model order to check. max_p : int Maximum model order to check """
|
data = np.asarray(data)
if data.shape[0] < 2:
raise ValueError("At least two trials are required.")
msge, prange = [], []
par, func = parallel_loop(_get_msge_with_gradient, n_jobs=self.n_jobs,
verbose=self.verbose)
if self.n_jobs is None:
npar = 1
elif self.n_jobs < 0:
npar = 4 # is this a sane default?
else:
npar = self.n_jobs
p = min_p
while True:
result = par(func(data, self.delta, self.xvschema, 1, p_)
for p_ in range(p, p + npar))
j, k = zip(*result)
prange.extend(range(p, p + npar))
msge.extend(j)
p += npar
if max_p is None:
if len(msge) >= 2 and msge[-1] > msge[-2]:
break
else:
if prange[-1] >= max_p:
i = prange.index(max_p) + 1
prange = prange[:i]
msge = msge[:i]
break
self.p = prange[np.argmin(msge)]
return zip(prange, msge)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromvector(cls, v):
"""Initialize from euclidean vector"""
|
w = v.normalized()
return cls(w.x, w.y, w.z)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list(self):
"""position in 3d space"""
|
return [self._pos3d.x, self._pos3d.y, self._pos3d.z]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def distance(self, other):
"""Distance to another point on the sphere"""
|
return math.acos(self._pos3d.dot(other.vector))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def distances(self, points):
"""Distance to other points on the sphere"""
|
return [math.acos(self._pos3d.dot(p.vector)) for p in points]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromiterable(cls, itr):
"""Initialize from iterable"""
|
x, y, z = itr
return cls(x, y, z)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromvector(cls, v):
"""Copy another vector"""
|
return cls(v.x, v.y, v.z)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def norm2(self):
"""Squared norm of the vector"""
|
return self.x * self.x + self.y * self.y + self.z * self.z
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rotate(self, l, u):
"""rotate l radians around axis u"""
|
cl = math.cos(l)
sl = math.sin(l)
x = (cl + u.x * u.x * (1 - cl)) * self.x + (u.x * u.y * (1 - cl) - u.z * sl) * self.y + (
u.x * u.z * (1 - cl) + u.y * sl) * self.z
y = (u.y * u.x * (1 - cl) + u.z * sl) * self.x + (cl + u.y * u.y * (1 - cl)) * self.y + (
u.y * u.z * (1 - cl) - u.x * sl) * self.z
z = (u.z * u.x * (1 - cl) - u.y * sl) * self.x + (u.z * u.y * (1 - cl) + u.x * sl) * self.y + (
cl + u.z * u.z * (1 - cl)) * self.z
self.x, self.y, self.z = x, y, z
return self
|
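The component-wise expressions above are Rodrigues' rotation formula written out in full. Below is a standalone sketch of the same rotation in matrix form (assuming only numpy; the vector class used above is not shown here, and `rodrigues_rotate` is a hypothetical helper name):

```python
import numpy as np

def rodrigues_rotate(v, u, l):
    """Rotate vector v by l radians around the unit axis u (Rodrigues' formula)."""
    u, v = np.asarray(u, float), np.asarray(v, float)
    K = np.array([[0, -u[2], u[1]],
                  [u[2], 0, -u[0]],
                  [-u[1], u[0], 0]])  # cross-product matrix of u
    R = np.eye(3) + np.sin(l) * K + (1 - np.cos(l)) * K.dot(K)
    return R.dot(v)

# Rotating the x unit vector by pi/2 around z gives the y unit vector:
print(rodrigues_rotate([1, 0, 0], [0, 0, 1], np.pi / 2))  # ~[0, 1, 0]
```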
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cuthill_mckee(matrix):
"""Implementation of the Cuthill-McKee algorithm. Permute a symmetric binary matrix into a band matrix form with a small bandwidth. Parameters matrix : ndarray, dtype=bool, shape = [n, n] The matrix is internally converted to a symmetric matrix by setting each element [i,j] to True if either [i,j] or [j,i] evaluates to true. Returns ------- order : list of int Permutation intices Examples -------- array([[0, 0, 1, 1], [0, 0, 0, 0], [1, 0, 1, 0], [1, 0, 0, 0]]) array([[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 1]]) """
|
matrix = np.atleast_2d(matrix)
n, m = matrix.shape
assert(n == m)
    # make sure the matrix is really symmetric. This is equivalent to
    # converting a directed adjacency matrix into an undirected adjacency matrix.
matrix = np.logical_or(matrix, matrix.T)
degree = np.sum(matrix, 0)
order = [np.argmin(degree)]
for i in range(n):
adj = np.nonzero(matrix[order[i]])[0]
adj = [a for a in adj if a not in order]
if not adj:
idx = [i for i in range(n) if i not in order]
order.append(idx[np.argmin(degree[idx])])
else:
if len(adj) == 1:
order.append(adj[0])
else:
adj = np.asarray(adj)
i = adj[np.argsort(degree[adj])]
order.extend(i.tolist())
if len(order) == n:
break
return order
|
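Running the function above on the docstring's example matrix reproduces the documented permutation and the banded result (a quick check, assuming numpy is imported as np):

```python
import numpy as np

A = np.array([[0, 0, 1, 1],
              [0, 0, 0, 0],
              [1, 0, 1, 0],
              [1, 0, 0, 0]], dtype=bool)
order = cuthill_mckee(A)
print([int(i) for i in order])         # [1, 3, 0, 2]
print(A[order][:, order].astype(int))  # the banded matrix from the docstring
```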
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connectivity(measure_names, b, c=None, nfft=512):
"""Calculate connectivity measures. Parameters measure_names : str or list of str Name(s) of the connectivity measure(s) to calculate. See :class:`Connectivity` for supported measures. b : array, shape (n_channels, n_channels * model_order) VAR model coefficients. See :ref:`var-model-coefficients` for details about the arrangement of coefficients. c : array, shape (n_channels, n_channels), optional Covariance matrix of the driving noise process. Identity matrix is used if set to None (default). nfft : int, optional Number of frequency bins to calculate. Note that these points cover the range between 0 and half the sampling rate. Returns ------- result : array, shape (n_channels, n_channels, `nfft`) An array of shape (m, m, nfft) is returned if measures is a string. If measures is a list of strings, a dictionary is returned, where each key is the name of the measure, and the corresponding values are arrays of shape (m, m, nfft). Notes ----- When using this function, it is more efficient to get several measures at once than calling the function multiple times. Examples -------- """
|
con = Connectivity(b, c, nfft)
try:
return getattr(con, measure_names)()
except TypeError:
return dict((m, getattr(con, m)()) for m in measure_names)
|
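A short usage sketch (assuming the `Connectivity` class from this module is available alongside the function): a bivariate VAR(1) coefficient matrix, dispatched once as a string and once as a list of measure names:

```python
import numpy as np

b = np.array([[0.9, 0.0],
              [0.5, 0.3]])          # shape (n_channels, n_channels * model_order)
pdc = connectivity('PDC', b, nfft=64)
print(pdc.shape)                    # (2, 2, 64)
both = connectivity(['PDC', 'DTF'], b, nfft=64)
print(sorted(both.keys()))          # ['DTF', 'PDC']
```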
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Cinv(self):
"""Inverse of the noise covariance."""
|
try:
return np.linalg.inv(self.c)
        except np.linalg.LinAlgError:
print('Warning: non-invertible noise covariance matrix c.')
return np.eye(self.c.shape[0])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def A(self):
"""Spectral VAR coefficients. .. math:: \mathbf{A}(f) = \mathbf{I} - \sum_{k=1}^{p} \mathbf{a}^{(k)} \mathrm{e}^{-2\pi f} """
|
return fft(np.dstack([np.eye(self.m), -self.b]),
self.nfft * 2 - 1)[:, :, :self.nfft]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def S(self):
"""Cross-spectral density. .. math:: \mathbf{S}(f) = \mathbf{H}(f) \mathbf{C} \mathbf{H}'(f) """
|
if self.c is None:
raise RuntimeError('Cross-spectral density requires noise '
'covariance matrix c.')
H = self.H()
# TODO: can we do that more efficiently?
S = np.empty(H.shape, dtype=H.dtype)
for f in range(H.shape[2]):
S[:, :, f] = H[:, :, f].dot(self.c).dot(H[:, :, f].conj().T)
return S
|
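Regarding the TODO above: the frequency loop can likely be collapsed into a single einsum contraction. A standalone equivalence check (a sketch, not the library's code):

```python
import numpy as np

m, nfft = 3, 8
H = np.random.randn(m, m, nfft) + 1j * np.random.randn(m, m, nfft)
c = np.eye(m)
S_loop = np.empty_like(H)
for f in range(nfft):
    S_loop[:, :, f] = H[:, :, f].dot(c).dot(H[:, :, f].conj().T)
# S_ij(f) = sum_{k,l} H_ik(f) C_kl conj(H_jl(f)), broadcast over frequency:
S_fast = np.einsum('ik..., kl, jl... -> ij...', H, c, H.conj())
print(np.allclose(S_loop, S_fast))  # True
```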
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def G(self):
"""Inverse cross-spectral density. .. math:: \mathbf{G}(f) = \mathbf{A}(f) \mathbf{C}^{-1} \mathbf{A}'(f) """
|
if self.c is None:
raise RuntimeError('Inverse cross spectral density requires '
'invertible noise covariance matrix c.')
A = self.A()
# TODO: can we do that more efficiently?
G = np.einsum('ji..., jk... ->ik...', A.conj(), self.Cinv())
G = np.einsum('ij..., jk... ->ik...', G, A)
return G
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pCOH(self):
"""Partial coherence. .. math:: \mathrm{pCOH}_{ij}(f) = \\frac{G_{ij}(f)} {\sqrt{G_{ii}(f) G_{jj}(f)}} References P. J. Franaszczuk, K. J. Blinowska, M. Kowalczyk. The application of parametric multichannel spectral estimates in the study of electrical brain activity. Biol. Cybernetics 51(4):
239-247, 1985. """
|
G = self.G()
# TODO: can we do that more efficiently?
return G / np.sqrt(np.einsum('ii..., jj... ->ij...', G, G))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def PDC(self):
"""Partial directed coherence. .. math:: \mathrm{PDC}_{ij}(f) = \\frac{A_{ij}(f)} {\sqrt{A_{:j}'(f) A_{:j}(f)}} References L. A. Baccalá, K. Sameshima. Partial directed coherence: a new concept in neural structure determination. Biol. Cybernetics 84(6):
463-474, 2001. """
|
A = self.A()
return np.abs(A / np.sqrt(np.sum(A.conj() * A, axis=0, keepdims=True)))
|
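Because the denominator normalizes over the source column, squared PDC values sum to one across rows for every source j and frequency f; a quick numeric check of the expression used above:

```python
import numpy as np

m, nfft = 4, 16
A = np.random.randn(m, m, nfft) + 1j * np.random.randn(m, m, nfft)
pdc = np.abs(A / np.sqrt(np.sum(A.conj() * A, axis=0, keepdims=True)))
print(np.allclose(np.sum(pdc**2, axis=0), 1.0))  # True
```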
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ffPDC(self):
"""Full frequency partial directed coherence. .. math:: \mathrm{ffPDC}_{ij}(f) = \\frac{A_{ij}(f)}{\sqrt{\sum_f A_{:j}'(f) A_{:j}(f)}} """
|
A = self.A()
return np.abs(A * self.nfft / np.sqrt(np.sum(A.conj() * A, axis=(0, 2),
keepdims=True)))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def PDCF(self):
"""Partial directed coherence factor. .. math:: \mathrm{PDCF}_{ij}(f) = \\frac{A_{ij}(f)}{\sqrt{A_{:j}'(f) \mathbf{C}^{-1} A_{:j}(f)}} References L. A. Baccalá, K. Sameshima. Partial directed coherence: a new concept in neural structure determination. Biol. Cybernetics 84(6):
463-474, 2001. """
|
A = self.A()
# TODO: can we do that more efficiently?
return np.abs(A / np.sqrt(np.einsum('aj..., ab..., bj... ->j...',
A.conj(), self.Cinv(), A)))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GPDC(self):
"""Generalized partial directed coherence. .. math:: \mathrm{GPDC}_{ij}(f) = \\frac{|A_{ij}(f)|} {\sigma_i \sqrt{A_{:j}'(f) \mathrm{diag}(\mathbf{C})^{-1} A_{:j}(f)}} References L. Faes, S. Erla, G. Nollo. Measuring connectivity in linear multivariate processes: definitions, interpretation, and practical analysis. Comput. Math. Meth. Med. 2012: 140513, 2012. """
|
A = self.A()
tmp = A / np.sqrt(np.einsum('aj..., a..., aj..., ii... ->ij...',
A.conj(), 1 / np.diag(self.c), A, self.c))
return np.abs(tmp)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def DTF(self):
"""Directed transfer function. .. math:: \mathrm{DTF}_{ij}(f) = \\frac{H_{ij}(f)} {\sqrt{H_{i:}(f) H_{i:}'(f)}} References M. J. Kaminski, K. J. Blinowska. A new method of the description of the information flow in the brain structures. Biol. Cybernetics 65(3):
203-210, 1991. """
|
H = self.H()
return np.abs(H / np.sqrt(np.sum(H * H.conj(), axis=1, keepdims=True)))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ffDTF(self):
"""Full frequency directed transfer function. .. math:: \mathrm{ffDTF}_{ij}(f) = \\frac{H_{ij}(f)}{\sqrt{\sum_f H_{i:}(f) H_{i:}'(f)}} References A. Korzeniewska, M. Mańczak, M. Kaminski, K. J. Blinowska, S. Kasicki. Determination of information flow direction among brain structures by a modified directed transfer function (dDTF) method. J. Neurosci. Meth. 125(1-2):
195-207, 2003. """
|
H = self.H()
return np.abs(H * self.nfft / np.sqrt(np.sum(H * H.conj(), axis=(1, 2),
keepdims=True)))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GDTF(self):
"""Generalized directed transfer function. .. math:: \mathrm{GPDC}_{ij}(f) = \\frac{\sigma_j |H_{ij}(f)|} {\sqrt{H_{i:}(f) \mathrm{diag}(\mathbf{C}) H_{i:}'(f)}} References L. Faes, S. Erla, G. Nollo. Measuring connectivity in linear multivariate processes: definitions, interpretation, and practical analysis. Comput. Math. Meth. Med. 2012: 140513, 2012. """
|
H = self.H()
tmp = H / np.sqrt(np.einsum('ia..., aa..., ia..., j... ->ij...',
H.conj(), self.c, H,
1 / self.c.diagonal()))
return np.abs(tmp)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def enrich(self, expected=None, provided=None, path=None, validator=None):
""" Enrich this error with additional information. This works with both Invalid and MultipleInvalid (thanks to `Invalid` being iterable):
in the latter case, the defaults are applied to all collected errors. The specified arguments are only set on `Invalid` errors which do not have any value on the property. One exclusion is `path`: if provided, it is prepended to `Invalid.path`. This feature is especially useful when validating the whole input with multiple different schemas: ```python from good import Schema, Invalid schema = Schema(int) input = { 'user': { 'age': 10, } } try: schema(input['user']['age']) except Invalid as e: e.enrich(path=['user', 'age']) # Make the path reflect the reality raise # re-raise the error with updated fields ``` This is used when validating a value within a container. :param expected: Invalid.expected default :type expected: unicode|None :param provided: Invalid.provided default :type provided: unicode|None :param path: Prefix to prepend to Invalid.path :type path: list|None :param validator: Invalid.validator default :rtype: Invalid|MultipleInvalid """
|
for e in self:
# defaults on fields
if e.expected is None and expected is not None:
e.expected = expected
if e.provided is None and provided is not None:
e.provided = provided
if e.validator is None and validator is not None:
e.validator = validator
# path prefix
e.path = (path or []) + e.path
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flatten(cls, errors):
""" Unwind `MultipleErrors` to have a plain list of `Invalid` :type errors: list[Invalid|MultipleInvalid] :rtype: list[Invalid] """
|
ers = []
for e in errors:
if isinstance(e, MultipleInvalid):
ers.extend(cls.flatten(e.errors))
else:
ers.append(e)
return ers
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def warp_locations(locations, y_center=None, return_ellipsoid=False, verbose=False):
""" Warp EEG electrode locations to spherical layout. EEG Electrodes are warped to a spherical layout in three steps: 1. An ellipsoid is least-squares-fitted to the electrode locations. 2. Electrodes are displaced to the nearest point on the ellipsoid's surface. 3. The ellipsoid is transformed to a sphere, causing the new locations to lie exactly on a spherical surface with unit radius. This procedure intends to minimize electrode displacement in the original coordinate space. Simply projecting electrodes on a sphere (e.g. by normalizing the x/y/z coordinates) typically gives much larger displacements. Parameters locations : array-like, shape = [n_electrodes, 3] Eeach row of `locations` corresponds to the location of an EEG electrode in cartesian x/y/z coordinates. y_center : float, optional Fix the y-coordinate of the ellipsoid's center to this value (optional). This is useful to align the ellipsoid with the central electrodes. return_ellipsoid : bool, optional If `true` center and radii of the ellipsoid are returned. Returns ------- newlocs : array-like, shape = [n_electrodes, 3] Electrode locations on unit sphere. c : array-like, shape = [3], (only returned if `return_ellipsoid` evaluates to `True`) Center of the ellipsoid in the original location's coordinate space. r : array-like, shape = [3], (only returned if `return_ellipsoid` evaluates to `True`) Radii (x, y, z) of the ellipsoid in the original location's coordinate space. """
|
locations = np.asarray(locations)
if y_center is None:
c, r = _fit_ellipsoid_full(locations)
else:
c, r = _fit_ellipsoid_partial(locations, y_center)
elliptic_locations = _project_on_ellipsoid(c, r, locations)
if verbose:
print('Head ellipsoid center:', c)
print('Head ellipsoid radii:', r)
distance = np.sqrt(np.sum((locations - elliptic_locations)**2, axis=1))
print('Minimum electrode displacement:', np.min(distance))
print('Average electrode displacement:', np.mean(distance))
print('Maximum electrode displacement:', np.max(distance))
spherical_locations = (elliptic_locations - c) / r
if return_ellipsoid:
return spherical_locations, c, r
return spherical_locations
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _project_on_ellipsoid(c, r, locations):
"""displace locations to the nearest point on ellipsoid surface"""
|
p0 = locations - c # original locations
l2 = 1 / np.sum(p0**2 / r**2, axis=1, keepdims=True)
p = p0 * np.sqrt(l2) # initial approximation (projection of points towards center of ellipsoid)
fun = lambda x: np.sum((x.reshape(p0.shape) - p0)**2) # minimize distance between new and old points
con = lambda x: np.sum(x.reshape(p0.shape)**2 / r**2, axis=1) - 1 # new points constrained to surface of ellipsoid
res = sp.optimize.minimize(fun, p, constraints={'type': 'eq', 'fun': con}, method='SLSQP')
return res['x'].reshape(p0.shape) + c
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cut_segments(x2d, tr, start, stop):
"""Cut continuous signal into segments. Parameters x2d : array, shape (m, n) Input data with m signals and n samples. tr : list of int Trigger positions. start : int Window start (offset relative to trigger). stop : int Window end (offset relative to trigger). Returns ------- x3d : array, shape (len(tr), m, stop-start) Segments cut from data. Individual segments are stacked along the first dimension. See also -------- cat_trials : Concatenate segments. Examples -------- (3, 5, 50) """
|
if start != int(start):
raise ValueError("start index must be an integer")
if stop != int(stop):
raise ValueError("stop index must be an integer")
x2d = np.atleast_2d(x2d)
tr = np.asarray(tr, dtype=int).ravel()
win = np.arange(start, stop, dtype=int)
return np.concatenate([x2d[np.newaxis, :, t + win] for t in tr])
|
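A sketch reproducing the shape from the docstring example with the function above:

```python
import numpy as np

x = np.random.randn(5, 1000)        # 5 channels, 1000 samples
tr = [250, 500, 750]                # trigger positions
x3d = cut_segments(x, tr, -10, 40)  # 10 samples before to 40 after each trigger
print(x3d.shape)                    # (3, 5, 50)
```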
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cat_trials(x3d):
"""Concatenate trials along time axis. Parameters x3d : array, shape (t, m, n) Segmented input data with t trials, m signals, and n samples. Returns ------- x2d : array, shape (m, t * n) Trials are concatenated along the second axis. See also -------- cut_segments : Cut segments from continuous data. Examples -------- (4, 900) """
|
x3d = atleast_3d(x3d)
t = x3d.shape[0]
return np.concatenate(np.split(x3d, t, 0), axis=2).squeeze(0)
|
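cat_trials undoes the segmentation along the time axis; a round-trip sketch using the function above:

```python
import numpy as np

x3d = np.random.randn(9, 4, 100)          # 9 trials, 4 channels, 100 samples
x2d = cat_trials(x3d)
print(x2d.shape)                          # (4, 900)
print(np.allclose(x2d[:, :100], x3d[0]))  # True: trials are concatenated in order
```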
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dot_special(x2d, x3d):
"""Segment-wise dot product. This function calculates the dot product of x2d with each trial of x3d. Parameters x2d : array, shape (p, m) Input argument. x3d : array, shape (t, m, n) Segmented input data with t trials, m signals, and n samples. The dot product with x2d is calculated for each trial. Returns ------- out : array, shape (t, p, n) Dot product of x2d with each trial of x3d. Examples -------- (6, 7, 150) """
|
x3d = atleast_3d(x3d)
x2d = np.atleast_2d(x2d)
return np.concatenate([x2d.dot(x3d[i, ...])[np.newaxis, ...]
for i in range(x3d.shape[0])])
|
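A shape sketch matching the docstring example, e.g. applying a spatial filter trial by trial with the function above:

```python
import numpy as np

w = np.random.randn(7, 5)         # e.g. spatial filter: 7 components x 5 channels
x3d = np.random.randn(6, 5, 150)  # 6 trials, 5 channels, 150 samples
out = dot_special(w, x3d)
print(out.shape)                           # (6, 7, 150)
print(np.allclose(out[2], w.dot(x3d[2])))  # True
```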
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def randomize_phase(data, random_state=None):
"""Phase randomization. This function randomizes the spectral phase of the input data along the last dimension. Parameters data : array Input array. Returns ------- out : array Array of same shape as data. Notes ----- The algorithm randomizes the phase component of the input's complex Fourier transform. Examples -------- .. plot:: :include-source: from pylab import * from scot.datatools import randomize_phase np.random.seed(1234) s = np.sin(np.linspace(0,10*np.pi,1000)) x = np.vstack([s, np.sign(s)]) y = randomize_phase(x) subplot(2,1,1) title('Phase randomization of sine wave and rectangular function') plot(x.T + [1.5, -1.5]), axis([0,1000,-3,3]) subplot(2,1,2) plot(y.T + [1.5, -1.5]), axis([0,1000,-3,3]) plt.show() """
|
rng = check_random_state(random_state)
data = np.asarray(data)
data_freq = np.fft.rfft(data)
data_freq = np.abs(data_freq) * np.exp(1j*rng.random_sample(data_freq.shape)*2*np.pi)
return np.fft.irfft(data_freq, data.shape[-1])
|
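As the notes say, only the phase is randomized, so the amplitude spectrum is preserved; a quick check with the function above (DC and Nyquist bins are excluded because irfft keeps only their real parts):

```python
import numpy as np

x = np.sin(np.linspace(0, 10 * np.pi, 1000))
y = randomize_phase(x, random_state=42)
ax, ay = np.abs(np.fft.rfft(x)), np.abs(np.fft.rfft(y))
print(np.allclose(ax[1:-1], ay[1:-1]))  # True: amplitudes unchanged
print(np.allclose(x, y))                # False: time course is scrambled
```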
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def acm(x, l):
"""Compute autocovariance matrix at lag l. This function calculates the autocovariance matrix of `x` at lag `l`. Parameters x : array, shape (n_trials, n_channels, n_samples) Signal data (2D or 3D for multiple trials) l : int Lag Returns ------- c : ndarray, shape = [nchannels, n_channels] Autocovariance matrix of `x` at lag `l`. """
|
x = atleast_3d(x)
    if l > x.shape[2] - 1:
        raise ValueError("lag exceeds data length")
## subtract mean from each trial
#for t in range(x.shape[2]):
# x[:, :, t] -= np.mean(x[:, :, t], axis=0)
if l == 0:
a, b = x, x
else:
a = x[:, :, l:]
b = x[:, :, 0:-l]
c = np.zeros((x.shape[1], x.shape[1]))
for t in range(x.shape[0]):
c += a[t, :, :].dot(b[t, :, :].T) / a.shape[2]
c /= x.shape[0]
return c.T
|
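For white noise the lag-0 autocovariance approaches the channel covariance while larger lags vanish; a quick sanity check with the function above:

```python
import numpy as np

x = np.random.randn(20, 3, 1000)  # 20 trials, 3 channels, 1000 samples
print(np.round(acm(x, 0), 2))     # approximately the 3x3 identity matrix
print(np.round(acm(x, 5), 2))     # approximately zero
```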
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def jackknife_connectivity(measures, data, var, nfft=512, leaveout=1, n_jobs=1, verbose=0):
"""Calculate jackknife estimates of connectivity. For each jackknife estimate a block of trials is left out. This is repeated until each trial was left out exactly once. The number of estimates depends on the number of trials and the value of `leaveout`. It is calculated by repeats = `n_trials` // `leaveout`. .. note:: Parameter `var` will be modified by the function. Treat as undefined after the function returns. Parameters measures : str or list of str Name(s) of the connectivity measure(s) to calculate. See :class:`Connectivity` for supported measures. data : array, shape (trials, channels, samples) Time series data (multiple trials). var : VARBase-like object Instance of a VAR model. nfft : int, optional Number of frequency bins to calculate. Note that these points cover the range between 0 and half the sampling rate. leaveout : int, optional Number of trials to leave out in each estimate. n_jobs : int | None, optional Number of jobs to run in parallel. If set to None, joblib is not used at all. See `joblib.Parallel` for details. verbose : int, optional Verbosity level passed to joblib. Returns ------- result : array, shape (`repeats`, n_channels, n_channels, nfft) Values of the connectivity measure for each surrogate. If `measure_names` is a list of strings a dictionary is returned, where each key is the name of the measure, and the corresponding values are arrays of shape (`repeats`, n_channels, n_channels, nfft). """
|
data = atleast_3d(data)
t, m, n = data.shape
assert(t > 1)
if leaveout < 1:
leaveout = int(leaveout * t)
num_blocks = t // leaveout
mask = lambda block: [i for i in range(t) if i < block*leaveout or
i >= (block + 1) * leaveout]
par, func = parallel_loop(_calc_jackknife, n_jobs=n_jobs, verbose=verbose)
output = par(func(data[mask(b), :, :], var, measures, nfft)
for b in range(num_blocks))
return convert_output_(output, measures)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bootstrap_connectivity(measures, data, var, nfft=512, repeats=100, num_samples=None, n_jobs=1, verbose=0, random_state=None):
"""Calculate bootstrap estimates of connectivity. To obtain a bootstrap estimate trials are sampled randomly with replacement from the data set. .. note:: Parameter `var` will be modified by the function. Treat as undefined after the function returns. Parameters measures : str or list of str Name(s) of the connectivity measure(s) to calculate. See :class:`Connectivity` for supported measures. data : array, shape (trials, channels, samples) Time series data (multiple trials). var : VARBase-like object Instance of a VAR model. nfft : int, optional Number of frequency bins to calculate. Note that these points cover the range between 0 and half the sampling rate. repeats : int, optional Number of bootstrap estimates to take. num_samples : int, optional Number of samples to take for each bootstrap estimates. Defaults to the same number of trials as present in the data. n_jobs : int, optional n_jobs : int | None, optional Number of jobs to run in parallel. If set to None, joblib is not used at all. See `joblib.Parallel` for details. verbose : int, optional Verbosity level passed to joblib. Returns ------- measure : array, shape (`repeats`, n_channels, n_channels, nfft) Values of the connectivity measure for each bootstrap estimate. If `measure_names` is a list of strings a dictionary is returned, where each key is the name of the measure, and the corresponding values are arrays of shape (`repeats`, n_channels, n_channels, nfft). """
|
rng = check_random_state(random_state)
data = atleast_3d(data)
    t, m, n = data.shape
    assert(t > 1)
    if num_samples is None:
        num_samples = t
    mask = lambda r: rng.randint(0, t, num_samples)
par, func = parallel_loop(_calc_bootstrap, n_jobs=n_jobs, verbose=verbose)
output = par(func(data[mask(r), :, :], var, measures, nfft)
for r in range(repeats))
return convert_output_(output, measures)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def significance_fdr(p, alpha):
"""Calculate significance by controlling for the false discovery rate. This function determines which of the p-values in `p` can be considered significant. Correction for multiple comparisons is performed by controlling the false discovery rate (FDR). The FDR is the maximum fraction of p-values that are wrongly considered significant [1]_. Parameters p : array, shape (channels, channels, nfft) p-values. alpha : float Maximum false discovery rate. Returns ------- s : array, dtype=bool, shape (channels, channels, nfft) Significance of each p-value. References .. [1] Y. Benjamini, Y. Hochberg. Controlling the false discovery rate: a practical and powerful approach to multiple testing. J. Royal Stat. Soc. Series B 57(1):
289-300, 1995. """
|
i = np.argsort(p, axis=None)
m = i.size - np.sum(np.isnan(p))
j = np.empty(p.shape, int)
j.flat[i] = np.arange(1, i.size + 1)
mask = p <= alpha * j / m
if np.sum(mask) == 0:
return mask
# find largest k so that p_k <= alpha*k/m
k = np.max(j[mask])
# reject all H_i for i = 0...k
s = j <= k
return s
|
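A worked example of the step-up rule with the function above: with alpha = 0.1 and four tests, the thresholds alpha*k/m are 0.025, 0.05, 0.075, and 0.1, so the largest k with p_k <= alpha*k/m is 3:

```python
import numpy as np

p = np.array([0.01, 0.02, 0.03, 0.50])
print(significance_fdr(p, alpha=0.1))  # [ True  True  True False]
```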
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register_type_name(t, name):
""" Register a human-friendly name for the given type. This will be used in Invalid errors :param t: The type to register :type t: type :param name: Name for the type :type name: unicode """
|
assert isinstance(t, type)
    assert isinstance(name, six.text_type)
__type_names[t] = name
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_type_name(t):
""" Get a human-friendly name for the given type. :type t: type|None :rtype: unicode """
|
# Lookup in the mapping
try:
return __type_names[t]
except KeyError:
# Specific types
if issubclass(t, six.integer_types):
return _(u'Integer number')
# Get name from the Type itself
return six.text_type(t.__name__).capitalize()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_callable_name(c):
""" Get a human-friendly name for the given callable. :param c: The callable to get the name for :type c: callable :rtype: unicode """
|
if hasattr(c, 'name'):
return six.text_type(c.name)
elif hasattr(c, '__name__'):
return six.text_type(c.__name__) + u'()'
else:
return six.text_type(c)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_primitive_name(schema):
""" Get a human-friendly name for the given primitive. :param schema: Schema :type schema: * :rtype: unicode """
|
try:
return {
const.COMPILED_TYPE.LITERAL: six.text_type,
const.COMPILED_TYPE.TYPE: get_type_name,
const.COMPILED_TYPE.ENUM: get_type_name,
const.COMPILED_TYPE.CALLABLE: get_callable_name,
const.COMPILED_TYPE.ITERABLE: lambda x: _(u'{type}[{content}]').format(type=get_type_name(list), content=_(u'...') if x else _(u'-')),
const.COMPILED_TYPE.MAPPING: lambda x: _(u'{type}[{content}]').format(type=get_type_name(dict), content=_(u'...') if x else _(u'-')),
}[primitive_type(schema)](schema)
except KeyError:
return six.text_type(repr(schema))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def primitive_type(schema):
""" Get schema type for the primitive argument. Note: it does treats markers & schemas as callables! :param schema: Value of a primitive type :type schema: * :return: const.COMPILED_TYPE.* :rtype: str|None """
|
schema_type = type(schema)
# Literal
if schema_type in const.literal_types:
return const.COMPILED_TYPE.LITERAL
# Enum
elif Enum is not None and isinstance(schema, (EnumMeta, Enum)):
return const.COMPILED_TYPE.ENUM
# Type
elif issubclass(schema_type, six.class_types):
return const.COMPILED_TYPE.TYPE
# Mapping
    elif isinstance(schema, collections.abc.Mapping):
return const.COMPILED_TYPE.MAPPING
# Iterable
    elif isinstance(schema, collections.abc.Iterable):
return const.COMPILED_TYPE.ITERABLE
# Callable
elif callable(schema):
return const.COMPILED_TYPE.CALLABLE
# Not detected
else:
return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def commajoin_as_strings(iterable):
""" Join the given iterable with ',' """
|
return _(u',').join((six.text_type(i) for i in iterable))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepare_topoplots(topo, values):
"""Prepare multiple topo maps for cached plotting. .. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_values`. Parameters topo : :class:`~eegtopo.topoplot.Topoplot` Scalp maps are created with this class values : array, shape = [n_topos, n_channels] Channel values for each topo plot Returns ------- topomaps : list of array The map for each topo plot """
|
values = np.atleast_2d(values)
topomaps = []
for i in range(values.shape[0]):
topo.set_values(values[i, :])
topo.create_map()
topomaps.append(topo.get_map())
return topomaps
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_topo(axis, topo, topomap, crange=None, offset=(0,0), plot_locations=True, plot_head=True):
"""Draw a topoplot in given axis. .. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_map`. Parameters axis : axis Axis to draw into. topo : :class:`~eegtopo.topoplot.Topoplot` This object draws the topo plot topomap : array, shape = [w_pixels, h_pixels] Scalp-projected data crange : [int, int], optional Range of values covered by the colormap. If set to None, [-max(abs(topomap)), max(abs(topomap))] is substituted. offset : [float, float], optional Shift the topo plot by [x,y] in axis units. plot_locations : bool, optional Plot electrode locations. plot_head : bool, optional Plot head cartoon. Returns ------- h : image Image object the map was plotted into """
|
topo.set_map(topomap)
h = topo.plot_map(axis, crange=crange, offset=offset)
if plot_locations:
topo.plot_locations(axis, offset=offset)
if plot_head:
topo.plot_head(axis, offset=offset)
return h
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_sources(topo, mixmaps, unmixmaps, global_scale=None, fig=None):
"""Plot all scalp projections of mixing- and unmixing-maps. .. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_map`. Parameters topo : :class:`~eegtopo.topoplot.Topoplot` This object draws the topo plot mixmaps : array, shape = [w_pixels, h_pixels] Scalp-projected mixing matrix unmixmaps : array, shape = [w_pixels, h_pixels] Scalp-projected unmixing matrix global_scale : float, optional Set common color scale as given percentile of all map values to use as the maximum. `None` scales each plot individually (default). fig : Figure object, optional Figure to plot into. If set to `None`, a new figure is created. Returns ------- fig : Figure object The figure into which was plotted. """
|
urange, mrange = None, None
m = len(mixmaps)
if global_scale:
tmp = np.asarray(unmixmaps)
tmp = tmp[np.logical_not(np.isnan(tmp))]
umax = np.percentile(np.abs(tmp), global_scale)
umin = -umax
urange = [umin, umax]
tmp = np.asarray(mixmaps)
tmp = tmp[np.logical_not(np.isnan(tmp))]
mmax = np.percentile(np.abs(tmp), global_scale)
mmin = -mmax
mrange = [mmin, mmax]
    y = max(int(np.floor(np.sqrt(m * 3 / 4))), 1)
    x = int(np.ceil(m / y))
if fig is None:
fig = new_figure()
axes = []
for i in range(m):
axes.append(fig.add_subplot(2 * y, x, i + 1))
plot_topo(axes[-1], topo, unmixmaps[i], crange=urange)
axes[-1].set_title(str(i))
axes.append(fig.add_subplot(2 * y, x, m + i + 1))
plot_topo(axes[-1], topo, mixmaps[i], crange=mrange)
axes[-1].set_title(str(i))
for a in axes:
a.set_yticks([])
a.set_xticks([])
a.set_frame_on(False)
axes[0].set_ylabel('Unmixing weights')
axes[1].set_ylabel('Scalp projections')
return fig
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_connectivity_topos(layout='diagonal', topo=None, topomaps=None, fig=None):
"""Place topo plots in a figure suitable for connectivity visualization. .. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_map`. Parameters layout : str 'diagonal' -> place topo plots on diagonal. otherwise -> place topo plots in left column and top row. topo : :class:`~eegtopo.topoplot.Topoplot` This object draws the topo plot topomaps : array, shape = [w_pixels, h_pixels] Scalp-projected map fig : Figure object, optional Figure to plot into. If set to `None`, a new figure is created. Returns ------- fig : Figure object The figure into which was plotted. """
|
m = len(topomaps)
if fig is None:
fig = new_figure()
if layout == 'diagonal':
for i in range(m):
ax = fig.add_subplot(m, m, i*(1+m) + 1)
plot_topo(ax, topo, topomaps[i])
ax.set_yticks([])
ax.set_xticks([])
ax.set_frame_on(False)
else:
for i in range(m):
for j in [i+2, (i+1)*(m+1)+1]:
ax = fig.add_subplot(m+1, m+1, j)
plot_topo(ax, topo, topomaps[i])
ax.set_yticks([])
ax.set_xticks([])
ax.set_frame_on(False)
return fig
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_connectivity_significance(s, fs=2, freq_range=(-np.inf, np.inf), diagonal=0, border=False, fig=None):
"""Plot significance. Significance is drawn as a background image where dark vertical stripes indicate freuquencies where a evaluates to True. Parameters a : array, shape (n_channels, n_channels, n_fft), dtype bool Significance fs : float Sampling frequency freq_range : (float, float) Frequency range to plot diagonal : {-1, 0, 1} If diagonal == -1 nothing is plotted on the diagonal (a[i,i,:] are not plotted), if diagonal == 0, a is plotted on the diagonal too (all a[i,i,:] are plotted), if diagonal == 1, a is plotted on the diagonal only (only a[i,i,:] are plotted) border : bool If border == true the leftmost column and the topmost row are left blank fig : Figure object, optional Figure to plot into. If set to `None`, a new figure is created. Returns ------- fig : Figure object The figure into which was plotted. """
|
a = np.atleast_3d(s)
[_, m, f] = a.shape
freq = np.linspace(0, fs / 2, f)
left = max(freq_range[0], freq[0])
right = min(freq_range[1], freq[-1])
imext = (freq[0], freq[-1], -1e25, 1e25)
if fig is None:
fig = new_figure()
axes = []
for i in range(m):
if diagonal == 1:
jrange = [i]
elif diagonal == 0:
jrange = range(m)
else:
jrange = [j for j in range(m) if j != i]
for j in jrange:
if border:
ax = fig.add_subplot(m+1, m+1, j + (i+1) * (m+1) + 2)
else:
ax = fig.add_subplot(m, m, j + i * m + 1)
axes.append((i, j, ax))
            ax.imshow(a[i, j, np.newaxis], vmin=0, vmax=2, cmap='binary', aspect='auto', extent=imext, zorder=-999)
ax.xaxis.set_major_locator(MaxNLocator(max(1, 7 - m)))
ax.yaxis.set_major_locator(MaxNLocator(max(1, 7 - m)))
ax.set_xlim(left, right)
if 0 < i < m - 1:
ax.set_xticks([])
if 0 < j < m - 1:
ax.set_yticks([])
if j == 0:
ax.yaxis.tick_left()
if j == m-1:
ax.yaxis.tick_right()
_plot_labels(fig,
{'x': 0.5, 'y': 0.025, 's': 'frequency (Hz)', 'horizontalalignment': 'center'},
{'x': 0.05, 'y': 0.5, 's': 'magnitude', 'horizontalalignment': 'center', 'rotation': 'vertical'})
return fig
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_whiteness(var, h, repeats=1000, axis=None):
""" Draw distribution of the Portmanteu whiteness test. Parameters var : :class:`~scot.var.VARBase`-like object Vector autoregressive model (VAR) object whose residuals are tested for whiteness. h : int Maximum lag to include in the test. repeats : int, optional Number of surrogate estimates to draw under the null hypothesis. axis : axis, optional Axis to draw into. By default draws into :func:`matplotlib.pyplot.gca()`. Returns ------- pr : float *p*-value of whiteness under the null hypothesis """
|
pr, q0, q = var.test_whiteness(h, repeats, True)
if axis is None:
axis = current_axis()
    pdf, _, _ = axis.hist(q0, 30, density=True, label='surrogate distribution')
axis.plot([q,q], [0,np.max(pdf)], 'r-', label='fitted model')
#df = m*m*(h-p)
#x = np.linspace(np.min(q0)*0.0, np.max(q0)*2.0, 100)
#y = sp.stats.chi2.pdf(x, df)
#hc = axis.plot(x, y, label='chi-squared distribution (df=%i)' % df)
axis.set_title('significance: p = %f'%pr)
axis.set_xlabel('Li-McLeod statistic (Q)')
axis.set_ylabel('probability')
axis.legend()
return pr
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def singletrial(num_trials, skipstep=1):
""" Single-trial cross-validation schema Use one trial for training, all others for testing. Parameters num_trials : int Total number of trials skipstep : int only use every `skipstep` trial for training Returns ------- gen : generator object the generator returns tuples (trainset, testset) """
|
for t in range(0, num_trials, skipstep):
trainset = [t]
testset = [i for i in range(trainset[0])] + \
[i for i in range(trainset[-1] + 1, num_trials)]
        testset = sorted([t % num_trials for t in testset])
yield trainset, testset
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def splitset(num_trials, skipstep=None):
""" Split-set cross validation Use half the trials for training, and the other half for testing. Then repeat the other way round. Parameters num_trials : int Total number of trials skipstep : int unused Returns ------- gen : generator object the generator returns tuples (trainset, testset) """
|
split = num_trials // 2
a = list(range(0, split))
b = list(range(split, num_trials))
yield a, b
yield b, a
|
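The two generators above yield complementary train/test index lists; a quick look at their output:

```python
print(list(splitset(6)))
# [([0, 1, 2], [3, 4, 5]), ([3, 4, 5], [0, 1, 2])]
for train, test in singletrial(4, skipstep=2):
    print(train, test)
# [0] [1, 2, 3]
# [2] [0, 1, 3]
```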
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_data(self, data, cl=None, time_offset=0):
""" Assign data to the workspace. This function assigns a new data set to the workspace. Doing so invalidates currently fitted VAR models, connectivity estimates, and activations. Parameters data : array-like, shape = [n_trials, n_channels, n_samples] or [n_channels, n_samples] EEG data set cl : list of valid dict keys Class labels associated with each trial. time_offset : float, optional Trial starting time; used for labelling the x-axis of time/frequency plots. Returns ------- self : Workspace The Workspace object. """
|
self.data_ = atleast_3d(data)
self.cl_ = np.asarray(cl if cl is not None else [None]*self.data_.shape[0])
self.time_offset_ = time_offset
self.var_model_ = None
self.var_cov_ = None
self.connectivity_ = None
self.trial_mask_ = np.ones(self.cl_.size, dtype=bool)
if self.unmixing_ is not None:
self.activations_ = dot_special(self.unmixing_.T, self.data_)
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_used_labels(self, labels):
""" Specify which trials to use in subsequent analysis steps. This function masks trials based on their class labels. Parameters labels : list of class labels Marks all trials that have a label that is in the `labels` list for further processing. Returns ------- self : Workspace The Workspace object. """
|
mask = np.zeros(self.cl_.size, dtype=bool)
for l in labels:
mask = np.logical_or(mask, self.cl_ == l)
self.trial_mask_ = mask
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_sources(self, sources):
""" Remove sources from the decomposition. This function removes sources from the decomposition. Doing so invalidates currently fitted VAR models and connectivity estimates. Parameters sources : {slice, int, array of ints} Indices of components to remove. Returns ------- self : Workspace The Workspace object. Raises ------ RuntimeError If the :class:`Workspace` instance does not contain a source decomposition. """
|
if self.unmixing_ is None or self.mixing_ is None:
raise RuntimeError("No sources available (run do_mvarica first)")
self.mixing_ = np.delete(self.mixing_, sources, 0)
self.unmixing_ = np.delete(self.unmixing_, sources, 1)
if self.activations_ is not None:
self.activations_ = np.delete(self.activations_, sources, 1)
self.var_model_ = None
self.var_cov_ = None
self.connectivity_ = None
self.mixmaps_ = []
self.unmixmaps_ = []
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def keep_sources(self, keep):
"""Keep only the specified sources in the decomposition. """
|
if self.unmixing_ is None or self.mixing_ is None:
raise RuntimeError("No sources available (run do_mvarica first)")
n_sources = self.mixing_.shape[0]
self.remove_sources(np.setdiff1d(np.arange(n_sources), np.array(keep)))
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit_var(self):
""" Fit a VAR model to the source activations. Returns ------- self : Workspace The Workspace object. Raises ------ RuntimeError If the :class:`Workspace` instance does not contain source activations. """
|
if self.activations_ is None:
raise RuntimeError("VAR fitting requires source activations (run do_mvarica first)")
self.var_.fit(data=self.activations_[self.trial_mask_, :, :])
self.connectivity_ = Connectivity(self.var_.coef, self.var_.rescov, self.nfft_)
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_connectivity(self, measure_name, plot=False):
""" Calculate spectral connectivity measure. Parameters measure_name : str Name of the connectivity measure to calculate. See :class:`Connectivity` for supported measures. plot : {False, None, Figure object}, optional Whether and where to plot the connectivity. If set to **False**, nothing is plotted. Otherwise set to the Figure object. If set to **None**, a new figure is created. Returns ------- measure : array, shape = [n_channels, n_channels, nfft] Values of the connectivity measure. fig : Figure object Instance of the figure in which was plotted. This is only returned if `plot` is not **False**. Raises ------ RuntimeError If the :class:`Workspace` instance does not contain a fitted VAR model. """
|
if self.connectivity_ is None:
raise RuntimeError("Connectivity requires a VAR model (run do_mvarica or fit_var first)")
cm = getattr(self.connectivity_, measure_name)()
cm = np.abs(cm) if np.any(np.iscomplex(cm)) else cm
if plot is None or plot:
fig = plot
if self.plot_diagonal == 'fill':
diagonal = 0
elif self.plot_diagonal == 'S':
diagonal = -1
sm = np.abs(self.connectivity_.S())
sm /= np.max(sm) # scale to 1 since components are scaled arbitrarily anyway
fig = self.plotting.plot_connectivity_spectrum(sm, fs=self.fs_, freq_range=self.plot_f_range,
diagonal=1, border=self.plot_outside_topo, fig=fig)
else:
diagonal = -1
fig = self.plotting.plot_connectivity_spectrum(cm, fs=self.fs_, freq_range=self.plot_f_range,
diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
return cm, fig
return cm
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_surrogate_connectivity(self, measure_name, repeats=100, plot=False, random_state=None):
""" Calculate spectral connectivity measure under the assumption of no actual connectivity. Repeatedly samples connectivity from phase-randomized data. This provides estimates of the connectivity distribution if there was no causal structure in the data. Parameters measure_name : str Name of the connectivity measure to calculate. See :class:`Connectivity` for supported measures. repeats : int, optional How many surrogate samples to take. Returns ------- measure : array, shape = [`repeats`, n_channels, n_channels, nfft] Values of the connectivity measure for each surrogate. See Also -------- :func:`scot.connectivity_statistics.surrogate_connectivity` : Calculates surrogate connectivity """
|
cs = surrogate_connectivity(measure_name, self.activations_[self.trial_mask_, :, :],
self.var_, self.nfft_, repeats, random_state=random_state)
if plot is None or plot:
fig = plot
if self.plot_diagonal == 'fill':
diagonal = 0
elif self.plot_diagonal == 'S':
diagonal = -1
sb = self.get_surrogate_connectivity('absS', repeats)
sb /= np.max(sb) # scale to 1 since components are scaled arbitrarily anyway
su = np.percentile(sb, 95, axis=0)
fig = self.plotting.plot_connectivity_spectrum([su], fs=self.fs_, freq_range=self.plot_f_range,
diagonal=1, border=self.plot_outside_topo, fig=fig)
else:
diagonal = -1
cu = np.percentile(cs, 95, axis=0)
fig = self.plotting.plot_connectivity_spectrum([cu], fs=self.fs_, freq_range=self.plot_f_range,
diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
return cs, fig
return cs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_bootstrap_connectivity(self, measure_names, repeats=100, num_samples=None, plot=False, random_state=None):
""" Calculate bootstrap estimates of spectral connectivity measures. Bootstrapping is performed on trial level. Parameters measure_names : {str, list of str} Name(s) of the connectivity measure(s) to calculate. See :class:`Connectivity` for supported measures. repeats : int, optional How many bootstrap estimates to take. num_samples : int, optional How many samples to take for each bootstrap estimates. Defaults to the same number of trials as present in the data. Returns ------- measure : array, shape = [`repeats`, n_channels, n_channels, nfft] Values of the connectivity measure for each bootstrap estimate. If `measure_names` is a list of strings a dictionary is returned, where each key is the name of the measure, and the corresponding values are ndarrays of shape [`repeats`, n_channels, n_channels, nfft]. See Also -------- :func:`scot.connectivity_statistics.bootstrap_connectivity` : Calculates bootstrap connectivity """
|
if num_samples is None:
num_samples = np.sum(self.trial_mask_)
cb = bootstrap_connectivity(measure_names, self.activations_[self.trial_mask_, :, :],
self.var_, self.nfft_, repeats, num_samples, random_state=random_state)
if plot is None or plot:
fig = plot
if self.plot_diagonal == 'fill':
diagonal = 0
elif self.plot_diagonal == 'S':
diagonal = -1
sb = self.get_bootstrap_connectivity('absS', repeats, num_samples)
sb /= np.max(sb) # scale to 1 since components are scaled arbitrarily anyway
sm = np.median(sb, axis=0)
sl = np.percentile(sb, 2.5, axis=0)
su = np.percentile(sb, 97.5, axis=0)
fig = self.plotting.plot_connectivity_spectrum([sm, sl, su], fs=self.fs_, freq_range=self.plot_f_range,
diagonal=1, border=self.plot_outside_topo, fig=fig)
else:
diagonal = -1
cm = np.median(cb, axis=0)
cl = np.percentile(cb, 2.5, axis=0)
cu = np.percentile(cb, 97.5, axis=0)
fig = self.plotting.plot_connectivity_spectrum([cm, cl, cu], fs=self.fs_, freq_range=self.plot_f_range,
diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
return cb, fig
return cb
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_source_topos(self, common_scale=None):
""" Plot topography of the Source decomposition. Parameters common_scale : float, optional If set to None, each topoplot's color axis is scaled individually. Otherwise specifies the percentile (1-99) of values in all plot. This value is taken as the maximum color scale. """
|
        if self.unmixing_ is None or self.mixing_ is None:
raise RuntimeError("No sources available (run do_mvarica first)")
self._prepare_plots(True, True)
self.plotting.plot_sources(self.topo_, self.mixmaps_, self.unmixmaps_, common_scale)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_connectivity_topos(self, fig=None):
""" Plot scalp projections of the sources. This function only plots the topos. Use in combination with connectivity plotting. Parameters fig : {None, Figure object}, optional Where to plot the topos. f set to **None**, a new figure is created. Otherwise plot into the provided figure object. Returns ------- fig : Figure object Instance of the figure in which was plotted. """
|
self._prepare_plots(True, False)
if self.plot_outside_topo:
fig = self.plotting.plot_connectivity_topos('outside', self.topo_, self.mixmaps_, fig)
elif self.plot_diagonal == 'topo':
fig = self.plotting.plot_connectivity_topos('diagonal', self.topo_, self.mixmaps_, fig)
return fig
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_connectivity_surrogate(self, measure_name, repeats=100, fig=None):
""" Plot spectral connectivity measure under the assumption of no actual connectivity. Repeatedly samples connectivity from phase-randomized data. This provides estimates of the connectivity distribution if there was no causal structure in the data. Parameters measure_name : str Name of the connectivity measure to calculate. See :class:`Connectivity` for supported measures. repeats : int, optional How many surrogate samples to take. fig : {None, Figure object}, optional Where to plot the topos. f set to **None**, a new figure is created. Otherwise plot into the provided figure object. Returns ------- fig : Figure object Instance of the figure in which was plotted. """
|
cb = self.get_surrogate_connectivity(measure_name, repeats)
self._prepare_plots(True, False)
cu = np.percentile(cb, 95, axis=0)
fig = self.plotting.plot_connectivity_spectrum([cu], self.fs_, freq_range=self.plot_f_range, fig=fig)
return fig
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parallel_loop(func, n_jobs=1, verbose=1):
"""run loops in parallel, if joblib is available. Parameters func : function function to be executed in parallel n_jobs : int | None Number of jobs. If set to None, do not attempt to use joblib. verbose : int verbosity level Notes ----- Execution of the main script must be guarded with `if __name__ == '__main__':` when using parallelization. """
|
if n_jobs:
try:
from joblib import Parallel, delayed
except ImportError:
try:
from sklearn.externals.joblib import Parallel, delayed
except ImportError:
n_jobs = None
if not n_jobs:
if verbose:
print('running ', func, ' serially')
par = lambda x: list(x)
else:
if verbose:
print('running ', func, ' in parallel')
func = delayed(func)
par = Parallel(n_jobs=n_jobs, verbose=verbose)
return par, func
|
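A minimal usage sketch for the helper above, with the `__main__` guard the docstring calls for; `work` is a hypothetical payload function:

```python
import math

def work(i):
    return math.factorial(i)

if __name__ == '__main__':
    par, func = parallel_loop(work, n_jobs=2, verbose=0)
    results = par(func(i) for i in range(8))
    print(results)  # [1, 1, 2, 6, 24, 120, 720, 5040]
```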
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _convert_errors(func):
""" Decorator to convert throws errors to Voluptuous format."""
|
cast_Invalid = lambda e: Invalid(
u"{message}, expected {expected}".format(
message=e.message,
expected=e.expected)
if e.expected != u'-none-' else e.message,
e.path,
six.text_type(e))
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except good.SchemaError as e:
raise SchemaError(six.text_type(e))
except good.MultipleInvalid as ee:
raise MultipleInvalid([cast_Invalid(e) for e in ee])
except good.Invalid as e:
# Since voluptuous throws MultipleInvalid almost always -- we follow the same pattern...
raise MultipleInvalid([cast_Invalid(e)])
return wrapper
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def on_compiled(self, name=None, key_schema=None, value_schema=None, as_mapping_key=None):
""" When CompiledSchema compiles this marker, it sets informational values onto it. Note that arguments may be provided in two incomplete sets, e.g. (name, key_schema, None) and then (None, None, value_schema). Thus, all assignments must be handled individually. It is possible that a marker may have no `value_schema` at all: e.g. in the case of { Extra: Reject } -- `Reject` will have no value schema, but `Extra` will have compiled `Reject` as the value. :param key_schema: Compiled key schema :type key_schema: CompiledSchema|None :param value_schema: Compiled value schema :type value_schema: CompiledSchema|None :param name: Human-friendly marker name :type name: unicode|None :param as_mapping_key: Whether it's used as a mapping key? :type as_mapping_key: bool|None :rtype: Marker """
|
if self.name is None:
self.name = name
if self.key_schema is None:
self.key_schema = key_schema
if self.value_schema is None:
self.value_schema = value_schema
if as_mapping_key:
self.as_mapping_key = True
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def colorlogs(format="short"):
"""Append a rainbow logging handler and a formatter to the root logger"""
|
try:
from rainbow_logging_handler import RainbowLoggingHandler
import sys
# setup `RainbowLoggingHandler`
logger = logging.root
# same as default
if format == "short":
fmt = "%(message)s "
else:
fmt = "[%(asctime)s] %(name)s %(funcName)s():%(lineno)d\t%(message)s [%(levelname)s]"
formatter = logging.Formatter(fmt)
handler = RainbowLoggingHandler(sys.stderr,
color_funcName=('black', 'gray', True))
handler.setFormatter(formatter)
logger.addHandler(handler)
except ImportError:
# rainbow logger not found, that's ok
pass
|
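Typical use at program start-up:

import logging

colorlogs(format="long")
logging.root.setLevel(logging.DEBUG)
logging.debug("colored output when rainbow_logging_handler is installed")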
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
"""main bmi runner program"""
|
arguments = docopt.docopt(__doc__, version=__version__)
colorlogs()
# Read input file
wrapper = BMIWrapper(
engine=arguments['<engine>'],
configfile=arguments['<config>'] or ''
)
# add logger if required
if not arguments['--disable-logger']:
logging.root.setLevel(logging.DEBUG)
wrapper.set_logger(logging.root)
with wrapper as model:
# if siginfo is supported by OS (BSD)
def handler(signum, frame):
"""report progress information"""
t_start = model.get_start_time()
t_end = model.get_end_time()
t_current = model.get_current_time()
total = (t_end - t_start)
now = (t_current - t_start)
if total > 0:
logging.info("progress: %s%%", 100.0 * now / total)
else:
logging.info("progress: unknown")
if hasattr(signal, 'SIGINFO'):
# attach a siginfo handler (CTRL-t) to print progress
signal.signal(signal.SIGINFO, handler)
if arguments['--info']:
logging.info("%s", trace(model))
t_end = model.get_end_time()
t = model.get_start_time()
while t < t_end:
model.update(-1)
t = model.get_current_time()
if arguments['--info']:
logging.info("%s", trace(model))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def get_def_conf():
'''return default configurations as simple dict'''
ret = dict()
for k,v in defConf.items():
ret[k] = v[0]
return ret
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def move(self):
""" Advance game by single move, if possible. @return: logical indicator if move was performed. """
|
if len(self.moves) == MAX_MOVES:
return False
elif len(self.moves) % 2:
active_engine = self.black_engine
active_engine_name = self.black
inactive_engine = self.white_engine
inactive_engine_name = self.white
else:
active_engine = self.white_engine
active_engine_name = self.white
inactive_engine = self.black_engine
inactive_engine_name = self.black
active_engine.setposition(self.moves)
movedict = active_engine.bestmove()
bestmove = movedict.get('move')
info = movedict.get('info')
ponder = movedict.get('ponder')
self.moves.append(bestmove)
if info["score"]["eval"] == "mate":
matenum = info["score"]["value"]
if matenum > 0:
self.winner_engine = active_engine
self.winner = active_engine_name
elif matenum < 0:
self.winner_engine = inactive_engine
self.winner = inactive_engine_name
return False
return ponder != '(none)'  # True only when the engine proposed a ponder move
|
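A hedged sketch of driving a full game with this method; the `Match`-style object and the `Engine` constructor are assumptions based on the attributes used above:

# Hypothetical: a Match-like object holding two Engine instances and the
# attributes used above (moves, white_engine, black_engine, winner, ...).
match = Match(engines={'white': Engine(), 'black': Engine()})
while match.move():        # falsy once mate is found or MAX_MOVES is reached
    pass
print(match.winner)        # name of the winning engine, or None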
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bestmove(self):
""" Get proposed best move for current position. @return: dictionary with 'move', 'ponder', 'info' containing best move's UCI notation, ponder value and info dictionary. """
|
self.go()
last_info = ""
while True:
text = self.stdout.readline().strip()
split_text = text.split(' ')
    print(text)  # echo raw engine output; useful when debugging
if split_text[0] == "info":
last_info = Engine._bestmove_get_info(text)
if split_text[0] == "bestmove":
        ponder = None if len(split_text) < 4 else split_text[3]
return {'move': split_text[1],
'ponder': ponder,
'info': last_info}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bestmove_get_info(text):
""" Parse stockfish evaluation output as dictionary. Examples of input: "info depth 2 seldepth 3 multipv 1 score cp -656 nodes 43 nps 43000 tbhits 0 \ time 1 pv g7g6 h3g3 g6f7" "info depth 10 seldepth 12 multipv 1 score mate 5 nodes 2378 nps 1189000 tbhits 0 \ time 2 pv h3g3 g6f7 g3c7 b5d7 d1d7 f7g6 c7g3 g6h5 e6f4" """
|
result_dict = Engine._get_info_pv(text)
result_dict.update(Engine._get_info_score(text))
single_value_fields = ['depth', 'seldepth', 'multipv', 'nodes', 'nps', 'tbhits', 'time']
for field in single_value_fields:
result_dict.update(Engine._get_info_singlevalue_subfield(text, field))
return result_dict
|
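For the first docstring example the parsed result would look roughly as below; the exact shape depends on the `_get_info_pv` and `_get_info_score` helpers, which are not shown here:

line = ("info depth 2 seldepth 3 multipv 1 score cp -656 nodes 43 "
        "nps 43000 tbhits 0 time 1 pv g7g6 h3g3 g6f7")
info = Engine._bestmove_get_info(line)
# approximately: {'depth': 2, 'seldepth': 3, 'multipv': 1, 'nodes': 43,
#                 'nps': 43000, 'tbhits': 0, 'time': 1,
#                 'score': {'eval': 'cp', 'value': -656},
#                 'pv': 'g7g6 h3g3 g6f7'}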
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isready(self):
""" Used to synchronize the python engine object with the back-end engine. Sends 'isready' and waits for 'readyok.' """
|
self.put('isready')
while True:
text = self.stdout.readline().strip()
if text == 'readyok':
return text
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def project_activity(index, start, end):
"""Compute the metrics for the project activity section of the enriched github pull requests index. Returns a dictionary containing a "metric" key. This key contains the metrics for this section. :param index: index object :param start: start date to get the data from :param end: end date to get the data upto :return: dictionary with the value of the metrics """
|
results = {
"metrics": [SubmittedPRs(index, start, end),
ClosedPRs(index, start, end)]
}
return results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def aggregations(self):
"""Get the single valued aggregations with respect to the previous time interval."""
|
prev_month_start = get_prev_month(self.end, self.query.interval_)
self.query.since(prev_month_start)
agg = super().aggregations()
if agg is None:
    agg = 0  # ES returns NaN for empty aggregations; report 0 instead
return agg
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def timeseries(self, dataframe=False):
"""Get BMIPR as a time series."""
|
closed_timeseries = self.closed.timeseries(dataframe=dataframe)
opened_timeseries = self.opened.timeseries(dataframe=dataframe)
return calculate_bmi(closed_timeseries, opened_timeseries)
|
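BMIPR is a Backlog Management Index over pull requests: closed items divided by opened items per interval. A minimal sketch of what `calculate_bmi` could look like under that assumption (the real helper is not part of this excerpt):

def calculate_bmi(closed, opened):
    # Assumes both time series share the same 'date' buckets.
    result = {'date': closed['date'], 'bmi': []}
    for c, o in zip(closed['value'], opened['value']):
        result['bmi'].append(c / o if o else None)   # None where nothing was opened
    return result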
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_query(self, evolutionary=False):
""" Basic query to get the metric values :param evolutionary: if True the metric values time series is returned. If False the aggregated metric value. :return: the DSL query to be sent to Elasticsearch """
|
if not evolutionary:
interval = None
offset = None
else:
interval = self.interval
offset = self.offset
if not interval:
raise RuntimeError("Evolutionary query without an interval.")
query = ElasticQuery.get_agg(field=self.FIELD_COUNT,
date_field=self.FIELD_DATE,
start=self.start, end=self.end,
filters=self.esfilters,
agg_type=self.AGG_TYPE,
interval=interval,
offset=offset)
logger.debug("Metric: '%s' (%s); Query: %s",
self.name, self.id, query)
return query
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_list(self):
""" Extract from a DSL aggregated response the values for each bucket :return: a list with the values in a DSL aggregated response """
|
field = self.FIELD_NAME
query = ElasticQuery.get_agg(field=field,
date_field=self.FIELD_DATE,
start=self.start, end=self.end,
filters=self.esfilters)
logger.debug("Metric: '%s' (%s); Query: %s",
self.name, self.id, query)
res = self.get_metrics_data(query)
list_ = {field: [], "value": []}
for bucket in res['aggregations'][str(ElasticQuery.AGGREGATION_ID)]['buckets']:
list_[field].append(bucket['key'])
list_['value'].append(bucket['doc_count'])
return list_
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_metrics_data(self, query):
""" Get the metrics data from Elasticsearch given a DSL query :param query: query to be sent to Elasticsearch :return: a dict with the results of executing the query """
|
if self.es_url.startswith("http"):
url = self.es_url
else:
url = 'http://' + self.es_url
es = Elasticsearch(url)
s = Search(using=es, index=self.es_index)
s = s.update_from_dict(query)
try:
response = s.execute()
return response.to_dict()
except Exception as e:
    print()
    print("In get_metrics_data: Failed to fetch data.\n Query: {}, \n Error Info: {}"
          .format(query, getattr(e, 'info', e)))  # not every exception carries .info
raise
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ts(self):
""" Returns a time series of a specific class A timeseries consists of a unixtime date, labels, some other fields and the data of the specific instantiated class metric per interval. This is built on a hash table. :return: a list with a time series with the values of the metric """
|
query = self.get_query(True)
res = self.get_metrics_data(query)
# Time to convert it to our grimoire timeseries format
ts = {"date": [], "value": [], "unixtime": []}
agg_id = ElasticQuery.AGGREGATION_ID
if 'buckets' not in res['aggregations'][str(agg_id)]:
raise RuntimeError("Aggregation results have no buckets in time series results.")
for bucket in res['aggregations'][str(agg_id)]['buckets']:
ts['date'].append(bucket['key_as_string'])
if str(agg_id + 1) in bucket:
# We have a subaggregation with the value
# If it is percentiles we get the median
if 'values' in bucket[str(agg_id + 1)]:
val = bucket[str(agg_id + 1)]['values']['50.0']
if val == 'NaN':
# ES returns NaN. Convert to None for matplotlib graph
val = None
ts['value'].append(val)
else:
ts['value'].append(bucket[str(agg_id + 1)]['value'])
else:
ts['value'].append(bucket['doc_count'])
# unixtime comes in ms from ElasticSearch
ts['unixtime'].append(bucket['key'] / 1000)
return ts
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_agg(self):
""" Returns the aggregated value for the metric :return: the value of the metric """
|
""" Returns an aggregated value """
query = self.get_query(False)
res = self.get_metrics_data(query)
# We need to extract the data from the JSON res
# If we have agg data use it
agg_id = str(ElasticQuery.AGGREGATION_ID)
if 'aggregations' in res and 'values' in res['aggregations'][agg_id]:
if self.AGG_TYPE == 'median':
agg = res['aggregations'][agg_id]['values']["50.0"]
if agg == 'NaN':
# ES returns NaN. Convert to None for matplotlib graph
agg = None
else:
raise RuntimeError("Multivalue aggregation result not supported")
elif 'aggregations' in res and 'value' in res['aggregations'][agg_id]:
agg = res['aggregations'][agg_id]['value']
else:
agg = res['hits']['total']
return agg
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_trend(self):
""" Get the trend for the last two metric values using the interval defined in the metric :return: a tuple with the metric value for the last interval and the trend percentage between the last two intervals """
|
""" """
# TODO: We just need the last two periods, not the full ts
ts = self.get_ts()
last = ts['value'][-1]
prev = ts['value'][-2]
trend = last - prev
trend_percentage = None
if last == 0:
if prev > 0:
trend_percentage = -100
else:
trend_percentage = 0
else:
trend_percentage = int((trend / last) * 100)
return (last, trend_percentage)
|
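For example, if the last two intervals hold the values 100 and 120, the method returns `(120, 16)`: the trend is `120 - 100 = 20` and `int(20 / 120 * 100) == 16` under Python 3 division. Note the percentage is taken relative to the last value, not the previous one.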
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _load_preset(self, path):
''' load, validate and store a single preset file'''
try:
with open(path, 'r') as f:
presetBody = json.load(f)
except IOError as e:
raise PresetException("IOError: " + e.strerror)
except ValueError as e:
raise PresetException("JSON decoding error: " + str(e))
except Exception as e:
raise PresetException(str(e))
try:
preset = Preset(presetBody)
except PresetException as e:
e.message = "Bad format: " + e.message
raise
if preset.id in self.presets:
raise PresetException("Duplicate preset id: " + preset.id)
else:
self.presets[preset.id] = preset
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def validate(self, data):
'''
Checks if `data` respects this preset specification
It will check that every required property is present and
for every property type it will make some specific control.
'''
for prop in self.properties:
if prop.id in data:
if prop.type == 'string':
if not isinstance(data[prop.id], basestring):
raise PresetFieldTypeException("property '{}' must be of type string".format(prop.id))
elif prop.type == 'enum':
if not isinstance(data[prop.id], basestring):
raise PresetFieldTypeException("property '{}' must be of type string".format(prop.id))
if data[prop.id] not in prop.values:
raise PresetException("property '{}' can be one of {}".format(prop.id, prop.values))
else:
if prop.required:
raise PresetMissingFieldException("missing required property: '{}'".format(prop.id))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def requestedFormat(request,acceptedFormat):
"""Return the response format requested by client Client could specify requested format using: (options are processed in this order) - `format` field in http request - `Accept` header in http request Example: chooseFormat(request, ['text/html','application/json']) Args: acceptedFormat: list containing all the accepted format Returns: string: the user requested mime-type (if supported) Raises: ValueError: if user request a mime-type not supported """
|
if 'format' in request.args:
fieldFormat = request.args.get('format')
if fieldFormat not in acceptedFormat:
raise ValueError("requested format not supported: "+ fieldFormat)
return fieldFormat
else:
return request.accept_mimetypes.best_match(acceptedFormat)
|
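Typical use inside a Flask view; `app` and the template name are illustrative:

from flask import request, jsonify, render_template

@app.route('/volumes/')
def volumes():
    fmt = requestedFormat(request, ['text/html', 'application/json'])
    if fmt == 'application/json':
        return jsonify(volumes=[])
    return render_template('volumes.html')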
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def routes_collector(gatherer):
"""Decorator utility to collect flask routes in a dictionary. This function together with :func:`add_routes` provides an easy way to split flask routes declaration in multiple modules. :param gatherer: dict in which will be collected routes The decorator provided by this function should be used as the `original flask decorator <http://flask.pocoo.org/docs/latest/api/#flask.Flask.route>`_ example:: routes = [] route = routes_collector(routes) @route('/volumes/', methods=['GET', 'POST']) def volumes():
return 'page body' After you've collected your routes you can use :func:`add_routes` to register them onto the main blueprint/flask_app. """
|
def hatFunc(rule, **options):
def decorator(f):
rule_dict = {'rule':rule, 'view_func':f}
rule_dict.update(options)
        gatherer.append(rule_dict)
        return f  # return the function so it stays usable at its original name
return decorator
return hatFunc
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_routes(fapp, routes, prefix=""):
"""Batch routes registering Register routes to a blueprint/flask_app previously collected with :func:`routes_collector`. :param fapp: bluprint or flask_app to whom attach new routes. :param routes: dict of routes collected by :func:`routes_collector` :param prefix: url prefix under which register all routes """
|
for r in routes:
r['rule'] = prefix + r['rule']
fapp.add_url_rule(**r)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def get_centered_pagination(current, total, visible=5):
''' Return the range of pages to render in a pagination menu.
The current page is always kept in the middle except
for the edge cases.
    Returns a dict
{ prev, first, current, last, next }
:param current: the current page
:param total: total number of pages available
:param visible: number of pages visible
'''
inc = visible // 2  # integer division keeps page numbers integral
first = current - inc
last = current + inc
if (total <= visible):
first = 1
last = total
elif (last > total):
first = total - (visible-1)
last = total
elif (first < 1):
first = 1
last = visible
return dict(prev = current-1 if(current > 1) else None,
first=first,
current = current,
last=last,
next = current+1 if(current < total) else None)
|
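Two worked examples:

get_centered_pagination(current=7, total=20, visible=5)
# -> {'prev': 6, 'first': 5, 'current': 7, 'last': 9, 'next': 8}
get_centered_pagination(current=1, total=3, visible=5)
# -> {'prev': None, 'first': 1, 'current': 1, 'last': 3, 'next': 2}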
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fwhm(x, y, k=10):
# http://stackoverflow.com/questions/10582795/finding-the-full-width-half-maximum-of-a-peak """ Determine the full-width-half-maximum of a peaked set of points, x and y. Assumes that there is only one peak present in the dataset. The function uses a spline interpolation of order k. """
|
class MultiplePeaks(Exception):
pass
class NoPeaksFound(Exception):
pass
half_max = np.amax(y) / 2.0
s = splrep(x, y - half_max, k=k)  # use the requested spline order
roots = sproot(s)
if len(roots) > 2:
raise MultiplePeaks("The dataset appears to have multiple peaks, and "
"thus the FWHM can't be determined.")
elif len(roots) < 2:
raise NoPeaksFound("No proper peaks were found in the data set; likely "
"the dataset is flat (e.g. all zeros).")
else:
return roots[0], roots[1]
|
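A quick check against a Gaussian, whose analytic FWHM is 2*sqrt(2*ln 2)*sigma:

import numpy as np
from scipy.interpolate import splrep, sproot

x = np.linspace(-5, 5, 201)
y = np.exp(-x**2 / 2.0)        # Gaussian with sigma = 1
left, right = fwhm(x, y, k=3)
print(right - left)            # ~2.3548, matching 2*sqrt(2*ln(2))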
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main(arguments):
""" Main function of smatch score calculation """
|
global verbose
global veryVerbose
global iteration_num
global single_score
global pr_flag
global match_triple_dict
# set the iteration number
# total iteration number = restart number + 1
iteration_num = arguments.r + 1
if arguments.ms:
single_score = False
if arguments.v:
verbose = True
if arguments.vv:
veryVerbose = True
if arguments.pr:
pr_flag = True
# significant digits to print out
floatdisplay = "%%.%df" % arguments.significant
for (precision, recall, best_f_score) in score_amr_pairs(arguments.f[0], arguments.f[1],
                                                         justinstance=arguments.justinstance,
                                                         justattribute=arguments.justattribute,
                                                         justrelation=arguments.justrelation):
# print("Sentence", sent_num)
if pr_flag:
print("Precision: " + floatdisplay % precision)
print("Recall: " + floatdisplay % recall)
print("F-score: " + floatdisplay % best_f_score)
arguments.f[0].close()
arguments.f[1].close()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def normalize_volume(volume):
'''convert volume metadata from es to archivant format
This function makes side effect on input volume
output example::
{
'id': 'AU0paPZOMZchuDv1iDv8',
'type': 'volume',
'metadata': {'_language': 'en',
'key1': 'value1',
'key2': 'value2',
'key3': 'value3'},
'attachments': [{'id': 'a910e1kjdo2d192d1dko1p2kd1209d',
'type' : 'attachment',
'url': 'fsdb:///624bffa8a6f90813b7982d0e5b4c1475ebec40e3',
'metadata': {'download_count': 0,
'mime': 'application/json',
'name': 'tmp9fyat_',
                             'notes': 'this file is awesome',
'sha1': '624bffa8a6f90813b7982d0e5b4c1475ebec40e3',
'size': 10}
}]
}
'''
res = dict()
res['type'] = 'volume'
res['id'] = volume['_id']
if '_score' in volume:
res['score'] = volume['_score']
source = volume['_source']
attachments = source['_attachments']
del(source['_attachments'])
del(source['_text_' + source['_language']])
res['metadata'] = source
atts = list()
for attachment in attachments:
atts.append(Archivant.normalize_attachment(attachment))
res['attachments'] = atts
return res
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def normalize_attachment(attachment):
''' Convert attachment metadata from es to archivant format
This function makes side effect on input attachment
'''
res = dict()
res['type'] = 'attachment'
res['id'] = attachment['id']
del(attachment['id'])
res['url'] = attachment['url']
del(attachment['url'])
res['metadata'] = attachment
return res
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def denormalize_volume(volume):
'''convert volume metadata from archivant to es format'''
id = volume.get('id', None)
res = dict()
res.update(volume['metadata'])
denorm_attachments = list()
for a in volume['attachments']:
denorm_attachments.append(Archivant.denormalize_attachment(a))
res['_attachments'] = denorm_attachments
return id, res
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def denormalize_attachment(attachment):
'''convert attachment metadata from archivant to es format'''
res = dict()
ext = ['id', 'url']
for k in ext:
if k in attachment['metadata']:
raise ValueError("metadata section could not contain special key '{}'".format(k))
res[k] = attachment[k]
res.update(attachment['metadata'])
return res
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def iter_all_volumes(self):
'''iterate over all stored volumes'''
for raw_volume in self._db.iterate_all():
v = self.normalize_volume(raw_volume)
    v.pop('score', None)  # 'score' only exists on search results
yield v
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def delete_attachments(self, volumeID, attachmentsID):
''' delete attachments from a volume '''
log.debug("deleting attachments from volume '{}': {}".format(volumeID, attachmentsID))
rawVolume = self._req_raw_volume(volumeID)
insID = [a['id'] for a in rawVolume['_source']['_attachments']]
# check that all requested files are present
for id in attachmentsID:
    if id not in insID:
        raise NotFoundException("could not find attachment '{}' in volume '{}'".format(id, volumeID))
# remove in a single pass so earlier removals cannot shift later indices
rawVolume['_source']['_attachments'] = [a for a in rawVolume['_source']['_attachments']
                                        if a['id'] not in attachmentsID]
self._db.modify_book(volumeID, rawVolume['_source'], version=rawVolume['_version'])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def insert_attachments(self, volumeID, attachments):
''' add attachments to an already existing volume '''
log.debug("adding new attachments to volume '{}': {}".format(volumeID, attachments))
if not attachments:
    return []  # keep the return type consistent with the success path
rawVolume = self._req_raw_volume(volumeID)
attsID = list()
for index, a in enumerate(attachments):
try:
rawAttachment = self._assemble_attachment(a['file'], a)
rawVolume['_source']['_attachments'].append(rawAttachment)
attsID.append(rawAttachment['id'])
except Exception:
log.exception("Error while elaborating attachments array at index: {}".format(index))
raise
self._db.modify_book(volumeID, rawVolume['_source'], version=rawVolume['_version'])
return attsID
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def insert_volume(self, metadata, attachments=[]):
'''Insert a new volume
Returns the ID of the added volume
`metadata` must be a dict containing metadata of the volume::
{
"_language" : "it", # language of the metadata
"key1" : "value1", # attribute
"key2" : "value2",
...
"keyN" : "valueN"
}
The only required key is `_language`
`attachments` must be an array of dict::
{
"file" : "/prova/una/path/a/caso" # path or fp
"name" : "nome_buffo.ext" # name of the file (extension included) [optional if a path was given]
"mime" : "application/json" # mime type of the file [optional]
"notes" : "this file is awesome" # notes that will be attached to this file [optional]
}
'''
log.debug("adding new volume:\n\tdata: {}\n\tfiles: {}".format(metadata, attachments))
requiredFields = ['_language']
for requiredField in requiredFields:
if requiredField not in metadata:
raise KeyError("Required field '{}' is missing".format(requiredField))
volume = deepcopy(metadata)
attsData = []
for index, a in enumerate(attachments):
try:
attData = self._assemble_attachment(a['file'], a)
attsData.append(attData)
except Exception:
log.exception("Error while elaborating attachments array at index: {}".format(index))
raise
volume['_attachments'] = attsData
log.debug('constructed volume for insertion: {}'.format(volume))
addedVolume = self._db.add_book(body=volume)
log.debug("added new volume: '{}'".format(addedVolume['_id']))
return addedVolume['_id']
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _assemble_attachment(self, file, metadata):
''' store file and return a dict containing assembled metadata
param `file` must be a path or a File Object
param `metadata` must be a dict:
{
"name" : "nome_buffo.ext" # name of the file (extension included) [optional if a path was given]
"mime" : "application/json" # mime type of the file [optional]
"notes" : "this file is awesome" # notes about this file [optional]
}
'''
res = dict()
if isinstance(file, basestring) and os.path.isfile(file):
res['name'] = metadata['name'] if 'name' in metadata else os.path.basename(file)
res['size'] = os.path.getsize(file)
res['sha1'] = calc_file_digest(file, algorithm="sha1")
elif hasattr(file, 'read') and hasattr(file, 'seek'):
if 'name' in metadata and metadata['name']:
res['name'] = metadata['name']
elif hasattr(file, 'name'):
        res['name'] = file.name
else:
raise ValueError("Could not assign a name to the file")
old_position = file.tell()
file.seek(0, os.SEEK_END)
res['size'] = file.tell() - old_position
file.seek(old_position, os.SEEK_SET)
res['sha1'] = calc_digest(file, algorithm="sha1")
file.seek(old_position, os.SEEK_SET)
else:
raise ValueError("Unsupported file value type: {}".format(type(file)))
res['id'] = uuid4().hex
res['mime'] = metadata['mime'] if 'mime' in metadata else None
res['notes'] = metadata['notes'] if 'notes' in metadata else ""
res['download_count'] = 0
fsdb_id = self._fsdb.add(file)
res['url'] = "fsdb:///" + fsdb_id
return res
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def update_volume(self, volumeID, metadata):
'''update existing volume metadata
the given metadata will substitute the old one
'''
log.debug('updating volume metadata: {}'.format(volumeID))
rawVolume = self._req_raw_volume(volumeID)
normalized = self.normalize_volume(rawVolume)
normalized['metadata'] = metadata
_, newRawVolume = self.denormalize_volume(normalized)
self._db.modify_book(volumeID, newRawVolume)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def update_attachment(self, volumeID, attachmentID, metadata):
'''update an existing attachment
the given metadata dict will be merged with the old one.
only the following fields could be updated:
[name, mime, notes, download_count]
'''
log.debug('updating metadata of attachment {} from volume {}'.format(attachmentID, volumeID))
modifiable_fields = ['name', 'mime', 'notes', 'download_count']
for k in metadata.keys():
if k not in modifiable_fields:
raise ValueError('Not modifiable field given: {}'.format(k))
if 'name' in metadata and not isinstance(metadata['name'], basestring):
raise ValueError("'name' must be a string")
if 'mime' in metadata and not isinstance(metadata['mime'], basestring):
raise ValueError("'mime' must be a string")
if 'notes' in metadata and not isinstance(metadata['notes'], basestring):
raise ValueError("'notes' must be a string")
if 'download_count' in metadata and not isinstance(metadata['download_count'], Integral):
raise ValueError("'download_count' must be a number")
rawVolume = self._req_raw_volume(volumeID)
for attachment in rawVolume['_source']['_attachments']:
if attachment['id'] == attachmentID:
attachment.update(metadata)
self._db.modify_book(id=volumeID, body=rawVolume['_source'], version=rawVolume['_version'])
return
raise NotFoundException('Could not find attachment with id {} in volume {}'.format(attachmentID, volumeID))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def dangling_files(self):
'''iterate over fsdb files no more attached to any volume'''
for fid in self._fsdb:
if not self._db.file_is_attached('fsdb:///' + fid):
yield fid
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_string(data, position, obj_end, dummy):
"""Decode a BSON string to python unicode string."""
|
length = _UNPACK_INT(data[position:position + 4])[0]
position += 4
if length < 1 or obj_end - position < length:
raise InvalidBSON("invalid string length")
end = position + length - 1
if data[end:end + 1] != b"\x00":
raise InvalidBSON("invalid end of string")
return _utf_8_decode(data[position:end], None, True)[0], end + 1
|
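A worked example, assuming the module-level helpers this function relies on are `_UNPACK_INT = struct.Struct('<i').unpack` and `_utf_8_decode = codecs.utf_8_decode`:

data = b"\x06\x00\x00\x00hello\x00"   # int32 length 6 = 5 bytes + trailing NUL
value, position = _get_string(data, 0, len(data), None)
# value == u'hello', position == 10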