<SYSTEM_TASK:> Generate a schema dict of size `size` using library `lib`. <END_TASK> <USER_TASK:> Description: def generate_dict_schema(size, valid): """ Generate a schema dict of size `size` using library `lib`. In addition, it returns a samples generator :param size: Schema size :type size: int :param valid: Generate valid samples? :type valid: bool :returns: Tuple of (schema, samples generator) """
    schema = {}
    generator_items = []

    # Generate schema
    for i in range(0, size):
        while True:
            key_schema, key_generator = generate_random_schema(valid)
            if key_schema not in schema:
                break
        value_schema, value_generator = generate_random_schema(valid)

        schema[key_schema] = value_schema
        generator_items.append((key_generator, value_generator))

    # Samples
    generator = ({next(k_gen): next(v_gen) for k_gen, v_gen in generator_items} for i in itertools.count())

    # Finish
    return schema, generator
<SYSTEM_TASK:> Calculate q under the null hypothesis of whiteness. <END_TASK> <USER_TASK:> Description: def _calc_q_h0(n, x, h, nt, n_jobs=1, verbose=0, random_state=None): """Calculate q under the null hypothesis of whiteness. """
    rng = check_random_state(random_state)
    par, func = parallel_loop(_calc_q_statistic, n_jobs, verbose)
    q = par(func(rng.permutation(x.T).T, h, nt) for _ in range(n))
    return np.array(q)
<SYSTEM_TASK:> Determine VAR model from autocorrelation matrices by solving the <END_TASK> <USER_TASK:> Description: def from_yw(self, acms): """Determine VAR model from autocorrelation matrices by solving the Yule-Walker equations. Parameters ---------- acms : array, shape (n_lags, n_channels, n_channels) acms[l] contains the autocorrelation matrix at lag l. The highest lag must equal the model order. Returns ------- self : :class:`VAR` The :class:`VAR` object to facilitate method chaining (see usage example). """
    if len(acms) != self.p + 1:
        raise ValueError("Number of autocorrelation matrices ({}) does not"
                         " match model order ({}) + 1.".format(len(acms), self.p))

    n_channels = acms[0].shape[0]

    acm = lambda l: acms[l] if l >= 0 else acms[-l].T

    r = np.concatenate(acms[1:], 0)

    rr = np.array([[acm(m - k) for k in range(self.p)] for m in range(self.p)])
    rr = np.concatenate(np.concatenate(rr, -2), -1)

    c = sp.linalg.solve(rr, r)

    # calculate residual covariance
    r = acm(0)
    for k in range(self.p):
        bs = k * n_channels
        r -= np.dot(c[bs:bs + n_channels, :].T, acm(k + 1))

    self.coef = np.concatenate([c[m::n_channels, :] for m in range(n_channels)]).T
    self.rescov = r
    return self
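A minimal usage sketch of `from_yw` (illustrative only; it assumes the surrounding `VAR` class is importable as `scot.var.VAR` and uses made-up autocorrelation values):

import numpy as np
from scot.var import VAR  # assumed import path for the VAR class defined above

# toy autocorrelation matrices for lags 0..2 of a 2-channel process
acms = np.array([[[1.0, 0.3], [0.3, 1.0]],
                 [[0.4, 0.1], [0.2, 0.5]],
                 [[0.2, 0.0], [0.1, 0.3]]])

model = VAR(2)           # model order must equal the highest lag
model.from_yw(acms)      # solve the Yule-Walker equations
print(model.coef.shape)  # (2, 4): n_channels x (n_channels * order)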
<SYSTEM_TASK:> Predict samples on actual data. <END_TASK> <USER_TASK:> Description: def predict(self, data): """Predict samples on actual data. The result of this function is used for calculating the residuals. Parameters ---------- data : array, shape (trials, channels, samples) or (channels, samples) Epoched or continuous data set. Returns ------- predicted : array, shape `data`.shape Data as predicted by the VAR model. Notes ----- Residuals are obtained by r = x - var.predict(x) """
    data = atleast_3d(data)
    t, m, l = data.shape

    p = int(np.shape(self.coef)[1] / m)

    y = np.zeros(data.shape)
    if t > l - p:  # which takes less loop iterations
        for k in range(1, p + 1):
            bp = self.coef[:, (k - 1)::p]
            for n in range(p, l):
                y[:, :, n] += np.dot(data[:, :, n - k], bp.T)
    else:
        for k in range(1, p + 1):
            bp = self.coef[:, (k - 1)::p]
            for s in range(t):
                y[s, :, p:] += np.dot(bp, data[s, :, (p - k):(l - k)])
    return y
<SYSTEM_TASK:> Test if VAR model is stable. <END_TASK> <USER_TASK:> Description: def is_stable(self): """Test if VAR model is stable. This function tests stability of the VAR model as described in [1]_. Returns ------- out : bool True if the model is stable. References ---------- .. [1] H. Lütkepohl, "New Introduction to Multiple Time Series Analysis", 2005, Springer, Berlin, Germany. """
    m, mp = self.coef.shape
    p = mp // m
    assert(mp == m * p)  # TODO: replace with raise?

    top_block = []
    for i in range(p):
        top_block.append(self.coef[:, i::p])
    top_block = np.hstack(top_block)

    im = np.eye(m)
    eye_block = im
    for i in range(p - 2):
        eye_block = sp.linalg.block_diag(im, eye_block)
    eye_block = np.hstack([eye_block, np.zeros((m * (p - 1), m))])

    tmp = np.vstack([top_block, eye_block])

    return np.all(np.abs(np.linalg.eig(tmp)[0]) < 1)
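A short stability check, again assuming the `VAR` class above is available as `scot.var.VAR` (the coefficients are arbitrary toy values):

import numpy as np
from scot.var import VAR  # assumed import path

var = VAR(2)
# 2 channels, order 2; coefficients arranged as (n_channels, n_channels * order)
var.coef = np.array([[0.3, 0.1, 0.0, 0.0],
                     [0.0, 0.0, 0.2, 0.1]])
print(var.is_stable())   # True: all eigenvalues lie inside the unit circle

var.coef = np.array([[1.0, 0.5, 0.0, 0.0],
                     [0.0, 0.0, 1.0, 0.5]])
print(var.is_stable())   # False: the process is explosive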
<SYSTEM_TASK:> Fetch example dataset. <END_TASK> <USER_TASK:> Description: def fetch(dataset="mi", datadir=datadir): """Fetch example dataset. If the requested dataset is not found in the location specified by `datadir`, the function attempts to download it. Parameters ---------- dataset : str Which dataset to load. Currently only 'mi' is supported. datadir : str Path to the storage location of example datasets. Datasets are downloaded to this location if they cannot be found. If the directory does not exist it is created. Returns ------- data : list of dicts The data set is stored in a list, where each list element corresponds to data from one subject. Each list element is a dictionary with the following keys: "eeg" ... EEG signals "triggers" ... Trigger latencies "labels" ... Class labels "fs" ... Sample rate "locations" ... Channel locations """
    if dataset not in datasets:
        raise ValueError("Example data '{}' not available.".format(dataset))
    else:
        files = datasets[dataset]["files"]
        url = datasets[dataset]["url"]
        md5 = datasets[dataset]["md5"]
    if not isdir(datadir):
        makedirs(datadir)

    data = []

    for n, filename in enumerate(files):
        fullfile = join(datadir, filename)
        if not isfile(fullfile):
            with open(fullfile, "wb") as f:
                response = get(join(url, filename))
                f.write(response.content)
        with open(fullfile, "rb") as f:  # check if MD5 of downloaded file matches original hash
            hash = hashlib.md5(f.read()).hexdigest()
            if hash != md5[n]:
                raise MD5MismatchError("MD5 hash of {} does not match {}.".format(fullfile, md5[n]))
        data.append(convert(dataset, loadmat(fullfile)))
    return data
<SYSTEM_TASK:> Test whether this schema supports Undefined. <END_TASK> <USER_TASK:> Description: def supports_undefined(self): """ Test whether this schema supports Undefined. A Schema that supports `Undefined`, when given `Undefined`, should return some value (other than `Undefined`) without raising errors. This is designed to support a very special case like that: ```python Schema(Default(0)).supports_undefined #-> True ``` This way a validator can declare that it has a default in case no value was provided, and this case happens when: 1. A [`Required`](#required) mapping key was not provided, and it's mapped to `Default()` 2. .. no more supported cases. Yet. :rtype: bool """
    # Test
    try:
        yes = self(const.UNDEFINED) is not const.UNDEFINED
    except (Invalid, SchemaError):
        yes = False

    # Remember (lame @cached_property)
    self.__dict__['supports_undefined'] = yes

    return yes
<SYSTEM_TASK:> Get schema type for the argument <END_TASK> <USER_TASK:> Description: def get_schema_type(cls, schema): """ Get schema type for the argument :param schema: Schema to analyze :return: COMPILED_TYPE constant :rtype: str|None """
    schema_type = type(schema)

    # Marker
    if issubclass(schema_type, markers.Marker):
        return const.COMPILED_TYPE.MARKER
    # Marker Type
    elif issubclass(schema_type, six.class_types) and issubclass(schema, markers.Marker):
        return const.COMPILED_TYPE.MARKER
    # CompiledSchema
    elif isinstance(schema, CompiledSchema):
        return const.COMPILED_TYPE.SCHEMA
    else:
        return primitive_type(schema)
<SYSTEM_TASK:> Get priority for this Schema. <END_TASK> <USER_TASK:> Description: def priority(self): """ Get priority for this Schema. Used to sort mapping keys :rtype: int """
    # Markers have priority set on the class
    if self.compiled_type == const.COMPILED_TYPE.MARKER:
        return self.compiled.priority

    # Other types have static priority
    return const.compiled_type_priorities[self.compiled_type]
<SYSTEM_TASK:> Sort the provided list of schemas according to their priority. <END_TASK> <USER_TASK:> Description: def sort_schemas(cls, schemas_list): """ Sort the provided list of schemas according to their priority. This also supports markers, and markers of a single type are also sorted according to the priority of the wrapped schema. :type schemas_list: list[CompiledSchema] :rtype: list[CompiledSchema] """
    return sorted(schemas_list,
                  key=lambda x: (
                      # Top-level priority:
                      # priority of the schema itself
                      x.priority,
                      # Second-level priority (for markers of the common type)
                      # This ensures that Optional(1) always goes before Optional(int)
                      x.compiled.key_schema.priority if x.compiled_type == const.COMPILED_TYPE.MARKER else 0
                  ), reverse=True)
<SYSTEM_TASK:> Compile a sub-schema <END_TASK> <USER_TASK:> Description: def sub_compile(self, schema, path=None, matcher=False): """ Compile a sub-schema :param schema: Validation schema :type schema: * :param path: Path to this schema, if any :type path: list|None :param matcher: Compile a matcher? :type matcher: bool :rtype: CompiledSchema """
    return type(self)(
        schema,
        self.path + (path or []),
        None,
        None,
        matcher
    )
<SYSTEM_TASK:> Helper for Invalid errors. <END_TASK> <USER_TASK:> Description: def Invalid(self, message, expected): """ Helper for Invalid errors. Typical use: err_type = self.Invalid(_(u'Message'), self.name) raise err_type(<provided-value>) Note: `provided` and `expected` are unicode-typecasted automatically :type message: unicode :type expected: unicode """
    def InvalidPartial(provided, path=None, **info):
        """ Create an Invalid exception

        :type provided: unicode
        :type path: list|None
        :rtype: Invalid
        """
        return Invalid(
            message,
            expected,  # six.text_type(expected),  # -- must be unicode
            provided,  # six.text_type(provided),  # -- must be unicode
            self.path + (path or []),
            self.schema,
            **info
        )
    return InvalidPartial
<SYSTEM_TASK:> Get compiler method for the provided schema <END_TASK> <USER_TASK:> Description: def get_schema_compiler(self, schema): """ Get compiler method for the provided schema :param schema: Schema to analyze :return: Callable compiled :rtype: callable|None """
    # Schema type
    schema_type = self.get_schema_type(schema)
    if schema_type is None:
        return None

    # Compiler
    compilers = {
        const.COMPILED_TYPE.LITERAL: self._compile_literal,
        const.COMPILED_TYPE.TYPE: self._compile_type,
        const.COMPILED_TYPE.SCHEMA: self._compile_schema,
        const.COMPILED_TYPE.ENUM: self._compile_enum,
        const.COMPILED_TYPE.CALLABLE: self._compile_callable,
        const.COMPILED_TYPE.ITERABLE: self._compile_iterable,
        const.COMPILED_TYPE.MAPPING: self._compile_mapping,
        const.COMPILED_TYPE.MARKER: self._compile_marker,
    }

    return compilers[schema_type]
<SYSTEM_TASK:> Compile the current schema into a callable validator <END_TASK> <USER_TASK:> Description: def compile_schema(self, schema): """ Compile the current schema into a callable validator :return: Callable validator :rtype: callable :raises SchemaError: Schema compilation error """
    compiler = self.get_schema_compiler(schema)

    if compiler is None:
        raise SchemaError(_(u'Unsupported schema data type {!r}').format(type(schema).__name__))

    return compiler(schema)
<SYSTEM_TASK:> Compile another schema <END_TASK> <USER_TASK:> Description: def _compile_schema(self, schema): """ Compile another schema """
    assert self.matcher == schema.matcher
    self.name = schema.name
    self.compiled_type = schema.compiled_type
    return schema.compiled
<SYSTEM_TASK:> This function should be called instead of direct spio.loadmat <END_TASK> <USER_TASK:> Description: def loadmat(filename): """This function should be called instead of direct spio.loadmat because it fixes the problem of spio.loadmat not properly recovering Python dictionaries from mat files. It calls _check_keys to convert all entries that are still mat-objects """
    data = sploadmat(filename, struct_as_record=False, squeeze_me=True)
    return _check_keys(data)
<SYSTEM_TASK:> checks if entries in dictionary are mat-objects. If yes <END_TASK> <USER_TASK:> Description: def _check_keys(dictionary): """ Checks if entries in dictionary are mat-objects. If so, _todict is called to change them to nested dictionaries """
    for key in dictionary:
        if isinstance(dictionary[key], matlab.mio5_params.mat_struct):
            dictionary[key] = _todict(dictionary[key])
    return dictionary
<SYSTEM_TASK:> a recursive function which constructs from matobjects nested dictionaries <END_TASK> <USER_TASK:> Description: def _todict(matobj): """ A recursive function which constructs nested dictionaries from mat-objects """
    dictionary = {}
    # noinspection PyProtectedMember
    for strg in matobj._fieldnames:
        elem = matobj.__dict__[strg]
        if isinstance(elem, matlab.mio5_params.mat_struct):
            dictionary[strg] = _todict(elem)
        else:
            dictionary[strg] = elem
    return dictionary
<SYSTEM_TASK:> Source decomposition with ICA. <END_TASK> <USER_TASK:> Description: def plainica(x, reducedim=0.99, backend=None, random_state=None): """ Source decomposition with ICA. Apply ICA to the data x, with optional PCA dimensionality reduction. Parameters ---------- x : array, shape (n_trials, n_channels, n_samples) or (n_channels, n_samples) data set reducedim : {int, float, 'no_pca'}, optional A number less than 1 is interpreted as the fraction of variance that should remain in the data. All components that describe in total less than `1-reducedim` of the variance are removed by the PCA step. An integer number of 1 or greater is interpreted as the number of components to keep after applying the PCA. If set to 'no_pca' the PCA step is skipped. backend : dict-like, optional Specify backend to use. When set to None the backend configured in config.backend is used. Returns ------- result : ResultICA Source decomposition """
    x = atleast_3d(x)
    t, m, l = np.shape(x)

    if backend is None:
        backend = scotbackend

    # pre-transform the data with PCA
    if reducedim == 'no pca':
        c = np.eye(m)
        d = np.eye(m)
        xpca = x
    else:
        c, d, xpca = backend['pca'](x, reducedim)

    # run ICA on the residuals to estimate volume conduction
    mx, ux = backend['ica'](cat_trials(xpca), random_state=random_state)

    # correct (un)mixing matrix estimates
    mx = mx.dot(d)
    ux = c.dot(ux)

    class Result:
        unmixing = ux
        mixing = mx

    return Result
<SYSTEM_TASK:> Calculate mean squared generalization error and its gradient for <END_TASK> <USER_TASK:> Description: def _msge_with_gradient_underdetermined(data, delta, xvschema, skipstep, p): """Calculate mean squared generalization error and its gradient for underdetermined equation system. """
    t, m, l = data.shape
    d = None
    j, k = 0, 0
    nt = np.ceil(t / skipstep)
    for trainset, testset in xvschema(t, skipstep):

        a, b = _construct_var_eqns(atleast_3d(data[trainset, :, :]), p)
        c, d = _construct_var_eqns(atleast_3d(data[testset, :, :]), p)

        e = sp.linalg.inv(np.eye(a.shape[0]) * delta ** 2 + a.dot(a.T))

        cc = c.transpose().dot(c)

        be = b.transpose().dot(e)
        bee = be.dot(e)
        bea = be.dot(a)
        beea = bee.dot(a)
        beacc = bea.dot(cc)
        dc = d.transpose().dot(c)

        j += np.sum(beacc * bea - 2 * bea * dc) + np.sum(d ** 2)
        k += np.sum(beea * dc - beacc * beea) * 4 * delta

    return j / (nt * d.size), k / (nt * d.size)
<SYSTEM_TASK:> Calculate mean squared generalization error and its gradient for <END_TASK> <USER_TASK:> Description: def _msge_with_gradient_overdetermined(data, delta, xvschema, skipstep, p): """Calculate mean squared generalization error and its gradient for overdetermined equation system. """
    t, m, l = data.shape
    d = None
    l, k = 0, 0
    nt = np.ceil(t / skipstep)
    for trainset, testset in xvschema(t, skipstep):

        a, b = _construct_var_eqns(atleast_3d(data[trainset, :, :]), p)
        c, d = _construct_var_eqns(atleast_3d(data[testset, :, :]), p)

        e = sp.linalg.inv(np.eye(a.shape[1]) * delta ** 2 + a.T.dot(a))

        ba = b.transpose().dot(a)
        dc = d.transpose().dot(c)
        bae = ba.dot(e)
        baee = bae.dot(e)
        baecc = bae.dot(c.transpose().dot(c))

        l += np.sum(baecc * bae - 2 * bae * dc) + np.sum(d ** 2)
        k += np.sum(baee * dc - baecc * baee) * 4 * delta

    return l / (nt * d.size), k / (nt * d.size)
<SYSTEM_TASK:> Calculate mean squared generalization error and its gradient, <END_TASK> <USER_TASK:> Description: def _get_msge_with_gradient(data, delta, xvschema, skipstep, p): """Calculate mean squared generalization error and its gradient, automatically selecting the best function. """
    t, m, l = data.shape
    n = (l - p) * t
    underdetermined = n < m * p

    if underdetermined:
        return _msge_with_gradient_underdetermined(data, delta, xvschema, skipstep, p)
    else:
        return _msge_with_gradient_overdetermined(data, delta, xvschema, skipstep, p)
<SYSTEM_TASK:> Determine optimal model order by minimizing the mean squared <END_TASK> <USER_TASK:> Description: def optimize_order(self, data, min_p=1, max_p=None): """Determine optimal model order by minimizing the mean squared generalization error. Parameters ---------- data : array, shape (n_trials, n_channels, n_samples) Epoched data set on which to optimize the model order. At least two trials are required. min_p : int Minimal model order to check. max_p : int Maximum model order to check """
    data = np.asarray(data)
    if data.shape[0] < 2:
        raise ValueError("At least two trials are required.")

    msge, prange = [], []

    par, func = parallel_loop(_get_msge_with_gradient, n_jobs=self.n_jobs, verbose=self.verbose)
    if self.n_jobs is None:
        npar = 1
    elif self.n_jobs < 0:
        npar = 4  # is this a sane default?
    else:
        npar = self.n_jobs

    p = min_p
    while True:
        result = par(func(data, self.delta, self.xvschema, 1, p_)
                     for p_ in range(p, p + npar))
        j, k = zip(*result)
        prange.extend(range(p, p + npar))
        msge.extend(j)
        p += npar

        if max_p is None:
            if len(msge) >= 2 and msge[-1] > msge[-2]:
                break
        else:
            if prange[-1] >= max_p:
                i = prange.index(max_p) + 1
                prange = prange[:i]
                msge = msge[:i]
                break

    self.p = prange[np.argmin(msge)]
    return zip(prange, msge)
<SYSTEM_TASK:> Initialize from euclidean vector <END_TASK> <USER_TASK:> Description: def fromvector(cls, v): """Initialize from euclidean vector"""
    w = v.normalized()
    return cls(w.x, w.y, w.z)
<SYSTEM_TASK:> Distance to other points on the sphere <END_TASK> <USER_TASK:> Description: def distances(self, points): """Distance to other points on the sphere"""
return [math.acos(self._pos3d.dot(p.vector)) for p in points]
<SYSTEM_TASK:> Initialize from iterable <END_TASK> <USER_TASK:> Description: def fromiterable(cls, itr): """Initialize from iterable"""
    x, y, z = itr
    return cls(x, y, z)
<SYSTEM_TASK:> Squared norm of the vector <END_TASK> <USER_TASK:> Description: def norm2(self): """Squared norm of the vector"""
return self.x * self.x + self.y * self.y + self.z * self.z
<SYSTEM_TASK:> rotate l radians around axis u <END_TASK> <USER_TASK:> Description: def rotate(self, l, u): """rotate l radians around axis u"""
    cl = math.cos(l)
    sl = math.sin(l)
    x = (cl + u.x * u.x * (1 - cl)) * self.x + (u.x * u.y * (1 - cl) - u.z * sl) * self.y + (
        u.x * u.z * (1 - cl) + u.y * sl) * self.z
    y = (u.y * u.x * (1 - cl) + u.z * sl) * self.x + (cl + u.y * u.y * (1 - cl)) * self.y + (
        u.y * u.z * (1 - cl) - u.x * sl) * self.z
    z = (u.z * u.x * (1 - cl) - u.y * sl) * self.x + (u.z * u.y * (1 - cl) + u.x * sl) * self.y + (
        cl + u.z * u.z * (1 - cl)) * self.z
    self.x, self.y, self.z = x, y, z
    return self
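A quick sanity check of `rotate` (illustrative; `Vector` stands in for the euclidean vector class these methods belong to, assumed to be constructible as `Vector(x, y, z)`):

import math
v = Vector(1, 0, 0)           # unit vector along x
z = Vector(0, 0, 1)           # rotation axis
v.rotate(math.pi / 2, z)      # rotate 90 degrees around z
print(round(v.x, 6), round(v.y, 6), round(v.z, 6))  # 0.0 1.0 0.0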
<SYSTEM_TASK:> Implementation of the Cuthill-McKee algorithm. <END_TASK> <USER_TASK:> Description: def cuthill_mckee(matrix): """Implementation of the Cuthill-McKee algorithm. Permute a symmetric binary matrix into a band matrix form with a small bandwidth. Parameters ---------- matrix : ndarray, dtype=bool, shape = [n, n] The matrix is internally converted to a symmetric matrix by setting each element [i,j] to True if either [i,j] or [j,i] evaluates to true. Returns ------- order : list of int Permutation indices Examples -------- >>> A = np.array([[0,0,1,1], [0,0,0,0], [1,0,1,0], [1,0,0,0]]) >>> p = cuthill_mckee(A) >>> A array([[0, 0, 1, 1], [0, 0, 0, 0], [1, 0, 1, 0], [1, 0, 0, 0]]) >>> A[p,:][:,p] array([[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 1]]) """
    matrix = np.atleast_2d(matrix)
    n, m = matrix.shape
    assert(n == m)

    # make sure the matrix is really symmetric. This is equivalent to
    # converting a directed adjacency matrix into an undirected adjacency matrix.
    matrix = np.logical_or(matrix, matrix.T)

    degree = np.sum(matrix, 0)
    order = [np.argmin(degree)]

    for i in range(n):
        adj = np.nonzero(matrix[order[i]])[0]
        adj = [a for a in adj if a not in order]
        if not adj:
            idx = [i for i in range(n) if i not in order]
            order.append(idx[np.argmin(degree[idx])])
        else:
            if len(adj) == 1:
                order.append(adj[0])
            else:
                adj = np.asarray(adj)
                i = adj[np.argsort(degree[adj])]
                order.extend(i.tolist())
        if len(order) == n:
            break

    return order
<SYSTEM_TASK:> Calculate connectivity measures. <END_TASK> <USER_TASK:> Description: def connectivity(measure_names, b, c=None, nfft=512): """Calculate connectivity measures. Parameters ---------- measure_names : str or list of str Name(s) of the connectivity measure(s) to calculate. See :class:`Connectivity` for supported measures. b : array, shape (n_channels, n_channels * model_order) VAR model coefficients. See :ref:`var-model-coefficients` for details about the arrangement of coefficients. c : array, shape (n_channels, n_channels), optional Covariance matrix of the driving noise process. Identity matrix is used if set to None (default). nfft : int, optional Number of frequency bins to calculate. Note that these points cover the range between 0 and half the sampling rate. Returns ------- result : array, shape (n_channels, n_channels, `nfft`) An array of shape (m, m, nfft) is returned if measures is a string. If measures is a list of strings, a dictionary is returned, where each key is the name of the measure, and the corresponding values are arrays of shape (m, m, nfft). Notes ----- When using this function, it is more efficient to get several measures at once than calling the function multiple times. Examples -------- >>> c = connectivity(['DTF', 'PDC'], [[0.3, 0.6], [0.0, 0.9]]) """
    con = Connectivity(b, c, nfft)
    try:
        return getattr(con, measure_names)()
    except TypeError:
        return dict((m, getattr(con, m)()) for m in measure_names)
<SYSTEM_TASK:> Partial coherence. <END_TASK> <USER_TASK:> Description: def pCOH(self): """Partial coherence. .. math:: \mathrm{pCOH}_{ij}(f) = \\frac{G_{ij}(f)} {\sqrt{G_{ii}(f) G_{jj}(f)}} References ---------- P. J. Franaszczuk, K. J. Blinowska, M. Kowalczyk. The application of parametric multichannel spectral estimates in the study of electrical brain activity. Biol. Cybernetics 51(4): 239-247, 1985. """
    G = self.G()
    # TODO: can we do that more efficiently?
    return G / np.sqrt(np.einsum('ii..., jj... ->ij...', G, G))
<SYSTEM_TASK:> Partial directed coherence. <END_TASK> <USER_TASK:> Description: def PDC(self): """Partial directed coherence. .. math:: \mathrm{PDC}_{ij}(f) = \\frac{A_{ij}(f)} {\sqrt{A_{:j}'(f) A_{:j}(f)}} References ---------- L. A. Baccalá, K. Sameshima. Partial directed coherence: a new concept in neural structure determination. Biol. Cybernetics 84(6): 463-474, 2001. """
    A = self.A()
    return np.abs(A / np.sqrt(np.sum(A.conj() * A, axis=0, keepdims=True)))
<SYSTEM_TASK:> Full frequency partial directed coherence. <END_TASK> <USER_TASK:> Description: def ffPDC(self): """Full frequency partial directed coherence. .. math:: \mathrm{ffPDC}_{ij}(f) = \\frac{A_{ij}(f)}{\sqrt{\sum_f A_{:j}'(f) A_{:j}(f)}} """
    A = self.A()
    return np.abs(A * self.nfft / np.sqrt(np.sum(A.conj() * A, axis=(0, 2), keepdims=True)))
<SYSTEM_TASK:> Partial directed coherence factor. <END_TASK> <USER_TASK:> Description: def PDCF(self): """Partial directed coherence factor. .. math:: \mathrm{PDCF}_{ij}(f) = \\frac{A_{ij}(f)}{\sqrt{A_{:j}'(f) \mathbf{C}^{-1} A_{:j}(f)}} References ---------- L. A. Baccalá, K. Sameshima. Partial directed coherence: a new concept in neural structure determination. Biol. Cybernetics 84(6): 463-474, 2001. """
    A = self.A()
    # TODO: can we do that more efficiently?
    return np.abs(A / np.sqrt(np.einsum('aj..., ab..., bj... ->j...', A.conj(), self.Cinv(), A)))
<SYSTEM_TASK:> Generalized partial directed coherence. <END_TASK> <USER_TASK:> Description: def GPDC(self): """Generalized partial directed coherence. .. math:: \mathrm{GPDC}_{ij}(f) = \\frac{|A_{ij}(f)|} {\sigma_i \sqrt{A_{:j}'(f) \mathrm{diag}(\mathbf{C})^{-1} A_{:j}(f)}} References ---------- L. Faes, S. Erla, G. Nollo. Measuring connectivity in linear multivariate processes: definitions, interpretation, and practical analysis. Comput. Math. Meth. Med. 2012: 140513, 2012. """
    A = self.A()
    tmp = A / np.sqrt(np.einsum('aj..., a..., aj..., ii... ->ij...',
                                A.conj(), 1 / np.diag(self.c), A, self.c))
    return np.abs(tmp)
<SYSTEM_TASK:> Directed transfer function. <END_TASK> <USER_TASK:> Description: def DTF(self): """Directed transfer function. .. math:: \mathrm{DTF}_{ij}(f) = \\frac{H_{ij}(f)} {\sqrt{H_{i:}(f) H_{i:}'(f)}} References ---------- M. J. Kaminski, K. J. Blinowska. A new method of the description of the information flow in the brain structures. Biol. Cybernetics 65(3): 203-210, 1991. """
    H = self.H()
    return np.abs(H / np.sqrt(np.sum(H * H.conj(), axis=1, keepdims=True)))
<SYSTEM_TASK:> Full frequency directed transfer function. <END_TASK> <USER_TASK:> Description: def ffDTF(self): """Full frequency directed transfer function. .. math:: \mathrm{ffDTF}_{ij}(f) = \\frac{H_{ij}(f)}{\sqrt{\sum_f H_{i:}(f) H_{i:}'(f)}} References ---------- A. Korzeniewska, M. Mańczak, M. Kaminski, K. J. Blinowska, S. Kasicki. Determination of information flow direction among brain structures by a modified directed transfer function (dDTF) method. J. Neurosci. Meth. 125(1-2): 195-207, 2003. """
    H = self.H()
    return np.abs(H * self.nfft / np.sqrt(np.sum(H * H.conj(), axis=(1, 2), keepdims=True)))
<SYSTEM_TASK:> Generalized directed transfer function. <END_TASK> <USER_TASK:> Description: def GDTF(self): """Generalized directed transfer function. .. math:: \mathrm{GPDC}_{ij}(f) = \\frac{\sigma_j |H_{ij}(f)|} {\sqrt{H_{i:}(f) \mathrm{diag}(\mathbf{C}) H_{i:}'(f)}} References ---------- L. Faes, S. Erla, G. Nollo. Measuring connectivity in linear multivariate processes: definitions, interpretation, and practical analysis. Comput. Math. Meth. Med. 2012: 140513, 2012. """
    H = self.H()
    tmp = H / np.sqrt(np.einsum('ia..., aa..., ia..., j... ->ij...',
                                H.conj(), self.c, H, 1 / self.c.diagonal()))
    return np.abs(tmp)
<SYSTEM_TASK:> Enrich this error with additional information. <END_TASK> <USER_TASK:> Description: def enrich(self, expected=None, provided=None, path=None, validator=None): """ Enrich this error with additional information. This works with both Invalid and MultipleInvalid (thanks to `Invalid` being iterable): in the latter case, the defaults are applied to all collected errors. The specified arguments are only set on `Invalid` errors which do not have any value on the property. One exclusion is `path`: if provided, it is prepended to `Invalid.path`. This feature is especially useful when validating the whole input with multiple different schemas: ```python from good import Schema, Invalid schema = Schema(int) input = { 'user': { 'age': 10, } } try: schema(input['user']['age']) except Invalid as e: e.enrich(path=['user', 'age']) # Make the path reflect the reality raise # re-raise the error with updated fields ``` This is used when validating a value within a container. :param expected: Invalid.expected default :type expected: unicode|None :param provided: Invalid.provided default :type provided: unicode|None :param path: Prefix to prepend to Invalid.path :type path: list|None :param validator: Invalid.validator default :rtype: Invalid|MultipleInvalid """
    for e in self:
        # defaults on fields
        if e.expected is None and expected is not None:
            e.expected = expected
        if e.provided is None and provided is not None:
            e.provided = provided
        if e.validator is None and validator is not None:
            e.validator = validator

        # path prefix
        e.path = (path or []) + e.path
    return self
<SYSTEM_TASK:> Unwind `MultipleErrors` to have a plain list of `Invalid` <END_TASK> <USER_TASK:> Description: def flatten(cls, errors): """ Unwind `MultipleErrors` to have a plain list of `Invalid` :type errors: list[Invalid|MultipleInvalid] :rtype: list[Invalid] """
    ers = []
    for e in errors:
        if isinstance(e, MultipleInvalid):
            ers.extend(cls.flatten(e.errors))
        else:
            ers.append(e)
    return ers
<SYSTEM_TASK:> Warp EEG electrode locations to spherical layout. <END_TASK> <USER_TASK:> Description: def warp_locations(locations, y_center=None, return_ellipsoid=False, verbose=False): """ Warp EEG electrode locations to spherical layout. EEG Electrodes are warped to a spherical layout in three steps: 1. An ellipsoid is least-squares-fitted to the electrode locations. 2. Electrodes are displaced to the nearest point on the ellipsoid's surface. 3. The ellipsoid is transformed to a sphere, causing the new locations to lie exactly on a spherical surface with unit radius. This procedure intends to minimize electrode displacement in the original coordinate space. Simply projecting electrodes on a sphere (e.g. by normalizing the x/y/z coordinates) typically gives much larger displacements. Parameters ---------- locations : array-like, shape = [n_electrodes, 3] Each row of `locations` corresponds to the location of an EEG electrode in cartesian x/y/z coordinates. y_center : float, optional Fix the y-coordinate of the ellipsoid's center to this value (optional). This is useful to align the ellipsoid with the central electrodes. return_ellipsoid : bool, optional If `True`, center and radii of the ellipsoid are returned. Returns ------- newlocs : array-like, shape = [n_electrodes, 3] Electrode locations on unit sphere. c : array-like, shape = [3], (only returned if `return_ellipsoid` evaluates to `True`) Center of the ellipsoid in the original location's coordinate space. r : array-like, shape = [3], (only returned if `return_ellipsoid` evaluates to `True`) Radii (x, y, z) of the ellipsoid in the original location's coordinate space. """
    locations = np.asarray(locations)

    if y_center is None:
        c, r = _fit_ellipsoid_full(locations)
    else:
        c, r = _fit_ellipsoid_partial(locations, y_center)

    elliptic_locations = _project_on_ellipsoid(c, r, locations)

    if verbose:
        print('Head ellipsoid center:', c)
        print('Head ellipsoid radii:', r)
        distance = np.sqrt(np.sum((locations - elliptic_locations)**2, axis=1))
        print('Minimum electrode displacement:', np.min(distance))
        print('Average electrode displacement:', np.mean(distance))
        print('Maximum electrode displacement:', np.max(distance))

    spherical_locations = (elliptic_locations - c) / r

    if return_ellipsoid:
        return spherical_locations, c, r

    return spherical_locations
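An illustrative call to `warp_locations` with hypothetical coordinates; the points are placed near the upper half of an ellipsoid to mimic digitized electrode positions, and all numbers are assumptions:

import numpy as np
rng = np.random.RandomState(0)

theta = rng.uniform(0, np.pi / 2, 32)   # inclination, upper hemisphere
phi = rng.uniform(0, 2 * np.pi, 32)     # azimuth
radii = np.array([7.5, 9.0, 6.0])       # assumed head ellipsoid radii in cm
locs = np.column_stack([radii[0] * np.sin(theta) * np.cos(phi),
                        radii[1] * np.sin(theta) * np.sin(phi),
                        radii[2] * np.cos(theta)])
locs += rng.randn(32, 3) * 0.1          # small digitization noise

warped = warp_locations(locs)
print(np.linalg.norm(warped, axis=1).round(3))  # all values close to 1 (unit sphere)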
<SYSTEM_TASK:> displace locations to the nearest point on ellipsoid surface <END_TASK> <USER_TASK:> Description: def _project_on_ellipsoid(c, r, locations): """displace locations to the nearest point on ellipsoid surface"""
    p0 = locations - c  # original locations
    l2 = 1 / np.sum(p0**2 / r**2, axis=1, keepdims=True)
    p = p0 * np.sqrt(l2)  # initial approximation (projection of points towards center of ellipsoid)

    fun = lambda x: np.sum((x.reshape(p0.shape) - p0)**2)  # minimize distance between new and old points
    con = lambda x: np.sum(x.reshape(p0.shape)**2 / r**2, axis=1) - 1  # new points constrained to surface of ellipsoid

    res = sp.optimize.minimize(fun, p, constraints={'type': 'eq', 'fun': con}, method='SLSQP')

    return res['x'].reshape(p0.shape) + c
<SYSTEM_TASK:> Cut continuous signal into segments. <END_TASK> <USER_TASK:> Description: def cut_segments(x2d, tr, start, stop): """Cut continuous signal into segments. Parameters ---------- x2d : array, shape (m, n) Input data with m signals and n samples. tr : list of int Trigger positions. start : int Window start (offset relative to trigger). stop : int Window end (offset relative to trigger). Returns ------- x3d : array, shape (len(tr), m, stop-start) Segments cut from data. Individual segments are stacked along the first dimension. See also -------- cat_trials : Concatenate segments. Examples -------- >>> data = np.random.randn(5, 1000) # 5 channels, 1000 samples >>> tr = [750, 500, 250] # three segments >>> x3d = cut_segments(data, tr, 50, 100) # each segment is 50 samples >>> x3d.shape (3, 5, 50) """
    if start != int(start):
        raise ValueError("start index must be an integer")
    if stop != int(stop):
        raise ValueError("stop index must be an integer")

    x2d = np.atleast_2d(x2d)
    tr = np.asarray(tr, dtype=int).ravel()
    win = np.arange(start, stop, dtype=int)
    return np.concatenate([x2d[np.newaxis, :, t + win] for t in tr])
<SYSTEM_TASK:> Concatenate trials along time axis. <END_TASK> <USER_TASK:> Description: def cat_trials(x3d): """Concatenate trials along time axis. Parameters ---------- x3d : array, shape (t, m, n) Segmented input data with t trials, m signals, and n samples. Returns ------- x2d : array, shape (m, t * n) Trials are concatenated along the second axis. See also -------- cut_segments : Cut segments from continuous data. Examples -------- >>> x = np.random.randn(6, 4, 150) >>> y = cat_trials(x) >>> y.shape (4, 900) """
    x3d = atleast_3d(x3d)
    t = x3d.shape[0]
    return np.concatenate(np.split(x3d, t, 0), axis=2).squeeze(0)
<SYSTEM_TASK:> Segment-wise dot product. <END_TASK> <USER_TASK:> Description: def dot_special(x2d, x3d): """Segment-wise dot product. This function calculates the dot product of x2d with each trial of x3d. Parameters ---------- x2d : array, shape (p, m) Input argument. x3d : array, shape (t, m, n) Segmented input data with t trials, m signals, and n samples. The dot product with x2d is calculated for each trial. Returns ------- out : array, shape (t, p, n) Dot product of x2d with each trial of x3d. Examples -------- >>> x = np.random.randn(6, 40, 150) >>> a = np.ones((7, 40)) >>> y = dot_special(a, x) >>> y.shape (6, 7, 150) """
    x3d = atleast_3d(x3d)
    x2d = np.atleast_2d(x2d)
    return np.concatenate([x2d.dot(x3d[i, ...])[np.newaxis, ...]
                           for i in range(x3d.shape[0])])
<SYSTEM_TASK:> Phase randomization. <END_TASK> <USER_TASK:> Description: def randomize_phase(data, random_state=None): """Phase randomization. This function randomizes the spectral phase of the input data along the last dimension. Parameters ---------- data : array Input array. Returns ------- out : array Array of same shape as data. Notes ----- The algorithm randomizes the phase component of the input's complex Fourier transform. Examples -------- .. plot:: :include-source: from pylab import * from scot.datatools import randomize_phase np.random.seed(1234) s = np.sin(np.linspace(0,10*np.pi,1000)) x = np.vstack([s, np.sign(s)]) y = randomize_phase(x) subplot(2,1,1) title('Phase randomization of sine wave and rectangular function') plot(x.T + [1.5, -1.5]), axis([0,1000,-3,3]) subplot(2,1,2) plot(y.T + [1.5, -1.5]), axis([0,1000,-3,3]) plt.show() """
    rng = check_random_state(random_state)
    data = np.asarray(data)
    data_freq = np.fft.rfft(data)
    data_freq = np.abs(data_freq) * np.exp(1j * rng.random_sample(data_freq.shape) * 2 * np.pi)
    return np.fft.irfft(data_freq, data.shape[-1])
<SYSTEM_TASK:> Compute autocovariance matrix at lag l. <END_TASK> <USER_TASK:> Description: def acm(x, l): """Compute autocovariance matrix at lag l. This function calculates the autocovariance matrix of `x` at lag `l`. Parameters ---------- x : array, shape (n_trials, n_channels, n_samples) Signal data (2D or 3D for multiple trials) l : int Lag Returns ------- c : ndarray, shape = [n_channels, n_channels] Autocovariance matrix of `x` at lag `l`. """
    x = atleast_3d(x)

    if l > x.shape[2] - 1:
        raise AttributeError("lag exceeds data length")

    ## subtract mean from each trial
    #for t in range(x.shape[2]):
    #    x[:, :, t] -= np.mean(x[:, :, t], axis=0)

    if l == 0:
        a, b = x, x
    else:
        a = x[:, :, l:]
        b = x[:, :, 0:-l]

    c = np.zeros((x.shape[1], x.shape[1]))
    for t in range(x.shape[0]):
        c += a[t, :, :].dot(b[t, :, :].T) / a.shape[2]
    c /= x.shape[0]

    return c.T
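A small example of `acm` on white noise (illustrative only; the data are random):

import numpy as np
x = np.random.randn(5, 3, 1000)  # 5 trials, 3 channels, 1000 samples

c0 = acm(x, 0)   # lag-0 autocovariance; roughly the identity for white noise
c1 = acm(x, 1)   # lag-1 autocovariance; close to zero for white noise
print(c0.shape, c1.shape)  # (3, 3) (3, 3)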
<SYSTEM_TASK:> Calculate jackknife estimates of connectivity. <END_TASK> <USER_TASK:> Description: def jackknife_connectivity(measures, data, var, nfft=512, leaveout=1, n_jobs=1, verbose=0): """Calculate jackknife estimates of connectivity. For each jackknife estimate a block of trials is left out. This is repeated until each trial was left out exactly once. The number of estimates depends on the number of trials and the value of `leaveout`. It is calculated by repeats = `n_trials` // `leaveout`. .. note:: Parameter `var` will be modified by the function. Treat as undefined after the function returns. Parameters ---------- measures : str or list of str Name(s) of the connectivity measure(s) to calculate. See :class:`Connectivity` for supported measures. data : array, shape (trials, channels, samples) Time series data (multiple trials). var : VARBase-like object Instance of a VAR model. nfft : int, optional Number of frequency bins to calculate. Note that these points cover the range between 0 and half the sampling rate. leaveout : int, optional Number of trials to leave out in each estimate. n_jobs : int | None, optional Number of jobs to run in parallel. If set to None, joblib is not used at all. See `joblib.Parallel` for details. verbose : int, optional Verbosity level passed to joblib. Returns ------- result : array, shape (`repeats`, n_channels, n_channels, nfft) Values of the connectivity measure for each surrogate. If `measure_names` is a list of strings a dictionary is returned, where each key is the name of the measure, and the corresponding values are arrays of shape (`repeats`, n_channels, n_channels, nfft). """
    data = atleast_3d(data)
    t, m, n = data.shape

    assert(t > 1)

    if leaveout < 1:
        leaveout = int(leaveout * t)

    num_blocks = t // leaveout

    mask = lambda block: [i for i in range(t) if i < block*leaveout or i >= (block + 1) * leaveout]

    par, func = parallel_loop(_calc_jackknife, n_jobs=n_jobs, verbose=verbose)
    output = par(func(data[mask(b), :, :], var, measures, nfft)
                 for b in range(num_blocks))
    return convert_output_(output, measures)
<SYSTEM_TASK:> Calculate bootstrap estimates of connectivity. <END_TASK> <USER_TASK:> Description: def bootstrap_connectivity(measures, data, var, nfft=512, repeats=100, num_samples=None, n_jobs=1, verbose=0, random_state=None): """Calculate bootstrap estimates of connectivity. To obtain a bootstrap estimate, trials are sampled randomly with replacement from the data set. .. note:: Parameter `var` will be modified by the function. Treat as undefined after the function returns. Parameters ---------- measures : str or list of str Name(s) of the connectivity measure(s) to calculate. See :class:`Connectivity` for supported measures. data : array, shape (trials, channels, samples) Time series data (multiple trials). var : VARBase-like object Instance of a VAR model. nfft : int, optional Number of frequency bins to calculate. Note that these points cover the range between 0 and half the sampling rate. repeats : int, optional Number of bootstrap estimates to take. num_samples : int, optional Number of samples to take for each bootstrap estimate. Defaults to the same number of trials as present in the data. n_jobs : int | None, optional Number of jobs to run in parallel. If set to None, joblib is not used at all. See `joblib.Parallel` for details. verbose : int, optional Verbosity level passed to joblib. Returns ------- measure : array, shape (`repeats`, n_channels, n_channels, nfft) Values of the connectivity measure for each bootstrap estimate. If `measures` is a list of strings a dictionary is returned, where each key is the name of the measure, and the corresponding values are arrays of shape (`repeats`, n_channels, n_channels, nfft). """
    rng = check_random_state(random_state)
    data = atleast_3d(data)
    n, m, t = data.shape

    assert(t > 1)

    if num_samples is None:
        num_samples = t

    mask = lambda r: rng.random_integers(0, data.shape[0] - 1, num_samples)

    par, func = parallel_loop(_calc_bootstrap, n_jobs=n_jobs, verbose=verbose)
    output = par(func(data[mask(r), :, :], var, measures, nfft)
                 for r in range(repeats))
    return convert_output_(output, measures)
<SYSTEM_TASK:> Calculate significance by controlling for the false discovery rate. <END_TASK> <USER_TASK:> Description: def significance_fdr(p, alpha): """Calculate significance by controlling for the false discovery rate. This function determines which of the p-values in `p` can be considered significant. Correction for multiple comparisons is performed by controlling the false discovery rate (FDR). The FDR is the maximum fraction of p-values that are wrongly considered significant [1]_. Parameters ---------- p : array, shape (channels, channels, nfft) p-values. alpha : float Maximum false discovery rate. Returns ------- s : array, dtype=bool, shape (channels, channels, nfft) Significance of each p-value. References ---------- .. [1] Y. Benjamini, Y. Hochberg. Controlling the false discovery rate: a practical and powerful approach to multiple testing. J. Royal Stat. Soc. Series B 57(1): 289-300, 1995. """
    i = np.argsort(p, axis=None)
    m = i.size - np.sum(np.isnan(p))

    j = np.empty(p.shape, int)
    j.flat[i] = np.arange(1, i.size + 1)

    mask = p <= alpha * j / m

    if np.sum(mask) == 0:
        return mask

    # find largest k so that p_k <= alpha*k/m
    k = np.max(j[mask])

    # reject all H_i for i = 0...k
    s = j <= k

    return s
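A toy example of `significance_fdr` with hypothetical p-values:

import numpy as np
# 2 x 2 connectivity, 4 frequency bins
p = np.array([[[0.001, 0.020, 0.800, 0.040],
               [0.030, 0.500, 0.010, 0.900]],
              [[0.200, 0.002, 0.600, 0.700],
               [0.050, 0.300, 0.008, 0.100]]])

s = significance_fdr(p, alpha=0.05)
print(s.sum(), 'of', p.size, 'p-values remain significant at FDR = 0.05')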
<SYSTEM_TASK:> Register a human-friendly name for the given type. This will be used in Invalid errors <END_TASK> <USER_TASK:> Description: def register_type_name(t, name): """ Register a human-friendly name for the given type. This will be used in Invalid errors :param t: The type to register :type t: type :param name: Name for the type :type name: unicode """
    assert isinstance(t, type)
    assert isinstance(name, unicode)
    __type_names[t] = name
<SYSTEM_TASK:> Get a human-friendly name for the given type. <END_TASK> <USER_TASK:> Description: def get_type_name(t): """ Get a human-friendly name for the given type. :type t: type|None :rtype: unicode """
    # Lookup in the mapping
    try:
        return __type_names[t]
    except KeyError:
        # Specific types
        if issubclass(t, six.integer_types):
            return _(u'Integer number')

        # Get name from the Type itself
        return six.text_type(t.__name__).capitalize()
<SYSTEM_TASK:> Get a human-friendly name for the given callable. <END_TASK> <USER_TASK:> Description: def get_callable_name(c): """ Get a human-friendly name for the given callable. :param c: The callable to get the name for :type c: callable :rtype: unicode """
    if hasattr(c, 'name'):
        return six.text_type(c.name)
    elif hasattr(c, '__name__'):
        return six.text_type(c.__name__) + u'()'
    else:
        return six.text_type(c)
<SYSTEM_TASK:> Get a human-friendly name for the given primitive. <END_TASK> <USER_TASK:> Description: def get_primitive_name(schema): """ Get a human-friendly name for the given primitive. :param schema: Schema :type schema: * :rtype: unicode """
    try:
        return {
            const.COMPILED_TYPE.LITERAL: six.text_type,
            const.COMPILED_TYPE.TYPE: get_type_name,
            const.COMPILED_TYPE.ENUM: get_type_name,
            const.COMPILED_TYPE.CALLABLE: get_callable_name,
            const.COMPILED_TYPE.ITERABLE: lambda x: _(u'{type}[{content}]').format(
                type=get_type_name(list), content=_(u'...') if x else _(u'-')),
            const.COMPILED_TYPE.MAPPING: lambda x: _(u'{type}[{content}]').format(
                type=get_type_name(dict), content=_(u'...') if x else _(u'-')),
        }[primitive_type(schema)](schema)
    except KeyError:
        return six.text_type(repr(schema))
<SYSTEM_TASK:> Get schema type for the primitive argument. <END_TASK> <USER_TASK:> Description: def primitive_type(schema): """ Get schema type for the primitive argument. Note: it does treats markers & schemas as callables! :param schema: Value of a primitive type :type schema: * :return: const.COMPILED_TYPE.* :rtype: str|None """
    schema_type = type(schema)

    # Literal
    if schema_type in const.literal_types:
        return const.COMPILED_TYPE.LITERAL
    # Enum
    elif Enum is not None and isinstance(schema, (EnumMeta, Enum)):
        return const.COMPILED_TYPE.ENUM
    # Type
    elif issubclass(schema_type, six.class_types):
        return const.COMPILED_TYPE.TYPE
    # Mapping
    elif isinstance(schema, collections.Mapping):
        return const.COMPILED_TYPE.MAPPING
    # Iterable
    elif isinstance(schema, collections.Iterable):
        return const.COMPILED_TYPE.ITERABLE
    # Callable
    elif callable(schema):
        return const.COMPILED_TYPE.CALLABLE
    # Not detected
    else:
        return None
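A few illustrative calls to `primitive_type` (the constant values come from the library's `const` module, which is assumed here; the function itself is the one defined above):

print(primitive_type(1))             # const.COMPILED_TYPE.LITERAL
print(primitive_type(int))           # const.COMPILED_TYPE.TYPE
print(primitive_type([1, int]))      # const.COMPILED_TYPE.ITERABLE
print(primitive_type({'key': int}))  # const.COMPILED_TYPE.MAPPING
print(primitive_type(len))           # const.COMPILED_TYPE.CALLABLE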
<SYSTEM_TASK:> Join the given iterable with ',' <END_TASK> <USER_TASK:> Description: def commajoin_as_strings(iterable): """ Join the given iterable with ',' """
return _(u',').join((six.text_type(i) for i in iterable))
<SYSTEM_TASK:> Prepare multiple topo maps for cached plotting. <END_TASK> <USER_TASK:> Description: def prepare_topoplots(topo, values): """Prepare multiple topo maps for cached plotting. .. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_values`. Parameters ---------- topo : :class:`~eegtopo.topoplot.Topoplot` Scalp maps are created with this class values : array, shape = [n_topos, n_channels] Channel values for each topo plot Returns ------- topomaps : list of array The map for each topo plot """
    values = np.atleast_2d(values)

    topomaps = []
    for i in range(values.shape[0]):
        topo.set_values(values[i, :])
        topo.create_map()
        topomaps.append(topo.get_map())

    return topomaps
<SYSTEM_TASK:> Draw a topoplot in given axis. <END_TASK> <USER_TASK:> Description: def plot_topo(axis, topo, topomap, crange=None, offset=(0,0), plot_locations=True, plot_head=True): """Draw a topoplot in given axis. .. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_map`. Parameters ---------- axis : axis Axis to draw into. topo : :class:`~eegtopo.topoplot.Topoplot` This object draws the topo plot topomap : array, shape = [w_pixels, h_pixels] Scalp-projected data crange : [int, int], optional Range of values covered by the colormap. If set to None, [-max(abs(topomap)), max(abs(topomap))] is substituted. offset : [float, float], optional Shift the topo plot by [x,y] in axis units. plot_locations : bool, optional Plot electrode locations. plot_head : bool, optional Plot head cartoon. Returns ------- h : image Image object the map was plotted into """
    topo.set_map(topomap)
    h = topo.plot_map(axis, crange=crange, offset=offset)
    if plot_locations:
        topo.plot_locations(axis, offset=offset)
    if plot_head:
        topo.plot_head(axis, offset=offset)
    return h
<SYSTEM_TASK:> Plot all scalp projections of mixing- and unmixing-maps. <END_TASK> <USER_TASK:> Description: def plot_sources(topo, mixmaps, unmixmaps, global_scale=None, fig=None): """Plot all scalp projections of mixing- and unmixing-maps. .. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_map`. Parameters ---------- topo : :class:`~eegtopo.topoplot.Topoplot` This object draws the topo plot mixmaps : array, shape = [w_pixels, h_pixels] Scalp-projected mixing matrix unmixmaps : array, shape = [w_pixels, h_pixels] Scalp-projected unmixing matrix global_scale : float, optional Set common color scale as given percentile of all map values to use as the maximum. `None` scales each plot individually (default). fig : Figure object, optional Figure to plot into. If set to `None`, a new figure is created. Returns ------- fig : Figure object The figure into which was plotted. """
    urange, mrange = None, None

    m = len(mixmaps)

    if global_scale:
        tmp = np.asarray(unmixmaps)
        tmp = tmp[np.logical_not(np.isnan(tmp))]
        umax = np.percentile(np.abs(tmp), global_scale)
        umin = -umax
        urange = [umin, umax]

        tmp = np.asarray(mixmaps)
        tmp = tmp[np.logical_not(np.isnan(tmp))]
        mmax = np.percentile(np.abs(tmp), global_scale)
        mmin = -mmax
        mrange = [mmin, mmax]

    y = np.floor(np.sqrt(m * 3 / 4))
    x = np.ceil(m / y)

    if fig is None:
        fig = new_figure()

    axes = []
    for i in range(m):
        axes.append(fig.add_subplot(2 * y, x, i + 1))
        plot_topo(axes[-1], topo, unmixmaps[i], crange=urange)
        axes[-1].set_title(str(i))

        axes.append(fig.add_subplot(2 * y, x, m + i + 1))
        plot_topo(axes[-1], topo, mixmaps[i], crange=mrange)
        axes[-1].set_title(str(i))

    for a in axes:
        a.set_yticks([])
        a.set_xticks([])
        a.set_frame_on(False)

    axes[0].set_ylabel('Unmixing weights')
    axes[1].set_ylabel('Scalp projections')

    return fig
<SYSTEM_TASK:> Place topo plots in a figure suitable for connectivity visualization. <END_TASK> <USER_TASK:> Description: def plot_connectivity_topos(layout='diagonal', topo=None, topomaps=None, fig=None): """Place topo plots in a figure suitable for connectivity visualization. .. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_map`. Parameters ---------- layout : str 'diagonal' -> place topo plots on diagonal. otherwise -> place topo plots in left column and top row. topo : :class:`~eegtopo.topoplot.Topoplot` This object draws the topo plot topomaps : array, shape = [w_pixels, h_pixels] Scalp-projected map fig : Figure object, optional Figure to plot into. If set to `None`, a new figure is created. Returns ------- fig : Figure object The figure into which was plotted. """
    m = len(topomaps)

    if fig is None:
        fig = new_figure()

    if layout == 'diagonal':
        for i in range(m):
            ax = fig.add_subplot(m, m, i * (1 + m) + 1)
            plot_topo(ax, topo, topomaps[i])
            ax.set_yticks([])
            ax.set_xticks([])
            ax.set_frame_on(False)
    else:
        for i in range(m):
            for j in [i + 2, (i + 1) * (m + 1) + 1]:
                ax = fig.add_subplot(m + 1, m + 1, j)
                plot_topo(ax, topo, topomaps[i])
                ax.set_yticks([])
                ax.set_xticks([])
                ax.set_frame_on(False)

    return fig
<SYSTEM_TASK:> Plot significance. <END_TASK> <USER_TASK:> Description: def plot_connectivity_significance(s, fs=2, freq_range=(-np.inf, np.inf), diagonal=0, border=False, fig=None): """Plot significance. Significance is drawn as a background image where dark vertical stripes indicate frequencies where `s` evaluates to True. Parameters ---------- s : array, shape (n_channels, n_channels, n_fft), dtype bool Significance fs : float Sampling frequency freq_range : (float, float) Frequency range to plot diagonal : {-1, 0, 1} If diagonal == -1 nothing is plotted on the diagonal (s[i,i,:] are not plotted), if diagonal == 0, s is plotted on the diagonal too (all s[i,i,:] are plotted), if diagonal == 1, s is plotted on the diagonal only (only s[i,i,:] are plotted) border : bool If border == True the leftmost column and the topmost row are left blank fig : Figure object, optional Figure to plot into. If set to `None`, a new figure is created. Returns ------- fig : Figure object The figure into which was plotted. """
    a = np.atleast_3d(s)
    [_, m, f] = a.shape
    freq = np.linspace(0, fs / 2, f)

    left = max(freq_range[0], freq[0])
    right = min(freq_range[1], freq[-1])

    imext = (freq[0], freq[-1], -1e25, 1e25)

    if fig is None:
        fig = new_figure()

    axes = []
    for i in range(m):
        if diagonal == 1:
            jrange = [i]
        elif diagonal == 0:
            jrange = range(m)
        else:
            jrange = [j for j in range(m) if j != i]
        for j in jrange:
            if border:
                ax = fig.add_subplot(m + 1, m + 1, j + (i + 1) * (m + 1) + 2)
            else:
                ax = fig.add_subplot(m, m, j + i * m + 1)
            axes.append((i, j, ax))

            ax.imshow(s[i, j, np.newaxis], vmin=0, vmax=2, cmap='binary',
                      aspect='auto', extent=imext, zorder=-999)

            ax.xaxis.set_major_locator(MaxNLocator(max(1, 7 - m)))
            ax.yaxis.set_major_locator(MaxNLocator(max(1, 7 - m)))
            ax.set_xlim(left, right)

            if 0 < i < m - 1:
                ax.set_xticks([])
            if 0 < j < m - 1:
                ax.set_yticks([])
            if j == 0:
                ax.yaxis.tick_left()
            if j == m - 1:
                ax.yaxis.tick_right()

    _plot_labels(fig,
                 {'x': 0.5, 'y': 0.025, 's': 'frequency (Hz)', 'horizontalalignment': 'center'},
                 {'x': 0.05, 'y': 0.5, 's': 'magnitude', 'horizontalalignment': 'center',
                  'rotation': 'vertical'})

    return fig
<SYSTEM_TASK:> Draw distribution of the Portmanteau whiteness test. <END_TASK> <USER_TASK:> Description: def plot_whiteness(var, h, repeats=1000, axis=None): """ Draw distribution of the Portmanteau whiteness test. Parameters ---------- var : :class:`~scot.var.VARBase`-like object Vector autoregressive model (VAR) object whose residuals are tested for whiteness. h : int Maximum lag to include in the test. repeats : int, optional Number of surrogate estimates to draw under the null hypothesis. axis : axis, optional Axis to draw into. By default draws into :func:`matplotlib.pyplot.gca()`. Returns ------- pr : float *p*-value of whiteness under the null hypothesis """
    pr, q0, q = var.test_whiteness(h, repeats, True)

    if axis is None:
        axis = current_axis()

    pdf, _, _ = axis.hist(q0, 30, normed=True, label='surrogate distribution')
    axis.plot([q, q], [0, np.max(pdf)], 'r-', label='fitted model')

    #df = m*m*(h-p)
    #x = np.linspace(np.min(q0)*0.0, np.max(q0)*2.0, 100)
    #y = sp.stats.chi2.pdf(x, df)
    #hc = axis.plot(x, y, label='chi-squared distribution (df=%i)' % df)

    axis.set_title('significance: p = %f' % pr)
    axis.set_xlabel('Li-McLeod statistic (Q)')
    axis.set_ylabel('probability')

    axis.legend()

    return pr
<SYSTEM_TASK:> Single-trial cross-validation schema <END_TASK> <USER_TASK:> Description: def singletrial(num_trials, skipstep=1): """ Single-trial cross-validation schema Use one trial for training, all others for testing. Parameters ---------- num_trials : int Total number of trials skipstep : int only use every `skipstep` trial for training Returns ------- gen : generator object the generator returns tuples (trainset, testset) """
    for t in range(0, num_trials, skipstep):
        trainset = [t]
        testset = [i for i in range(trainset[0])] + \
                  [i for i in range(trainset[-1] + 1, num_trials)]
        testset = sort([t % num_trials for t in testset])
        yield trainset, testset
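Enumerating the splits produced by `singletrial` for four trials illustrates the schema:

for train, test in singletrial(4):
    print(train, list(test))
# [0] [1, 2, 3]
# [1] [0, 2, 3]
# [2] [0, 1, 3]
# [3] [0, 1, 2]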
<SYSTEM_TASK:> Split-set cross validation <END_TASK> <USER_TASK:> Description: def splitset(num_trials, skipstep=None): """ Split-set cross validation Use half the trials for training, and the other half for testing. Then repeat the other way round. Parameters ---------- num_trials : int Total number of trials skipstep : int unused Returns ------- gen : generator object the generator returns tuples (trainset, testset) """
    split = num_trials // 2

    a = list(range(0, split))
    b = list(range(split, num_trials))

    yield a, b
    yield b, a
<SYSTEM_TASK:> Assign data to the workspace. <END_TASK> <USER_TASK:> Description: def set_data(self, data, cl=None, time_offset=0): """ Assign data to the workspace. This function assigns a new data set to the workspace. Doing so invalidates currently fitted VAR models, connectivity estimates, and activations. Parameters ---------- data : array-like, shape = [n_trials, n_channels, n_samples] or [n_channels, n_samples] EEG data set cl : list of valid dict keys Class labels associated with each trial. time_offset : float, optional Trial starting time; used for labelling the x-axis of time/frequency plots. Returns ------- self : Workspace The Workspace object. """
    self.data_ = atleast_3d(data)
    self.cl_ = np.asarray(cl if cl is not None else [None] * self.data_.shape[0])
    self.time_offset_ = time_offset
    self.var_model_ = None
    self.var_cov_ = None
    self.connectivity_ = None

    self.trial_mask_ = np.ones(self.cl_.size, dtype=bool)

    if self.unmixing_ is not None:
        self.activations_ = dot_special(self.unmixing_.T, self.data_)

    return self
<SYSTEM_TASK:> Specify which trials to use in subsequent analysis steps. <END_TASK> <USER_TASK:> Description: def set_used_labels(self, labels): """ Specify which trials to use in subsequent analysis steps. This function masks trials based on their class labels. Parameters ---------- labels : list of class labels Marks all trials that have a label that is in the `labels` list for further processing. Returns ------- self : Workspace The Workspace object. """
    mask = np.zeros(self.cl_.size, dtype=bool)
    for l in labels:
        mask = np.logical_or(mask, self.cl_ == l)

    self.trial_mask_ = mask
    return self
<SYSTEM_TASK:> Remove sources from the decomposition. <END_TASK> <USER_TASK:> Description: def remove_sources(self, sources): """ Remove sources from the decomposition. This function removes sources from the decomposition. Doing so invalidates currently fitted VAR models and connectivity estimates. Parameters ---------- sources : {slice, int, array of ints} Indices of components to remove. Returns ------- self : Workspace The Workspace object. Raises ------ RuntimeError If the :class:`Workspace` instance does not contain a source decomposition. """
    if self.unmixing_ is None or self.mixing_ is None:
        raise RuntimeError("No sources available (run do_mvarica first)")

    self.mixing_ = np.delete(self.mixing_, sources, 0)
    self.unmixing_ = np.delete(self.unmixing_, sources, 1)

    if self.activations_ is not None:
        self.activations_ = np.delete(self.activations_, sources, 1)

    self.var_model_ = None
    self.var_cov_ = None
    self.connectivity_ = None
    self.mixmaps_ = []
    self.unmixmaps_ = []
    return self
<SYSTEM_TASK:> Keep only the specified sources in the decomposition. <END_TASK> <USER_TASK:> Description: def keep_sources(self, keep): """Keep only the specified sources in the decomposition. """
    if self.unmixing_ is None or self.mixing_ is None:
        raise RuntimeError("No sources available (run do_mvarica first)")

    n_sources = self.mixing_.shape[0]
    self.remove_sources(np.setdiff1d(np.arange(n_sources), np.array(keep)))
    return self
<SYSTEM_TASK:> Fit a VAR model to the source activations. <END_TASK> <USER_TASK:> Description: def fit_var(self): """ Fit a VAR model to the source activations. Returns ------- self : Workspace The Workspace object. Raises ------ RuntimeError If the :class:`Workspace` instance does not contain source activations. """
if self.activations_ is None: raise RuntimeError("VAR fitting requires source activations (run do_mvarica first)") self.var_.fit(data=self.activations_[self.trial_mask_, :, :]) self.connectivity_ = Connectivity(self.var_.coef, self.var_.rescov, self.nfft_) return self
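A rough sketch of how these Workspace methods chain together; the constructor arguments, the do_mvarica() call signature, and the data/labels are assumptions for illustration only:

ws = Workspace({'model_order': 5}, reducedim=4, fs=256)   # constructor arguments assumed
ws.set_data(data, cl=labels)         # data: (trials, channels, samples), one label per trial
ws.do_mvarica()                      # source decomposition; method name taken from the error messages above
ws.set_used_labels(['left_hand'])    # restrict analysis to one condition (label is illustrative)
ws.fit_var()                         # fit the VAR model to the masked source activations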
<SYSTEM_TASK:> Calculate spectral connectivity measure. <END_TASK> <USER_TASK:> Description: def get_connectivity(self, measure_name, plot=False): """ Calculate spectral connectivity measure. Parameters ---------- measure_name : str Name of the connectivity measure to calculate. See :class:`Connectivity` for supported measures. plot : {False, None, Figure object}, optional Whether and where to plot the connectivity. If set to **False**, nothing is plotted. If set to **None**, a new figure is created. Otherwise, the plot is drawn into the provided Figure object. Returns ------- measure : array, shape = [n_channels, n_channels, nfft] Values of the connectivity measure. fig : Figure object Instance of the figure in which the plot was created. This is only returned if `plot` is not **False**. Raises ------ RuntimeError If the :class:`Workspace` instance does not contain a fitted VAR model. """
if self.connectivity_ is None: raise RuntimeError("Connectivity requires a VAR model (run do_mvarica or fit_var first)") cm = getattr(self.connectivity_, measure_name)() cm = np.abs(cm) if np.any(np.iscomplex(cm)) else cm if plot is None or plot: fig = plot if self.plot_diagonal == 'fill': diagonal = 0 elif self.plot_diagonal == 'S': diagonal = -1 sm = np.abs(self.connectivity_.S()) sm /= np.max(sm) # scale to 1 since components are scaled arbitrarily anyway fig = self.plotting.plot_connectivity_spectrum(sm, fs=self.fs_, freq_range=self.plot_f_range, diagonal=1, border=self.plot_outside_topo, fig=fig) else: diagonal = -1 fig = self.plotting.plot_connectivity_spectrum(cm, fs=self.fs_, freq_range=self.plot_f_range, diagonal=diagonal, border=self.plot_outside_topo, fig=fig) return cm, fig return cm
<SYSTEM_TASK:> Calculate spectral connectivity measure under the assumption of no actual connectivity. <END_TASK> <USER_TASK:> Description: def get_surrogate_connectivity(self, measure_name, repeats=100, plot=False, random_state=None): """ Calculate spectral connectivity measure under the assumption of no actual connectivity. Repeatedly samples connectivity from phase-randomized data. This provides estimates of the connectivity distribution if there was no causal structure in the data. Parameters ---------- measure_name : str Name of the connectivity measure to calculate. See :class:`Connectivity` for supported measures. repeats : int, optional How many surrogate samples to take. Returns ------- measure : array, shape = [`repeats`, n_channels, n_channels, nfft] Values of the connectivity measure for each surrogate. See Also -------- :func:`scot.connectivity_statistics.surrogate_connectivity` : Calculates surrogate connectivity """
cs = surrogate_connectivity(measure_name, self.activations_[self.trial_mask_, :, :], self.var_, self.nfft_, repeats, random_state=random_state) if plot is None or plot: fig = plot if self.plot_diagonal == 'fill': diagonal = 0 elif self.plot_diagonal == 'S': diagonal = -1 sb = self.get_surrogate_connectivity('absS', repeats) sb /= np.max(sb) # scale to 1 since components are scaled arbitrarily anyway su = np.percentile(sb, 95, axis=0) fig = self.plotting.plot_connectivity_spectrum([su], fs=self.fs_, freq_range=self.plot_f_range, diagonal=1, border=self.plot_outside_topo, fig=fig) else: diagonal = -1 cu = np.percentile(cs, 95, axis=0) fig = self.plotting.plot_connectivity_spectrum([cu], fs=self.fs_, freq_range=self.plot_f_range, diagonal=diagonal, border=self.plot_outside_topo, fig=fig) return cs, fig return cs
<SYSTEM_TASK:> Calculate bootstrap estimates of spectral connectivity measures. <END_TASK> <USER_TASK:> Description: def get_bootstrap_connectivity(self, measure_names, repeats=100, num_samples=None, plot=False, random_state=None): """ Calculate bootstrap estimates of spectral connectivity measures. Bootstrapping is performed at the trial level. Parameters ---------- measure_names : {str, list of str} Name(s) of the connectivity measure(s) to calculate. See :class:`Connectivity` for supported measures. repeats : int, optional How many bootstrap estimates to take. num_samples : int, optional How many samples to take for each bootstrap estimate. Defaults to the same number of trials as present in the data. Returns ------- measure : array, shape = [`repeats`, n_channels, n_channels, nfft] Values of the connectivity measure for each bootstrap estimate. If `measure_names` is a list of strings, a dictionary is returned, where each key is the name of the measure, and the corresponding values are ndarrays of shape [`repeats`, n_channels, n_channels, nfft]. See Also -------- :func:`scot.connectivity_statistics.bootstrap_connectivity` : Calculates bootstrap connectivity """
if num_samples is None: num_samples = np.sum(self.trial_mask_) cb = bootstrap_connectivity(measure_names, self.activations_[self.trial_mask_, :, :], self.var_, self.nfft_, repeats, num_samples, random_state=random_state) if plot is None or plot: fig = plot if self.plot_diagonal == 'fill': diagonal = 0 elif self.plot_diagonal == 'S': diagonal = -1 sb = self.get_bootstrap_connectivity('absS', repeats, num_samples) sb /= np.max(sb) # scale to 1 since components are scaled arbitrarily anyway sm = np.median(sb, axis=0) sl = np.percentile(sb, 2.5, axis=0) su = np.percentile(sb, 97.5, axis=0) fig = self.plotting.plot_connectivity_spectrum([sm, sl, su], fs=self.fs_, freq_range=self.plot_f_range, diagonal=1, border=self.plot_outside_topo, fig=fig) else: diagonal = -1 cm = np.median(cb, axis=0) cl = np.percentile(cb, 2.5, axis=0) cu = np.percentile(cb, 97.5, axis=0) fig = self.plotting.plot_connectivity_spectrum([cm, cl, cu], fs=self.fs_, freq_range=self.plot_f_range, diagonal=diagonal, border=self.plot_outside_topo, fig=fig) return cb, fig return cb
<SYSTEM_TASK:> Plot topography of the source decomposition. <END_TASK> <USER_TASK:> Description: def plot_source_topos(self, common_scale=None): """ Plot topography of the source decomposition. Parameters ---------- common_scale : float, optional If set to None, each topoplot's color axis is scaled individually. Otherwise specifies the percentile (1-99) of values in all plots. This value is taken as the maximum color scale. """
if self.unmixing_ is None and self.mixing_ is None: raise RuntimeError("No sources available (run do_mvarica first)") self._prepare_plots(True, True) self.plotting.plot_sources(self.topo_, self.mixmaps_, self.unmixmaps_, common_scale)
<SYSTEM_TASK:> Plot scalp projections of the sources. <END_TASK> <USER_TASK:> Description: def plot_connectivity_topos(self, fig=None): """ Plot scalp projections of the sources. This function only plots the topos. Use in combination with connectivity plotting. Parameters ---------- fig : {None, Figure object}, optional Where to plot the topos. If set to **None**, a new figure is created. Otherwise plot into the provided figure object. Returns ------- fig : Figure object Instance of the figure in which the plot was created. """
self._prepare_plots(True, False) if self.plot_outside_topo: fig = self.plotting.plot_connectivity_topos('outside', self.topo_, self.mixmaps_, fig) elif self.plot_diagonal == 'topo': fig = self.plotting.plot_connectivity_topos('diagonal', self.topo_, self.mixmaps_, fig) return fig
<SYSTEM_TASK:> Plot spectral connectivity measure under the assumption of no actual connectivity. <END_TASK> <USER_TASK:> Description: def plot_connectivity_surrogate(self, measure_name, repeats=100, fig=None): """ Plot spectral connectivity measure under the assumption of no actual connectivity. Repeatedly samples connectivity from phase-randomized data. This provides estimates of the connectivity distribution if there was no causal structure in the data. Parameters ---------- measure_name : str Name of the connectivity measure to calculate. See :class:`Connectivity` for supported measures. repeats : int, optional How many surrogate samples to take. fig : {None, Figure object}, optional Where to plot. If set to **None**, a new figure is created. Otherwise plot into the provided figure object. Returns ------- fig : Figure object Instance of the figure in which the plot was created. """
cb = self.get_surrogate_connectivity(measure_name, repeats) self._prepare_plots(True, False) cu = np.percentile(cb, 95, axis=0) fig = self.plotting.plot_connectivity_spectrum([cu], self.fs_, freq_range=self.plot_f_range, fig=fig) return fig
<SYSTEM_TASK:> run loops in parallel, if joblib is available. <END_TASK> <USER_TASK:> Description: def parallel_loop(func, n_jobs=1, verbose=1): """run loops in parallel, if joblib is available. Parameters ---------- func : function function to be executed in parallel n_jobs : int | None Number of jobs. If set to None, do not attempt to use joblib. verbose : int verbosity level Notes ----- Execution of the main script must be guarded with `if __name__ == '__main__':` when using parallelization. """
if n_jobs: try: from joblib import Parallel, delayed except ImportError: try: from sklearn.externals.joblib import Parallel, delayed except ImportError: n_jobs = None if not n_jobs: if verbose: print('running ', func, ' serially') par = lambda x: list(x) else: if verbose: print('running ', func, ' in parallel') func = delayed(func) par = Parallel(n_jobs=n_jobs, verbose=verbose) return par, func
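A minimal usage sketch of the returned pair; the worker function and inputs are illustrative:

def square(x):
    return x * x

if __name__ == '__main__':   # guard required when joblib actually parallelizes
    par, func = parallel_loop(square, n_jobs=2, verbose=0)
    results = par(func(x) for x in range(10))   # [0, 1, 4, 9, ...]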
<SYSTEM_TASK:> Decorator to convert thrown errors to Voluptuous format. <END_TASK> <USER_TASK:> Description: def _convert_errors(func): """ Decorator to convert thrown errors to Voluptuous format."""
cast_Invalid = lambda e: Invalid( u"{message}, expected {expected}".format( message=e.message, expected=e.expected) if e.expected != u'-none-' else e.message, e.path, six.text_type(e)) @wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except good.SchemaError as e: raise SchemaError(six.text_type(e)) except good.MultipleInvalid as ee: raise MultipleInvalid([cast_Invalid(e) for e in ee]) except good.Invalid as e: # Since voluptuous throws MultipleInvalid almost always -- we follow the same pattern... raise MultipleInvalid([cast_Invalid(e)]) return wrapper
<SYSTEM_TASK:> When CompiledSchema compiles this marker, it sets informational values onto it. <END_TASK> <USER_TASK:> Description: def on_compiled(self, name=None, key_schema=None, value_schema=None, as_mapping_key=None): """ When CompiledSchema compiles this marker, it sets informational values onto it. Note that arguments may be provided in two incomplete sets, e.g. (name, key_schema, None) and then (None, None, value_schema). Thus, all assignments must be handled individually. It is possible that a marker may have no `value_schema` at all: e.g. in the case of { Extra: Reject } -- `Reject` will have no value schema, but `Extra` will have compiled `Reject` as the value. :param key_schema: Compiled key schema :type key_schema: CompiledSchema|None :param value_schema: Compiled value schema :type value_schema: CompiledSchema|None :param name: Human-friendly marker name :type name: unicode|None :param as_mapping_key: Whether it's used as a mapping key? :type as_mapping_key: bool|None :rtype: Marker """
if self.name is None: self.name = name if self.key_schema is None: self.key_schema = key_schema if self.value_schema is None: self.value_schema = value_schema if as_mapping_key: self.as_mapping_key = True return self
<SYSTEM_TASK:> Append a rainbow logging handler and a formatter to the root logger <END_TASK> <USER_TASK:> Description: def colorlogs(format="short"): """Append a rainbow logging handler and a formatter to the root logger"""
try: from rainbow_logging_handler import RainbowLoggingHandler import sys # setup `RainbowLoggingHandler` logger = logging.root # same as default if format == "short": fmt = "%(message)s " else: fmt = "[%(asctime)s] %(name)s %(funcName)s():%(lineno)d\t%(message)s [%(levelname)s]" formatter = logging.Formatter(fmt) handler = RainbowLoggingHandler(sys.stderr, color_funcName=('black', 'gray', True)) handler.setFormatter(formatter) logger.addHandler(handler) except ImportError: # rainbow logger not found, that's ok pass
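A minimal usage sketch:

import logging

colorlogs(format="long")
logging.root.setLevel(logging.INFO)
logging.info("colored output if rainbow_logging_handler is installed")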
<SYSTEM_TASK:> main bmi runner program <END_TASK> <USER_TASK:> Description: def main(): """main bmi runner program"""
arguments = docopt.docopt(__doc__, version=__version__) colorlogs() # Read input file wrapper = BMIWrapper( engine=arguments['<engine>'], configfile=arguments['<config>'] or '' ) # add logger if required if not arguments['--disable-logger']: logging.root.setLevel(logging.DEBUG) wrapper.set_logger(logging.root) with wrapper as model: # if siginfo is supported by OS (BSD) def handler(signum, frame): """report progress information""" t_start = model.get_start_time() t_end = model.get_end_time() t_current = model.get_current_time() total = (t_end - t_start) now = (t_current - t_start) if total > 0: logging.info("progress: %s%%", 100.0 * now / total) else: logging.info("progress: unknown") if hasattr(signal, 'SIGINFO'): # attach a siginfo handler (CTRL-t) to print progress signal.signal(signal.SIGINFO, handler) if arguments['--info']: logging.info("%s", trace(model)) t_end = model.get_end_time() t = model.get_start_time() while t < t_end: model.update(-1) t = model.get_current_time() if arguments['--info']: logging.info("%s", trace(model))
<SYSTEM_TASK:> Advance game by single move, if possible. <END_TASK> <USER_TASK:> Description: def move(self): """ Advance game by single move, if possible. @return: logical indicator if move was performed. """
if len(self.moves) == MAX_MOVES: return False elif len(self.moves) % 2: active_engine = self.black_engine active_engine_name = self.black inactive_engine = self.white_engine inactive_engine_name = self.white else: active_engine = self.white_engine active_engine_name = self.white inactive_engine = self.black_engine inactive_engine_name = self.black active_engine.setposition(self.moves) movedict = active_engine.bestmove() bestmove = movedict.get('move') info = movedict.get('info') ponder = movedict.get('ponder') self.moves.append(bestmove) if info["score"]["eval"] == "mate": matenum = info["score"]["value"] if matenum > 0: self.winner_engine = active_engine self.winner = active_engine_name elif matenum < 0: self.winner_engine = inactive_engine self.winner = inactive_engine_name return False return ponder != '(none)'
<SYSTEM_TASK:> Get proposed best move for current position. <END_TASK> <USER_TASK:> Description: def bestmove(self): """ Get proposed best move for current position. @return: dictionary with 'move', 'ponder', 'info' containing best move's UCI notation, ponder value and info dictionary. """
self.go() last_info = "" while True: text = self.stdout.readline().strip() split_text = text.split(' ') print(text) if split_text[0] == "info": last_info = Engine._bestmove_get_info(text) if split_text[0] == "bestmove": ponder = None if len(split_text) < 4 else split_text[3] return {'move': split_text[1], 'ponder': ponder, 'info': last_info}
<SYSTEM_TASK:> Parse stockfish evaluation output as dictionary. <END_TASK> <USER_TASK:> Description: def _bestmove_get_info(text): """ Parse stockfish evaluation output as dictionary. Examples of input: "info depth 2 seldepth 3 multipv 1 score cp -656 nodes 43 nps 43000 tbhits 0 \ time 1 pv g7g6 h3g3 g6f7" "info depth 10 seldepth 12 multipv 1 score mate 5 nodes 2378 nps 1189000 tbhits 0 \ time 2 pv h3g3 g6f7 g3c7 b5d7 d1d7 f7g6 c7g3 g6h5 e6f4" """
result_dict = Engine._get_info_pv(text) result_dict.update(Engine._get_info_score(text)) single_value_fields = ['depth', 'seldepth', 'multipv', 'nodes', 'nps', 'tbhits', 'time'] for field in single_value_fields: result_dict.update(Engine._get_info_singlevalue_subfield(text, field)) return result_dict
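As a rough illustration, parsing the second docstring example would be expected to produce something like the dictionary below; the exact value types and the 'pv' representation depend on the helper functions, which are not shown here:

# Illustrative result only; helper output formats are assumed.
{
    'depth': 10, 'seldepth': 12, 'multipv': 1,
    'score': {'eval': 'mate', 'value': 5},
    'nodes': 2378, 'nps': 1189000, 'tbhits': 0, 'time': 2,
    'pv': ['h3g3', 'g6f7', 'g3c7', 'b5d7', 'd1d7', 'f7g6', 'c7g3', 'g6h5', 'e6f4'],
}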
<SYSTEM_TASK:> Used to synchronize the python engine object with the back-end engine. Sends 'isready' and waits for 'readyok.' <END_TASK> <USER_TASK:> Description: def isready(self): """ Used to synchronize the python engine object with the back-end engine. Sends 'isready' and waits for 'readyok.' """
self.put('isready') while True: text = self.stdout.readline().strip() if text == 'readyok': return text
<SYSTEM_TASK:> Compute the metrics for the project activity section of the enriched <END_TASK> <USER_TASK:> Description: def project_activity(index, start, end): """Compute the metrics for the project activity section of the enriched GitHub pull requests index. Returns a dictionary containing a "metrics" key, which holds the metrics for this section. :param index: index object :param start: start date to get the data from :param end: end date to get the data up to :return: dictionary with the value of the metrics """
results = { "metrics": [SubmittedPRs(index, start, end), ClosedPRs(index, start, end)] } return results
<SYSTEM_TASK:> Get the single valued aggregations with respect to the <END_TASK> <USER_TASK:> Description: def aggregations(self): """Get the single valued aggregations with respect to the previous time interval."""
prev_month_start = get_prev_month(self.end, self.query.interval_) self.query.since(prev_month_start) agg = super().aggregations() if agg is None: agg = 0 # None is because NaN in ES. Let's convert to 0 return agg
<SYSTEM_TASK:> Basic query to get the metric values <END_TASK> <USER_TASK:> Description: def get_query(self, evolutionary=False): """ Basic query to get the metric values :param evolutionary: if True the metric values time series is returned. If False the aggregated metric value. :return: the DSL query to be sent to Elasticsearch """
if not evolutionary: interval = None offset = None else: interval = self.interval offset = self.offset if not interval: raise RuntimeError("Evolutionary query without an interval.") query = ElasticQuery.get_agg(field=self.FIELD_COUNT, date_field=self.FIELD_DATE, start=self.start, end=self.end, filters=self.esfilters, agg_type=self.AGG_TYPE, interval=interval, offset=offset) logger.debug("Metric: '%s' (%s); Query: %s", self.name, self.id, query) return query
<SYSTEM_TASK:> Extract from a DSL aggregated response the values for each bucket <END_TASK> <USER_TASK:> Description: def get_list(self): """ Extract from a DSL aggregated response the values for each bucket :return: a dict with two parallel lists: the bucket keys for the aggregated field and the corresponding document counts """
field = self.FIELD_NAME query = ElasticQuery.get_agg(field=field, date_field=self.FIELD_DATE, start=self.start, end=self.end, filters=self.esfilters) logger.debug("Metric: '%s' (%s); Query: %s", self.name, self.id, query) res = self.get_metrics_data(query) list_ = {field: [], "value": []} for bucket in res['aggregations'][str(ElasticQuery.AGGREGATION_ID)]['buckets']: list_[field].append(bucket['key']) list_['value'].append(bucket['doc_count']) return list_
<SYSTEM_TASK:> Get the metrics data from Elasticsearch given a DSL query <END_TASK> <USER_TASK:> Description: def get_metrics_data(self, query): """ Get the metrics data from Elasticsearch given a DSL query :param query: query to be sent to Elasticsearch :return: a dict with the results of executing the query """
if self.es_url.startswith("http"): url = self.es_url else: url = 'http://' + self.es_url es = Elasticsearch(url) s = Search(using=es, index=self.es_index) s = s.update_from_dict(query) try: response = s.execute() return response.to_dict() except Exception as e: print() print("In get_metrics_data: Failed to fetch data.\n Query: {}, \n Error Info: {}" .format(query, e.info)) raise
<SYSTEM_TASK:> Returns a time series of a specific class <END_TASK> <USER_TASK:> Description: def get_ts(self): """ Returns a time series of a specific class A time series consists of the interval dates, their unixtime timestamps, and the metric values for each interval, collected as a dictionary of parallel lists. :return: a dict with the time series values of the metric """
query = self.get_query(True) res = self.get_metrics_data(query) # Time to convert it to our grimoire timeseries format ts = {"date": [], "value": [], "unixtime": []} agg_id = ElasticQuery.AGGREGATION_ID if 'buckets' not in res['aggregations'][str(agg_id)]: raise RuntimeError("Aggregation results have no buckets in time series results.") for bucket in res['aggregations'][str(agg_id)]['buckets']: ts['date'].append(bucket['key_as_string']) if str(agg_id + 1) in bucket: # We have a subaggregation with the value # If it is percentiles we get the median if 'values' in bucket[str(agg_id + 1)]: val = bucket[str(agg_id + 1)]['values']['50.0'] if val == 'NaN': # ES returns NaN. Convert to None for matplotlib graph val = None ts['value'].append(val) else: ts['value'].append(bucket[str(agg_id + 1)]['value']) else: ts['value'].append(bucket['doc_count']) # unixtime comes in ms from ElasticSearch ts['unixtime'].append(bucket['key'] / 1000) return ts
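The returned structure is a plain dict of parallel lists; an illustrative (made-up) result could look like this:

ts = {
    'date': ['2018-01-01T00:00:00.000Z', '2018-02-01T00:00:00.000Z'],  # values are made up
    'value': [42, 57],
    'unixtime': [1514764800.0, 1517443200.0],
}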
<SYSTEM_TASK:> Returns the aggregated value for the metric <END_TASK> <USER_TASK:> Description: def get_agg(self): """ Returns the aggregated value for the metric :return: the value of the metric """
""" Returns an aggregated value """ query = self.get_query(False) res = self.get_metrics_data(query) # We need to extract the data from the JSON res # If we have agg data use it agg_id = str(ElasticQuery.AGGREGATION_ID) if 'aggregations' in res and 'values' in res['aggregations'][agg_id]: if self.AGG_TYPE == 'median': agg = res['aggregations'][agg_id]['values']["50.0"] if agg == 'NaN': # ES returns NaN. Convert to None for matplotlib graph agg = None else: raise RuntimeError("Multivalue aggregation result not supported") elif 'aggregations' in res and 'value' in res['aggregations'][agg_id]: agg = res['aggregations'][agg_id]['value'] else: agg = res['hits']['total'] return agg
<SYSTEM_TASK:> Get the trend for the last two metric values using the interval defined in the metric <END_TASK> <USER_TASK:> Description: def get_trend(self): """ Get the trend for the last two metric values using the interval defined in the metric :return: a tuple with the metric value for the last interval and the trend percentage between the last two intervals """
""" """ # TODO: We just need the last two periods, not the full ts ts = self.get_ts() last = ts['value'][len(ts['value']) - 1] prev = ts['value'][len(ts['value']) - 2] trend = last - prev trend_percentage = None if last == 0: if prev > 0: trend_percentage = -100 else: trend_percentage = 0 else: trend_percentage = int((trend / last) * 100) return (last, trend_percentage)
<SYSTEM_TASK:> Return the response format requested by client <END_TASK> <USER_TASK:> Description: def requestedFormat(request, acceptedFormat): """Return the response format requested by the client The client can specify the requested format using: (options are processed in this order) - `format` field in the http request - `Accept` header in the http request Example: requestedFormat(request, ['text/html','application/json']) Args: acceptedFormat: list containing all the accepted formats Returns: string: the user requested mime-type (if supported) Raises: ValueError: if the user requests an unsupported mime-type """
if 'format' in request.args: fieldFormat = request.args.get('format') if fieldFormat not in acceptedFormat: raise ValueError("requested format not supported: "+ fieldFormat) return fieldFormat else: return request.accept_mimetypes.best_match(acceptedFormat)
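A minimal sketch of calling requestedFormat inside a Flask view; the app and route are illustrative and not part of the original module:

from flask import Flask, request

app = Flask(__name__)

@app.route('/report/')
def report():
    # e.g. GET /report/?format=application/json
    fmt = requestedFormat(request, ['text/html', 'application/json'])
    return 'negotiated format: ' + fmt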
<SYSTEM_TASK:> Decorator utility to collect flask routes in a list. <END_TASK> <USER_TASK:> Description: def routes_collector(gatherer): """Decorator utility to collect flask routes in a list. This function together with :func:`add_routes` provides an easy way to split flask route declarations across multiple modules. :param gatherer: list in which routes will be collected The decorator provided by this function should be used as the `original flask decorator <http://flask.pocoo.org/docs/latest/api/#flask.Flask.route>`_ example:: routes = [] route = routes_collector(routes) @route('/volumes/', methods=['GET', 'POST']) def volumes(): return 'page body' After you've collected your routes you can use :func:`add_routes` to register them onto the main blueprint/flask_app. """
def hatFunc(rule, **options): def decorator(f): rule_dict = {'rule': rule, 'view_func': f} rule_dict.update(options) gatherer.append(rule_dict) return f return decorator return hatFunc
<SYSTEM_TASK:> Batch routes registering <END_TASK> <USER_TASK:> Description: def add_routes(fapp, routes, prefix=""): """Batch routes registering Register routes to a blueprint/flask_app previously collected with :func:`routes_collector`. :param fapp: blueprint or flask_app to which the new routes are attached. :param routes: list of route dicts collected by :func:`routes_collector` :param prefix: url prefix under which all routes are registered """
for r in routes: r['rule'] = prefix + r['rule'] fapp.add_url_rule(**r)
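Continuing the routes_collector example from the docstring above, registration could look like this (the Flask app and prefix are illustrative):

from flask import Flask

app = Flask(__name__)
add_routes(app, routes, prefix="/api")   # the collected view is now reachable under /api/volumes/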
<SYSTEM_TASK:> Determine full-width at half-maximum of a peaked set of points, x and y. <END_TASK> <USER_TASK:> Description: def fwhm(x, y, k=10): # http://stackoverflow.com/questions/10582795/finding-the-full-width-half-maximum-of-a-peak """ Determine full-width at half-maximum of a peaked set of points, x and y. Assumes that there is only one peak present in the dataset. A cubic spline interpolation is used to locate the half-maximum crossings; note that `sproot` supports only cubic splines, so the `k` argument is effectively unused. """
class MultiplePeaks(Exception): pass class NoPeaksFound(Exception): pass half_max = np.amax(y) / 2.0 s = splrep(x, y - half_max) roots = sproot(s) if len(roots) > 2: raise MultiplePeaks("The dataset appears to have multiple peaks, and " "thus the FWHM can't be determined.") elif len(roots) < 2: raise NoPeaksFound("No proper peaks were found in the data set; likely " "the dataset is flat (e.g. all zeros).") else: return roots[0], roots[1]
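A minimal usage sketch with a synthetic Gaussian peak (values are illustrative):

import numpy as np

x = np.linspace(-5.0, 5.0, 401)
y = np.exp(-x**2 / (2.0 * 1.5**2))   # Gaussian peak with sigma = 1.5
left, right = fwhm(x, y)
print(right - left)                  # ~3.53, i.e. 2*sqrt(2*ln 2)*sigma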
<SYSTEM_TASK:> Main function of smatch score calculation <END_TASK> <USER_TASK:> Description: def main(arguments): """ Main function of smatch score calculation """
global verbose global veryVerbose global iteration_num global single_score global pr_flag global match_triple_dict # set the iteration number # total iteration number = restart number + 1 iteration_num = arguments.r + 1 if arguments.ms: single_score = False if arguments.v: verbose = True if arguments.vv: veryVerbose = True if arguments.pr: pr_flag = True # significant digits to print out floatdisplay = "%%.%df" % arguments.significant for (precision, recall, best_f_score) in score_amr_pairs(arguments.f[0], arguments.f[1], justinstance=arguments.justinstance, justattribute=arguments.justattribute, justrelation=arguments.justrelation): # print("Sentence", sent_num) if pr_flag: print("Precision: " + floatdisplay % precision) print("Recall: " + floatdisplay % recall) print("F-score: " + floatdisplay % best_f_score) arguments.f[0].close() arguments.f[1].close()
<SYSTEM_TASK:> Encode a mapping type. <END_TASK> <USER_TASK:> Description: def _encode_mapping(name, value, check_keys, opts): """Encode a mapping type."""
data = b"".join([_element_to_bson(key, val, check_keys, opts) for key, val in iteritems(value)]) return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00"
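For orientation, the bytes produced follow the BSON embedded-document layout: a 0x03 type byte, the element name as a C-string, then the sub-document (int32 total length, the encoded elements, and a trailing 0x00). An illustrative expected result for name=b"d\x00" and value={"a": 1}, assuming the standard int32 element encoding:

expected = (b"\x03"                         # type byte: embedded document
            + b"d\x00"                      # element name as C-string
            + b"\x0c\x00\x00\x00"           # int32 document length = 12 (4 + 7 + 1)
            + b"\x10a\x00\x01\x00\x00\x00"  # encoded int32 element "a": 1 (7 bytes)
            + b"\x00")                      # document terminator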
<SYSTEM_TASK:> Convert simplified domain expression to regular expression <END_TASK> <USER_TASK:> Description: def simToReg(self, sim): """Convert simplified domain expression to regular expression"""
# remove leading and trailing slashes if present res = re.sub('^/', '', sim) res = re.sub('/$', '', res) return '^/?' + re.sub(r'\*', '[^/]+', res) + '/?$'
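A couple of illustrative conversions, assuming `cap` is an instance of the class defining simToReg:

import re

pattern = cap.simToReg('/data/*/volumes/')          # -> '^/?data/[^/]+/volumes/?$'
bool(re.match(pattern, '/data/abc123/volumes'))     # True
bool(re.match(pattern, '/data/a/b/volumes'))        # False: [^/]+ matches a single path segment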
<SYSTEM_TASK:> Check if the given `domain` and `act` are allowed <END_TASK> <USER_TASK:> Description: def match(self, dom, act): """ Check if the given domain `dom` and action `act` are allowed by this capability """
return self.match_domain(dom) and self.match_action(act)