Columns: docstring (string, lengths 52–499), function (string, lengths 67–35.2k), __index_level_0__ (int64, 52.6k–1.16M)
Create a `showwarning` function that uses the given logger. Arguments: logger (~logging.Logger): the logger to use. Returns: function: a function that can be used as the `warnings.showwarning` callback.
def warn_logging(logger): # type: (logging.Logger) -> Callable def showwarning(message, category, filename, lineno, file=None, line=None): logger.warning(message) return showwarning
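A minimal usage sketch (the logger name is illustrative): install the returned callback so warnings are routed through logging instead of being printed to stderr.

```python
import logging
import warnings

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("example")

# Route warnings through the logger instead of the default stderr output.
warnings.showwarning = warn_logging(logger)
warnings.warn("this message is emitted via logger.warning")
```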
299,787
Build a decorator that patches `warnings.showwarning` with the given logger while the decorated function runs. Arguments: logger (~logging.Logger): the logger to wrap warnings with when the decorated function is called. Returns: `function`: a decorator function.
def wrap_warnings(logger): def decorator(func): @functools.wraps(func) def new_func(*args, **kwargs): showwarning = warnings.showwarning warnings.showwarning = warn_logging(logger) try: return func(*args, **kwargs) finally: warnings.showwarning = showwarning return new_func return decorator
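A hedged usage sketch of the decorator above; `noisy` is a made-up function name.

```python
import logging
import warnings

logger = logging.getLogger("example")

@wrap_warnings(logger)
def noisy():
    warnings.warn("captured and logged while noisy() runs")

noisy()
# warnings.showwarning is restored to its previous value once noisy() returns.
```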
299,788
Run from the command line interface. Arguments: argv (list): The positional arguments to read. Defaults to `sys.argv` to use CLI arguments. stream (~io.IOBase): A file to write error messages to. Leave as `None` to use the `~coloredlogs.StandardErrorHandler` for logs, and `sys.stderr` for error messages. Returns: int: An error code, or 0 if the program executed successfully.
def main(argv=None, stream=None): _print = functools.partial(print, file=stream or sys.stderr) # Parse command line arguments try: args = docopt.docopt( HELP, argv, version='instalooter {}'.format(__version__)) except docopt.DocoptExit as de: _print(de) return 1 # Print usage and exit if required (docopt does not do this !) if args['--usage']: _print(USAGE) return 0 # Set the loggers up with the requested logging level level = "ERROR" if args['--quiet'] else args.get("--loglevel", "INFO") for logger_ in (logger, login_logger, batch_logger): coloredlogs.install( level=int(level) if level.isdigit() else level, stream=stream, logger=logger_) # Check the requested logging level if args['-W'] not in WARNING_ACTIONS: _print("Unknown warning action:", args['-W']) _print(" available actions:", ', '.join(WARNING_ACTIONS)) return 1 with warnings.catch_warnings(): warnings.simplefilter(args['-W']) try: # Run in batch mode if args['batch']: # Load the batch configuration from the given file with open(args['<batch_file>']) as batch_file: batch_runner = BatchRunner(batch_file, args) # Run the batch batch_runner.run_all() return 0 # Login if requested if args['login']: try: if not args['--username']: args['--username'] = six.moves.input('Username: ') login(args) return 0 except ValueError as ve: logger.error("%s", ve) if args["--traceback"]: traceback.print_exc() return 1 # Logout if requested if args['logout']: if InstaLooter._cachefs.exists(InstaLooter._COOKIE_FILE): InstaLooter._logout() logger.success('Logged out.') else: warnings.warn('Cookie file not found.') return 0 # Normal download mode: if args['user']: looter_cls = ProfileLooter target = args['<profile>'] elif args['hashtag']: looter_cls = HashtagLooter target = args['<hashtag>'] elif args['post']: looter_cls = PostLooter target = args['<post_token>'] else: raise NotImplementedError("TODO") # Instantiate the looter looter = looter_cls( target, add_metadata=args['--add-metadata'], get_videos=args['--get-videos'], videos_only=args['--videos-only'], jobs=int(args['--jobs']) if args['--jobs'] is not None else 16, template=args['--template'], dump_json=args['--dump-json'], dump_only=args['--dump-only'], extended_dump=args['--extended-dump'] ) # Attempt to login and extract the timeframe if args['--username']: login(args) if args['--num-to-dl']: args['--num-to-dl'] = int(args['--num-to-dl']) try: if args['--time'] is not None: args['--time'] = get_times_from_cli(args['--time']) except ValueError as ve: _print("invalid format for --time parameter:", args["--time"]) _print(" (format is [D]:[D] where D is an ISO 8601 date)") return 1 logger.debug("Opening destination filesystem") dest_url = args.get('<directory>') or os.getcwd() dest_fs = fs.open_fs(dest_url, create=True) logger.notice("Starting download of `%s`", target) n = looter.download( destination=dest_fs, media_count=args['--num-to-dl'], timeframe=args['--time'], new_only=args['--new'], pgpbar_cls=None if args['--quiet'] else TqdmProgressBar, dlpbar_cls=None if args['--quiet'] else TqdmProgressBar) if n > 1: logger.success("Downloaded %i posts.", n) elif n == 1: logger.success("Downloaded %i post.", n) except (Exception, KeyboardInterrupt) as e: from .threadutils import threads_force_join, threads_count # Show error traceback if any if not isinstance(e, KeyboardInterrupt): logger.critical("%s", e) if args["--traceback"]: traceback.print_exc() else: logger.critical("Interrupted") # Close remaining threads spawned by InstaLooter.download count = threads_count() if count: 
logger.notice("Terminating %i remaining workers...", count) threads_force_join() # Return the error number if any errno = e.errno if hasattr(e, "errno") else None return errno if errno is not None else 1 else: return 0 finally: logger.debug("Closing destination filesystem") try: dest_fs.close() except Exception: pass
299,791
Return iterable containing columns for the given array X. Args: X: `numpy.ndarray` or `pandas.DataFrame`. Returns: iterable: columns for the given matrix.
def get_column_names(self, X): if isinstance(X, pd.DataFrame): return X.columns return range(X.shape[1])
300,849
Return a column of the given matrix. Args: X: `numpy.ndarray` or `pandas.DataFrame`. column: `int` or `str`. Returns: np.ndarray: Selected column.
def get_column(self, X, column): if isinstance(X, pd.DataFrame): return X[column].values return X[:, column]
300,850
Set a column of the matrix X to the given value. Args: X: `numpy.ndarray` or `pandas.DataFrame`. column: `int` or `str`. value: `np.ndarray` column of values, with the same length as X. Returns: `np.ndarray` or `pandas.DataFrame` with the inserted column.
def set_column(self, X, column, value): if isinstance(X, pd.DataFrame): X.loc[:, column] = value else: X[:, column] = value return X
300,851
Compute covariance matrix with transformed data. Args: X: `numpy.ndarray` or `pandas.DataFrame`. Returns: np.ndarray
def _get_covariance(self, X): result = pd.DataFrame(index=range(len(X))) column_names = self.get_column_names(X) for column_name in column_names: column = self.get_column(X, column_name) distrib = self.distribs[column_name] # get original distrib's cdf of the column cdf = distrib.cumulative_distribution(column) if distrib.constant_value is not None: # This is to avoid np.inf in the case the column is constant. cdf = np.ones(column.shape) - EPSILON # get inverse cdf using standard normal result = self.set_column(result, column_name, stats.norm.ppf(cdf)) # remove any rows that have infinite values result = result[(result != np.inf).all(axis=1)] return pd.DataFrame(data=result).cov().values
300,852
Compute the distribution for each variable and then its covariance matrix. Args: X(numpy.ndarray or pandas.DataFrame): Data to model. Returns: None
def fit(self, X): LOGGER.debug('Fitting Gaussian Copula') column_names = self.get_column_names(X) distribution_class = import_object(self.distribution) for column_name in column_names: self.distribs[column_name] = distribution_class() column = self.get_column(X, column_name) self.distribs[column_name].fit(column) self.covariance = self._get_covariance(X) self.fitted = True
300,853
Compute probability density function for given copula family. Args: X: `numpy.ndarray` or `pandas.DataFrame` Returns: np.array: Probability density for the input values.
def probability_density(self, X): self.check_fit() # make cov positive semi-definite covariance = self.covariance * np.identity(self.covariance.shape[0]) return stats.multivariate_normal.pdf(X, cov=covariance)
300,854
Computes the cumulative distribution function for the copula. Args: X: `numpy.ndarray` or `pandas.DataFrame` Returns: np.array: cumulative probability
def cumulative_distribution(self, X): self.check_fit() # Wrapper for pdf to accept vector as args def func(*args): return self.probability_density(list(args)) # Lower bound for integral, to split significant part from tail lower_bound = self.get_lower_bound() ranges = [[lower_bound, val] for val in X] return integrate.nquad(func, ranges)[0]
300,855
Create synthetic values statistically similar to the original dataset. Args: num_rows: `int` number of samples to generate. Returns: pandas.DataFrame: Sampled data.
def sample(self, num_rows=1): self.check_fit() res = {} means = np.zeros(self.covariance.shape[0]) size = (num_rows,) clean_cov = np.nan_to_num(self.covariance) samples = np.random.multivariate_normal(means, clean_cov, size=size) for i, (label, distrib) in enumerate(self.distribs.items()): cdf = stats.norm.cdf(samples[:, i]) res[label] = distrib.percent_point(cdf) return pd.DataFrame(data=res)
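A hypothetical end-to-end flow combining fit and sample; the class name GaussianMultivariate is an assumption about the class these methods belong to.

```python
import numpy as np
import pandas as pd

data = pd.DataFrame({
    "a": np.random.normal(size=500),
    "b": 2.0 * np.random.normal(size=500) + 1.0,
})

copula = GaussianMultivariate()   # assumed class name
copula.fit(data)                  # fits one univariate distribution per column
synthetic = copula.sample(100)    # DataFrame with columns "a" and "b"
```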
300,856
Fit the model. Arguments: X: `np.ndarray` of shape (n, 1). Returns: None
def fit(self, X): if isinstance(X, (pd.Series, pd.DataFrame)): self.name = X.name self.constant_value = self._get_constant_value(X) if self.constant_value is None: self.mean = np.mean(X) self.std = np.std(X) else: self._replace_constant_methods() self.fitted = True
300,861
Compute probability density. Arguments: X: `np.ndarray` of shape (n, 1). Returns: np.ndarray
def probability_density(self, X): self.check_fit() return norm.pdf(X, loc=self.mean, scale=self.std)
300,862
Cumulative distribution function for gaussian distribution. Arguments: X: `np.ndarray` of shape (n, 1). Returns: np.ndarray: Cumulative density for X.
def cumulative_distribution(self, X): self.check_fit() return norm.cdf(X, loc=self.mean, scale=self.std)
300,863
Given a cumulative distribution value, returns a value in original space. Arguments: U: `np.ndarray` of shape (n, 1) and values in [0,1] Returns: `np.ndarray`: Estimated values in original space.
def percent_point(self, U): self.check_fit() return norm.ppf(U, loc=self.mean, scale=self.std)
300,864
Return new data points sampled from the model. Arguments: num_samples: `int` number of samples to generate. Returns: np.ndarray: Generated samples
def sample(self, num_samples=1): self.check_fit() return np.random.normal(self.mean, self.std, num_samples)
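A small sketch of the univariate Gaussian model above; GaussianUnivariate is assumed to be the enclosing class name.

```python
import numpy as np

model = GaussianUnivariate()      # assumed class name
model.fit(np.random.normal(loc=5.0, scale=2.0, size=1000))

model.probability_density(np.array([5.0]))   # density peaks near the fitted mean
model.percent_point(np.array([0.5]))         # ~5.0, the estimated median
model.sample(10)                             # ten new draws from N(mean, std)
```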
300,865
Fit a model to the data updating the parameters. Args: X: `np.ndarray` of shape (n, 2). Return: None
def fit(self, X): U, V = self.split_matrix(X) self.tau = stats.kendalltau(U, V)[0] self.theta = self.compute_theta() self.check_theta()
300,871
Create a new instance from the given parameters. Args: copula_dict: `dict` with the parameters to replicate the copula. Like the output of `Bivariate.to_dict` Returns: Bivariate: Instance of the copula defined on the parameters.
def from_dict(cls, copula_dict): instance = cls(copula_dict['copula_type']) instance.theta = copula_dict['theta'] instance.tau = copula_dict['tau'] return instance
300,872
Generate `n_samples` rows of new data from the model, using the scheme v ~ U[0, 1], c ~ U[0, 1], u = C^{-1}(c|v). Args: n_samples: `int`, number of samples to create. Returns: np.ndarray: Array of shape (n_samples, 2) with data generated from the model.
def sample(self, n_samples): if self.tau > 1 or self.tau < -1: raise ValueError("The range for correlation measure is [-1,1].") v = np.random.uniform(0, 1, n_samples) c = np.random.uniform(0, 1, n_samples) u = self.percent_point(c, v) return np.column_stack((u, v))
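Hypothetical usage of the bivariate sampling scheme, assuming Bivariate and CopulaTypes come from the same package as these methods.

```python
import numpy as np

data = np.random.uniform(size=(500, 2))   # pseudo-observations in the unit square

copula = Bivariate(CopulaTypes.CLAYTON)   # assumed constructor
copula.fit(data)
samples = copula.sample(100)              # shape (100, 2): columns are u and v
```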
300,874
Select the best copula function based on likelihood. Args: X: 2-dimensional `np.ndarray` Returns: tuple: `tuple(CopulaTypes, float)` with the best-fitting copula type and its theta parameter.
def select_copula(cls, X): frank = Bivariate(CopulaTypes.FRANK) frank.fit(X) if frank.tau <= 0: selected_theta = frank.theta selected_copula = CopulaTypes.FRANK return selected_copula, selected_theta copula_candidates = [frank] theta_candidates = [frank.theta] try: clayton = Bivariate(CopulaTypes.CLAYTON) clayton.fit(X) copula_candidates.append(clayton) theta_candidates.append(clayton.theta) except ValueError: # Invalid theta, copula ignored pass try: gumbel = Bivariate(CopulaTypes.GUMBEL) gumbel.fit(X) copula_candidates.append(gumbel) theta_candidates.append(gumbel.theta) except ValueError: # Invalid theta, copula ignored pass z_left, L, z_right, R = cls.compute_empirical(X) left_dependence, right_dependence = cls.get_dependencies( copula_candidates, z_left, z_right) # compute L2 distance from empirical distribution cost_L = [np.sum((L - l) ** 2) for l in left_dependence] cost_R = [np.sum((R - r) ** 2) for r in right_dependence] cost_LR = np.add(cost_L, cost_R) selected_copula = np.argmax(cost_LR) selected_theta = theta_candidates[selected_copula] return CopulaTypes(selected_copula), selected_theta
300,880
Create a new instance from a file. Args: copula_path: `str` file with the serialized copula. Returns: Bivariate: Instance with the parameters stored in the file.
def load(cls, copula_path): with open(copula_path) as f: copula_dict = json.load(f) return cls.from_dict(copula_dict)
300,881
Allow methods that only accept 1-d vectors to work with scalars. Args: function(callable): Function that accepts and returns vectors. Returns: callable: Decorated function that accepts and returns scalars.
def scalarize(function): def decorated(self, X, *args, **kwargs): scalar = not isinstance(X, np.ndarray) if scalar: X = np.array([X]) result = function(self, X, *args, **kwargs) if scalar: result = result[0] return result decorated.__doc__ = function.__doc__ return decorated
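Sketch of how the decorator behaves on a toy method (class and method names are illustrative).

```python
import numpy as np

class Doubler:
    @scalarize
    def double(self, X):
        # Written for 1-d numpy arrays only.
        return X * 2

d = Doubler()
d.double(np.array([1, 2, 3]))   # array([2, 4, 6])
d.double(3)                     # 6 -- the scalar is wrapped, then unwrapped
```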
300,886
Raises an exception if the given values are not supported. Args: function(callable): Method whose unique argument is a numpy.array-like object. Returns: callable: Decorated function Raises: ValueError: If there are missing or invalid values or if the dataset is empty.
def check_valid_values(function): def decorated(self, X, *args, **kwargs): if isinstance(X, pd.DataFrame): W = X.values else: W = X if not len(W): raise ValueError('Your dataset is empty.') if W.dtype not in [np.dtype('float64'), np.dtype('int64')]: raise ValueError('There are non-numerical values in your data.') if np.isnan(W).any().any(): raise ValueError('There are nan values in your data.') return function(self, X, *args, **kwargs) return decorated
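Illustrative sketch of the validation decorator on a toy method (names are made up).

```python
import numpy as np
import pandas as pd

class Model:
    @check_valid_values
    def fit(self, X):
        return X.shape

model = Model()
model.fit(pd.DataFrame({"a": [1.0, 2.0]}))     # passes validation
# model.fit(pd.DataFrame({"a": ["x", "y"]}))   # ValueError: non-numerical values
# model.fit(np.array([[1.0, np.nan]]))         # ValueError: nan values
```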
300,887
Compute density function for given copula family. Args: X: `np.ndarray` Returns: np.array: probability density
def probability_density(self, X): self.check_fit() U, V = self.split_matrix(X) if self.theta == 0: return np.multiply(U, V) else: num = np.multiply(np.multiply(-self.theta, self._g(1)), 1 + self._g(np.add(U, V))) aux = np.multiply(self._g(U), self._g(V)) + self._g(1) den = np.power(aux, 2) return num / den
300,895
Computes the cumulative distribution function for the copula, :math:`C(u, v)` Args: X: `np.ndarray` Returns: np.array: cumulative distribution
def cumulative_distribution(self, X): self.check_fit() U, V = self.split_matrix(X) num = np.multiply( np.exp(np.multiply(-self.theta, U)) - 1, np.exp(np.multiply(-self.theta, V)) - 1 ) den = np.exp(-self.theta) - 1 return -1.0 / self.theta * np.log(1 + num / den)
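For reference, the closed form this code evaluates is the standard Frank copula CDF:

:math:`C_\theta(u, v) = -\frac{1}{\theta}\ln\left(1 + \frac{(e^{-\theta u} - 1)(e^{-\theta v} - 1)}{e^{-\theta} - 1}\right)`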
300,896
Compute the inverse of the conditional cumulative distribution :math:`C(u|v)^{-1}`. Args: y: `np.ndarray` value of :math:`C(u|v)`. V: `np.ndarray` given value of v. Returns: np.ndarray: Computed values of u.
def percent_point(self, y, V): self.check_fit() if self.theta < 0: return V else: result = [] for _y, _V in zip(y, V): minimum = fminbound(self.partial_derivative_scalar, EPSILON, 1.0, args=(_y, _V)) if isinstance(minimum, np.ndarray): minimum = minimum[0] result.append(minimum) return np.array(result)
300,897
Compute partial derivative :math:`C(u|v)` of cumulative distribution. Args: X: `np.ndarray` y: `float` Returns: np.ndarray
def partial_derivative(self, X, y=0): self.check_fit() U, V = self.split_matrix(X) if self.theta == 0: return V else: num = np.multiply(self._g(U), self._g(V)) + self._g(U) den = np.multiply(self._g(U), self._g(V)) + self._g(1) return (num / den) - y
300,898
Cumulative distribution for the degenerate case of constant distribution. Note that the output of this method will be an array whose unique values are 0 and 1. More information can be found here: https://en.wikipedia.org/wiki/Degenerate_distribution Args: X (numpy.ndarray): Values to compute cdf to. Returns: numpy.ndarray: Cumulative distribution for the given values.
def _constant_cumulative_distribution(self, X): result = np.ones(X.shape) result[np.nonzero(X < self.constant_value)] = 0 return result
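A standalone worked example of the degenerate CDF logic above (constant_value = 3 is just an illustration).

```python
import numpy as np

X = np.array([1, 2, 3, 4])
constant_value = 3

result = np.ones(X.shape)
result[np.nonzero(X < constant_value)] = 0
print(result)   # [0. 0. 1. 1.] -- 0 below the constant, 1 at or above it
```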
300,902
Probability density for the degenerate case of constant distribution. Note that the output of this method will be an array whose unique values are 0 and 1. More information can be found here: https://en.wikipedia.org/wiki/Degenerate_distribution Args: X(numpy.ndarray): Values to compute pdf. Returns: numpy.ndarray: Probability density for the given values
def _constant_probability_density(self, X): result = np.zeros(X.shape) result[np.nonzero(X == self.constant_value)] = 1 return result
300,903
Fit scipy model to an array of values. Args: X(`np.ndarray` or `pd.DataFrame`): Datapoints to be estimated from. Must be 1-d Returns: None
def fit(self, X, *args, **kwargs): self.constant_value = self._get_constant_value(X) if self.constant_value is None: if self.unfittable_model: self.model = getattr(scipy.stats, self.model_class)(*args, **kwargs) else: self.model = getattr(scipy.stats, self.model_class)(X, *args, **kwargs) for name in self.METHOD_NAMES: attribute = getattr(self.__class__, name) if isinstance(attribute, str): setattr(self, name, getattr(self.model, attribute)) elif attribute is None: setattr(self, name, missing_method_scipy_wrapper(lambda x: x)) else: self._replace_constant_methods() self.fitted = True
300,905
Computes the cumulative distribution function for the copula, :math:`C(u, v)` Args: X: `np.ndarray` Returns: np.array: cumulative probability
def cumulative_distribution(self, X): self.check_fit() U, V = self.split_matrix(X) if self.theta == 1: return np.multiply(U, V) else: h = np.power(-np.log(U), self.theta) + np.power(-np.log(V), self.theta) h = -np.power(h, 1.0 / self.theta) cdfs = np.exp(h) return cdfs
300,908
Compute partial derivative :math:`C(u|v)` of cumulative density. Args: X: `np.ndarray` y: `float` Returns: np.ndarray
def partial_derivative(self, X, y=0): self.check_fit() U, V = self.split_matrix(X) if self.theta == 1: return V else: t1 = np.power(-np.log(U), self.theta) t2 = np.power(-np.log(V), self.theta) p1 = self.cumulative_distribution(X) p2 = np.power(t1 + t2, -1 + 1.0 / self.theta) p3 = np.power(-np.log(V), self.theta - 1) return np.divide(np.multiply(np.multiply(p1, p2), p3), V) - y
300,909
Compute probability density function for given copula family. Args: X: `np.ndarray` Returns: np.array: Probability density for the input values.
def probability_density(self, X): self.check_fit() U, V = self.split_matrix(X) a = (self.theta + 1) * np.power(np.multiply(U, V), -(self.theta + 1)) b = np.power(U, -self.theta) + np.power(V, -self.theta) - 1 c = -(2 * self.theta + 1) / self.theta return a * np.power(b, c)
300,912
Computes the cumulative distribution function for the copula, :math:`C(u, v)` Args: X: `np.ndarray` Returns: np.array: cumulative probability
def cumulative_distribution(self, X): self.check_fit() U, V = self.split_matrix(X) if (V == 0).all() or (U == 0).all(): return np.zeros(V.shape[0]) else: cdfs = [ np.power( np.power(U[i], -self.theta) + np.power(V[i], -self.theta) - 1, -1.0 / self.theta ) if (U[i] > 0 and V[i] > 0) else 0 for i in range(len(U)) ] return np.array([max(x, 0) for x in cdfs])
300,913
Compute the inverse of the conditional cumulative distribution :math:`C(u|v)^{-1}`. Args: y: `np.ndarray` value of :math:`C(u|v)`. V: `np.ndarray` given value of v. Returns: np.ndarray: Computed values of u.
def percent_point(self, y, V): self.check_fit() if self.theta < 0: return V else: a = np.power(y, self.theta / (-1 - self.theta)) b = np.power(V, self.theta) u = np.power((a + b - 1) / b, -1 / self.theta) return u
300,914
Compute partial derivative :math:`C(u|v)` of cumulative distribution. Args: X: `np.ndarray` y: `float` Returns: np.ndarray: Derivatives
def partial_derivative(self, X, y=0): self.check_fit() U, V = self.split_matrix(X) if self.theta == 0: return V else: A = np.power(V, -self.theta - 1) B = np.power(V, -self.theta) + np.power(U, -self.theta) - 1 h = np.power(B, (-1 - self.theta) / self.theta) return np.multiply(A, h) - y
300,915
Create a new instance populated with the provided parameter values. Args: parameters(dict): Dictionary containing instance parameters. Returns: Truncnorm: Instance populated with given parameters.
def from_dict(cls, parameters): instance = cls() instance.fitted = parameters['fitted'] instance.constant_value = parameters['constant_value'] if instance.fitted and instance.constant_value is None: instance.model = scipy.stats.truncnorm(parameters['a'], parameters['b']) return instance
300,918
Instantiate a vine copula class. Args: :param vine_type: type of the vine copula, could be 'center','direct','regular' :type vine_type: string
def __init__(self, vine_type, *args, **kwargs): super().__init__(*args, **kwargs) self.vine_type = vine_type self.u_matrix = None self.model = GaussianKDE
300,919
Fit a vine model to the data. Args: X(numpy.ndarray): data to be fitted. truncated(int): max level to build the vine.
def fit(self, X, truncated=3): self.n_sample, self.n_var = X.shape self.columns = X.columns self.tau_mat = X.corr(method='kendall').values self.u_matrix = np.empty([self.n_sample, self.n_var]) self.truncated = truncated self.depth = self.n_var - 1 self.trees = [] self.unis, self.ppfs = [], [] for i, col in enumerate(X): uni = self.model() uni.fit(X[col]) self.u_matrix[:, i] = uni.cumulative_distribution(X[col]) self.unis.append(uni) self.ppfs.append(uni.percent_point) self.train_vine(self.vine_type) self.fitted = True
300,923
Sample new rows. Args: num_rows(int): Number of rows to sample Returns: pandas.DataFrame
def sample(self, num_rows): sampled_values = [] for i in range(num_rows): sampled_values.append(self._sample_row()) return pd.DataFrame(sampled_values, columns=self.columns)
300,927
Fits tree object. Args: :param index: index of the tree :param n_nodes: number of nodes in the tree :tau_matrix: kendall's tau matrix of the data :previous_tree: tree object of previous level :type index: int :type n_nodes: int :type tau_matrix: np.ndarray of size n_nodes*n_nodes
def fit(self, index, n_nodes, tau_matrix, previous_tree, edges=None): self.level = index + 1 self.n_nodes = n_nodes self.tau_matrix = tau_matrix self.previous_tree = previous_tree self.edges = edges or [] if not self.edges: if self.level == 1: self.u_matrix = previous_tree self._build_first_tree() else: self._build_kth_tree() self.prepare_next_tree() self.fitted = True
300,929
Check if two edges satisfy vine constraint. Args: :param edge1: edge object representing edge1 :param edge2: edge object representing edge2 :type edge1: Edge object :type edge2: Edge object Returns: Boolean True if the two edges satisfy vine constraints
def _check_contraint(self, edge1, edge2): full_node = set([edge1.L, edge1.R, edge2.L, edge2.R]) full_node.update(edge1.D) full_node.update(edge2.D) return len(full_node) == (self.level + 1)
300,930
Sort tau matrix by dependence with variable y. Args: :param y: index of variable of interest :type y: int
def _sort_tau_by_y(self, y): # first column is the variable of interest tau_y = self.tau_matrix[:, y] tau_y[y] = np.NaN temp = np.empty([self.n_nodes, 3]) temp[:, 0] = np.arange(self.n_nodes) temp[:, 1] = tau_y temp[:, 2] = abs(tau_y) temp[np.isnan(temp)] = -10 tau_sorted = temp[temp[:, 2].argsort()[::-1]] return tau_sorted
300,932
Compute likelihood of the tree given an U matrix. Args: uni_matrix(numpy.array): univariate matrix to evaluate likelihood on. Returns: tuple[float, numpy.array]: likelihood of the current tree, next level conditional univariate matrix
def get_likelihood(self, uni_matrix): uni_dim = uni_matrix.shape[1] num_edge = len(self.edges) values = np.zeros([1, num_edge]) new_uni_matrix = np.empty([uni_dim, uni_dim]) for i in range(num_edge): edge = self.edges[i] value, left_u, right_u = edge.get_likelihood(uni_matrix) new_uni_matrix[edge.L, edge.R] = left_u new_uni_matrix[edge.R, edge.L] = right_u values[0, i] = np.log(value) return np.sum(values), new_uni_matrix
300,936
Initialize an Edge object. Args: :param left: left_node index (smaller) :param right: right_node index (larger) :param copula_name: name of the fitted copula class :param copula_theta: parameters of the fitted copula class
def __init__(self, index, left, right, copula_name, copula_theta): self.index = index self.L = left self.R = right self.D = set() # dependence_set self.parents = None self.neighbors = [] self.name = copula_name self.theta = copula_theta self.tau = None self.U = None self.likelihood = None
300,948
Find nodes connecting adjacent edges. Args: first(Edge): Edge object representing the first edge. second(Edge): Edge object representing the second edge. Returns: tuple[int, int, set[int]]: The first two values represent left and right node indices of the new edge. The third value is the new dependence set.
def _identify_eds_ing(first, second): A = set([first.L, first.R]) A.update(first.D) B = set([second.L, second.R]) B.update(second.D) depend_set = A & B left, right = sorted(list(A ^ B)) return left, right, depend_set
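A worked example of the set arithmetic: edges (0, 1) and (1, 2) share node 1, so the new edge joins nodes 0 and 2 conditioned on {1}.

```python
A = {0, 1}                   # nodes of the first edge plus its dependence set
B = {1, 2}                   # nodes of the second edge plus its dependence set

depend_set = A & B           # {1}
left, right = sorted(A ^ B)  # 0, 2
```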
300,949
Check if two edges are adjacent. Args: :param another_edge: the other edge to compare against :type another_edge: Edge Returns True if the two edges are adjacent.
def is_adjacent(self, another_edge): return ( self.L == another_edge.L or self.L == another_edge.R or self.R == another_edge.L or self.R == another_edge.R )
300,950
Sort iterable of edges first by left node indices then right. Args: edges(list[Edge]): List of edges to be sorted. Returns: list[Edge]: Sorted list by left and right node indices.
def sort_edge(edges): return sorted(edges, key=lambda x: (x.L, x.R))
300,951
Identify pair univariate value from parents. Args: left_parent(Edge): left parent right_parent(Edge): right parent Returns: tuple[np.ndarray, np.ndarray]: left and right parents univariate.
def get_conditional_uni(cls, left_parent, right_parent): left, right, _ = cls._identify_eds_ing(left_parent, right_parent) left_u = left_parent.U[0] if left_parent.L == left else left_parent.U[1] right_u = right_parent.U[0] if right_parent.L == right else right_parent.U[1] return left_u, right_u
300,952
Compute likelihood given a U matrix. Args: uni_matrix(numpy.array): Matrix to compute the likelihood. Return: tuple(np.ndarray, np.ndarray, np.array): likelihood and conditional values.
def get_likelihood(self, uni_matrix): if self.parents is None: left_u = uni_matrix[:, self.L] right_u = uni_matrix[:, self.R] else: left_ing = list(self.D - self.parents[0].D)[0] right_ing = list(self.D - self.parents[1].D)[0] left_u = uni_matrix[self.L, left_ing] right_u = uni_matrix[self.R, right_ing] copula = Bivariate(self.name) copula.theta = self.theta X_left_right = np.array([[left_u, right_u]]) X_right_left = np.array([[right_u, left_u]]) value = np.sum(copula.probability_density(X_left_right)) left_given_right = copula.partial_derivative(X_left_right) right_given_left = copula.partial_derivative(X_right_left) return value, left_given_right, right_given_left
300,954
Fit a kernel density estimate to a list of values. Args: X: 1-d `np.ndarray` or `pd.Series` or `list` datapoints to be estimated from. This function will fit a gaussian_kde model to the datapoints and store it as a class attribute.
def fit(self, X): self.constant_value = self._get_constant_value(X) if self.constant_value is None: self.model = scipy.stats.gaussian_kde(X) else: self._replace_constant_methods() self.fitted = True
300,957
Evaluate the estimated pdf at a point. Args: X: `float` a datapoint. :type X: float Returns: float: value of the estimated pdf at X.
def probability_density(self, X): self.check_fit() if type(X) not in (int, float): raise ValueError('x must be int or float') return self.model.evaluate(X)[0]
300,958
Computes the integral of a 1-D pdf between two bounds. Args: X(float): a datapoint. U(float): cdf value in [0,1]; it is subtracted from the result, which lets `percent_point` use this method as a root-finding objective. Returns: float: estimated cumulative distribution.
def cumulative_distribution(self, X, U=0): self.check_fit() low_bounds = self.model.dataset.mean() - (5 * self.model.dataset.std()) return self.model.integrate_box_1d(low_bounds, X) - U
300,959
Given a cdf value, returns a value in original space. Args: U: `int` or `float` cdf value in [0,1] Returns: float: value in original space
def percent_point(self, U): self.check_fit() if not 0 < U < 1: raise ValueError('cdf value must be in [0,1]') return scipy.optimize.brentq(self.cumulative_distribution, -1000.0, 1000.0, args=(U))
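A hypothetical usage sketch combining the scalar KDE methods above; GaussianKDE is an assumed class name for the model they belong to.

```python
import numpy as np

kde = GaussianKDE()               # assumed class name
kde.fit(np.random.normal(size=1000))

kde.probability_density(0.0)      # density estimate at a single point
kde.cumulative_distribution(0.0)  # roughly 0.5 for a centred sample
kde.percent_point(0.5)            # roughly 0.0, the estimated median
```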
300,960
Computes the integral of a 1-D pdf between two bounds Args: X(numpy.array): Shaped (1, n), containing the datapoints. Returns: numpy.array: estimated cumulative distribution.
def cumulative_distribution(self, X): self.check_fit() low_bounds = self.model.dataset.mean() - (5 * self.model.dataset.std()) result = [] for value in X: result.append(self.model.integrate_box_1d(low_bounds, value)) return np.array(result)
300,963
Given a cdf value, returns a value in original space. Args: U(numpy.array): cdf values in [0,1] Returns: numpy.array: value in original space
def percent_point(self, U): self.check_fit() return scipy.optimize.brentq(self._brentq_cdf(U), -1000.0, 1000.0)
300,965
Provide Python 3 compatibility by converting a byte-based file stream into a string-based file stream. Arguments: fbuffer: file-like object containing bytes Returns: string-based file buffer
def byte_adaptor(fbuffer): if six.PY3: strings = fbuffer.read().decode('latin-1') fbuffer = six.StringIO(strings) return fbuffer else: return fbuffer
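A small sketch showing the intended use: wrapping a bytes stream so json.load sees text on Python 3 (the payload is made up).

```python
import io
import json

raw = io.BytesIO(b'{"data": [1, 2, 3]}')
text_stream = byte_adaptor(raw)
print(json.load(text_stream))   # {'data': [1, 2, 3]}
```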
304,820
Convert JavaScript literals such as true, false, none and NaN into their Python equivalents or quoted words. Arguments: buffer: string to be converted Returns: string after conversion
def js_adaptor(buffer): buffer = re.sub('true', 'True', buffer) buffer = re.sub('false', 'False', buffer) buffer = re.sub('none', 'None', buffer) buffer = re.sub('NaN', '"NaN"', buffer) return buffer
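A quick illustration of the substitutions performed (the sample string is made up).

```python
print(js_adaptor('{"ok": true, "ratio": NaN, "extra": none}'))
# -> {"ok": True, "ratio": "NaN", "extra": None}
```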
304,821
Get a list of indices and codes. params: as_json: True | False returns: a list or JSON of index codes
def get_index_list(self, as_json=False): url = self.index_url req = Request(url, None, self.headers) # raises URLError or HTTPError resp = self.opener.open(req) resp = byte_adaptor(resp) resp_list = json.load(resp)['data'] index_list = [str(item['name']) for item in resp_list] return self.render_response(index_list, as_json)
304,829
Sets an option Parameters: - key - value
def setOption(self, key, value): self.send_setOption(key, value) self.recv_setOption()
305,443
run a query synchronously and return a handle (QueryHandle). Parameters: - query - clientCtx
def executeAndWait(self, query, clientCtx): self.send_executeAndWait(query, clientCtx) return self.recv_executeAndWait()
305,495
Get the results of a query. This is non-blocking. Caller should check Results.ready to determine if the results are in yet. The call requests the batch size of fetch. Parameters: - query_id - start_over - fetch_size
def fetch(self, query_id, start_over, fetch_size): self.send_fetch(query_id, start_over, fetch_size) return self.recv_fetch()
305,498
Prints a table of artifact definitions. Args: src_dict (dict[str, ArtifactDefinition]): artifact definitions by name.
def _PrintDictAsTable(self, src_dict): key_list = list(src_dict.keys()) key_list.sort() print('|', end='') for key in key_list: print(' {0:s} |'.format(key), end='') print('') print('|', end='') for key in key_list: print(' :---: |', end='') print('') print('|', end='') for key in key_list: print(' {0!s} |'.format(src_dict[key]), end='') print('\n')
307,069
Initializes a source type. Args: names (Optional[str]): artifact definition names. Raises: FormatError: when artifact names is not set.
def __init__(self, names=None): if not names: raise errors.FormatError('Missing names value.') super(ArtifactGroupSourceType, self).__init__() self.names = names
307,074
Initializes a source type. Args: args (list[str]): arguments to the command to run. cmd (str): command to run. Raises: FormatError: when args or cmd is not set.
def __init__(self, args=None, cmd=None): if args is None or cmd is None: raise errors.FormatError('Missing args or cmd value.') super(CommandSourceType, self).__init__() self.args = args self.cmd = cmd
307,075
Initializes a source type. Args: paths (Optional[str]): paths relative to the root of the file system. separator (Optional[str]): path segment separator. Raises: FormatError: when paths is not set.
def __init__(self, paths=None, separator='/'): if not paths: raise errors.FormatError('Missing directory value.') super(DirectorySourceType, self).__init__() self.paths = paths self.separator = separator
307,076
Initializes a source type. Args: paths (Optional[str]): paths relative to the root of the file system. separator (Optional[str]): path segment separator. Raises: FormatError: when paths is not set.
def __init__(self, paths=None, separator='/'): if not paths: raise errors.FormatError('Missing paths value.') super(FileSourceType, self).__init__() self.paths = paths self.separator = separator
307,078
Initializes a source type. Args: paths (Optional[str]): paths relative to the root of the file system. separator (Optional[str]): path segment separator. Raises: FormatError: when paths is not set.
def __init__(self, paths=None, separator='/'): if not paths: raise errors.FormatError('Missing paths value.') super(PathSourceType, self).__init__() self.paths = paths self.separator = separator
307,079
Initializes a source type. Args: keys (Optional[list[str]]): key paths relative to the root of the Windows Registry. Raises: FormatError: when keys is not set.
def __init__(self, keys=None): if not keys: raise errors.FormatError('Missing keys value.') if not isinstance(keys, list): raise errors.FormatError('keys must be a list') for key in keys: self.ValidateKey(key) super(WindowsRegistryKeySourceType, self).__init__() self.keys = keys
307,080
Validates this key against supported key names. Args: key_path (str): path of a Windows Registry key. Raises: FormatError: when key is not supported.
def ValidateKey(cls, key_path): for prefix in cls.VALID_PREFIXES: if key_path.startswith(prefix): return # TODO: move check to validator. if key_path.startswith('HKEY_CURRENT_USER\\'): raise errors.FormatError( 'HKEY_CURRENT_USER\\ is not supported instead use: ' 'HKEY_USERS\\%%users.sid%%\\') raise errors.FormatError( 'Unsupported Registry key path: {0:s}'.format(key_path))
307,081
Initializes a source type. Args: key_value_pairs (Optional[list[tuple[str, str]]]): key path and value name pairs, where key paths are relative to the root of the Windows Registry. Raises: FormatError: when key value pairs is not set.
def __init__(self, key_value_pairs=None): if not key_value_pairs: raise errors.FormatError('Missing key value pairs value.') if not isinstance(key_value_pairs, list): raise errors.FormatError('key_value_pairs must be a list') for pair in key_value_pairs: if not isinstance(pair, dict): raise errors.FormatError('key_value_pair must be a dict') if set(pair.keys()) != set(['key', 'value']): key_value_pairs = ', '.join([ '{0:s}: {1:s}'.format(key, value) for key, value in key_value_pairs ]) error_message = ( 'key_value_pair missing "key" and "value" keys, got: ' '{0:s}').format(key_value_pairs) raise errors.FormatError(error_message) WindowsRegistryKeySourceType.ValidateKey(pair['key']) super(WindowsRegistryValueSourceType, self).__init__() self.key_value_pairs = key_value_pairs
307,082
Initializes a source type. Args: base_object (Optional[str]): WMI base object. query (Optional[str]): WMI query. Raises: FormatError: when query is not set.
def __init__(self, base_object=None, query=None): if not query: raise errors.FormatError('Missing query value.') super(WMIQuerySourceType, self).__init__() self.base_object = base_object self.query = query
307,083
Creates a source type. Args: type_indicator (str): source type indicator. attributes (dict[str, object]): source type attributes. Returns: SourceType: a source type. Raises: FormatError: if the type indicator is not set or unsupported, or if required attributes are missing.
def CreateSourceType(cls, type_indicator, attributes): if type_indicator not in cls._source_type_classes: raise errors.FormatError( 'Unsupported type indicator: {0:s}.'.format(type_indicator)) return cls._source_type_classes[type_indicator](**attributes)
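A hedged usage sketch of the factory method; the factory class name (SourceTypeFactory) and the 'COMMAND' indicator string are assumptions about how the registry is set up.

```python
# Assumes CommandSourceType has been registered under the 'COMMAND' indicator.
source = SourceTypeFactory.CreateSourceType(
    'COMMAND', {'cmd': '/bin/ls', 'args': ['-la']})
print(source.cmd, source.args)
```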
307,085
Deregisters a source type. Source types are identified based on their type indicator. Args: source_type_class (type): source type. Raises: KeyError: if a source type is not set for the corresponding type indicator.
def DeregisterSourceType(cls, source_type_class): if source_type_class.TYPE_INDICATOR not in cls._source_type_classes: raise KeyError( 'Source type not set for type: {0:s}.'.format( source_type_class.TYPE_INDICATOR)) del cls._source_type_classes[source_type_class.TYPE_INDICATOR]
307,086
Registers a source type. Source types are identified based on their type indicator. Args: source_type_class (type): source type. Raises: KeyError: if a source type is already set for the corresponding type indicator.
def RegisterSourceType(cls, source_type_class): if source_type_class.TYPE_INDICATOR in cls._source_type_classes: raise KeyError( 'Source type already set for type: {0:s}.'.format( source_type_class.TYPE_INDICATOR)) cls._source_type_classes[source_type_class.TYPE_INDICATOR] = ( source_type_class)
307,087
Checks if the paths are valid MacOS paths. Args: filename (str): name of the artifacts definition file. artifact_definition (ArtifactDefinition): artifact definition. source (SourceType): source definition. paths (list[str]): paths to validate. Returns: bool: True if the MacOS paths are valid.
def _CheckMacOSPaths(self, filename, artifact_definition, source, paths): result = True paths_with_private = [] paths_with_symbolic_link_to_private = [] for path in paths: path_lower = path.lower() path_segments = path_lower.split(source.separator) if not path_segments: logging.warning(( 'Empty path defined by artifact definition: {0:s} in file: ' '{1:s}').format(artifact_definition.name, filename)) result = False elif len(path_segments) == 1: continue elif path_segments[1] in self._MACOS_PRIVATE_SUB_PATHS: paths_with_symbolic_link_to_private.append(path) elif path_segments[1] == 'private' and len(path_segments) >= 2: if path_segments[2] in self._MACOS_PRIVATE_SUB_PATHS: paths_with_private.append(path) else: logging.warning(( 'Unsupported private path: {0:s} defined by artifact definition: ' '{1:s} in file: {2:s}').format( path, artifact_definition.name, filename)) result = False for private_path in paths_with_private: if private_path[8:] not in paths_with_symbolic_link_to_private: logging.warning(( 'Missing symbolic link: {0:s} for path: {1:s} defined by artifact ' 'definition: {2:s} in file: {3:s}').format( private_path[8:], private_path, artifact_definition.name, filename)) result = False for path in paths_with_symbolic_link_to_private: private_path = '/private{0:s}'.format(path) if private_path not in paths_with_private: logging.warning(( 'Missing path: {0:s} for symbolic link: {1:s} defined by artifact ' 'definition: {2:s} in file: {3:s}').format( private_path, path, artifact_definition.name, filename)) result = False return result
307,090
Checks if a path is a valid Windows path. Args: filename (str): name of the artifacts definition file. artifact_definition (ArtifactDefinition): artifact definition. source (SourceType): source definition. path (str): path to validate. Returns: bool: True if the Windows path is valid.
def _CheckWindowsPath(self, filename, artifact_definition, source, path): result = True number_of_forward_slashes = path.count('/') number_of_backslashes = path.count('\\') if (number_of_forward_slashes < number_of_backslashes and source.separator != '\\'): logging.warning(( 'Incorrect path separator: {0:s} in path: {1:s} defined ' 'by artifact definition: {2:s} in file: {3:s}').format( source.separator, path, artifact_definition.name, filename)) result = False if source.separator != '\\': return result path_lower = path.lower() path_segments = path_lower.split(source.separator) if not path_segments: logging.warning(( 'Empty path defined by artifact definition: {0:s} in file: ' '{1:s}').format(artifact_definition.name, filename)) result = False elif path_segments[0].startswith('%%users.') and path_segments[0] not in ( '%%users.appdata%%', '%%users.homedir%%', '%%users.localappdata%%', '%%users.temp%%', '%%users.username%%', '%%users.userprofile%%'): logging.warning(( 'Unsupported "{0:s}" in path: {1:s} defined by artifact ' 'definition: {2:s} in file: {3:s}').format( path_segments[0], path, artifact_definition.name, filename)) result = False elif path_segments[0] == '%%users.homedir%%': logging.warning(( 'Replace "%%users.homedir%%" by "%%users.userprofile%%" in path: ' '{0:s} defined by artifact definition: {1:s} in file: ' '{2:s}').format(path, artifact_definition.name, filename)) result = False elif path_lower.startswith('%%users.userprofile%%\\appdata\\local\\'): logging.warning(( 'Replace "%%users.userprofile%%\\AppData\\Local" by ' '"%%users.localappdata%%" in path: {0:s} defined by artifact ' 'definition: {1:s} in file: {2:s}').format( path, artifact_definition.name, filename)) result = False elif path_lower.startswith('%%users.userprofile%%\\appdata\\roaming\\'): logging.warning(( 'Replace "%%users.userprofile%%\\AppData\\Roaming" by ' '"%%users.appdata%%" in path: {0:s} defined by artifact ' 'definition: {1:s} in file: {2:s}').format( path, artifact_definition.name, filename)) result = False elif path_lower.startswith('%%users.userprofile%%\\application data\\'): logging.warning(( 'Replace "%%users.userprofile%%\\Application Data" by ' '"%%users.appdata%%" in path: {0:s} defined by artifact ' 'definition: {1:s} in file: {2:s}').format( path, artifact_definition.name, filename)) result = False elif path_lower.startswith( '%%users.userprofile%%\\local settings\\application data\\'): logging.warning(( 'Replace "%%users.userprofile%%\\Local Settings\\Application Data" ' 'by "%%users.localappdata%%" in path: {0:s} defined by artifact ' 'definition: {1:s} in file: {2:s}').format( path, artifact_definition.name, filename)) result = False for path_segment in path_segments: if path_segment.startswith('%%') and path_segment.endswith('%%'): if (path_segment.startswith('%%environ_') and path_segment not in self._SUPPORTED_WINDOWS_ENVIRONMENT_VARIABLES): result = False logging.warning(( 'Artifact definition: {0:s} in file: {1:s} contains Windows ' 'path that contains an unuspported environment variable: ' '"{2:s}".').format( artifact_definition.name, filename, path_segment)) elif (path_segment.startswith('%%users.') and path_segment not in self._SUPPORTED_WINDOWS_USERS_VARIABLES): result = False logging.warning(( 'Artifact definition: {0:s} in file: {1:s} contains Windows ' 'path that contains an unsupported users variable: ' '"{2:s}". ').format( artifact_definition.name, filename, path_segment)) return result
307,091
Checks if a path is a valid Windows Registry key path. Args: filename (str): name of the artifacts definition file. artifact_definition (ArtifactDefinition): artifact definition. key_path (str): Windows Registry key path to validate. Returns: bool: True if the Windows Registry key path is valid.
def _CheckWindowsRegistryKeyPath( self, filename, artifact_definition, key_path): result = True key_path_segments = key_path.lower().split('\\') if key_path_segments[0] == '%%current_control_set%%': result = False logging.warning(( 'Artifact definition: {0:s} in file: {1:s} contains Windows ' 'Registry key path that starts with ' '%%CURRENT_CONTROL_SET%%. Replace %%CURRENT_CONTROL_SET%% with ' 'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet').format( artifact_definition.name, filename)) for segment_index, key_path_segment in enumerate(key_path_segments): if key_path_segment.startswith('%%') and key_path_segment.endswith('%%'): if (segment_index == 1 and key_path_segment == '%%users.sid%%' and key_path_segments[0] == 'hkey_users'): continue if key_path_segment.startswith('%%environ_'): result = False logging.warning(( 'Artifact definition: {0:s} in file: {1:s} contains Windows ' 'Registry key path that contains an environment variable: ' '"{2:s}". Usage of environment variables in key paths is not ' 'encouraged at this time.').format( artifact_definition.name, filename, key_path_segment)) elif key_path_segment.startswith('%%users.'): result = False logging.warning(( 'Artifact definition: {0:s} in file: {1:s} contains Windows ' 'Registry key path that contains a users variable: "{2:s}". ' 'Usage of users variables in key paths, except for ' '"HKEY_USERS\\%%users.sid%%", is not encouraged at this ' 'time.').format( artifact_definition.name, filename, key_path_segment)) return result
307,092
Checks whether the Registry key paths are already defined by other artifacts. Note that at the moment this function will only find exact duplicate Registry key paths. Args: filename (str): name of the artifacts definition file. artifact_definition (ArtifactDefinition): artifact definition. source (SourceType): source definition. Returns: bool: True if the Registry key paths defined by the source type are used in other artifacts.
def _HasDuplicateRegistryKeyPaths( self, filename, artifact_definition, source): result = False intersection = self._artifact_registry_key_paths.intersection( set(source.keys)) if intersection: duplicate_key_paths = '\n'.join(intersection) logging.warning(( 'Artifact definition: {0:s} in file: {1:s} has duplicate ' 'Registry key paths:\n{2:s}').format( artifact_definition.name, filename, duplicate_key_paths)) result = True self._artifact_registry_key_paths.update(source.keys) return result
307,093
Validates the artifacts definition in a specific file. Args: filename (str): name of the artifacts definition file. Returns: bool: True if the file contains valid artifacts definitions.
def CheckFile(self, filename): result = True artifact_reader = reader.YamlArtifactsReader() try: for artifact_definition in artifact_reader.ReadFile(filename): try: self._artifact_registry.RegisterDefinition(artifact_definition) except KeyError: logging.warning( 'Duplicate artifact definition: {0:s} in file: {1:s}'.format( artifact_definition.name, filename)) result = False artifact_definition_supports_macos = ( definitions.SUPPORTED_OS_DARWIN in ( artifact_definition.supported_os)) artifact_definition_supports_windows = ( definitions.SUPPORTED_OS_WINDOWS in ( artifact_definition.supported_os)) for source in artifact_definition.sources: if source.type_indicator in ( definitions.TYPE_INDICATOR_FILE, definitions.TYPE_INDICATOR_PATH): if (definitions.SUPPORTED_OS_DARWIN in source.supported_os or ( artifact_definition_supports_macos and not source.supported_os)): if not self._CheckMacOSPaths( filename, artifact_definition, source, source.paths): result = False elif (artifact_definition_supports_windows or definitions.SUPPORTED_OS_WINDOWS in source.supported_os): for path in source.paths: if not self._CheckWindowsPath( filename, artifact_definition, source, path): result = False elif source.type_indicator == ( definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY): # Exempt the legacy file from duplicate checking because it has # duplicates intentionally. if (filename != self.LEGACY_PATH and self._HasDuplicateRegistryKeyPaths( filename, artifact_definition, source)): result = False for key_path in source.keys: if not self._CheckWindowsRegistryKeyPath( filename, artifact_definition, key_path): result = False elif source.type_indicator == ( definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE): for key_value_pair in source.key_value_pairs: if not self._CheckWindowsRegistryKeyPath( filename, artifact_definition, key_value_pair['key']): result = False except errors.FormatError as exception: logging.warning( 'Unable to validate file: {0:s} with error: {1!s}'.format( filename, exception)) result = False return result
307,094
Deregisters an artifact definition. Artifact definitions are identified based on their lower case name. Args: artifact_definition (ArtifactDefinition): an artifact definition. Raises: KeyError: if an artifact definition is not set for the corresponding name.
def DeregisterDefinition(self, artifact_definition): artifact_definition_name = artifact_definition.name.lower() if artifact_definition_name not in self._artifact_definitions: raise KeyError( 'Artifact definition not set for name: {0:s}.'.format( artifact_definition.name)) del self._artifact_definitions[artifact_definition_name]
307,096
Registers an artifact definition. Artifact definitions are identified based on their lower case name. Args: artifact_definition (ArtifactDefinition): an artifact definition. Raises: KeyError: if artifact definition is already set for the corresponding name.
def RegisterDefinition(self, artifact_definition): artifact_definition_name = artifact_definition.name.lower() if artifact_definition_name in self._artifact_definitions: raise KeyError( 'Artifact definition already set for name: {0:s}.'.format( artifact_definition.name)) self._artifact_definitions[artifact_definition_name] = artifact_definition self._defined_artifact_names.add(artifact_definition.name) for source in artifact_definition.sources: if source.type_indicator == definitions.TYPE_INDICATOR_ARTIFACT_GROUP: self._artifact_name_references.update(source.names)
307,097
Reads artifact definitions into the registry from files in a directory. This function does not recurse into subdirectories. Args: artifacts_reader (ArtifactsReader): an artifacts reader. path (str): path of the directory to read from. extension (Optional[str]): extension of the filenames to read. Raises: KeyError: if a duplicate artifact definition is encountered.
def ReadFromDirectory(self, artifacts_reader, path, extension='yaml'): for artifact_definition in artifacts_reader.ReadDirectory( path, extension=extension): self.RegisterDefinition(artifact_definition)
307,098
Reads artifact definitions into the registry from a file. Args: artifacts_reader (ArtifactsReader): an artifacts reader. filename (str): name of the file to read from.
def ReadFromFile(self, artifacts_reader, filename): for artifact_definition in artifacts_reader.ReadFile(filename): self.RegisterDefinition(artifact_definition)
307,099
Reads artifact definitions into the registry from a file-like object. Args: artifacts_reader (ArtifactsReader): an artifacts reader. file_object (file): file-like object to read from.
def ReadFileObject(self, artifacts_reader, file_object): for artifact_definition in artifacts_reader.ReadFileObject(file_object): self.RegisterDefinition(artifact_definition)
307,100
Initializes a dependency configuration. Args: name (str): name of the dependency.
def __init__(self, name): super(DependencyDefinition, self).__init__() self.dpkg_name = None self.is_optional = False self.l2tbinaries_macos_name = None self.l2tbinaries_name = None self.maximum_version = None self.minimum_version = None self.name = name self.pypi_name = None self.python2_only = False self.python3_only = False self.rpm_name = None self.version_property = None
307,101
Retrieves a value from the config parser. Args: config_parser (ConfigParser): configuration parser. section_name (str): name of the section that contains the value. value_name (str): name of the value. Returns: object: configuration value or None if the value does not exist.
def _GetConfigValue(self, config_parser, section_name, value_name): try: return config_parser.get(section_name, value_name) except configparser.NoOptionError: return None
307,102
Reads dependency definitions. Args: file_object (file): file-like object to read from. Yields: DependencyDefinition: dependency definition.
def Read(self, file_object): config_parser = configparser.RawConfigParser() # pylint: disable=deprecated-method # TODO: replace readfp by read_file, check if Python 2 compatible config_parser.readfp(file_object) for section_name in config_parser.sections(): dependency_definition = DependencyDefinition(section_name) for value_name in self._VALUE_NAMES: value = self._GetConfigValue(config_parser, section_name, value_name) setattr(dependency_definition, value_name, value) yield dependency_definition
307,103
Initializes a dependency helper. Args: configuration_file (Optional[str]): path to the dependencies configuration file.
def __init__(self, configuration_file='dependencies.ini'): super(DependencyHelper, self).__init__() self._test_dependencies = {} self.dependencies = {} dependency_reader = DependencyDefinitionReader() with open(configuration_file, 'r') as file_object: for dependency in dependency_reader.Read(file_object): self.dependencies[dependency.name] = dependency dependency = DependencyDefinition('mock') dependency.minimum_version = '0.7.1' dependency.version_property = '__version__' self._test_dependencies['mock'] = dependency
307,104
Checks the availability of a Python module. Args: dependency (DependencyDefinition): dependency definition. Returns: tuple: consists: bool: True if the Python module is available and conforms to the minimum required version, False otherwise. str: status message.
def _CheckPythonModule(self, dependency): module_object = self._ImportPythonModule(dependency.name) if not module_object: status_message = 'missing: {0:s}'.format(dependency.name) return False, status_message if not dependency.version_property: return True, dependency.name return self._CheckPythonModuleVersion( dependency.name, module_object, dependency.version_property, dependency.minimum_version, dependency.maximum_version)
307,105
Checks the version of a Python module. Args: module_name (str): name of the Python module. module_object (module): Python module. version_property (str): version attribute or function. minimum_version (str): minimum version. maximum_version (str): maximum version. Returns: tuple: consists: bool: True if the Python module is available and conforms to the minimum required version, False otherwise. str: status message.
def _CheckPythonModuleVersion( self, module_name, module_object, version_property, minimum_version, maximum_version): module_version = None if not version_property.endswith('()'): module_version = getattr(module_object, version_property, None) else: version_method = getattr( module_object, version_property[:-2], None) if version_method: module_version = version_method() if not module_version: status_message = ( 'unable to determine version information for: {0:s}').format( module_name) return False, status_message # Make sure the module version is a string. module_version = '{0!s}'.format(module_version) # Split the version string and convert every digit into an integer. # A string compare of both version strings will yield an incorrect result. # Strip any semantic suffixes such as a1, b1, pre, post, rc, dev. module_version = self._VERSION_NUMBERS_REGEX.findall(module_version)[0] if module_version[-1] == '.': module_version = module_version[:-1] try: module_version_map = list( map(int, self._VERSION_SPLIT_REGEX.split(module_version))) except ValueError: status_message = 'unable to parse module version: {0:s} {1:s}'.format( module_name, module_version) return False, status_message if minimum_version: try: minimum_version_map = list( map(int, self._VERSION_SPLIT_REGEX.split(minimum_version))) except ValueError: status_message = 'unable to parse minimum version: {0:s} {1:s}'.format( module_name, minimum_version) return False, status_message if module_version_map < minimum_version_map: status_message = ( '{0:s} version: {1!s} is too old, {2!s} or later required').format( module_name, module_version, minimum_version) return False, status_message if maximum_version: try: maximum_version_map = list( map(int, self._VERSION_SPLIT_REGEX.split(maximum_version))) except ValueError: status_message = 'unable to parse maximum version: {0:s} {1:s}'.format( module_name, maximum_version) return False, status_message if module_version_map > maximum_version_map: status_message = ( '{0:s} version: {1!s} is too recent, {2!s} or earlier ' 'required').format(module_name, module_version, maximum_version) return False, status_message status_message = '{0:s} version: {1!s}'.format(module_name, module_version) return True, status_message
307,106
Prints the check dependency status. Args: dependency (DependencyDefinition): dependency definition. result (bool): True if the Python module is available and conforms to the minimum required version, False otherwise. status_message (str): status message. verbose_output (Optional[bool]): True if output should be verbose.
def _PrintCheckDependencyStatus( self, dependency, result, status_message, verbose_output=True): if not result or dependency.is_optional: if dependency.is_optional: status_indicator = '[OPTIONAL]' else: status_indicator = '[FAILURE]' print('{0:s}\t{1:s}'.format(status_indicator, status_message)) elif verbose_output: print('[OK]\t\t{0:s}'.format(status_message))
307,108
Checks the availability of the dependencies. Args: verbose_output (Optional[bool]): True if output should be verbose. Returns: bool: True if the dependencies are available, False otherwise.
def CheckDependencies(self, verbose_output=True):
  print('Checking availability and versions of dependencies.')
  check_result = True

  for module_name, dependency in sorted(self.dependencies.items()):
    if module_name == 'sqlite3':
      result, status_message = self._CheckSQLite3()
    else:
      result, status_message = self._CheckPythonModule(dependency)

    if not result and module_name == 'lzma':
      dependency.name = 'backports.lzma'
      result, status_message = self._CheckPythonModule(dependency)

    if not result and not dependency.is_optional:
      check_result = False

    self._PrintCheckDependencyStatus(
        dependency, result, status_message, verbose_output=verbose_output)

  if check_result and not verbose_output:
    print('[OK]')

  print('')
  return check_result
307,109
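For context, a self-contained sketch of the same availability check without the surrounding helper class; the DEPENDENCIES table and module names below are purely illustrative, not taken from the project's configuration.

import importlib

# Purely illustrative dependency table: module name -> is_optional.
DEPENDENCIES = {
    'yaml': False,
    'lzma': True,
}


def CheckAvailability():
  """Returns True if all non-optional modules can be imported."""
  check_result = True
  for module_name, is_optional in sorted(DEPENDENCIES.items()):
    try:
      importlib.import_module(module_name)
      print('[OK]\t\t{0:s}'.format(module_name))
    except ImportError:
      status_indicator = '[OPTIONAL]' if is_optional else '[FAILURE]'
      print('{0:s}\tmissing: {1:s}'.format(status_indicator, module_name))
      if not is_optional:
        check_result = False
  return check_result


if __name__ == '__main__':
  CheckAvailability()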
Checks the availability of the dependencies when running tests. Args: verbose_output (Optional[bool]): True if output should be verbose. Returns: bool: True if the dependencies are available, False otherwise.
def CheckTestDependencies(self, verbose_output=True):
  if not self.CheckDependencies(verbose_output=verbose_output):
    return False

  print('Checking availability and versions of test dependencies.')
  check_result = True

  for dependency in sorted(
      self._test_dependencies.values(),
      key=lambda dependency: dependency.name):
    result, status_message = self._CheckPythonModule(dependency)
    if not result:
      check_result = False

    self._PrintCheckDependencyStatus(
        dependency, result, status_message, verbose_output=verbose_output)

  if check_result and not verbose_output:
    print('[OK]')

  print('')
  return check_result
307,110
Reads the optional artifact definition labels. Args: artifact_definition_values (dict[str, object]): artifact definition values. artifact_definition (ArtifactDefinition): an artifact definition. name (str): name of the artifact definition. Raises: FormatError: if there are undefined labels.
def _ReadLabels(self, artifact_definition_values, artifact_definition, name):
  labels = artifact_definition_values.get('labels', [])

  undefined_labels = set(labels).difference(self.labels)
  if undefined_labels:
    raise errors.FormatError(
        'Artifact definition: {0:s} found undefined labels: {1:s}.'.format(
            name, ', '.join(undefined_labels)))

  artifact_definition.labels = labels
307,113
Reads the optional artifact or source type supported OS. Args: definition_values (dict[str, object]): artifact definition values. definition_object (ArtifactDefinition|SourceType): the definition object. name (str): name of the artifact definition. Raises: FormatError: if there are undefined supported operating systems.
def _ReadSupportedOS(self, definition_values, definition_object, name):
  supported_os = definition_values.get('supported_os', [])

  if not isinstance(supported_os, list):
    raise errors.FormatError(
        'Invalid supported_os type: {0!s}'.format(type(supported_os)))

  undefined_supported_os = set(supported_os).difference(self.supported_os)
  if undefined_supported_os:
    error_string = (
        'Artifact definition: {0:s} undefined supported operating system: '
        '{1:s}.').format(name, ', '.join(undefined_supported_os))
    raise errors.FormatError(error_string)

  definition_object.supported_os = supported_os
307,114
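A small illustration of the set-difference validation used above, with a hypothetical whitelist of operating system names; the real values come from the reader's supported_os attribute, which is not shown in this section.

# Hypothetical whitelist; the actual values live on the reader instance.
SUPPORTED_OS = frozenset(['Darwin', 'Linux', 'Windows'])

supported_os = ['Windows', 'Solaris']
undefined_supported_os = set(supported_os).difference(SUPPORTED_OS)
# undefined_supported_os is now {'Solaris'}, which would trigger a FormatError.
print(undefined_supported_os)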
Reads the artifact definition sources. Args: artifact_definition_values (dict[str, object]): artifact definition values. artifact_definition (ArtifactDefinition): an artifact definition. name (str): name of the artifact definition. Raises: FormatError: if the type indicator is not set or unsupported, or if required attributes are missing.
def _ReadSources(self, artifact_definition_values, artifact_definition, name):
  sources = artifact_definition_values.get('sources')
  if not sources:
    raise errors.FormatError(
        'Invalid artifact definition: {0:s} missing sources.'.format(name))

  for source in sources:
    type_indicator = source.get('type', None)
    if not type_indicator:
      raise errors.FormatError(
          'Invalid artifact definition: {0:s} source type.'.format(name))

    attributes = source.get('attributes', None)

    try:
      source_type = artifact_definition.AppendSource(
          type_indicator, attributes)
    except errors.FormatError as exception:
      raise errors.FormatError(
          'Invalid artifact definition: {0:s}, with error: {1!s}'.format(
              name, exception))

    # TODO: deprecate these left overs from the collector definition.
    if source_type:
      if source.get('returned_types', None):
        raise errors.FormatError((
            'Invalid artifact definition: {0:s} returned_types no longer '
            'supported.').format(name))

      source_type.conditions = source.get('conditions', [])
      self._ReadSupportedOS(source, source_type, name)
      if set(source_type.supported_os) - set(
          artifact_definition.supported_os):
        raise errors.FormatError((
            'Invalid artifact definition: {0:s} missing '
            'supported_os.').format(name))
307,115
Reads an artifact definition from a dictionary. Args: artifact_definition_values (dict[str, object]): artifact definition values. Returns: ArtifactDefinition: an artifact definition. Raises: FormatError: if the format of the artifact definition is not set or incorrect.
def ReadArtifactDefinitionValues(self, artifact_definition_values):
  if not artifact_definition_values:
    raise errors.FormatError('Missing artifact definition values.')

  different_keys = (
      set(artifact_definition_values) - definitions.TOP_LEVEL_KEYS)
  if different_keys:
    different_keys = ', '.join(different_keys)
    raise errors.FormatError('Undefined keys: {0:s}'.format(different_keys))

  name = artifact_definition_values.get('name', None)
  if not name:
    raise errors.FormatError('Invalid artifact definition missing name.')

  # The description is assumed to be mandatory.
  description = artifact_definition_values.get('doc', None)
  if not description:
    raise errors.FormatError(
        'Invalid artifact definition: {0:s} missing description.'.format(
            name))

  artifact_definition = artifact.ArtifactDefinition(
      name, description=description)

  if artifact_definition_values.get('collectors', []):
    raise errors.FormatError(
        'Invalid artifact definition: {0:s} still uses collectors.'.format(
            name))

  urls = artifact_definition_values.get('urls', [])
  if not isinstance(urls, list):
    raise errors.FormatError(
        'Invalid artifact definition: {0:s} urls is not a list.'.format(
            name))

  # TODO: check conditions.
  artifact_definition.conditions = artifact_definition_values.get(
      'conditions', [])
  artifact_definition.provides = artifact_definition_values.get(
      'provides', [])
  self._ReadLabels(artifact_definition_values, artifact_definition, name)
  self._ReadSupportedOS(artifact_definition_values, artifact_definition, name)
  artifact_definition.urls = urls
  self._ReadSources(artifact_definition_values, artifact_definition, name)

  return artifact_definition
307,116
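As an illustration, a hypothetical artifact definition in the dictionary form this method expects, typically produced by parsing a YAML definition file; the name, paths, URL and attribute values below are made up for the example.

# Hypothetical input for ReadArtifactDefinitionValues.
artifact_definition_values = {
    'name': 'ExampleEventLogs',
    'doc': 'Example Windows Event Log files.',
    'sources': [{
        'type': 'FILE',
        'attributes': {
            'paths': ['%%environ_systemroot%%\\System32\\winevt\\Logs\\*.evtx'],
            'separator': '\\'},
        'supported_os': ['Windows'],
    }],
    'supported_os': ['Windows'],
    'urls': ['https://example.com/windows-event-logs'],
}

# A reader exposing ReadArtifactDefinitionValues would turn this dictionary
# into an ArtifactDefinition object, or raise FormatError on malformed input.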
Reads artifact definitions from a directory. This function does not recurse into subdirectories. Args: path (str): path of the directory to read from. extension (Optional[str]): extension of the filenames to read. Yields: ArtifactDefinition: an artifact definition.
def ReadDirectory(self, path, extension='yaml'):
  if extension:
    glob_spec = os.path.join(path, '*.{0:s}'.format(extension))
  else:
    glob_spec = os.path.join(path, '*')

  for artifact_file in glob.glob(glob_spec):
    for artifact_definition in self.ReadFile(artifact_file):
      yield artifact_definition
307,117
Reads artifact definitions from a file. Args: filename (str): name of the file to read from. Yields: ArtifactDefinition: an artifact definition.
def ReadFile(self, filename):
  with io.open(filename, 'r', encoding='utf-8') as file_object:
    for artifact_definition in self.ReadFileObject(file_object):
      yield artifact_definition
307,118
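A usage sketch for the reader methods above, assuming they are exposed by a YAML-based reader class; the class name YamlArtifactsReader and the directory and file paths are assumptions, not confirmed by this section.

from artifacts import reader

artifacts_reader = reader.YamlArtifactsReader()

# Read every *.yaml definition file in a directory (non-recursive).
for artifact_definition in artifacts_reader.ReadDirectory('definitions'):
  print(artifact_definition.name)

# Or read a single definition file.
for artifact_definition in artifacts_reader.ReadFile('definitions/windows.yaml'):
  print(artifact_definition.name)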