Columns:

    id        int64   values from 11 to 59.9k
    original  string  lengths from 33 to 150k characters
    modified  string  lengths from 37 to 150k characters

Each record below lists its id, followed by the original version and then the modified version of the code sample.
id: 42,920
original:

def c_0(clique: list, graph: nx.Graph):
    """Generates the set :math:`C_0` of nodes that are connected to all nodes in the
    input clique subgraph.

    The set :math:`C_0` is defined in :cite:`pullan2006phased` and is used to determine
    nodes that can be added to the current clique to grow it into a larger one.

    Example usage:

    .. code-block::

        >>> from strawberryfields.apps.graph import utils
        >>> import networkx as nx
        >>> graph = nx.complete_graph(10)
        >>> subgraph = [0, 1, 2, 3, 4]
        >>> utils.c_0(subgraph, graph)
        [5, 6, 7, 8, 9]

    Args:
        clique (list[int]): A subgraph specified by a list of nodes; the subgraph
            must be a clique.
        graph (nx.Graph): The input graph.

    Returns:
        list[int]: A list containing the :math:`C_0` nodes for the clique.
    """
    if not is_clique(graph.subgraph(clique)):
        raise ValueError("Input subgraph is not a clique")

    clique = set(clique)
    c_0_nodes = []
    non_clique_nodes = set(graph.nodes) - clique

    for i in non_clique_nodes:
        if clique.issubset(graph.neighbors(i)):
            c_0_nodes.append(i)

    return c_0_nodes

modified:

def c_0(clique: list, graph: nx.Graph):
    """Generates the set :math:`C_0` of nodes that are connected to all nodes in the
    input clique subgraph.

    The set :math:`C_0` is defined in :cite:`pullan2006phased` and is used to determine
    nodes that can be added to the current clique to grow it into a larger one.

    Example usage:

    .. code-block::

        >>> from strawberryfields.apps.graph import utils
        >>> import networkx as nx
        >>> graph = nx.complete_graph(10)
        >>> subgraph = [0, 1, 2, 3, 4]
        >>> utils.c_0(subgraph, graph)
        [5, 6, 7, 8, 9]

    Args:
        clique (list[int]): A subgraph specified by a list of nodes; the subgraph
            must be a clique.
        graph (nx.Graph): the input graph

    Returns:
        list[int]: A list containing the :math:`C_0` nodes for the clique.
    """
    if not is_clique(graph.subgraph(clique)):
        raise ValueError("Input subgraph is not a clique")

    clique = set(clique)
    c_0_nodes = []
    non_clique_nodes = set(graph.nodes) - clique

    for i in non_clique_nodes:
        if clique.issubset(graph.neighbors(i)):
            c_0_nodes.append(i)

    return c_0_nodes

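The two cells above differ only in the wording of the graph argument's docstring. As a minimal standalone sketch of the same C_0 computation (only networkx is assumed; the result is sorted here purely for a deterministic printout):

import networkx as nx

def c_0_sketch(clique, graph):
    # Nodes outside the clique that are adjacent to every clique node,
    # i.e. candidates for growing the clique by one node.
    clique = set(clique)
    return sorted(n for n in set(graph.nodes) - clique
                  if clique.issubset(graph.neighbors(n)))

graph = nx.complete_graph(10)
print(c_0_sketch([0, 1, 2, 3, 4], graph))  # [5, 6, 7, 8, 9]
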
id: 28,594
def plot_pair( data, group="posterior", var_names: Optional[List[str]] = None, filter_vars: Optional[str] = None, coords=None, marginals=False, figsize=None, textsize=None, kind: Union[str, List[str]] = "scatter", gridsize="auto", contour: Optional[bool] = None, plot_kwargs=None, fill_last=False, divergences=False, colorbar=False, labeller=None, ax=None, divergences_kwargs=None, scatter_kwargs=None, kde_kwargs=None, hexbin_kwargs=None, backend=None, backend_kwargs=None, marginal_kwargs=None, point_estimate=None, point_estimate_kwargs=None, point_estimate_marker_kwargs=None, reference_values=None, reference_values_kwargs=None, show=None, ): """ Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal. Parameters ---------- data: obj Any object that can be converted to an :class:`az.InferenceData` object refer to documentation of :func:`az.convert_to_dataset` for details group: str, optional Specifies which InferenceData group should be plotted. Defaults to 'posterior'. var_names: list of variable names, optional Variables to be plotted, if None all variable are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, "like", "regex"}, optional, default=None If ``None`` (default), interpret var_names as the real variables names. If "like", interpret var_names as substrings of the real variables names. If "regex", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. coords: mapping, optional Coordinates of var_names to be plotted. Passed to `Dataset.sel` marginals: bool, optional If True pairplot will include marginal distributions for every variable figsize: figure size tuple If None, size is (8 + numvars, 8 + numvars) textsize: int Text size for labels. If None it will be autoscaled based on figsize. kind : str or List[str] Type of plot to display (scatter, kde and/or hexbin) gridsize: int or (int, int), optional Only works for kind=hexbin. The number of hexagons in the x-direction. The corresponding number of hexagons in the y-direction is chosen such that the hexagons are approximately regular. Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the x-direction and the y-direction. contour : bool, optional, deprecated, Defaults to True. If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True. **Note:** this default is implemented in the body of the code, not in argument processing. fill_last : bool If True fill the last contour of the 2D KDE plot. Defaults to True. divergences: Boolean If True divergences will be plotted in a different color, only if group is either 'prior' or 'posterior'. colorbar: bool If True a colorbar will be included as part of the plot (Defaults to False). Only works when kind=hexbin labeller : labeller instance, optional Class providing the method `make_label_vert` to generate the labels in the plot. Read the :ref:`label_guide` for more details and usage examples. ax: axes, optional Matplotlib axes or bokeh figures. divergences_kwargs: dicts, optional Additional keywords passed to ``ax.scatter`` for divergences scatter_kwargs: Additional keywords passed to ``ax.plot`` when using scatter kind kde_kwargs: dict, optional Additional keywords passed to :func:`az.plot_kde` when using kde kind hexbin_kwargs: dict, optional Additional keywords passed to ``ax.hexbin`` when using hexbin kind backend: str, optional Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib". 
backend_kwargs: bool, optional These are kwargs specific to the backend being used. For additional documentation check the plotting method of the backend. marginal_kwargs: dict, optional Additional keywords passed to :func:`az.plot_dist`, modifying the marginal distributions plotted in the diagonal. point_estimate: str, optional Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be plotted using a scatter marker and vertical/horizontal lines. point_estimate_kwargs: dict, optional Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh) point_estimate_marker_kwargs: dict, optional Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh reference_values: dict, optional Reference values for the plotted variables. The Reference values will be plotted using a scatter marker reference_values_kwargs: dict, optional Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures Examples -------- KDE Pair Plot .. plot:: :context: close-figs >>> import arviz as az >>> centered = az.load_arviz_data('centered_eight') >>> coords = {'school': ['Choate', 'Deerfield']} >>> az.plot_pair(centered, >>> var_names=['theta', 'mu', 'tau'], >>> kind='kde', >>> coords=coords, >>> divergences=True, >>> textsize=18) Hexbin pair plot .. plot:: :context: close-figs >>> az.plot_pair(centered, >>> var_names=['theta', 'mu'], >>> coords=coords, >>> textsize=18, >>> kind='hexbin') Pair plot showing divergences and select variables with regular expressions .. plot:: :context: close-figs >>> az.plot_pair(centered, ... var_names=['^t', 'mu'], ... filter_vars="regex", ... coords=coords, ... divergences=True, ... textsize=18) """ valid_kinds = ["scatter", "kde", "hexbin"] kind_boolean: Union[bool, List[bool]] if isinstance(kind, str): kind_boolean = kind in valid_kinds else: kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))] if not np.all(kind_boolean): raise ValueError((f"Plot type {kind} not recognized." "Plot type must be in {valid_kinds}")) if fill_last or contour: warnings.warn( "fill_last and contour will be deprecated. Please use kde_kwargs", UserWarning, ) if plot_kwargs: warnings.warn( "plot_kwargs will be deprecated." 
" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs", UserWarning, ) if coords is None: coords = {} if labeller is None: labeller = BaseLabeller() # Get posterior draws and combine chains dataset = convert_to_dataset(data, group=group) var_names = _var_names(var_names, dataset, filter_vars) plotters = list( xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True) ) flat_var_names = [ labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters ] divergent_data = None diverging_mask = None # Assigning divergence group based on group param if group == "posterior": divergent_group = "sample_stats" elif group == "prior": divergent_group = "sample_stats_prior" else: divergences = False # Get diverging draws and combine chains if divergences: if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"): divergent_data = convert_to_dataset(data, group=divergent_group) _, diverging_mask = xarray_to_ndarray( divergent_data, var_names=("diverging",), combined=True ) diverging_mask = np.squeeze(diverging_mask) else: divergences = False warnings.warn( "Divergences data not found, plotting without divergences. " "Make sure the sample method provides divergences data and " "that it is present in the `diverging` field of `sample_stats` " "or `sample_stats_prior` or set divergences=False", UserWarning, ) if gridsize == "auto": gridsize = int(dataset.dims["draw"] ** 0.35) numvars = len(flat_var_names) if numvars < 2: raise ValueError("Number of variables to be plotted must be 2 or greater.") pairplot_kwargs = dict( ax=ax, plotters=plotters, numvars=numvars, figsize=figsize, textsize=textsize, kind=kind, scatter_kwargs=scatter_kwargs, kde_kwargs=kde_kwargs, hexbin_kwargs=hexbin_kwargs, gridsize=gridsize, colorbar=colorbar, divergences=divergences, diverging_mask=diverging_mask, divergences_kwargs=divergences_kwargs, flat_var_names=flat_var_names, backend_kwargs=backend_kwargs, marginal_kwargs=marginal_kwargs, show=show, marginals=marginals, point_estimate=point_estimate, point_estimate_kwargs=point_estimate_kwargs, point_estimate_marker_kwargs=point_estimate_marker_kwargs, reference_values=reference_values, reference_values_kwargs=reference_values_kwargs, ) if backend is None: backend = rcParams["plot.backend"] backend = backend.lower() # TODO: Add backend kwargs plot = get_plotting_function("plot_pair", "pairplot", backend) ax = plot(**pairplot_kwargs) return ax
def plot_pair( data, group="posterior", var_names: Optional[List[str]] = None, filter_vars: Optional[str] = None, coords=None, marginals=False, figsize=None, textsize=None, kind: Union[str, List[str]] = "scatter", gridsize="auto", contour: Optional[bool] = None, plot_kwargs=None, fill_last=False, divergences=False, colorbar=False, labeller=None, ax=None, divergences_kwargs=None, scatter_kwargs=None, kde_kwargs=None, hexbin_kwargs=None, backend=None, backend_kwargs=None, marginal_kwargs=None, point_estimate=None, point_estimate_kwargs=None, point_estimate_marker_kwargs=None, reference_values=None, reference_values_kwargs=None, show=None, ): """ Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal. Parameters ---------- data: obj Any object that can be converted to an :class:`arviz.InferenceData` object refer to documentation of :func:`az.convert_to_dataset` for details group: str, optional Specifies which InferenceData group should be plotted. Defaults to 'posterior'. var_names: list of variable names, optional Variables to be plotted, if None all variable are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, "like", "regex"}, optional, default=None If ``None`` (default), interpret var_names as the real variables names. If "like", interpret var_names as substrings of the real variables names. If "regex", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. coords: mapping, optional Coordinates of var_names to be plotted. Passed to `Dataset.sel` marginals: bool, optional If True pairplot will include marginal distributions for every variable figsize: figure size tuple If None, size is (8 + numvars, 8 + numvars) textsize: int Text size for labels. If None it will be autoscaled based on figsize. kind : str or List[str] Type of plot to display (scatter, kde and/or hexbin) gridsize: int or (int, int), optional Only works for kind=hexbin. The number of hexagons in the x-direction. The corresponding number of hexagons in the y-direction is chosen such that the hexagons are approximately regular. Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the x-direction and the y-direction. contour : bool, optional, deprecated, Defaults to True. If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True. **Note:** this default is implemented in the body of the code, not in argument processing. fill_last : bool If True fill the last contour of the 2D KDE plot. Defaults to True. divergences: Boolean If True divergences will be plotted in a different color, only if group is either 'prior' or 'posterior'. colorbar: bool If True a colorbar will be included as part of the plot (Defaults to False). Only works when kind=hexbin labeller : labeller instance, optional Class providing the method `make_label_vert` to generate the labels in the plot. Read the :ref:`label_guide` for more details and usage examples. ax: axes, optional Matplotlib axes or bokeh figures. divergences_kwargs: dicts, optional Additional keywords passed to ``ax.scatter`` for divergences scatter_kwargs: Additional keywords passed to ``ax.plot`` when using scatter kind kde_kwargs: dict, optional Additional keywords passed to :func:`az.plot_kde` when using kde kind hexbin_kwargs: dict, optional Additional keywords passed to ``ax.hexbin`` when using hexbin kind backend: str, optional Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib". 
backend_kwargs: bool, optional These are kwargs specific to the backend being used. For additional documentation check the plotting method of the backend. marginal_kwargs: dict, optional Additional keywords passed to :func:`az.plot_dist`, modifying the marginal distributions plotted in the diagonal. point_estimate: str, optional Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be plotted using a scatter marker and vertical/horizontal lines. point_estimate_kwargs: dict, optional Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh) point_estimate_marker_kwargs: dict, optional Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh reference_values: dict, optional Reference values for the plotted variables. The Reference values will be plotted using a scatter marker reference_values_kwargs: dict, optional Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures Examples -------- KDE Pair Plot .. plot:: :context: close-figs >>> import arviz as az >>> centered = az.load_arviz_data('centered_eight') >>> coords = {'school': ['Choate', 'Deerfield']} >>> az.plot_pair(centered, >>> var_names=['theta', 'mu', 'tau'], >>> kind='kde', >>> coords=coords, >>> divergences=True, >>> textsize=18) Hexbin pair plot .. plot:: :context: close-figs >>> az.plot_pair(centered, >>> var_names=['theta', 'mu'], >>> coords=coords, >>> textsize=18, >>> kind='hexbin') Pair plot showing divergences and select variables with regular expressions .. plot:: :context: close-figs >>> az.plot_pair(centered, ... var_names=['^t', 'mu'], ... filter_vars="regex", ... coords=coords, ... divergences=True, ... textsize=18) """ valid_kinds = ["scatter", "kde", "hexbin"] kind_boolean: Union[bool, List[bool]] if isinstance(kind, str): kind_boolean = kind in valid_kinds else: kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))] if not np.all(kind_boolean): raise ValueError((f"Plot type {kind} not recognized." "Plot type must be in {valid_kinds}")) if fill_last or contour: warnings.warn( "fill_last and contour will be deprecated. Please use kde_kwargs", UserWarning, ) if plot_kwargs: warnings.warn( "plot_kwargs will be deprecated." 
" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs", UserWarning, ) if coords is None: coords = {} if labeller is None: labeller = BaseLabeller() # Get posterior draws and combine chains dataset = convert_to_dataset(data, group=group) var_names = _var_names(var_names, dataset, filter_vars) plotters = list( xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True) ) flat_var_names = [ labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters ] divergent_data = None diverging_mask = None # Assigning divergence group based on group param if group == "posterior": divergent_group = "sample_stats" elif group == "prior": divergent_group = "sample_stats_prior" else: divergences = False # Get diverging draws and combine chains if divergences: if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"): divergent_data = convert_to_dataset(data, group=divergent_group) _, diverging_mask = xarray_to_ndarray( divergent_data, var_names=("diverging",), combined=True ) diverging_mask = np.squeeze(diverging_mask) else: divergences = False warnings.warn( "Divergences data not found, plotting without divergences. " "Make sure the sample method provides divergences data and " "that it is present in the `diverging` field of `sample_stats` " "or `sample_stats_prior` or set divergences=False", UserWarning, ) if gridsize == "auto": gridsize = int(dataset.dims["draw"] ** 0.35) numvars = len(flat_var_names) if numvars < 2: raise ValueError("Number of variables to be plotted must be 2 or greater.") pairplot_kwargs = dict( ax=ax, plotters=plotters, numvars=numvars, figsize=figsize, textsize=textsize, kind=kind, scatter_kwargs=scatter_kwargs, kde_kwargs=kde_kwargs, hexbin_kwargs=hexbin_kwargs, gridsize=gridsize, colorbar=colorbar, divergences=divergences, diverging_mask=diverging_mask, divergences_kwargs=divergences_kwargs, flat_var_names=flat_var_names, backend_kwargs=backend_kwargs, marginal_kwargs=marginal_kwargs, show=show, marginals=marginals, point_estimate=point_estimate, point_estimate_kwargs=point_estimate_kwargs, point_estimate_marker_kwargs=point_estimate_marker_kwargs, reference_values=reference_values, reference_values_kwargs=reference_values_kwargs, ) if backend is None: backend = rcParams["plot.backend"] backend = backend.lower() # TODO: Add backend kwargs plot = get_plotting_function("plot_pair", "pairplot", backend) ax = plot(**pairplot_kwargs) return ax
id: 46,092
original:

def download_file_from_google_drive(id, destination):
    URL = "https://docs.google.com/uc?export=download"

    session = requests.Session()

    response = session.get(URL, params={'id': id}, stream=True)
    token = get_confirm_token(response)

    if token:
        params = {'id': id, 'confirm': token}
        response = session.get(URL, params=params, stream=True)

    save_response_content(response, destination)

modified:

def _download_file_from_google_drive(id, destination):
    URL = "https://docs.google.com/uc?export=download"

    session = requests.Session()

    response = session.get(URL, params={'id': id}, stream=True)
    token = get_confirm_token(response)

    if token:
        params = {'id': id, 'confirm': token}
        response = session.get(URL, params=params, stream=True)

    save_response_content(response, destination)

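Both versions call two helpers that are not shown in this row, get_confirm_token and save_response_content. A hedged sketch of what such helpers typically look like; the cookie prefix and chunk size are assumptions, not taken from the source:

def get_confirm_token(response):
    # Google Drive sets a download-warning cookie for large files; its value
    # is the confirmation token (the cookie name is an assumption).
    for key, value in response.cookies.items():
        if key.startswith('download_warning'):
            return value
    return None

def save_response_content(response, destination, chunk_size=32768):
    # Stream the response body to disk in chunks.
    with open(destination, 'wb') as f:
        for chunk in response.iter_content(chunk_size):
            if chunk:  # skip keep-alive chunks
                f.write(chunk)
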
id: 2,376
def randomized_svd( M, n_components, *, n_oversamples=10, n_iter="auto", power_iteration_normalizer="auto", transpose="auto", flip_sign=True, random_state="warn", lapack_driver="gesdd" ): """Computes a truncated randomized SVD. This method solves the fixed-rank approximation problem described in the Halko et al paper (problem (1.5), p5). Parameters ---------- M : {ndarray, sparse matrix} Matrix to decompose. n_components : int Number of singular values and vectors to extract. n_oversamples : int, default=10 Additional number of random vectors to sample the range of M so as to ensure proper conditioning. The total number of random vectors used to find the range of M is n_components + n_oversamples. Smaller number can improve speed but can negatively impact the quality of approximation of singular vectors and singular values. Users might wish to increase this parameter up to `2*k - n_components` where k is the effective rank, for large matrices, noisy problems, matrices with slowly decaying spectrums, or to increase precision accuracy. See Halko et al (pages 5, 23 and 26). n_iter : int or 'auto', default='auto' Number of power iterations. It can be used to deal with very noisy problems. When 'auto', it is set to 4, unless `n_components` is small (< .1 * min(X.shape)) in which case `n_iter` is set to 7. This improves precision with few components. Note that in general users should rather increase `n_oversamples` before increasing `n_iter` as the principle of the randomized method is to avoid usage of these more costly power iterations steps. When `n_components` is equal or greater to the effective matrix rank and the spectrum does not present a slow decay, `n_iter=0` or `1` should even work fine in theory (see Halko et al paper, page 9). .. versionchanged:: 0.18 power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto' Whether the power iterations are normalized with step-by-step QR factorization (the slowest but most accurate), 'none' (the fastest but numerically unstable when `n_iter` is large, e.g. typically 5 or larger), or 'LU' factorization (numerically stable but can lose slightly in accuracy). The 'auto' mode applies no normalization if `n_iter` <= 2 and switches to LU otherwise. .. versionadded:: 0.18 transpose : bool or 'auto', default='auto' Whether the algorithm should be applied to M.T instead of M. The result should approximately be the same. The 'auto' mode will trigger the transposition if M.shape[1] > M.shape[0] since this implementation of randomized SVD tend to be a little faster in that case. .. versionchanged:: 0.18 flip_sign : bool, default=True The output of a singular value decomposition is only unique up to a permutation of the signs of the singular vectors. If `flip_sign` is set to `True`, the sign ambiguity is resolved by making the largest loadings for each component in the left singular vectors positive. random_state : int, RandomState instance or None, default='warn' The seed of the pseudo random number generator to use when shuffling the data, i.e. getting the random vectors to initialize the algorithm. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. .. versionchanged:: 1.2 The previous behavior (`random_state=0`) is deprecated, and from v1.2 the default value will be `random_state=None`. Set the value of `random_state` explicitly to suppress the deprecation warning. 
lapack_driver : str, {'gesdd', 'gesvd'}, default='gesdd' Whether to use the more efficient divide-and-conquer approach ('gesdd') , or more general rectangular approach ('gesvd') to compute the SVD of the matrix 'B', which is the projection of the 'M' into a the low dimensional subspace, described by Halko et. al. Notes ----- This algorithm finds a (usually very good) approximate truncated singular value decomposition using randomization to speed up the computations. It is particularly fast on large matrices on which you wish to extract only a small number of components. In order to obtain further speed up, `n_iter` can be set <=2 (at the cost of loss of precision). To increase the precision it is recommended to increase `n_oversamples`, up to `2*k-n_components` where k is the effective rank. Usually, `n_components` is chosen to be greater than k so increasing `n_oversamples` up to `n_components` should be enough. References ---------- * Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions (Algorithm 4.3) Halko, et al., 2009 https://arxiv.org/abs/0909.4061 * A randomized algorithm for the decomposition of matrices Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert * An implementation of a randomized algorithm for principal component analysis A. Szlam et al. 2014 """ if isinstance(M, (sparse.lil_matrix, sparse.dok_matrix)): warnings.warn( "Calculating SVD of a {} is expensive. " "csr_matrix is more efficient.".format(type(M).__name__), sparse.SparseEfficiencyWarning, ) if random_state == "warn": warnings.warn( "If 'random_state' is not supplied, the current default " "is to use 0 as a fixed seed. This will change to " "None in version 1.2 leading to non-deterministic results " "that better reflect nature of the randomized_svd solver. " "If you want to silence this warning, set 'random_state' " "to an integer seed or to None explicitly depending " "if you want your code to be deterministic or not.", FutureWarning, ) random_state = 0 random_state = check_random_state(random_state) n_random = n_components + n_oversamples n_samples, n_features = M.shape if n_iter == "auto": # Checks if the number of iterations is explicitly specified # Adjust n_iter. 7 was found a good compromise for PCA. See #5299 n_iter = 7 if n_components < 0.1 * min(M.shape) else 4 if transpose == "auto": transpose = n_samples < n_features if transpose: # this implementation is a bit faster with smaller shape[1] M = M.T Q = randomized_range_finder( M, size=n_random, n_iter=n_iter, power_iteration_normalizer=power_iteration_normalizer, random_state=random_state, ) # project M to the (k + p) dimensional space using the basis vectors B = safe_sparse_dot(Q.T, M) # compute the SVD on the thin matrix: (k + p) wide Uhat, s, Vt = linalg.svd( B, full_matrices=False, lapack_driver=lapack_driver ) del B U = np.dot(Q, Uhat) if flip_sign: if not transpose: U, Vt = svd_flip(U, Vt) else: # In case of transpose u_based_decision=false # to actually flip based on u and not v. U, Vt = svd_flip(U, Vt, u_based_decision=False) if transpose: # transpose back the results according to the input convention return Vt[:n_components, :].T, s[:n_components], U[:, :n_components].T else: return U[:, :n_components], s[:n_components], Vt[:n_components, :]
def randomized_svd( M, n_components, *, n_oversamples=10, n_iter="auto", power_iteration_normalizer="auto", transpose="auto", flip_sign=True, random_state="warn", lapack_driver="gesdd", ): """Computes a truncated randomized SVD. This method solves the fixed-rank approximation problem described in the Halko et al paper (problem (1.5), p5). Parameters ---------- M : {ndarray, sparse matrix} Matrix to decompose. n_components : int Number of singular values and vectors to extract. n_oversamples : int, default=10 Additional number of random vectors to sample the range of M so as to ensure proper conditioning. The total number of random vectors used to find the range of M is n_components + n_oversamples. Smaller number can improve speed but can negatively impact the quality of approximation of singular vectors and singular values. Users might wish to increase this parameter up to `2*k - n_components` where k is the effective rank, for large matrices, noisy problems, matrices with slowly decaying spectrums, or to increase precision accuracy. See Halko et al (pages 5, 23 and 26). n_iter : int or 'auto', default='auto' Number of power iterations. It can be used to deal with very noisy problems. When 'auto', it is set to 4, unless `n_components` is small (< .1 * min(X.shape)) in which case `n_iter` is set to 7. This improves precision with few components. Note that in general users should rather increase `n_oversamples` before increasing `n_iter` as the principle of the randomized method is to avoid usage of these more costly power iterations steps. When `n_components` is equal or greater to the effective matrix rank and the spectrum does not present a slow decay, `n_iter=0` or `1` should even work fine in theory (see Halko et al paper, page 9). .. versionchanged:: 0.18 power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto' Whether the power iterations are normalized with step-by-step QR factorization (the slowest but most accurate), 'none' (the fastest but numerically unstable when `n_iter` is large, e.g. typically 5 or larger), or 'LU' factorization (numerically stable but can lose slightly in accuracy). The 'auto' mode applies no normalization if `n_iter` <= 2 and switches to LU otherwise. .. versionadded:: 0.18 transpose : bool or 'auto', default='auto' Whether the algorithm should be applied to M.T instead of M. The result should approximately be the same. The 'auto' mode will trigger the transposition if M.shape[1] > M.shape[0] since this implementation of randomized SVD tend to be a little faster in that case. .. versionchanged:: 0.18 flip_sign : bool, default=True The output of a singular value decomposition is only unique up to a permutation of the signs of the singular vectors. If `flip_sign` is set to `True`, the sign ambiguity is resolved by making the largest loadings for each component in the left singular vectors positive. random_state : int, RandomState instance or None, default='warn' The seed of the pseudo random number generator to use when shuffling the data, i.e. getting the random vectors to initialize the algorithm. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. .. versionchanged:: 1.2 The previous behavior (`random_state=0`) is deprecated, and from v1.2 the default value will be `random_state=None`. Set the value of `random_state` explicitly to suppress the deprecation warning. 
lapack_driver : str, {'gesdd', 'gesvd'}, default='gesdd' Whether to use the more efficient divide-and-conquer approach ('gesdd') , or more general rectangular approach ('gesvd') to compute the SVD of the matrix 'B', which is the projection of the 'M' into a the low dimensional subspace, described by Halko et. al. Notes ----- This algorithm finds a (usually very good) approximate truncated singular value decomposition using randomization to speed up the computations. It is particularly fast on large matrices on which you wish to extract only a small number of components. In order to obtain further speed up, `n_iter` can be set <=2 (at the cost of loss of precision). To increase the precision it is recommended to increase `n_oversamples`, up to `2*k-n_components` where k is the effective rank. Usually, `n_components` is chosen to be greater than k so increasing `n_oversamples` up to `n_components` should be enough. References ---------- * Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions (Algorithm 4.3) Halko, et al., 2009 https://arxiv.org/abs/0909.4061 * A randomized algorithm for the decomposition of matrices Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert * An implementation of a randomized algorithm for principal component analysis A. Szlam et al. 2014 """ if isinstance(M, (sparse.lil_matrix, sparse.dok_matrix)): warnings.warn( "Calculating SVD of a {} is expensive. " "csr_matrix is more efficient.".format(type(M).__name__), sparse.SparseEfficiencyWarning, ) if random_state == "warn": warnings.warn( "If 'random_state' is not supplied, the current default " "is to use 0 as a fixed seed. This will change to " "None in version 1.2 leading to non-deterministic results " "that better reflect nature of the randomized_svd solver. " "If you want to silence this warning, set 'random_state' " "to an integer seed or to None explicitly depending " "if you want your code to be deterministic or not.", FutureWarning, ) random_state = 0 random_state = check_random_state(random_state) n_random = n_components + n_oversamples n_samples, n_features = M.shape if n_iter == "auto": # Checks if the number of iterations is explicitly specified # Adjust n_iter. 7 was found a good compromise for PCA. See #5299 n_iter = 7 if n_components < 0.1 * min(M.shape) else 4 if transpose == "auto": transpose = n_samples < n_features if transpose: # this implementation is a bit faster with smaller shape[1] M = M.T Q = randomized_range_finder( M, size=n_random, n_iter=n_iter, power_iteration_normalizer=power_iteration_normalizer, random_state=random_state, ) # project M to the (k + p) dimensional space using the basis vectors B = safe_sparse_dot(Q.T, M) # compute the SVD on the thin matrix: (k + p) wide Uhat, s, Vt = linalg.svd( B, full_matrices=False, lapack_driver=lapack_driver ) del B U = np.dot(Q, Uhat) if flip_sign: if not transpose: U, Vt = svd_flip(U, Vt) else: # In case of transpose u_based_decision=false # to actually flip based on u and not v. U, Vt = svd_flip(U, Vt, u_based_decision=False) if transpose: # transpose back the results according to the input convention return Vt[:n_components, :].T, s[:n_components], U[:, :n_components].T else: return U[:, :n_components], s[:n_components], Vt[:n_components, :]
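A hedged usage sketch of randomized_svd as documented above; it assumes the scikit-learn variant of the helper (sklearn.utils.extmath.randomized_svd) and does not pass the lapack_driver argument shown here, since the released scikit-learn signature may not include it:

import numpy as np
from sklearn.utils.extmath import randomized_svd

rng = np.random.RandomState(0)
M = rng.standard_normal(size=(100, 40))

# Truncated SVD keeping 5 components; an explicit seed keeps results
# reproducible and sidesteps the random_state deprecation warning
# described in the docstring.
U, s, Vt = randomized_svd(M, n_components=5, n_oversamples=10, random_state=0)
print(U.shape, s.shape, Vt.shape)  # (100, 5) (5,) (5, 40)
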
id: 31,348
def main() -> None: """main function, parses params and runs command functions :return: :rtype: """ ''' EXECUTION ''' #LOG('command is %s' % (demisto.command(), )) demisto.debug(f'Command being called is {demisto.command()}') try: LOG('Command being called is {command}'.format(command=demisto.command())) if demisto.command() == 'Picus-GetAccessToken': getAccessToken() elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results token = getAccessToken() demisto.results(vectorCompare(token)) elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(attackResultList(token)) elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional token = getAccessToken() demisto.results(specificThreatsResults(token)) elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses token = getAccessToken() demisto.results(peerList(token)) elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses token = getAccessToken() demisto.results(eMailPeerList(token)) elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors token = getAccessToken() demisto.results(attackAllVectors(token)) elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector token = getAccessToken() demisto.results(attackSingle(token)) elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully token = getAccessToken() demisto.results(triggerUpdate(token)) elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config token = getAccessToken() demisto.results(version(token)) elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(threatList(token)) elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(mitigationList(token)) elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters token = getAccessToken() demisto.results(mitreMatrix(token)) elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(sigmaRulesList(token)) elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination token = getAccessToken() demisto.results(vectorList(token)) elif demisto.command() == 'test-module': demisto.results(test_module()) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
def main() -> None: """main function, parses params and runs command functions :return: :rtype: """ ''' EXECUTION ''' #LOG('command is %s' % (demisto.command(), )) demisto.debug(f'Command being called is {demisto.command()}') try: LOG('Command being called is {command}'.format(command=demisto.command())) if demisto.command() == 'Picus-GetAccessToken': getAccessToken() elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results token = getAccessToken() demisto.results(vectorCompare(token)) elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(attackResultList(token)) elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional token = getAccessToken() demisto.results(specificThreatsResults(token)) elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses token = getAccessToken() demisto.results(peerList(token)) elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses token = getAccessToken() demisto.results(eMailPeerList(token)) elif demisto.command() == 'picus-attack-all-vectors': # Schedules given attack on all possible vectors token = getAccessToken() demisto.results(attackAllVectors(token)) elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector token = getAccessToken() demisto.results(attackSingle(token)) elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully token = getAccessToken() demisto.results(triggerUpdate(token)) elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config token = getAccessToken() demisto.results(version(token)) elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(threatList(token)) elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(mitigationList(token)) elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters token = getAccessToken() demisto.results(mitreMatrix(token)) elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(sigmaRulesList(token)) elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination token = getAccessToken() demisto.results(vectorList(token)) elif demisto.command() == 'test-module': demisto.results(test_module()) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
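The change in this pair is the casing of one command name ('Picus-Attack-All-Vectors' becomes 'picus-attack-all-vectors'). Because the dispatch above compares demisto.command() with ==, an exact and case-sensitive string match, the rename matters; a trivial illustration:

command = "picus-attack-all-vectors"
print(command == "Picus-Attack-All-Vectors")  # False: case differs
print(command == "picus-attack-all-vectors")  # True
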
id: 12,037
original:

def remove_unprintable_chars(input_str):
    """
    Remove unprintable characters from a string and returns the result.
    """
    return "".join(
        c if c in string.printable else " " for c in input_str
    ).strip()

modified:

def remove_unprintable_chars(input_str):
    """
    Remove unprintable characters from a string and return the result.
    """
    return "".join(
        c if c in string.printable else " " for c in input_str
    ).strip()

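The edit only fixes the docstring's verb agreement. A quick usage check of the function, restated here so the snippet runs on its own:

import string

def remove_unprintable_chars(input_str):
    # Replace anything outside string.printable with a space, then trim.
    return "".join(
        c if c in string.printable else " " for c in input_str
    ).strip()

print(repr(remove_unprintable_chars("abc\x00def\x07")))  # 'abc def'
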
id: 53,395
original:

def _is_type_checking_import(node: Union[nodes.Import, nodes.ImportFrom]) -> bool:
    """Check if an import node is guarded by a TYPE_CHECKS guard"""
    for ancestor in node.node_ancestors():
        if isinstance(ancestor, nodes.If):
            if ancestor.test.as_string() in TYPING_TYPE_CHECKS_GUARDS:
                return True
    return False

modified:

def _is_type_checking_import(node: Union[nodes.Import, nodes.ImportFrom]) -> bool:
    """Check if an import node is guarded by a TYPE_CHECKS guard"""
    return any(
        a.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
        for a in node.node_ancestors()
        if isinstance(a, nodes.If)
    )

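The rewrite replaces an explicit loop with any() over a filtered generator; the two forms are equivalent. A standalone illustration of the same refactor on plain strings (the guard values below are placeholders, no astroid nodes required):

GUARDS = {"TYPE_CHECKING", "typing.TYPE_CHECKING"}

def guarded_loop(tests):
    for test in tests:
        if test in GUARDS:
            return True
    return False

def guarded_any(tests):
    return any(test in GUARDS for test in tests)

samples = (["x", "TYPE_CHECKING"], ["x"], [])
assert all(guarded_loop(s) == guarded_any(s) for s in samples)
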
id: 48,740
original:

def configure_orm(disable_connection_pool=False):
    """Configure ORM using SQLAlchemy"""
    log.debug("Setting up DB connection pool (PID %s)", os.getpid())

    global engine
    global Session

    engine_args = prepare_engine_args(disable_connection_pool)

    # Allow the user to specify an encoding for their DB otherwise default
    # to utf-8 so jobs & users with non-latin1 characters can still use us.
    engine_args['encoding'] = conf.get('core', 'SQL_ENGINE_ENCODING', fallback='utf-8')

    if conf.has_option('core', 'sql_alchemy_connect_args'):
        connect_args = conf.getimport('core', 'sql_alchemy_connect_args')
    else:
        connect_args = {}

    engine = create_engine(SQL_ALCHEMY_CONN, connect_args=connect_args, **engine_args)
    setup_event_handlers(engine)

    Session = scoped_session(
        sessionmaker(autocommit=False,
                     autoflush=False,
                     bind=engine,
                     expire_on_commit=False))

modified:

def configure_orm(disable_connection_pool=False):
    """Configure ORM using SQLAlchemy"""
    log.debug("Setting up DB connection pool (PID %s)", os.getpid())

    global engine
    global Session

    engine_args = prepare_engine_args(disable_connection_pool)

    # Allow the user to specify an encoding for their DB otherwise default
    # to utf-8 so jobs & users with non-latin1 characters can still use us.
    engine_args['encoding'] = conf.get('core', 'SQL_ENGINE_ENCODING', fallback='utf-8')

    if conf.has_option('core', 'sql_alchemy_connect_args'):
        connect_args = conf.getimport('core', 'sql_alchemy_connect_args')
    else:
        connect_args = {}

    engine = create_engine(SQL_ALCHEMY_CONN, connect_args=connect_args, **engine_args)
    setup_event_handlers(engine)

    Session = scoped_session(sessionmaker(
        autocommit=False,
        autoflush=False,
        bind=engine,
        expire_on_commit=False,
    ))

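The edit only re-wraps the sessionmaker call. A minimal sketch of the same SQLAlchemy session setup, with an in-memory SQLite URL standing in for SQL_ALCHEMY_CONN and assuming SQLAlchemy 1.x (where sessionmaker still accepts autocommit):

from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker

engine = create_engine("sqlite://")  # stand-in for SQL_ALCHEMY_CONN
Session = scoped_session(sessionmaker(
    autocommit=False,
    autoflush=False,
    bind=engine,
    expire_on_commit=False,
))

session = Session()
session.close()
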
id: 32,608
original:

def branch_create_command(client: Client, args: Dict) -> CommandResults:
    repo = args.get('repo', None)
    name = args.get('name', None)
    target_branch = args.get('target_branch', None)
    if not repo:
        repo = client.repository
    response = client.branch_create_request(name, target_branch, repo)
    return CommandResults(
        readable_output=f'The branch {name} was created successfully.',
        outputs_prefix='Bitbucket.Branch',
        outputs=response,
        raw_response=response
    )

modified:

def branch_create_command(client: Client, args: Dict) -> CommandResults:
    repo = args.get('repo')
    name = args.get('name', None)
    target_branch = args.get('target_branch', None)
    if not repo:
        repo = client.repository
    response = client.branch_create_request(name, target_branch, repo)
    return CommandResults(
        readable_output=f'The branch {name} was created successfully.',
        outputs_prefix='Bitbucket.Branch',
        outputs=response,
        raw_response=response
    )

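The edit drops a redundant default: dict.get(key) already returns None when the key is missing, so args.get('repo') and args.get('repo', None) behave identically:

args = {"name": "feature-x"}
assert args.get("repo") is None
assert args.get("repo", None) is None
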
id: 17,696
original:

def _get_push_dryrun(repo, remote=None):
    """
    Returns
    -------
    list
      The result of the dry-run. Will be an empty list of the dry-run
      failed for any reason.
    """
    try:
        wannabe_gitpush = repo.push(remote=remote, git_options=['--dry-run'])
    except Exception as e:
        lgr.debug(
            'Dry-run push to %r remote failed, '
            'assume no configuration: %s',
            remote if remote else 'default', e)
        wannabe_gitpush = []
    return wannabe_gitpush

modified:

def _get_push_dryrun(repo, remote=None):
    """
    Returns
    -------
    list
      The result of the dry-run. Will be an empty list if the dry-run
      failed for any reason.
    """
    try:
        wannabe_gitpush = repo.push(remote=remote, git_options=['--dry-run'])
    except Exception as e:
        lgr.debug(
            'Dry-run push to %r remote failed, '
            'assume no configuration: %s',
            remote if remote else 'default', e)
        wannabe_gitpush = []
    return wannabe_gitpush

id: 8,105
original:

def _is_writable_dir(p):
    """
    Checks to see if a directory is writable.
    """
    # Worried about multiple threads creating the directory at the same time.
    try:
        Path(p).mkdir(parents=True, exist_ok=True)
    except FileExistsError:
        return False
    else:
        return os.access(p, os.W_OK)

modified:

def _is_writable_dir(p):
    """
    Checks to see if a directory is writable.
    """
    # Worried about multiple threads creating the directory at the same time.
    try:
        Path(p).mkdir(parents=True, exist_ok=True)
    except FileExistsError:  # raised if there's an existing file instead of a directory
        return False
    else:
        return os.access(p, os.W_OK)

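A quick exercise of the function against a temporary location; the function is restated so the snippet runs on its own, and only a writable temp filesystem is assumed:

import os
import tempfile
from pathlib import Path

def _is_writable_dir(p):
    try:
        Path(p).mkdir(parents=True, exist_ok=True)
    except FileExistsError:  # an existing file blocks the directory
        return False
    else:
        return os.access(p, os.W_OK)

with tempfile.TemporaryDirectory() as d:
    print(_is_writable_dir(os.path.join(d, "sub")))  # True
    blocker = os.path.join(d, "blocker")
    Path(blocker).touch()
    print(_is_writable_dir(blocker))                 # False: a file, not a directory
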
id: 58,364
original:

def _check_path_matches_patterns(path, patterns):
    """Check if the path matches at least one of the provided patterns. """
    if not patterns:
        return False
    path = path.absolute()
    for patt in patterns:
        if isinstance(patt, Path):
            if path == patt:
                return True
        elif patt.search(str(path)):
            return True
    return False

modified:

def _check_path_matches_patterns(path, patterns):
    """Check if the path matches at least one of the provided patterns. """
    if not patterns:
        return False
    path = path.absolute()
    for patt in patterns:
        if str(path) == str(patt):
            return True
        elif patt.search(str(path)):
            return True
    return False

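A standalone sketch of the original variant's mixed matching, where each pattern is either a concrete Path (compared for equality) or a compiled regex (searched against the string form of the path):

import re
from pathlib import Path

def matches(path, patterns):
    if not patterns:
        return False
    path = Path(path).absolute()
    for patt in patterns:
        if isinstance(patt, Path):
            if path == patt:
                return True
        elif patt.search(str(path)):
            return True
    return False

print(matches("data/report.csv", [re.compile(r"\.csv$")]))  # True
print(matches("data/report.txt", [Path("/etc/hosts")]))     # False
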
id: 7,645
original:

def _get_timeline_url(revision):
    editable = revision.editable
    contribution = editable.contribution
    return url_for('event_editing.editable', contribution.event, contribution,
                   type=editable.type.name, _external=True)

modified:

def _get_timeline_url(revision):
    editable = revision.editable
    contribution = editable.contribution
    return url_for('event_editing.editable', contribution,
                   type=editable.type.name, _external=True)

id: 52,137
original:

def _get_idd(deployment_id, is_external_host, kwargs):
    inter_deployment_dependency = create_deployment_dependency(
        dependency_creator_generator(COMPONENT, ctx.instance.id),
        source_deployment=ctx.deployment.id,
        target_deployment=deployment_id
    )
    local_dependency_params = None
    if is_external_host:
        client_config = _get_desired_operation_input('client', kwargs)
        manager_ips = [mgr.public_ip for mgr in
                       manager.get_rest_client().manager.get_managers()]
        local_dependency_params = \
            inter_deployment_dependency.copy()
        local_dependency_params['target_deployment'] = ' '
        local_dependency_params['external_target'] = {
            'deployment': deployment_id,
            'client_config': client_config
        }
        inter_deployment_dependency['external_source'] = {
            'deployment': ctx.deployment.id,
            'tenant': ctx.tenant_name,
            'host': manager_ips
        }

    return inter_deployment_dependency, local_dependency_params

modified:

def _get_idd(deployment_id, is_external_host, kwargs):
    inter_deployment_dependency = create_deployment_dependency(
        dependency_creator_generator(COMPONENT, ctx.instance.id),
        source_deployment=ctx.deployment.id,
        target_deployment=deployment_id
    )
    local_dependency = None
    if is_external_host:
        client_config = _get_desired_operation_input('client', kwargs)
        manager_ips = [mgr.public_ip for mgr in
                       manager.get_rest_client().manager.get_managers()]
        local_dependency = create_deployment_dependency(
            dependency_creator_generator(COMPONENT, ctx.instance.id),
            source_deployment=ctx.deployment.id,
            target_deployment=' ',  # ??? can't it be just none?
            external_target={
                'deployment': deployment_id,
                'client_config': client_config
            },
            external_source={
                'deployment': ctx.deployment.id,
                'tenant': ctx.tenant_name,
                'host': manager_ips
            }
        )

    return inter_deployment_dependency, local_dependency

id: 33,220
def test_layoutinfo_init(): args = dict(root='/made/up/path', validate=True, absolute_paths=True, index_metadata=False, derivatives=True, ignore=['code/', 'blergh/'], force_index=None) with pytest.raises(ValueError) as exc: LayoutInfo(**args) assert exc.value.message.startswith("Missing mandatory") args['config'] = ['bids', 'derivatives'] info = LayoutInfo(**args) assert info.derivatives == True assert info._derivatives == 'true'
def test_layoutinfo_init(): args = dict(root='/made/up/path', validate=True, absolute_paths=True, index_metadata=False, derivatives=True, ignore=['code/', 'blergh/'], force_index=None) with pytest.raises(ValueError) as exc: LayoutInfo(**args) assert exc.value.message.startswith("Missing mandatory") args['config'] = ['bids', 'derivatives'] info = LayoutInfo(**args) assert info.derivatives == True assert info._derivatives == 'true'
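A self-contained illustration of the pytest.raises pattern used in the test above, with a stub standing in for LayoutInfo (the stub class and its error message are assumptions, not pybids code):

import pytest

class StubLayoutInfo:
    def __init__(self, **kwargs):
        if "config" not in kwargs:
            raise ValueError("Missing mandatory 'config' argument.")
        self.config = kwargs["config"]

def test_stub_requires_config():
    with pytest.raises(ValueError) as exc:
        StubLayoutInfo(root="/made/up/path")
    assert str(exc.value).startswith("Missing mandatory")
    info = StubLayoutInfo(root="/made/up/path", config=["bids", "derivatives"])
    assert info.config == ["bids", "derivatives"]
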
id: 32,128
def fetch_incidents(client: Client, env: str, args=None): """ Fetch Taegis Investigations for the use with "Fetch Incidents" """ page_size = args.get("max_fetch", 200) if page_size > 200: raise ValueError("Max Fetch cannot be more then 200") include_archived = False query = """ query investigations( $page: Int, $perPage: Int, $status: [String], $createdAfter: String, $orderByField: OrderFieldInput, $orderDirection: OrderDirectionInput ) { allInvestigations( page: $page, perPage: $perPage, status: $status, createdAfter: $createdAfter, orderByField: $orderByField, orderDirection: $orderDirection ) { id tenant_id description key_findings alerts { id alert_type severity message } archived_at created_at updated_at service_desk_id service_desk_type latest_activity priority status assets { id hostnames { id hostname } tags { tag } } } } """ variables = { "orderByField": "created_at", "orderDirection": "asc", "page": 0, "perPage": page_size, "status": ["Open", "Active"] } last_run = demisto.getLastRun() demisto.debug(f"Last Fetch Incident Run: {last_run}") now = datetime.now() start_time = now - timedelta(days=1) # Default start if first ever run if last_run and "start_time" in last_run: start_time = last_run.get("start_time") variables["createdAfter"] = start_time result = client.graphql_run(query=query, variables=variables) if result.get("errors") and result["errors"]: raise ValueError(f"Error when fetching investigations: {result['errors'][0]['message']}") incidents = [] for investigation in result["data"]["allInvestigations"]: # createdAfter really means createdAtOrAfter so skip the duplicate if start_time == investigation["created_at"]: continue # Skip archived, if necessary if not include_archived and investigation["archived_at"]: demisto.debug(f"Skipping Archived Investigation: {investigation['description']} ({investigation['id']})") continue demisto.debug(f"Found New Investigation: {investigation['description']} ({investigation['id']})") incidents.append({ "name": investigation["description"], "occured": investigation["created_at"], "rawJSON": json.dumps(investigation) }) demisto.debug(f"Located {len(incidents)} Incidents") last_run = str(now) if not incidents else incidents[-1]["occured"] demisto.debug(f"Last Run/Incident Time: {last_run}") demisto.setLastRun({"start_time": last_run}) demisto.incidents(incidents) return incidents
def fetch_incidents(client: Client, env: str, args=None): """ Fetch Taegis Investigations for the use with "Fetch Incidents" """ page_size = args.get("max_fetch", 200) if page_size > 200: raise ValueError("Max Fetch cannot be more then 200") include_archived = False query = """ query investigations( $page: Int, $perPage: Int, $status: [String], $createdAfter: String, $orderByField: OrderFieldInput, $orderDirection: OrderDirectionInput ) { allInvestigations( page: $page, perPage: $perPage, status: $status, createdAfter: $createdAfter, orderByField: $orderByField, orderDirection: $orderDirection ) { id tenant_id description key_findings alerts { id alert_type severity message } archived_at created_at updated_at service_desk_id service_desk_type latest_activity priority status assets { id hostnames { id hostname } tags { tag } } } } """ variables = { "orderByField": "created_at", "orderDirection": "asc", "page": 0, "perPage": page_size, "status": ["Open", "Active"] } last_run = demisto.getLastRun() demisto.debug(f"Last Fetch Incident Run: {last_run}") now = datetime.now() start_time = now - timedelta(days=1) # Default start if first ever run if last_run and "start_time" in last_run: start_time = last_run.get("start_time") variables["createdAfter"] = start_time result = client.graphql_run(query=query, variables=variables) if result.get("errors") and result["errors"]: raise ValueError(f"Error when fetching investigations: {result['errors'][0]['message']}") incidents = [] for investigation in result["data"]["allInvestigations"]: # createdAfter really means createdAtOrAfter so skip the duplicate if start_time == investigation["created_at"]: continue # Skip archived, if necessary if not include_archived and investigation["archived_at"]: demisto.debug(f"Skipping Archived Investigation: {investigation['description']} ({investigation['id']})") continue demisto.debug(f"Found New Investigation: {investigation['description']} ({investigation['id']})") incidents.append({ "name": investigation["description"], "occured": investigation["created_at"], "rawJSON": json.dumps(investigation) }) demisto.debug(f"Located {len(incidents)} Incidents") last_run = str(now) if not incidents else incidents[-1]["occured"] demisto.debug(f"Last Run/Incident Time: {last_run}") demisto.setLastRun({"start_time": last_run}) demisto.incidents(incidents) return incidents
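A plain-Python sketch of the checkpoint logic fetch_incidents uses above: fall back to "now minus one day" on the first run, otherwise resume from the stored start_time (the Taegis client and demisto runtime are not needed for this part):

from datetime import datetime, timedelta

def resolve_created_after(last_run: dict):
    # Mirrors the start_time handling in fetch_incidents above.
    if last_run and "start_time" in last_run:
        return last_run["start_time"]
    return datetime.now() - timedelta(days=1)

print(resolve_created_after({}))                                      # first run
print(resolve_created_after({"start_time": "2023-01-01T00:00:00Z"}))  # later runs
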
id: 5,741
def bilinear(b, a, fs=1.0): r""" Return a digital IIR filter from an analog one using a bilinear transform. Transform a set of poles and zeros from the analog s-plane to the digital z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for ``s``, maintaining the shape of the frequency response. Parameters ---------- b : array_like Numerator of the analog filter transfer function. a : array_like Denominator of the analog filter transfer function. fs : float Sample rate, as ordinary frequency (e.g., hertz). No prewarping is done in this function. Returns ------- z : ndarray Numerator of the transformed digital filter transfer function. p : ndarray Denominator of the transformed digital filter transfer function. See Also -------- lp2lp, lp2hp, lp2bp, lp2bs bilinear_zpk Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> fs = 100 >>> bf = 2 * np.pi * np.array([7, 13]) >>> filts = signal.lti(*signal.butter(4, bf, btype='bandpass', ... analog=True)) >>> filtz = signal.lti(*signal.bilinear(filts.num, filts.den, fs)) >>> wz, hz = signal.freqz(filtz.num, filtz.den) >>> ws, hs = signal.freqs(filts.num, filts.den, worN=fs*wz) >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hz).clip(1e-15)), ... label=r'$|H_z(e^{j \omega})|$') >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hs).clip(1e-15)), ... label=r'$|H(j \omega)|$') >>> plt.legend() >>> plt.xlabel('Frequency [Hz]') >>> plt.ylabel('Magnitude [dB]') >>> plt.grid() """ _validate_fs(fs) fs = float(fs) a, b = map(atleast_1d, (a, b)) D = len(a) - 1 N = len(b) - 1 artype = float M = max([N, D]) Np = M Dp = M bprime = numpy.empty(Np + 1, artype) aprime = numpy.empty(Dp + 1, artype) for j in range(Np + 1): val = 0.0 for i in range(N + 1): for k in range(i + 1): for l in range(M - i + 1): if k + l == j: val += (comb(i, k) * comb(M - i, l) * b[N - i] * pow(2 * fs, i) * (-1) ** k) bprime[j] = real(val) for j in range(Dp + 1): val = 0.0 for i in range(D + 1): for k in range(i + 1): for l in range(M - i + 1): if k + l == j: val += (comb(i, k) * comb(M - i, l) * a[D - i] * pow(2 * fs, i) * (-1) ** k) aprime[j] = real(val) return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0): r""" Return a digital IIR filter from an analog one using a bilinear transform. Transform a set of poles and zeros from the analog s-plane to the digital z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for ``s``, maintaining the shape of the frequency response. Parameters ---------- b : array_like Numerator of the analog filter transfer function. a : array_like Denominator of the analog filter transfer function. fs : float Sample rate, as ordinary frequency (e.g., hertz). No prewarping is done in this function. Returns ------- z : ndarray Numerator of the transformed digital filter transfer function. p : ndarray Denominator of the transformed digital filter transfer function. See Also -------- lp2lp, lp2hp, lp2bp, lp2bs bilinear_zpk Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> fs = 100 >>> bf = 2 * np.pi * np.array([7, 13]) >>> filts = signal.lti(*signal.butter(4, bf, btype='bandpass', ... analog=True)) >>> filtz = signal.lti(*signal.bilinear(filts.num, filts.den, fs)) >>> wz, hz = signal.freqz(filtz.num, filtz.den) >>> ws, hs = signal.freqs(filts.num, filts.den, worN=fs*wz) >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hz).clip(1e-15)), ... label=r'$|H_z(e^{j \omega})|$') >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hs).clip(1e-15)), ... label=r'$|H(j \omega)|$') >>> plt.legend() >>> plt.xlabel('Frequency [Hz]') >>> plt.ylabel('Magnitude [dB]') >>> plt.grid() """ fs = _validate_fs(fs) a, b = map(atleast_1d, (a, b)) D = len(a) - 1 N = len(b) - 1 artype = float M = max([N, D]) Np = M Dp = M bprime = numpy.empty(Np + 1, artype) aprime = numpy.empty(Dp + 1, artype) for j in range(Np + 1): val = 0.0 for i in range(N + 1): for k in range(i + 1): for l in range(M - i + 1): if k + l == j: val += (comb(i, k) * comb(M - i, l) * b[N - i] * pow(2 * fs, i) * (-1) ** k) bprime[j] = real(val) for j in range(Dp + 1): val = 0.0 for i in range(D + 1): for k in range(i + 1): for l in range(M - i + 1): if k + l == j: val += (comb(i, k) * comb(M - i, l) * a[D - i] * pow(2 * fs, i) * (-1) ** k) aprime[j] = real(val) return normalize(bprime, aprime)
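A short usage sketch of scipy.signal.bilinear in the spirit of the docstring above: design an analog Butterworth filter and discretize it at fs = 100 Hz (the filter order and cutoff are arbitrary choices):

import numpy as np
from scipy import signal

fs = 100.0
# Analog low-pass Butterworth with a 10 Hz cutoff (given in rad/s).
b, a = signal.butter(2, 2 * np.pi * 10, btype="low", analog=True)
bz, az = signal.bilinear(b, a, fs)
print(bz, az)  # digital transfer-function coefficients
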
6,003
def if_positive(criterion, then_, else_, out=None, queue=None): """Return an array like *then_*, which, for the element at index *i*, contains *then_[i]* if *criterion[i]>0*, else *else_[i]*. """ is_then_scalar = isinstance(then_, SCALAR_CLASSES) is_else_scalar = isinstance(else_, SCALAR_CLASSES) if isinstance(criterion, SCALAR_CLASSES) and is_then_scalar and is_else_scalar: result = np.where(criterion, then_, else_) if out is not None: out[...] = result return out return result if is_then_scalar: then_ = np.array(then_) if is_else_scalar: else_ = np.array(else_) if then_.dtype != else_.dtype: raise ValueError( f"dtypes do not match: then_ is '{then_.dtype}' and " f"else_ is '{else_.dtype}'") if then_.shape == () and else_.shape == (): pass elif then_.shape != () and else_.shape != (): if not (criterion.shape == then_.shape == else_.shape): raise ValueError( f"shapes do not match: 'criterion' has shape {criterion.shape}" f", 'then_' has shape {then_.shape} and 'else_' has shape " f"{else_.shape}") elif then_.shape == (): if criterion.shape != else_.shape: raise ValueError( f"shapes do not match: 'criterion' has shape {criterion.shape}" f" and 'else_' has shape {else_.shape}") elif else_.shape == (): if criterion.shape != then_.shape: raise ValueError( f"shapes do not match: 'criterion' has shape {criterion.shape}" f" and 'then_' has shape {then_.shape}") else: raise AssertionError() if out is None: if not then_.shape == (): out = empty_like( then_, criterion.queue, allocator=criterion.allocator) else: # Use same strides as criterion cr_byte_strides = np.array(criterion.strides, dtype=np.int64) cr_item_strides = cr_byte_strides // criterion.dtype.itemsize out_strides = tuple(cr_item_strides*then_.dtype.itemsize) out = Array(criterion.queue, criterion.shape, then_.dtype, allocator=criterion.allocator, strides=out_strides) event1 = _if_positive(out, criterion, then_, else_, queue=queue) out.add_event(event1) return out
def if_positive(criterion, then_, else_, out=None, queue=None): """Return an array like *then_*, which, for the element at index *i*, contains *then_[i]* if *criterion[i]>0*, else *else_[i]*. """ is_then_scalar = isinstance(then_, SCALAR_CLASSES) is_else_scalar = isinstance(else_, SCALAR_CLASSES) if isinstance(criterion, SCALAR_CLASSES) and is_then_scalar and is_else_scalar: result = np.where(criterion, then_, else_) if out is not None: out[...] = result return out return result if is_then_scalar: then_ = np.array(then_) if is_else_scalar: else_ = np.array(else_) if then_.dtype != else_.dtype: raise ValueError( f"dtypes do not match: then_ is '{then_.dtype}' and " f"else_ is '{else_.dtype}'") if then_.shape == () and else_.shape == (): pass elif then_.shape != () and else_.shape != (): if not (criterion.shape == then_.shape == else_.shape): raise ValueError( f"shapes do not match: 'criterion' has shape {criterion.shape}" f", 'then_' has shape {then_.shape} and 'else_' has shape " f"{else_.shape}") elif then_.shape == (): if criterion.shape != else_.shape: raise ValueError( f"shapes do not match: 'criterion' has shape {criterion.shape}" f" and 'else_' has shape {else_.shape}") elif else_.shape == (): if criterion.shape != then_.shape: raise ValueError( f"shapes do not match: 'criterion' has shape {criterion.shape}" f" and 'then_' has shape {then_.shape}") else: raise AssertionError() if out is None: if then_.shape != (): out = empty_like( then_, criterion.queue, allocator=criterion.allocator) else: # Use same strides as criterion cr_byte_strides = np.array(criterion.strides, dtype=np.int64) cr_item_strides = cr_byte_strides // criterion.dtype.itemsize out_strides = tuple(cr_item_strides*then_.dtype.itemsize) out = Array(criterion.queue, criterion.shape, then_.dtype, allocator=criterion.allocator, strides=out_strides) event1 = _if_positive(out, criterion, then_, else_, queue=queue) out.add_event(event1) return out
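The element-wise contract described in the docstring above can be mimicked on the host with NumPy's where; this analogue is purely illustrative and does not exercise the pyopencl array path.

import numpy as np

criterion = np.array([-1.0, 0.0, 2.0, 3.0])
then_ = np.array([10, 20, 30, 40])
else_ = np.array([-10, -20, -30, -40])

# then_[i] where criterion[i] > 0, otherwise else_[i]
result = np.where(criterion > 0, then_, else_)
# result -> array([-10, -20,  30,  40])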
8,447
def spectrum_from_column_mapping(table, column_mapping, wcs=None):
    """
    Given a table and a mapping of the table column names to attributes
    on the Spectrum1D object, parse the information into a Spectrum1D.

    Parameters
    ----------
    table : :class:`~astropy.table.Table`
        The table object (e.g. returned from ``Table.read('data_file')``).
    column_mapping : dict
        A dictionary describing the relation between the table columns
        and the arguments of the `Spectrum1D` class, along with unit
        information. The dictionary keys should be the table column names
        while the values should be a two-tuple where the first element is the
        associated `Spectrum1D` keyword argument, and the second element is the
        unit for the file column (or ``None`` to take unit from the table)::

            column_mapping = {'FLUX': ('flux', 'Jy'),
                              'WAVE': ('spectral_axis', 'um')}

    wcs : :class:`~astropy.wcs.WCS` or :class:`gwcs.WCS`
        WCS object passed to the Spectrum1D initializer.

    Returns
    -------
    :class:`~specutils.Spectrum1D`
        The spectrum with 'spectral_axis', 'flux' and optionally 'uncertainty'
        as identified by `column_mapping`.
    """
    spec_kwargs = {}

    # Associate columns of the file with the appropriate spectrum1d arguments
    for col_name, (kwarg_name, cm_unit) in column_mapping.items():
        # If the table object couldn't parse any unit information,
        # fallback to the column mapper defined unit
        tab_unit = table[col_name].unit

        if tab_unit and cm_unit is not None:
            # If the table unit is defined, retrieve the quantity array for
            # the column
            kwarg_val = u.Quantity(table[col_name], tab_unit)

            # Attempt to convert the table unit to the user-defined unit.
            log.debug(f"Attempting auto-convert of table unit {tab_unit} to "
                      f"user-provided unit {cm_unit}.")

            if not isinstance(cm_unit, u.Unit):
                cm_unit = u.Unit(cm_unit)
            cm_type = str(cm_unit.physical_type)
            if 'length in cm_type' or 'frequency' in cm_type or 'energy' in cm_type:
                # Spectral axis column information
                kwarg_val = kwarg_val.to(cm_unit, equivalencies=u.spectral())
            elif 'spectral flux' in cm_type:
                # Flux/error column information
                kwarg_val = kwarg_val.to(
                    cm_unit, equivalencies=u.spectral_density(1 * u.AA))
        elif tab_unit:
            # The user has provided no unit in the column mapping, so we
            # use the unit as defined in the table object.
            kwarg_val = u.Quantity(table[col_name], tab_unit)
        elif cm_unit is not None:
            # In this case, the user has defined a unit in the column mapping
            # but no unit has been defined in the table object.
            kwarg_val = u.Quantity(table[col_name], cm_unit)
        else:
            # Neither the column mapping nor the table contain unit information.
            # This may be desired e.g. for the mask or bit flag arrays.
            kwarg_val = table[col_name]

        # Transpose > 1D data to row-major format
        if kwarg_val.ndim > 1:
            kwarg_val = kwarg_val.T

        spec_kwargs.setdefault(kwarg_name, kwarg_val)

    # Ensure that the uncertainties are a subclass of NDUncertainty
    if spec_kwargs.get('uncertainty') is not None:
        spec_kwargs['uncertainty'] = StdDevUncertainty(
            spec_kwargs.get('uncertainty'))

    return Spectrum1D(**spec_kwargs, wcs=wcs, meta={'header': table.meta})
def spectrum_from_column_mapping(table, column_mapping, wcs=None):
    """
    Given a table and a mapping of the table column names to attributes
    on the Spectrum1D object, parse the information into a Spectrum1D.

    Parameters
    ----------
    table : :class:`~astropy.table.Table`
        The table object (e.g. returned from ``Table.read('data_file')``).
    column_mapping : dict
        A dictionary describing the relation between the table columns
        and the arguments of the `Spectrum1D` class, along with unit
        information. The dictionary keys should be the table column names
        while the values should be a two-tuple where the first element is the
        associated `Spectrum1D` keyword argument, and the second element is the
        unit for the file column (or ``None`` to take unit from the table)::

            column_mapping = {'FLUX': ('flux', 'Jy'),
                              'WAVE': ('spectral_axis', 'um')}

    wcs : :class:`~astropy.wcs.WCS` or :class:`gwcs.WCS`
        WCS object passed to the Spectrum1D initializer.

    Returns
    -------
    :class:`~specutils.Spectrum1D`
        The spectrum with 'spectral_axis', 'flux' and optionally 'uncertainty'
        as identified by `column_mapping`.
    """
    spec_kwargs = {}

    # Associate columns of the file with the appropriate spectrum1d arguments
    for col_name, (kwarg_name, cm_unit) in column_mapping.items():
        # If the table object couldn't parse any unit information,
        # fallback to the column mapper defined unit
        tab_unit = table[col_name].unit

        if tab_unit and cm_unit is not None:
            # If the table unit is defined, retrieve the quantity array for
            # the column
            kwarg_val = u.Quantity(table[col_name], tab_unit)

            # Attempt to convert the table unit to the user-defined unit.
            log.debug(f"Attempting auto-convert of table unit {tab_unit} to "
                      f"user-provided unit {cm_unit}.")

            if not isinstance(cm_unit, u.Unit):
                cm_unit = u.Unit(cm_unit)
            cm_type = str(cm_unit.physical_type)
            if 'length' in cm_type or 'frequency' in cm_type or 'energy' in cm_type:
                # Spectral axis column information
                kwarg_val = kwarg_val.to(cm_unit, equivalencies=u.spectral())
            elif 'spectral flux' in cm_type:
                # Flux/error column information
                kwarg_val = kwarg_val.to(
                    cm_unit, equivalencies=u.spectral_density(1 * u.AA))
        elif tab_unit:
            # The user has provided no unit in the column mapping, so we
            # use the unit as defined in the table object.
            kwarg_val = u.Quantity(table[col_name], tab_unit)
        elif cm_unit is not None:
            # In this case, the user has defined a unit in the column mapping
            # but no unit has been defined in the table object.
            kwarg_val = u.Quantity(table[col_name], cm_unit)
        else:
            # Neither the column mapping nor the table contain unit information.
            # This may be desired e.g. for the mask or bit flag arrays.
            kwarg_val = table[col_name]

        # Transpose > 1D data to row-major format
        if kwarg_val.ndim > 1:
            kwarg_val = kwarg_val.T

        spec_kwargs.setdefault(kwarg_name, kwarg_val)

    # Ensure that the uncertainties are a subclass of NDUncertainty
    if spec_kwargs.get('uncertainty') is not None:
        spec_kwargs['uncertainty'] = StdDevUncertainty(
            spec_kwargs.get('uncertainty'))

    return Spectrum1D(**spec_kwargs, wcs=wcs, meta={'header': table.meta})
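An illustrative call of the helper above, assuming it and the names it references (Spectrum1D, StdDevUncertainty, the module-level log) are in scope; the table values and units are invented for the example.

import astropy.units as u
from astropy.table import Table

tab = Table({'WAVE': [1.0, 1.1, 1.2], 'FLUX': [3.0, 4.0, 5.0]})
tab['WAVE'].unit = u.um
tab['FLUX'].unit = u.Jy

# (Spectrum1D keyword argument, target unit) per table column
mapping = {'WAVE': ('spectral_axis', 'um'),
           'FLUX': ('flux', 'Jy')}

spec = spectrum_from_column_mapping(tab, mapping)
# spec.spectral_axis carries micron units, spec.flux carries Jy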
649
def parse_stralgo(response, **options): """ Parse the response from `STRALGO` command. Without modifiers the returned value is string. When LEN is given the command returns the length of the result (i.e integer). When IDX is given the command returns a dictionary with the LCS length and all the ranges in both the strings, start and end offset for each string, where there are matches. When WITHMATCHLEN is given, each array representing a match will also have the length of the match at the beginning of the array. """ if options['len']: return int(response) if options['idx']: if options['withmatchlen']: matches = [[(int(match[-1]))] + list(map(tuple, match[:-1])) for match in response[1]] else: matches = [list(map(tuple, match)) for match in response[1]] return { str_if_bytes(response[0]): matches, str_if_bytes(response[2]): int(response[3]) } return str_if_bytes(response)
def parse_stralgo(response, **options): """ Parse the response from `STRALGO` command. Without modifiers the returned value is string. When LEN is given the command returns the length of the result (i.e integer). When IDX is given the command returns a dictionary with the LCS length and all the ranges in both the strings, start and end offset for each string, where there are matches. When WITHMATCHLEN is given, each array representing a match will also have the length of the match at the beginning of the array. """ if options.has_key('len'): return int(response) if options['idx']: if options['withmatchlen']: matches = [[(int(match[-1]))] + list(map(tuple, match[:-1])) for match in response[1]] else: matches = [list(map(tuple, match)) for match in response[1]] return { str_if_bytes(response[0]): matches, str_if_bytes(response[2]): int(response[3]) } return str_if_bytes(response)
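A sketch of feeding the parser a hand-written LCS reply with IDX and WITHMATCHLEN set; the reply shape follows the docstring, the values are hypothetical, and the redis-py helper str_if_bytes (which decodes bytes to str) is assumed to be in scope. The dict-indexing variant shown first is the one exercised.

response = [b'matches', [[[4, 7], [5, 8], 4]], b'len', 6]

parsed = parse_stralgo(response, len=False, idx=True, withmatchlen=True)
# parsed -> {'matches': [[4, (4, 7), (5, 8)]], 'len': 6}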
43,887
def _extract_su2su2_prefactors(U, V): """U, V are SU(4) matrices for which there exists A, B, C, D such that (A \otimes B) V (C \otimes D) = U. The problem is to find A, B, C, D in SU(2) in an analytic and fully differentiable manner. This decomposition is possible when U and V are in the same double coset of SU(4), meaning there exists G, H in SO(4) s.t. G (Edag V E) H = (Edag U E). This is guaranteed here by how V was constructed using the _select_rotation_angles method. Then, we can use the fact that E SO(4) Edag gives us something in SU(2) x SU(2) to give A, B, C, D. """ # A lot of the work here happens in the magic basis. Essentially, we # don't look explicitly at some U = G V H, but rather at # E^\dagger U E = G E^\dagger V E H # so that we can recover # U = (E G E^\dagger) V (E H E^\dagger) = (A \otimes B) V (C \otimes D). # There is some math in the paper explaining how when we define U in this way, # we can simultaneously diagonalize functions of U and V to ensure they are # in the same coset and recover the decomposition. u = qml.math.linalg.multi_dot([Edag, U, E]) v = qml.math.linalg.multi_dot([Edag, V, E]) uuT = qml.math.dot(u, qml.math.T(u)) vvT = qml.math.dot(v, qml.math.T(v)) # First, we find a matrix p (hopefully) in SO(4) s.t. p^T u u^T p is diagonal. # Since uuT is complex and symmetric, both its real / imag parts share a set # of real-valued eigenvectors. ev_p, p = qml.math.linalg.eig(uuT.real) # We also do this for v, i.e., find q (hopefully) in SO(4) s.t. q^T v v^T q is diagonal. ev_q, q = qml.math.linalg.eig(vvT.real) # If determinant of p is not 1, it is in O(4) but not SO(4), and has # determinant -1. We can transform it to SO(4) by simply negating one # of the columns. if not qml.math.isclose(qml.math.linalg.det(p), 1.0): p[:, -1] = -p[:, -1] # Next, we are going to reorder the columns of q so that the order of the # eigenvalues matches those of p. p_product = qml.math.linalg.multi_dot([qml.math.T(p), uuT, p]) q_product = qml.math.linalg.multi_dot([qml.math.T(q), vvT, q]) p_diag = qml.math.diag(p_product) q_diag = qml.math.diag(q_product) new_q_order = [] for idx, eigval in enumerate(p_diag): are_close = [qml.math.isclose(x, eigval) for x in q_diag] if any(are_close): new_q_order.append(qml.math.argmax(are_close)) # Get the permutation matrix needed to reshuffle the columns q_perm = _perm_matrix_from_sequence(new_q_order) q = qml.math.linalg.multi_dot([q, qml.math.T(q_perm)]) # Depending on the sign of the permutation, it may be that q is in O(4) but # not SO(4). Again we can fix this by simply negating a column. q_in_so4 = qml.math.isclose(qml.math.linalg.det(q), 1.0) if not q_in_so4: q[:, -1] = -q[:, -1] # Now, we should have p, q in SO(4) such that p^T u u^T p = q^T v v^T q. # Then (v^\dag q p^T u)(v^\dag q p^T u)^T = I. # So we can set G = p q^T, H = v^\dag q p^T u to obtain G v H = u. G = qml.math.dot(p, qml.math.T(q)) H = qml.math.linalg.multi_dot([qml.math.conj(qml.math.T(v)), q, qml.math.T(p), u]) # These are still in SO(4) though - we want to convert things into SU(2) x SU(2) # so use the entangler. Since u = E^\dagger U E and v = E^\dagger V E where U, V # are the target matrices, we can reshuffle as in the docstring above, # U = (E G E^\dagger) V (E H E^\dagger) = (A \otimes B) V (C \otimes D) # where A, B, C, D are in SU(2) x SU(2). AB = qml.math.linalg.multi_dot([E, G, Edag]) CD = qml.math.linalg.multi_dot([E, H, Edag]) # Now, we just need to extract the constituent tensor products. 
A, B = _su2su2_to_tensor_products(AB) C, D = _su2su2_to_tensor_products(CD) return A, B, C, D
def _extract_su2su2_prefactors(U, V): r"""U, V are SU(4) matrices for which there exists A, B, C, D such that (A \otimes B) V (C \otimes D) = U. The problem is to find A, B, C, D in SU(2) in an analytic and fully differentiable manner. This decomposition is possible when U and V are in the same double coset of SU(4), meaning there exists G, H in SO(4) s.t. G (Edag V E) H = (Edag U E). This is guaranteed here by how V was constructed using the _select_rotation_angles method. Then, we can use the fact that E SO(4) Edag gives us something in SU(2) x SU(2) to give A, B, C, D. """ # A lot of the work here happens in the magic basis. Essentially, we # don't look explicitly at some U = G V H, but rather at # E^\dagger U E = G E^\dagger V E H # so that we can recover # U = (E G E^\dagger) V (E H E^\dagger) = (A \otimes B) V (C \otimes D). # There is some math in the paper explaining how when we define U in this way, # we can simultaneously diagonalize functions of U and V to ensure they are # in the same coset and recover the decomposition. u = qml.math.linalg.multi_dot([Edag, U, E]) v = qml.math.linalg.multi_dot([Edag, V, E]) uuT = qml.math.dot(u, qml.math.T(u)) vvT = qml.math.dot(v, qml.math.T(v)) # First, we find a matrix p (hopefully) in SO(4) s.t. p^T u u^T p is diagonal. # Since uuT is complex and symmetric, both its real / imag parts share a set # of real-valued eigenvectors. ev_p, p = qml.math.linalg.eig(uuT.real) # We also do this for v, i.e., find q (hopefully) in SO(4) s.t. q^T v v^T q is diagonal. ev_q, q = qml.math.linalg.eig(vvT.real) # If determinant of p is not 1, it is in O(4) but not SO(4), and has # determinant -1. We can transform it to SO(4) by simply negating one # of the columns. if not qml.math.isclose(qml.math.linalg.det(p), 1.0): p[:, -1] = -p[:, -1] # Next, we are going to reorder the columns of q so that the order of the # eigenvalues matches those of p. p_product = qml.math.linalg.multi_dot([qml.math.T(p), uuT, p]) q_product = qml.math.linalg.multi_dot([qml.math.T(q), vvT, q]) p_diag = qml.math.diag(p_product) q_diag = qml.math.diag(q_product) new_q_order = [] for idx, eigval in enumerate(p_diag): are_close = [qml.math.isclose(x, eigval) for x in q_diag] if any(are_close): new_q_order.append(qml.math.argmax(are_close)) # Get the permutation matrix needed to reshuffle the columns q_perm = _perm_matrix_from_sequence(new_q_order) q = qml.math.linalg.multi_dot([q, qml.math.T(q_perm)]) # Depending on the sign of the permutation, it may be that q is in O(4) but # not SO(4). Again we can fix this by simply negating a column. q_in_so4 = qml.math.isclose(qml.math.linalg.det(q), 1.0) if not q_in_so4: q[:, -1] = -q[:, -1] # Now, we should have p, q in SO(4) such that p^T u u^T p = q^T v v^T q. # Then (v^\dag q p^T u)(v^\dag q p^T u)^T = I. # So we can set G = p q^T, H = v^\dag q p^T u to obtain G v H = u. G = qml.math.dot(p, qml.math.T(q)) H = qml.math.linalg.multi_dot([qml.math.conj(qml.math.T(v)), q, qml.math.T(p), u]) # These are still in SO(4) though - we want to convert things into SU(2) x SU(2) # so use the entangler. Since u = E^\dagger U E and v = E^\dagger V E where U, V # are the target matrices, we can reshuffle as in the docstring above, # U = (E G E^\dagger) V (E H E^\dagger) = (A \otimes B) V (C \otimes D) # where A, B, C, D are in SU(2) x SU(2). AB = qml.math.linalg.multi_dot([E, G, Edag]) CD = qml.math.linalg.multi_dot([E, H, Edag]) # Now, we just need to extract the constituent tensor products. 
A, B = _su2su2_to_tensor_products(AB) C, D = _su2su2_to_tensor_products(CD) return A, B, C, D
10,488
def install(m, pkgspec, cache, upgrade=False, default_release=None, install_recommends=None, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), build_dep=False, fixed=False, autoremove=False, no_remove=False, only_upgrade=False, allow_unauthenticated=False): pkg_list = [] packages = "" pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) package_names = [] for package in pkgspec: if build_dep: # Let apt decide what to install pkg_list.append("'%s'" % package) continue name, version = package_split(package) package_names.append(name) installed, installed_version, upgradable, has_files = package_status(m, name, version, cache, state='install') if (not installed and not only_upgrade) or (installed and not installed_version) or (upgrade and upgradable): pkg_list.append("'%s'" % package) if installed_version and upgradable and version: # This happens when the package is installed, a newer version is # available, and the version is a wildcard that matches both # # We do not apply the upgrade flag because we cannot specify both # a version and state=latest. (This behaviour mirrors how apt # treats a version with wildcard in the package) pkg_list.append("'%s'" % package) packages = ' '.join(pkg_list) if packages: if force: force_yes = '--force-yes' else: force_yes = '' if m.check_mode: check_arg = '--simulate' else: check_arg = '' if autoremove: autoremove = '--auto-remove' else: autoremove = '' if no_remove: no_remove = '--no-remove' else: no_remove = '' if only_upgrade: only_upgrade = '--only-upgrade' else: only_upgrade = '' if fixed: fixed = '--fix-broken' else: fixed = '' if build_dep: cmd = "%s -y %s %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, no_remove, check_arg, packages) else: cmd = "%s -y %s %s %s %s %s %s %s install %s" % \ (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, no_remove, check_arg, packages) if default_release: cmd += " -t '%s'" % (default_release,) if install_recommends is False: cmd += " -o APT::Install-Recommends=no" elif install_recommends is True: cmd += " -o APT::Install-Recommends=yes" # install_recommends is None uses the OS default if allow_unauthenticated: cmd += " --allow-unauthenticated" with PolicyRcD(m): rc, out, err = m.run_command(cmd) if m._diff: diff = parse_diff(out) else: diff = {} status = True changed = True if build_dep: changed = APT_GET_ZERO not in out data = dict(changed=changed, stdout=out, stderr=err, diff=diff) if rc: status = False data = dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc) else: status = True data = dict(changed=False) if not build_dep: mark_installed_manually(m, package_names) return (status, data)
def install(m, pkgspec, cache, upgrade=False, default_release=None, install_recommends=None, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), build_dep=False, fixed=False, autoremove=False, no_remove=False, only_upgrade=False, allow_unauthenticated=False): pkg_list = [] packages = "" pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) package_names = [] for package in pkgspec: if build_dep: # Let apt decide what to install pkg_list.append("'%s'" % package) continue name, version = package_split(package) package_names.append(name) installed, installed_version, upgradable, has_files = package_status(m, name, version, cache, state='install') if (not installed and not only_upgrade) or (installed and not installed_version) or (upgrade and upgradable): pkg_list.append("'%s'" % package) if installed_version and upgradable and version: # This happens when the package is installed, a newer version is # available, and the version is a wildcard that matches both # # We do not apply the upgrade flag because we cannot specify both # a version and state=latest. (This behaviour mirrors how apt # treats a version with wildcard in the package) pkg_list.append("'%s'" % package) packages = ' '.join(pkg_list) if packages: if force: force_yes = '--force-yes' else: force_yes = '' if m.check_mode: check_arg = '--simulate' else: check_arg = '' if autoremove: autoremove = '--auto-remove' else: autoremove = '' if no_remove: no_remove = '--no-remove' else: no_remove = '' if only_upgrade: only_upgrade = '--only-upgrade' else: only_upgrade = '' if fixed: fixed = '--fix-broken' else: fixed = '' if build_dep: cmd = "%s -y %s %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, fail_on_autoremove, check_arg, packages) else: cmd = "%s -y %s %s %s %s %s %s %s install %s" % \ (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, no_remove, check_arg, packages) if default_release: cmd += " -t '%s'" % (default_release,) if install_recommends is False: cmd += " -o APT::Install-Recommends=no" elif install_recommends is True: cmd += " -o APT::Install-Recommends=yes" # install_recommends is None uses the OS default if allow_unauthenticated: cmd += " --allow-unauthenticated" with PolicyRcD(m): rc, out, err = m.run_command(cmd) if m._diff: diff = parse_diff(out) else: diff = {} status = True changed = True if build_dep: changed = APT_GET_ZERO not in out data = dict(changed=changed, stdout=out, stderr=err, diff=diff) if rc: status = False data = dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc) else: status = True data = dict(changed=False) if not build_dep: mark_installed_manually(m, package_names) return (status, data)
42,816
def _case_when_checks( df: pd.DataFrame, args, default ) -> tuple[list, list, pd.Series]: """ Preliminary checks on the case_when function. """ if len(args) < 2: raise ValueError( "At least two arguments are required for the `args` parameter" ) booleans = [] replacements = [] for index, value in enumerate(args): if index % 2: replacements.append(value) else: booleans.append(value) if len(booleans) != len(replacements): raise ValueError( "The number of conditions and values do not match. " f"There are {len(booleans)} conditions and {len(replacements)} " "values." ) booleans = [ apply_if_callable(condition, df) if callable(condition) else df.eval(condition) if isinstance(condition, str) else condition for condition in booleans ] replacements = [ apply_if_callable(replacement, df) if callable(replacement) else replacement for replacement in replacements ] if callable(default): default = apply_if_callable(default, df) if pd.api.types.is_scalar(default): default = pd.Series([default]).repeat(len(df)) if not is_array_like(default): raise TypeError( "The argument for the `default` parameter " "should evaluate to an array-like object, " f"instead got {type(default)}" ) if isinstance(default, pd.Index): arr_ndim = default.nlevels else: arr_ndim = default.ndim if arr_ndim != 1: raise ValueError( f"The argument for the `default` parameter " "should evaluate to a 1-D array, " f"instead got dimension of length {arr_ndim}" ) if len(default) != len(df): raise ValueError( f"The length of the argument for the `default` parameter " "is {len(default)}, " "which is different from the length of the dataframe, " f"{len(df)}" ) if not isinstance(default, pd.Series): default = pd.Series(default) default.index = df.index return booleans, replacements, default
def _case_when_checks( df: pd.DataFrame, args, default ) -> tuple[list, list, pd.Series]: """ Preliminary checks on the case_when function. """ if len(args) < 2: raise ValueError( "At least two arguments are required for the `args` parameter" ) booleans = [] replacements = [] for index, value in enumerate(args): if index % 2: replacements.append(value) else: booleans.append(value) if len(booleans) != len(replacements): raise ValueError( "The number of conditions and values do not match. " f"There are {len(booleans)} conditions and {len(replacements)} " "values." ) booleans = [ apply_if_callable(condition, df) if callable(condition) else df.eval(condition) if isinstance(condition, str) else condition for condition in booleans ] replacements = [ apply_if_callable(replacement, df) if callable(replacement) else replacement for replacement in replacements ] if callable(default): default = apply_if_callable(default, df) if pd.api.types.is_scalar(default): default = pd.Series([default]).repeat(len(df)) if not is_array_like(default): raise TypeError( "The argument for the `default` parameter " "should evaluate to an array-like object, " f"instead got {type(default)!r}" ) if isinstance(default, pd.Index): arr_ndim = default.nlevels else: arr_ndim = default.ndim if arr_ndim != 1: raise ValueError( f"The argument for the `default` parameter " "should evaluate to a 1-D array, " f"instead got dimension of length {arr_ndim}" ) if len(default) != len(df): raise ValueError( f"The length of the argument for the `default` parameter " "is {len(default)}, " "which is different from the length of the dataframe, " f"{len(df)}" ) if not isinstance(default, pd.Series): default = pd.Series(default) default.index = df.index return booleans, replacements, default
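A small sketch of the argument layout the checker above expects, namely alternating condition/value pairs plus a default; it assumes the pandas helpers it references (apply_if_callable, is_array_like) are in scope, and the column name and conditions are made up.

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3, 4]})

booleans, replacements, default = _case_when_checks(
    df, ('a > 2', 'big', 'a <= 2', 'small'), 'other')

# booleans     -> two boolean Series built via df.eval('a > 2') and df.eval('a <= 2')
# replacements -> ['big', 'small']
# default      -> a Series of 'other' aligned to df.index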
40,147
def load_config(config_file_name): ''' This function should not be used in new code. Use `config.configparser_cfg` instead. loads config of CONFIG_DIR/config_file_name. Returns config object. Note that this does return a new instance and not the instance provided by `config.configparser_cfg`. This may cause the entrys in the logging section to be wrong. ''' config = configparser.ConfigParser() config_path = f'{get_config_dir()}/{config_file_name}' if not Path(config_path).exists(): complete_shutdown(f'config file not found: {config_path}') config.read(config_path) return config
def load_config(config_file_name): ''' This function should not be used in new code. Use `config.configparser_cfg` instead. loads config of CONFIG_DIR/config_file_name. Returns config object. Note that this does return a new instance and not the instance provided by `config.configparser_cfg`. This may cause the entries in the logging section to be wrong. ''' config = configparser.ConfigParser() config_path = f'{get_config_dir()}/{config_file_name}' if not Path(config_path).exists(): complete_shutdown(f'config file not found: {config_path}') config.read(config_path) return config
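Typical call pattern for the deprecated loader above; the file name and the section/option names are hypothetical.

config = load_config('main.cfg')  # reads <config dir>/main.cfg or shuts down if missing
log_level = config.get('logging', 'logLevel', fallback='INFO')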
45,996
def _warp(image: torch.Tensor, delta_hat: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """Convert 4-point representation introduced in :cite:`detone2016deep` to homography. Args: image: image tensor with shape :math:`(B, C, H, W)` where B = batch size, C = number of channels deltas: deltas tensor with shape :math:`(B, 4, 2)` where B = batch size Return: the warped images. """ corners = _image_shape_to_corners(image=image) homography = _four_point_to_homography(corners=corners, deltas=delta_hat) image_warped = warp_perspective(image, homography, (image.shape[-2], image.shape[-1])) return image_warped, torch.inverse(homography)
def _warp(image: torch.Tensor, delta_hat: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """Convert 4-point representation introduced in :cite:`detone2016deep` to homography. Args: image: image tensor with shape :math:`(B, C, H, W)` where B = batch size, C = number of channels deltas: deltas tensor with shape :math:`(B, 4, 2)` where B = batch size Return: the warped images. """ corners = _image_shape_to_corners(image=image) deltas_homo_corners, corners_homo_deltas = _four_point_to_homography(corners=corners, deltas=delta_hat) image_warped = warp_perspective(image, homography, (image.shape[-2], image.shape[-1])) return image_warped, torch.inverse(homography)
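A shape-oriented sketch of calling the warp helper (the first, working variant above); it assumes the surrounding kornia helpers (_image_shape_to_corners, _four_point_to_homography, warp_perspective) are in scope, and the tensor sizes are arbitrary.

import torch

image = torch.rand(2, 3, 64, 64)  # (B, C, H, W)
delta_hat = torch.zeros(2, 4, 2)  # zero corner displacements, i.e. a near-identity warp

warped, h_inv = _warp(image, delta_hat)
# warped: (2, 3, 64, 64) warped images
# h_inv:  (2, 3, 3) inverse homographies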
32,931
def span_from_scope(scope): # type: (Mapping[str, Any]) -> Optional[Span] """Retrieve the top-level ASGI span from the scope.""" if "request_spans" in scope.get("datadog", {}): try: return scope.get("datadog", {}).get("request_spans")[0] except ValueError: return {}
def span_from_scope(scope): # type: (Mapping[str, Any]) -> Optional[Span] """Retrieve the top-level ASGI span from the scope.""" return scope.get("datadog", {}).get("request_spans", [{}])[0]
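A minimal sketch of the ASGI scope layout the accessor above reads; the stand-in object takes the place of a real ddtrace Span.

root_span = object()  # stand-in for a ddtrace Span
scope = {"type": "http", "datadog": {"request_spans": [root_span]}}

assert span_from_scope(scope) is root_span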
11,896
def test_viewers(): for viewer in ImageShow._viewers: try: viewer.get_command("test.jpg") except NotImplementedError: pass
def test_viewers(): for viewer in ImageShow._viewers: try: viewer.get_command("test.jpg") except NotImplementedError: assert viewer.show(hopper()) == 1
31,976
def initialize_edl_context(params: dict): global EDL_ON_DEMAND_CACHE_PATH limit = try_parse_integer(params.get('edl_size'), EDL_LIMIT_ERR_MSG) query = params.get('indicators_query', '') collapse_ips = params.get('collapse_ips', DONT_COLLAPSE) url_port_stripping = params.get('url_port_stripping', False) url_protocol_stripping = params.get('url_port_stripping', False) drop_invalids = params.get('drop_invalids', False) add_comment_if_empty = params.get('add_comment_if_empty', True) mwg_type = params.get('mwg_type', "string") category_default = params.get('category_default', 'bc_category') category_attribute = params.get('category_attribute', '') fields_to_present = params.get('fields_filter', '') out_format = params.get('format', FORMAT_TEXT) csv_text = params.get('csv_text') == 'True' url_truncate = params.get('url_truncate', False) if params.get('use_legacy_query'): # workaround for "msgpack: invalid code" error fields_to_present = 'use_legacy_query' offset = 0 request_args = RequestArguments(query, out_format, limit, offset, url_port_stripping, drop_invalids, collapse_ips, add_comment_if_empty, mwg_type, category_default, category_attribute, fields_to_present, csv_text, url_protocol_stripping, url_truncate) EDL_ON_DEMAND_CACHE_PATH = demisto.uniqueFile() ctx = request_args.to_context_json() ctx[EDL_ON_DEMAND_KEY] = True set_integration_context(ctx)
def initialize_edl_context(params: dict): global EDL_ON_DEMAND_CACHE_PATH limit = try_parse_integer(params.get('edl_size'), EDL_LIMIT_ERR_MSG) query = params.get('indicators_query', '') collapse_ips = params.get('collapse_ips', DONT_COLLAPSE) url_port_stripping = params.get('url_port_stripping', False) url_protocol_stripping = params.get('url_port_stripping', False) drop_invalids = params.get('drop_invalids', False) add_comment_if_empty = params.get('add_comment_if_empty', True) mwg_type = params.get('mwg_type', "string") category_default = params.get('category_default', 'bc_category') category_attribute = params.get('category_attribute', '') fields_to_present = params.get('fields_filter', '') out_format = params.get('format', FORMAT_TEXT) csv_text = argToBoolean(params.get('csv_text')) url_truncate = params.get('url_truncate', False) if params.get('use_legacy_query'): # workaround for "msgpack: invalid code" error fields_to_present = 'use_legacy_query' offset = 0 request_args = RequestArguments(query, out_format, limit, offset, url_port_stripping, drop_invalids, collapse_ips, add_comment_if_empty, mwg_type, category_default, category_attribute, fields_to_present, csv_text, url_protocol_stripping, url_truncate) EDL_ON_DEMAND_CACHE_PATH = demisto.uniqueFile() ctx = request_args.to_context_json() ctx[EDL_ON_DEMAND_KEY] = True set_integration_context(ctx)
1,798
def test_permutation_importance_sample_weight(): # Creating data with 2 features and 1000 samples, where the target # variable is a linear combination of the two features, such that # in half of the samples the impact of feature 1 is twice the impact of # feature 2, and vice versa on the other half of the samples. np.random.seed(1) n_samples = 1000 n_features = 2 n_half_samples = int(n_samples / 2) x = np.random.normal(0.0, 0.001, (n_samples, n_features)) y = np.zeros(n_samples) y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1] y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1] # Fitting linear regression with perfect prediction lr = LinearRegression(fit_intercept=False) lr.fit(x, y) # When all samples are weighted with the same weights, the ratio of # the two features importance should equal to 1 on expectation (when using # mean absolutes error as the loss function). pi = permutation_importance(lr, x, y, random_state=1, scoring='neg_mean_absolute_error', n_repeats=1000) x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1] assert np.round(x1_x2_imp_ratio_w_none, 2) == 1.00 # When passing a vector of ones as the sample_weight, results should be # the same as in the case that sample_weight=None. w = np.ones(n_samples) pi = permutation_importance(lr, x, y, random_state=1, scoring='neg_mean_absolute_error', n_repeats=1000, sample_weight=w) x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1] assert x1_x2_imp_ratio_w_ones == x1_x2_imp_ratio_w_none # When the ratio between the weights of the first half of the samples and # the second half of the samples approaches to infinity, the ratio of # the two features importance should equal to 2 on expectation (when using # mean absolutes error as the loss function). w = np.hstack([np.repeat(10.0 ** 10, n_half_samples), np.repeat(1.0, n_half_samples)]) lr.fit(x, y, w) pi = permutation_importance(lr, x, y, random_state=1, scoring='neg_mean_absolute_error', n_repeats=1000, sample_weight=w) x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1] assert np.round(x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none, 2) == 2.00
def test_permutation_importance_sample_weight(): # Creating data with 2 features and 1000 samples, where the target # variable is a linear combination of the two features, such that # in half of the samples the impact of feature 1 is twice the impact of # feature 2, and vice versa on the other half of the samples. np.random.seed(1) n_samples = 1000 n_features = 2 n_half_samples = int(n_samples / 2) x = np.random.normal(0.0, 0.001, (n_samples, n_features)) y = np.zeros(n_samples) y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1] y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1] # Fitting linear regression with perfect prediction lr = LinearRegression(fit_intercept=False) lr.fit(x, y) # When all samples are weighted with the same weights, the ratio of # the two features importance should equal to 1 on expectation (when using # mean absolutes error as the loss function). pi = permutation_importance(lr, x, y, random_state=1, scoring='neg_mean_absolute_error', n_repeats=1000) x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1] assert np.round(x1_x2_imp_ratio_w_none, 2) == 1.00 # When passing a vector of ones as the sample_weight, results should be # the same as in the case that sample_weight=None. w = np.ones(n_samples) pi = permutation_importance(lr, x, y, random_state=1, scoring='neg_mean_absolute_error', n_repeats=1000, sample_weight=w) x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1] assert x1_x2_imp_ratio_w_ones == pytest.approx(x1_x2_imp_ratio_w_none) # When the ratio between the weights of the first half of the samples and # the second half of the samples approaches to infinity, the ratio of # the two features importance should equal to 2 on expectation (when using # mean absolutes error as the loss function). w = np.hstack([np.repeat(10.0 ** 10, n_half_samples), np.repeat(1.0, n_half_samples)]) lr.fit(x, y, w) pi = permutation_importance(lr, x, y, random_state=1, scoring='neg_mean_absolute_error', n_repeats=1000, sample_weight=w) x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1] assert np.round(x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none, 2) == 2.00
7,470
def match_coordinates_3d(matchcoord, catalogcoord, nthneighbor=1, storekdtree='kdtree_3d'): """ Finds the nearest 3-dimensional matches of a coordinate or coordinates in a set of catalog coordinates. This finds the 3-dimensional closest neighbor, which is only different from the on-sky distance if ``distance`` is set in either ``matchcoord`` or ``catalogcoord``. Parameters ---------- matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The coordinate(s) to match to the catalog. catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The base catalog in which to search for matches. Typically this will be a coordinate object that is an array (i.e., ``catalogcoord.isscalar == False``) nthneighbor : int, optional Which closest neighbor to search for. Typically ``1`` is desired here, as that is correct for matching one set of coordinates to another. The next likely use case is ``2``, for matching a coordinate catalog against *itself* (``1`` is inappropriate because each point will find itself as the closest match). storekdtree : bool or str, optional If a string, will store the KD-Tree used for the computation in the ``catalogcoord``, as in ``catalogcoord.cache`` with the provided name. This dramatically speeds up subsequent calls with the same catalog. If False, the KD-Tree is discarded after use. Returns ------- idx : integer array Indices into ``catalogcoord`` to get the matched points for each ``matchcoord``. Shape matches ``matchcoord``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. dist3d : `~astropy.units.Quantity` The 3D distance between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. Notes ----- This function requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. """ if catalogcoord.isscalar or len(catalogcoord) < 1: raise ValueError('The catalog for coordinate matching cannot be a ' 'scalar or length-0.') kdt = _get_cartesian_kdtree(catalogcoord, storekdtree) # make sure coordinate systems match matchcoord = matchcoord.transform_to(catalogcoord) # make sure units match catunit = catalogcoord.cartesian.x.unit matchxyz = matchcoord.cartesian.xyz.to(catunit) matchflatxyz = matchxyz.reshape((3, np.prod(matchxyz.shape) // 3)) # Querying NaN returns garbage if np.isnan(matchflatxyz.value).sum() > 0: raise ValueError("Matching coordinates cannot contain NaN entries.") dist, idx = kdt.query(matchflatxyz.T, nthneighbor) if nthneighbor > 1: # query gives 1D arrays if k=1, 2D arrays otherwise dist = dist[:, -1] idx = idx[:, -1] sep2d = catalogcoord[idx].separation(matchcoord) return idx.reshape(matchxyz.shape[1:]), sep2d, dist.reshape(matchxyz.shape[1:]) * catunit
def match_coordinates_3d(matchcoord, catalogcoord, nthneighbor=1, storekdtree='kdtree_3d'): """ Finds the nearest 3-dimensional matches of a coordinate or coordinates in a set of catalog coordinates. This finds the 3-dimensional closest neighbor, which is only different from the on-sky distance if ``distance`` is set in either ``matchcoord`` or ``catalogcoord``. Parameters ---------- matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The coordinate(s) to match to the catalog. catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The base catalog in which to search for matches. Typically this will be a coordinate object that is an array (i.e., ``catalogcoord.isscalar == False``) nthneighbor : int, optional Which closest neighbor to search for. Typically ``1`` is desired here, as that is correct for matching one set of coordinates to another. The next likely use case is ``2``, for matching a coordinate catalog against *itself* (``1`` is inappropriate because each point will find itself as the closest match). storekdtree : bool or str, optional If a string, will store the KD-Tree used for the computation in the ``catalogcoord``, as in ``catalogcoord.cache`` with the provided name. This dramatically speeds up subsequent calls with the same catalog. If False, the KD-Tree is discarded after use. Returns ------- idx : integer array Indices into ``catalogcoord`` to get the matched points for each ``matchcoord``. Shape matches ``matchcoord``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. dist3d : `~astropy.units.Quantity` The 3D distance between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. Notes ----- This function requires `SciPy <https://www.scipy.org/>`_ to be installed or it will fail. """ if catalogcoord.isscalar or len(catalogcoord) < 1: raise ValueError('The catalog for coordinate matching cannot be a ' 'scalar or length-0.') kdt = _get_cartesian_kdtree(catalogcoord, storekdtree) # make sure coordinate systems match matchcoord = matchcoord.transform_to(catalogcoord) # make sure units match catunit = catalogcoord.cartesian.x.unit matchxyz = matchcoord.cartesian.xyz.to(catunit) matchflatxyz = matchxyz.reshape((3, np.prod(matchxyz.shape) // 3)) # Querying NaN returns garbage if np.isnan(matchflatxyz.value).any(): raise ValueError("Matching coordinates cannot contain NaN entries.") dist, idx = kdt.query(matchflatxyz.T, nthneighbor) if nthneighbor > 1: # query gives 1D arrays if k=1, 2D arrays otherwise dist = dist[:, -1] idx = idx[:, -1] sep2d = catalogcoord[idx].separation(matchcoord) return idx.reshape(matchxyz.shape[1:]), sep2d, dist.reshape(matchxyz.shape[1:]) * catunit
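A short end-to-end example of the public astropy API shown above; the catalog and target coordinates are invented.

import astropy.units as u
from astropy.coordinates import SkyCoord, match_coordinates_3d

catalog = SkyCoord(ra=[10, 20, 30] * u.deg, dec=[-5, 0, 5] * u.deg,
                   distance=[1, 2, 3] * u.kpc)
target = SkyCoord(ra=19.9 * u.deg, dec=0.1 * u.deg, distance=2.05 * u.kpc)

idx, sep2d, dist3d = match_coordinates_3d(target, catalog)
# idx -> 1 (the second catalog entry), sep2d is an Angle, dist3d a Quantity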
8,489
def create_manifest(filename, manifest, console, uac_admin=False, uac_uiaccess=False): """ Create assembly manifest. """ if not manifest: manifest = ManifestFromXMLFile(filename) # /path/NAME.exe.manifest - split extension twice to get NAME. name = os.path.basename(filename) manifest.name = os.path.splitext(os.path.splitext(name)[0])[0] elif isinstance(manifest, string_types) and "<" in manifest: # Assume XML string manifest = ManifestFromXML(manifest) elif not isinstance(manifest, Manifest): # Assume filename manifest = ManifestFromXMLFile(manifest) dep_names = set([dep.name for dep in manifest.dependentAssemblies]) if manifest.filename != filename: # Update dependent assemblies depmanifest = ManifestFromXMLFile(filename) for assembly in depmanifest.dependentAssemblies: if not assembly.name in dep_names: manifest.dependentAssemblies.append(assembly) dep_names.add(assembly.name) if (not console and not "Microsoft.Windows.Common-Controls" in dep_names): # Add Microsoft.Windows.Common-Controls to dependent assemblies manifest.dependentAssemblies.append( Manifest(type_="win32", name="Microsoft.Windows.Common-Controls", language="*", processorArchitecture=processor_architecture(), version=(6, 0, 0, 0), publicKeyToken="6595b64144ccf1df") ) if uac_admin: manifest.requestedExecutionLevel = 'requireAdministrator' else: manifest.requestedExecutionLevel = 'asInvoker' if uac_uiaccess: manifest.uiAccess = True # only write a new manifest if it is different from the old need_new = not os.path.exists(filename) if not need_new: old_xml = ManifestFromXMLFile(filename).toprettyxml().replace('\r','') new_xml = manifest.toprettyxml().replace('\r','') # this only works if PYTHONHASHSEED is set in environment need_new = (old_xml != new_xml) if need_new: manifest.writeprettyxml(filename) return manifest
def create_manifest(filename, manifest, console, uac_admin=False, uac_uiaccess=False): """ Create assembly manifest. """ if not manifest: manifest = ManifestFromXMLFile(filename) # /path/NAME.exe.manifest - split extension twice to get NAME. name = os.path.basename(filename) manifest.name = os.path.splitext(os.path.splitext(name)[0])[0] elif isinstance(manifest, string_types) and "<" in manifest: # Assume XML string manifest = ManifestFromXML(manifest) elif not isinstance(manifest, Manifest): # Assume filename manifest = ManifestFromXMLFile(manifest) dep_names = set([dep.name for dep in manifest.dependentAssemblies]) if manifest.filename != filename: # Update dependent assemblies depmanifest = ManifestFromXMLFile(filename) for assembly in depmanifest.dependentAssemblies: if not assembly.name in dep_names: manifest.dependentAssemblies.append(assembly) dep_names.add(assembly.name) if (not console and not "Microsoft.Windows.Common-Controls" in dep_names): # Add Microsoft.Windows.Common-Controls to dependent assemblies manifest.dependentAssemblies.append( Manifest(type_="win32", name="Microsoft.Windows.Common-Controls", language="*", processorArchitecture=processor_architecture(), version=(6, 0, 0, 0), publicKeyToken="6595b64144ccf1df") ) if uac_admin: manifest.requestedExecutionLevel = 'requireAdministrator' else: manifest.requestedExecutionLevel = 'asInvoker' if uac_uiaccess: manifest.uiAccess = True # only write a new manifest if it is different from the old need_new = not os.path.exists(filename) if not need_new: old_xml = ManifestFromXMLFile(filename).toprettyxml().replace('\r','') new_xml = manifest.toprettyxml().replace('\r', '') # this only works if PYTHONHASHSEED is set in environment need_new = (old_xml != new_xml) if need_new: manifest.writeprettyxml(filename) return manifest
15,991
def list_statistic_ids(hass: HomeAssistant, statistic_type: str | None = None) -> dict: """Return statistic_ids and meta data.""" entities = _get_entities(hass) statistic_ids = {} for entity_id, device_class in entities: provided_statistics = DEVICE_CLASS_STATISTICS[device_class] if statistic_type is not None and statistic_type not in provided_statistics: continue state = hass.states.get(entity_id) assert state if "sum" in provided_statistics and ATTR_LAST_RESET not in state.attributes: continue native_unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) if device_class not in UNIT_CONVERSIONS: statistic_ids[entity_id] = native_unit continue if native_unit not in UNIT_CONVERSIONS[device_class]: continue statistics_unit = DEVICE_CLASS_UNITS[device_class] statistic_ids[entity_id] = statistics_unit return statistic_ids
def list_statistic_ids(hass: HomeAssistant, statistic_type: str | None = None) -> dict: """Return statistic_ids and meta data.""" entities = _get_entities(hass) statistic_ids = {} for entity_id, device_class in entities: provided_statistics = DEVICE_CLASS_STATISTICS[device_class] if statistic_type is not None and statistic_type not in provided_statistics: continue state = hass.states.get(entity_id) assert state if "sum" in provided_statistics and ATTR_LAST_RESET not in state.attributes: continue native_unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) if device_class not in UNIT_CONVERSIONS: statistic_ids[entity_id] = {"unit": native_unit} continue if native_unit not in UNIT_CONVERSIONS[device_class]: continue statistics_unit = DEVICE_CLASS_UNITS[device_class] statistic_ids[entity_id] = statistics_unit return statistic_ids
55,014
def derivative(H, x, i, delta=0.005291772): r"""Compute the derivative :math:`\partial \hat{H}(x)/\partial x_i` of the electronic Hamiltonian with respect to the :math:`i`-th nuclear coordinate using a central difference approximation. .. math:: \frac{\partial \hat{H}(x)}{\partial x_i} \approx \frac{\hat{H}(x_i+\delta/2) - \hat{H}(x_i-\delta/2)}{\delta} Args: H (callable): function with signature ``H(x)`` that builds the electronic Hamiltonian of the molecule for a given set of nuclear coordinates ``x`` x (array[float]): 1D array with the nuclear coordinates given in Angstroms. The size of the array should be ``3*N`` where ``N`` is the number of atoms in the molecule. i (int): index of the nuclear coordinate involved in the derivative :math:`\partial \hat{H}(x)/\partial x_i` delta (float): Step size in Angstroms used to displace the nuclear coordinate. Its default value corresponds to 0.01 Bohr radius. Returns: pennylane.Hamiltonian: the derivative of the Hamiltonian :math:`\partial \hat{H}(x)/\partial x_i` **Example** >>> def H(x): ... return qml.qchem.molecular_hamiltonian(['H', 'H'], x)[0] >>> x = np.array([0., 0., 0.35, 0., 0., -0.35]) >>> print(derivative(H, x, 2)) (-0.7763135743293005) [I0] + (-0.08534360840293387) [Z0] + (-0.08534360840293387) [Z1] + (0.2669341092545041) [Z2] + (0.26693410925450134) [Z3] + (-0.025233628744274508) [Z0 Z1] + (0.0072162443961340415) [Y0 X1 X2 Y3] + (-0.0072162443961340415) [Y0 Y1 X2 X3] + (-0.0072162443961340415) [X0 X1 Y2 Y3] + (0.0072162443961340415) [X0 Y1 Y2 X3] + (-0.030654287745411964) [Z0 Z2] + (-0.023438043349280003) [Z0 Z3] + (-0.023438043349280003) [Z1 Z2] + (-0.030654287745411964) [Z1 Z3] + (-0.02494407786332001) [Z2 Z3] """ to_bohr = 1.8897261254535 # plus x_plus = x.copy() x_plus[i] += delta * 0.5 # minus x_minus = x.copy() x_minus[i] -= delta * 0.5 return (H(x_plus) - H(x_minus)) * (delta * to_bohr) ** -1
def derivative(H, x, i, delta=0.005291772): r"""Uses a finite difference approximation to compute the ``i``-th derivative :math:`\partial \hat{H}(x)/\partial x_i` of the electronic Hamiltonian evaluated at the nuclear coordinates ``x``. .. math:: \frac{\partial \hat{H}(x)}{\partial x_i} \approx \frac{\hat{H}(x_i+\delta/2) - \hat{H}(x_i-\delta/2)}{\delta} Args: H (callable): function with signature ``H(x)`` that builds the electronic Hamiltonian of the molecule for a given set of nuclear coordinates ``x`` x (array[float]): 1D array with the nuclear coordinates given in Angstroms. The size of the array should be ``3*N`` where ``N`` is the number of atoms in the molecule. i (int): index of the nuclear coordinate involved in the derivative :math:`\partial \hat{H}(x)/\partial x_i` delta (float): Step size in Angstroms used to displace the nuclear coordinate. Its default value corresponds to 0.01 Bohr radius. Returns: pennylane.Hamiltonian: the derivative of the Hamiltonian :math:`\partial \hat{H}(x)/\partial x_i` **Example** >>> def H(x): ... return qml.qchem.molecular_hamiltonian(['H', 'H'], x)[0] >>> x = np.array([0., 0., 0.35, 0., 0., -0.35]) >>> print(derivative(H, x, 2)) (-0.7763135743293005) [I0] + (-0.08534360840293387) [Z0] + (-0.08534360840293387) [Z1] + (0.2669341092545041) [Z2] + (0.26693410925450134) [Z3] + (-0.025233628744274508) [Z0 Z1] + (0.0072162443961340415) [Y0 X1 X2 Y3] + (-0.0072162443961340415) [Y0 Y1 X2 X3] + (-0.0072162443961340415) [X0 X1 Y2 Y3] + (0.0072162443961340415) [X0 Y1 Y2 X3] + (-0.030654287745411964) [Z0 Z2] + (-0.023438043349280003) [Z0 Z3] + (-0.023438043349280003) [Z1 Z2] + (-0.030654287745411964) [Z1 Z3] + (-0.02494407786332001) [Z2 Z3] """ to_bohr = 1.8897261254535 # plus x_plus = x.copy() x_plus[i] += delta * 0.5 # minus x_minus = x.copy() x_minus[i] -= delta * 0.5 return (H(x_plus) - H(x_minus)) * (delta * to_bohr) ** -1
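The same central-difference formula stripped of the quantum-chemistry context, applied to a plain scalar function; this generic sketch only makes the finite-difference step explicit and is not part of the library code above.

import numpy as np

def central_diff(f, x, i, delta=1e-3):
    # d f(x) / d x_i  ~=  (f(x_i + delta/2) - f(x_i - delta/2)) / delta
    x_plus, x_minus = x.copy(), x.copy()
    x_plus[i] += delta / 2
    x_minus[i] -= delta / 2
    return (f(x_plus) - f(x_minus)) / delta

def f(x):
    return x[0] ** 2 + 3 * x[1]

x = np.array([1.0, 2.0])
central_diff(f, x, 0)  # ~= 2.0, matching the analytic derivative 2 * x[0]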
55,609
def layout(): app_main_layout = html.Div( className='row', children=[ dcc.Store(id='needle-store'), html.Div( className='four columns needle-tabs', children=dcc.Tabs( id='tabs', value='tab-data', children=[ dcc.Tab( label='Sample Data', className='needle-tab', value='tab-data', children=[ html.Div( id='needle-dataset-header-div', children=[ html.Div( title='"Demo dataset" choice will allow you to play with the options.\n' '"UniProt dataset" choice will retrieve protein domain' 'as well as mutation data from UniProt database.\n"Upload dataset"' 'choice will let you choose your own mutation data with the option to' 'load the protein domains from pfam database.', className='needle-dataset-header-div', children=dcc.RadioItems( id='needle-dataset-select-radio', options=[ {'label': 'Demo dataset', 'value': DEMO_KEY}, {'label': 'Upload dataset', 'value': FILE_KEY}, {'label': 'UniProt dataset', 'value': DATABASE_KEY}, ], value=DEMO_KEY ) ), html.Div( title='If checked, it will allow the user to load mutation data such ' 'as the protein coordinate (x), mutation number (y) and mutation ' 'type (mutationGroups), individually from the protein domains', className='needle-dataset-header-div', children=dcc.Checklist( id='needle-protein-domains-select-checklist', options=[ { 'label': 'Load protein domains individually', 'value': INDIV_DOMS_KEY }, { 'label': 'Load protein domains from UniProt only', 'value': UNIPROT_DOMS_KEY } ], values=[] ) ), ] ), html.Div( id='needle-%s-div' % DEMO_KEY, children=[ html.H5( 'Select demo dataset' ), dcc.Dropdown( id='needle-dataset-dropdown', options=[ {'label': data['label'], 'value': i} for i, data in enumerate(DATA) ], value=0, ), ] ), html.Div( id='needle-%s-div' % DATABASE_KEY, children=[ html.H5( 'Search UniProt' ), html.Div( title='Enter the UniProt accession key ' 'of the gene you want to display.\n' 'More information on https://www.uniprot.org/', children=[ dcc.Input( id='needle-sequence-input', value='', type='text', placeholder='TP53, DDX3X, SMARCA4, ...', ), html.Button( id='needle-search-sequence-button', children='submit', n_clicks=0, n_clicks_timestamp=0, ) ] ), html.Div( id='needle-uniprot-div', children='nothing to display', ) ] ), html.Div( id='needle-%s-div' % FILE_KEY, children=[ html.H5( 'Upload mutation data json file' ), dcc.Upload( id='needle-mutdata-file-upload', className='needle-upload', children=html.Div([ 'Drag and Drop or ', html.A('Select Files') ]), ), html.Div( id='needle-mutdata-file-info-div' ), html.Div( id='needle-domain-file-div', children=[ html.H5( 'Upload protein domains json file' ), dcc.Upload( id='needle-domains-file-upload', className='needle-upload', children=html.Div([ 'Drag and Drop or ', html.A('Select Files') ]), ), html.Div( id='needle-domains-file-info-div' ), ] ), html.Div( id='needle-domain-query-info-div' ), ] ) ], ), dcc.Tab( label='Options', value='tab-options', children=[ html.Div( children=[ html.H3('Config'), html.H5('Stem thickness'), dcc.Input( id='needle-stem-thick-input', type='number', value=2, min=1, max=40 ), html.H5('Needle head size'), dcc.Input( id='needle-head-size-input', type='number', value=4, min=1, max=40, ), html.H5('Stem color'), html.Div( [ dcc.Dropdown( id='needle-stem-color-dropdown', options=[ {'label': col, 'value': col} for col in STEM_COLOR ], value=STEM_COLOR[0], ), ], ), html.H5('Head color(s)'), html.Div( [ dcc.Dropdown( id='needle-head-color-dropdown', options=[ {'label': col, 'value': col} for col in HEAD_COLORS ], value=HEAD_COLORS[0:4], multi=True, ), ], ), html.H5('Head 
symbol(s)'), html.Div( [ dcc.Dropdown( id='needle-head-symbol-dropdown', options=[ {'label': sym, 'value': sym} for sym in HEAD_SYMBOLS ], value=HEAD_SYMBOLS[0], multi=True ), ], ), html.H5('Constant height needles'), html.Div( [ dcc.RadioItems( id='needle-stem-height-radioitems', options=[ {'label': 'On', 'value': True}, {'label': 'Off', 'value': False}, ], value=False ), ], ), html.H5('Rangeslider Display'), html.Div( [ dcc.RadioItems( id='needle-rangeslider-radioitems', options=[ {'label': 'On', 'value': True}, {'label': 'Off', 'value': False}, ], value=True ), ], ), ], ), ], ) ], ), ), html.Div( id='needle-plot-area', className='seven columns', children=dash_bio.NeedlePlot( id='needle-plot', rangeSlider=True, ), ) ] ) return app_page_layout(app_main_layout, app_title="Dash Bio : Needleplot")
def layout(): app_main_layout = html.Div( className='row', children=[ dcc.Store(id='needle-store'), html.Div( className='four columns needle-tabs', children=dcc.Tabs( id='tabs', value='tab-data', children=[ dcc.Tab( label='Sample Data', className='needle-tab', value='tab-data', children=[ html.Div( id='needle-dataset-header-div', children=[ html.Div( title='"Demo dataset" choice will allow you to play with the options.\n \ "UniProt dataset" choice will... ' '"UniProt dataset" choice will retrieve protein domain' 'as well as mutation data from UniProt database.\n"Upload dataset"' 'choice will let you choose your own mutation data with the option to' 'load the protein domains from pfam database.', className='needle-dataset-header-div', children=dcc.RadioItems( id='needle-dataset-select-radio', options=[ {'label': 'Demo dataset', 'value': DEMO_KEY}, {'label': 'Upload dataset', 'value': FILE_KEY}, {'label': 'UniProt dataset', 'value': DATABASE_KEY}, ], value=DEMO_KEY ) ), html.Div( title='If checked, it will allow the user to load mutation data such ' 'as the protein coordinate (x), mutation number (y) and mutation ' 'type (mutationGroups), individually from the protein domains', className='needle-dataset-header-div', children=dcc.Checklist( id='needle-protein-domains-select-checklist', options=[ { 'label': 'Load protein domains individually', 'value': INDIV_DOMS_KEY }, { 'label': 'Load protein domains from UniProt only', 'value': UNIPROT_DOMS_KEY } ], values=[] ) ), ] ), html.Div( id='needle-%s-div' % DEMO_KEY, children=[ html.H5( 'Select demo dataset' ), dcc.Dropdown( id='needle-dataset-dropdown', options=[ {'label': data['label'], 'value': i} for i, data in enumerate(DATA) ], value=0, ), ] ), html.Div( id='needle-%s-div' % DATABASE_KEY, children=[ html.H5( 'Search UniProt' ), html.Div( title='Enter the UniProt accession key ' 'of the gene you want to display.\n' 'More information on https://www.uniprot.org/', children=[ dcc.Input( id='needle-sequence-input', value='', type='text', placeholder='TP53, DDX3X, SMARCA4, ...', ), html.Button( id='needle-search-sequence-button', children='submit', n_clicks=0, n_clicks_timestamp=0, ) ] ), html.Div( id='needle-uniprot-div', children='nothing to display', ) ] ), html.Div( id='needle-%s-div' % FILE_KEY, children=[ html.H5( 'Upload mutation data json file' ), dcc.Upload( id='needle-mutdata-file-upload', className='needle-upload', children=html.Div([ 'Drag and Drop or ', html.A('Select Files') ]), ), html.Div( id='needle-mutdata-file-info-div' ), html.Div( id='needle-domain-file-div', children=[ html.H5( 'Upload protein domains json file' ), dcc.Upload( id='needle-domains-file-upload', className='needle-upload', children=html.Div([ 'Drag and Drop or ', html.A('Select Files') ]), ), html.Div( id='needle-domains-file-info-div' ), ] ), html.Div( id='needle-domain-query-info-div' ), ] ) ], ), dcc.Tab( label='Options', value='tab-options', children=[ html.Div( children=[ html.H3('Config'), html.H5('Stem thickness'), dcc.Input( id='needle-stem-thick-input', type='number', value=2, min=1, max=40 ), html.H5('Needle head size'), dcc.Input( id='needle-head-size-input', type='number', value=4, min=1, max=40, ), html.H5('Stem color'), html.Div( [ dcc.Dropdown( id='needle-stem-color-dropdown', options=[ {'label': col, 'value': col} for col in STEM_COLOR ], value=STEM_COLOR[0], ), ], ), html.H5('Head color(s)'), html.Div( [ dcc.Dropdown( id='needle-head-color-dropdown', options=[ {'label': col, 'value': col} for col in HEAD_COLORS ], value=HEAD_COLORS[0:4], 
multi=True, ), ], ), html.H5('Head symbol(s)'), html.Div( [ dcc.Dropdown( id='needle-head-symbol-dropdown', options=[ {'label': sym, 'value': sym} for sym in HEAD_SYMBOLS ], value=HEAD_SYMBOLS[0], multi=True ), ], ), html.H5('Constant height needles'), html.Div( [ dcc.RadioItems( id='needle-stem-height-radioitems', options=[ {'label': 'On', 'value': True}, {'label': 'Off', 'value': False}, ], value=False ), ], ), html.H5('Rangeslider Display'), html.Div( [ dcc.RadioItems( id='needle-rangeslider-radioitems', options=[ {'label': 'On', 'value': True}, {'label': 'Off', 'value': False}, ], value=True ), ], ), ], ), ], ) ], ), ), html.Div( id='needle-plot-area', className='seven columns', children=dash_bio.NeedlePlot( id='needle-plot', rangeSlider=True, ), ) ] ) return app_page_layout(app_main_layout, app_title="Dash Bio : Needleplot")
46,591
def test_valid_chars_in_key_names() -> None: valid_chars = "".join( chr(i) for i in range(33, 128) if chr(i) not in INVALID_CHARS_IN_KEY_NAMES ) cfg_dict = {valid_chars: 123, "inter": f"${{{valid_chars}}}"} cfg = OmegaConf.create(cfg_dict) # Test that we can access the node made of all valid characters, both # directly and through interpolations. assert cfg[valid_chars] == 123 assert cfg.inter == 123
def test_valid_chars_in_interpolation() -> None: valid_chars = "".join( chr(i) for i in range(33, 128) if chr(i) not in INVALID_CHARS_IN_KEY_NAMES ) cfg_dict = {valid_chars: 123, "inter": f"${{{valid_chars}}}"} cfg = OmegaConf.create(cfg_dict) # Test that we can access the node made of all valid characters, both # directly and through interpolations. assert cfg[valid_chars] == 123 assert cfg.inter == 123
19,623
def execute(args, print_results=True): p, args = parse_args(args) config = get_or_merge_config(None, **args.__dict__) variants = get_package_variants(args.recipe, config, variants=args.variants) set_language_env_vars(variants) config.channel_urls = get_channel_urls(args.__dict__) config.override_channels = args.override_channels if args.output: config.verbose = False config.debug = False metadata_tuples = api.render(args.recipe, config=config, no_download_source=args.no_source, variants=args.variants) if args.file and len(metadata_tuples) > 1: log.warning("Multiple variants rendered. " "Only one will be written to the file you specified ({}).".format(args.file)) if print_results: if args.output: with LoggingContext(logging.CRITICAL + 1): paths = api.get_output_file_paths(metadata_tuples, config=config) print('\n'.join(sorted(paths))) if args.file: (m, _, _) = metadata_tuples[-1] api.output_yaml(m, args.file, suppress_outputs=True) else: logging.basicConfig(level=logging.INFO) for (m, _, _) in metadata_tuples: print("--------------") print("Hash contents:") print("--------------") pprint(m.get_hash_contents()) print("----------") print("meta.yaml:") print("----------") print(api.output_yaml(m, args.file, suppress_outputs=True)) else: return metadata_tuples
def execute(args, print_results=True): p, args = parse_args(args) config = get_or_merge_config(None, **args.__dict__) variants = get_package_variants(args.recipe, config, variants=args.variants) set_language_env_vars(variants) config.channel_urls = get_channel_urls(args.__dict__) config.override_channels = args.override_channels if args.output: config.verbose = False config.debug = False metadata_tuples = api.render(args.recipe, config=config, no_download_source=args.no_source, variants=args.variants) if args.file and len(metadata_tuples) > 1: log.warning("Multiple variants rendered. " "Only one will be written to the file you specified ({}).".format(args.file)) if print_results: if args.output: with LoggingContext(logging.CRITICAL + 1): paths = api.get_output_file_paths(metadata_tuples, config=config) print('\n'.join(sorted(paths))) if args.file: m = metadata_tuples[-1][0] api.output_yaml(m, args.file, suppress_outputs=True) else: logging.basicConfig(level=logging.INFO) for (m, _, _) in metadata_tuples: print("--------------") print("Hash contents:") print("--------------") pprint(m.get_hash_contents()) print("----------") print("meta.yaml:") print("----------") print(api.output_yaml(m, args.file, suppress_outputs=True)) else: return metadata_tuples
40,582
def enforce_autarky(n, only_crossborder=False): links_rm = [] if only_crossborder: lines_rm = n.lines.loc[ n.lines.bus0.map(n.buses.country) != n.lines.bus1.map(n.buses.country) ].index links_rm = n.links.loc[ n.links.bus0.map(n.buses.country) != n.links.bus1.map(n.buses.country) ].index else: lines_rm = n.lines.index for i in n.links.index: if n.links.loc[i,'carrier'] == 'DC': links_rm.append(i) n.mremove("Line", lines_rm) n.mremove("Link", links_rm)
def enforce_autarky(n, only_crossborder=False): links_rm = [] if only_crossborder: lines_rm = n.lines.loc[ n.lines.bus0.map(n.buses.country) != n.lines.bus1.map(n.buses.country) ].index links_rm = n.links.loc[ n.links.bus0.map(n.buses.country) != n.links.bus1.map(n.buses.country) ].index else: lines_rm = n.lines.index links_rm = n.links.loc[n.links.carrier=="DC"].index n.mremove("Line", lines_rm) n.mremove("Link", links_rm)
27,392
def get_auxreader_for(auxdata: Optional[str] = None, format: Optional = None) -> AuxReader: """Return the appropriate auxiliary reader class for *auxdata*/*format*. If *format* is provided, will attempt to find an AuxReader corresponding to that format. If *auxdata* is provided, the format will first be guessed. Parameters ---------- auxdata (Optional) The auxiliary data (e.g. filename of file containing auxiliary data). format (Optional). Known format of *auxdata*. Returns ------- :class:`~MDAnalysis.auxiliary.base.AuxReader` AuxReader class corresponding to the supplied/guessed format. Raises ------ ValueError If an AuxReader for the format (provided or guessed from *auxdata*) cannot be found. """ if not auxdata and not format: raise ValueError('Must provide either auxdata or format') if format is None: if isinstance(auxdata, str): ## assume it's a filename? format = util.guess_format(auxdata) else: ## TBA if add non-file-format readers pass format = format.upper() try: return _AUXREADERS[format] except KeyError: errmsg = f"Unknown auxiliary data format for auxdata: {auxdata}" raise ValueError(errmsg) from None else: try: return _AUXREADERS[format] except KeyError: errmsg = f"Unknown auxiliary data format {format}" raise ValueError(errmsg) from None
def get_auxreader_for(auxdata: Optional[str] = None, format: Optional[str] = None) -> Type[AuxReader]: """Return the appropriate auxiliary reader class for *auxdata*/*format*. If *format* is provided, will attempt to find an AuxReader corresponding to that format. If *auxdata* is provided, the format will first be guessed. Parameters ---------- auxdata (Optional) The auxiliary data (e.g. filename of file containing auxiliary data). format (Optional). Known format of *auxdata*. Returns ------- :class:`~MDAnalysis.auxiliary.base.AuxReader` AuxReader class corresponding to the supplied/guessed format. Raises ------ ValueError If an AuxReader for the format (provided or guessed from *auxdata*) cannot be found. """ if not auxdata and not format: raise ValueError('Must provide either auxdata or format') if format is None: if isinstance(auxdata, str): ## assume it's a filename? format = util.guess_format(auxdata) else: ## TBA if add non-file-format readers pass format = format.upper() try: return _AUXREADERS[format] except KeyError: errmsg = f"Unknown auxiliary data format for auxdata: {auxdata}" raise ValueError(errmsg) from None else: try: return _AUXREADERS[format] except KeyError: errmsg = f"Unknown auxiliary data format {format}" raise ValueError(errmsg) from None
24,872
def check_config_8(machine, old_conf, new_conf, new_new_conf): """Example code that will trigger the message Given an if construct which continues with a new if construct When the body of the first if ends with an if expression Then no message shall be triggered. """ if old_conf: if new_new_conf: machine.disable() elif old_conf.value != new_conf.value: machine.disable() machine.enable(new_conf.value) if new_conf: machine.enable(new_conf.value)
def not_triggered_if_outer_block_continues_with_if(machine, old_conf, new_conf, new_new_conf): """Example code that will not trigger the message Given an if construct which continues with a new if construct When the body of the first if ends with an if expression Then no message shall be triggered. """ if old_conf: if new_new_conf: machine.disable() elif old_conf.value != new_conf.value: machine.disable() machine.enable(new_conf.value) if new_conf: machine.enable(new_conf.value)
56,606
def _cjs_dist(x, weights): """ Calculate Cumulative Jensen-Shannon distance between original draws (x) and weighted draws. """ # normalise weights weights = weights / np.sum(weights) # sort draws and weights x, w = (list(x) for x in zip(*sorted(zip(x, weights)))) bins = x[:-1] binwidth = np.diff(x) # ecdfs cdf_p = np.full(shape=len(x), fill_value=1/len(x)) cdf_p = np.cumsum(cdf_p)[:-1] cdf_q = np.cumsum(w/np.sum(w))[:-1] # integrals of ecdfs cdf_p_int = np.dot(cdf_p, binwidth) cdf_q_int = np.dot(cdf_q, binwidth) cjs_pq = np.nansum(binwidth * ( cdf_p * (np.log2(cdf_p) - np.log2(0.5 * cdf_p + 0.5 * cdf_q) ))) + 0.5 / np.log(2) * (cdf_q_int - cdf_p_int) cjs_qp = np.nansum( binwidth * cdf_q * (np.log2(cdf_q) - np.log2(0.5 * cdf_q + 0.5 * cdf_p) )) + 0.5 / np.log(2) * (cdf_p_int - cdf_q_int) bound = cdf_p_int + cdf_q_int return np.sqrt((cjs_pq + cjs_qp) / bound)
def _cjs_dist(x, weights): """ Calculate Cumulative Jensen-Shannon distance between original draws (x) and weighted draws. """ x = np.asarray(x) weights = np.asarray(weights) # normalise weights weights = weights / np.sum(weights) # sort draws and weights order = np.argsort(x) x = x[order] weights = weights[order] bins = x[:-1] binwidth = np.diff(x) # ecdfs cdf_p = np.full(shape=len(x), fill_value=1/len(x)) cdf_p = np.cumsum(cdf_p)[:-1] cdf_q = np.cumsum(weights / np.sum(weights))[:-1] # integrals of ecdfs cdf_p_int = np.dot(cdf_p, binwidth) cdf_q_int = np.dot(cdf_q, binwidth) cjs_pq = np.nansum(binwidth * ( cdf_p * (np.log2(cdf_p) - np.log2(0.5 * cdf_p + 0.5 * cdf_q) ))) + 0.5 / np.log(2) * (cdf_q_int - cdf_p_int) cjs_qp = np.nansum( binwidth * cdf_q * (np.log2(cdf_q) - np.log2(0.5 * cdf_q + 0.5 * cdf_p) )) + 0.5 / np.log(2) * (cdf_p_int - cdf_q_int) bound = cdf_p_int + cdf_q_int return np.sqrt((cjs_pq + cjs_qp) / bound)
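For orientation, the `_cjs_dist` pair above computes a cumulative Jensen-Shannon distance between the empirical CDF of the raw draws and the one implied by the importance weights. Below is a minimal, self-contained usage sketch under that reading; it restates the row's computation with NumPy, and the random draws, weights, and the name `cjs_distance` are illustrative assumptions rather than part of the source.

import numpy as np


def cjs_distance(x, weights):
    """Cumulative Jensen-Shannon distance between draws x and importance-weighted draws."""
    x = np.asarray(x, dtype=float)
    weights = np.asarray(weights, dtype=float)
    weights = weights / np.sum(weights)

    # sort the draws and carry the weights along
    order = np.argsort(x)
    x, weights = x[order], weights[order]
    binwidth = np.diff(x)

    # empirical CDFs, dropping the final point (which is 1 for both)
    cdf_p = np.cumsum(np.full(len(x), 1 / len(x)))[:-1]
    cdf_q = np.cumsum(weights)[:-1]

    # integrals of the ECDFs over the range of the draws
    cdf_p_int = np.dot(cdf_p, binwidth)
    cdf_q_int = np.dot(cdf_q, binwidth)

    cjs_pq = np.nansum(
        binwidth * cdf_p * (np.log2(cdf_p) - np.log2(0.5 * cdf_p + 0.5 * cdf_q))
    ) + 0.5 / np.log(2) * (cdf_q_int - cdf_p_int)
    cjs_qp = np.nansum(
        binwidth * cdf_q * (np.log2(cdf_q) - np.log2(0.5 * cdf_q + 0.5 * cdf_p))
    ) + 0.5 / np.log(2) * (cdf_p_int - cdf_q_int)

    return np.sqrt((cjs_pq + cjs_qp) / (cdf_p_int + cdf_q_int))


rng = np.random.default_rng(0)
draws = rng.normal(size=500)
w = rng.exponential(size=500)
print(cjs_distance(draws, w))  # non-negative scalar; 0 when the weighted and unweighted ECDFs coincide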
5,909
def is_this_a_good_version_number(string: str) -> Optional[str]: try: v = Version(string) except InvalidVersion as e: return str(e) if v.local: return "Nope. PyPI refuses local release versions." if v.dev: return "No development releases on PyPI. What are you even thinking?" if v.is_prerelease and (v.pre is None or v.pre[0] != "b"): return "Only beta releases are allowed. No alphas." release = v.release expected_major = datetime.now().year % 100 if len(release) not in [2, 3]: return "Not of the form: {0}.N or {0}.N.P".format(expected_major) return None
def is_this_a_good_version_number(string: str) -> Optional[str]: try: v = Version(string) except InvalidVersion as e: return str(e) if v.local: return "Nope. PyPI refuses local release versions." if v.dev: return "No development releases on PyPI. What are you even thinking?" if v.pre and v.pre[0] != "b": return "Only beta releases are allowed. No alphas." release = v.release expected_major = datetime.now().year % 100 if len(release) not in [2, 3]: return "Not of the form: {0}.N or {0}.N.P".format(expected_major) return None
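As a side note, the version check above leans on `packaging.version.Version`; a small, self-contained sketch of how such a validator behaves on a few candidate strings follows. The candidate values and the helper name `check_version` are made up for illustration and are not part of the source row.

from datetime import datetime

from packaging.version import InvalidVersion, Version


def check_version(string):
    """Return an error message for a bad version string, or None when it looks acceptable."""
    try:
        v = Version(string)
    except InvalidVersion as e:
        return str(e)
    if v.local:
        return "local release versions are refused"
    if v.dev:
        return "development releases are not allowed"
    if v.pre and v.pre[0] != "b":
        return "only beta pre-releases are allowed"
    if len(v.release) not in (2, 3):
        expected_major = datetime.now().year % 100
        return "not of the form: {0}.N or {0}.N.P".format(expected_major)
    return None


for candidate in ["24.1", "24.1.2", "24.2b1", "24.1a1", "24.1+local", "24"]:
    print(candidate, "->", check_version(candidate))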
13,413
def test_10_verify_changing_a_system_dataset_is_impossible_while_AD_is_running(request): depends(request, ["second_pool"]) results = GET("/network/configuration/") assert results.status_code == 200, results.text nameserver1 = results.json()['nameserver1'] payload = { "nameserver1": ADNameServer, } results = PUT("/network/configuration/", payload) assert results.status_code == 200, results.text assert isinstance(results.json(), dict), results.text payload = { "bindpw": ADPASSWORD, "bindname": ADUSERNAME, "domainname": AD_DOMAIN, "netbiosname": hostname, "dns_timeout": 15, "verbose_logging": True, "enable": True } results = PUT("/activedirectory/", payload) assert results.status_code == 200, results.text job_status = wait_on_job(results.json()['job_id'], 180) assert job_status['state'] == 'SUCCESS', str(job_status['results']) results = GET('/activedirectory/get_state/') assert results.status_code == 200, results.text assert results.json() == 'HEALTHY', results.text results = PUT("/systemdataset/", {'pool': 'second_pool'}) assert results.status_code == 200, results.text assert isinstance(results.json(), int), results.text job_status = wait_on_job(results.json(), 120) assert job_status['state'] == 'FAILED', str(job_status['results']) results = GET("/systemdataset/") assert results.status_code == 200, results.text assert isinstance(results.json(), dict), results.text assert results.json()['pool'] == 'first_pool', results.text assert results.json()['basename'] == 'first_pool/.system', results.text leave_payload = { "username": ADUSERNAME, "password": ADPASSWORD } results = POST("/activedirectory/leave/", leave_payload) assert results.status_code == 200, results.text job_status = wait_on_job(results.json(), 180) assert job_status['state'] == 'SUCCESS', str(job_status['results']) results = PUT("/network/configuration/", {"nameserver1": nameserver1}) assert results.status_code == 200, results.text
def test_10_verify_changes_to_sysds_are_forbidden_while_AD_is_running(request): depends(request, ["second_pool"]) results = GET("/network/configuration/") assert results.status_code == 200, results.text nameserver1 = results.json()['nameserver1'] payload = { "nameserver1": ADNameServer, } results = PUT("/network/configuration/", payload) assert results.status_code == 200, results.text assert isinstance(results.json(), dict), results.text payload = { "bindpw": ADPASSWORD, "bindname": ADUSERNAME, "domainname": AD_DOMAIN, "netbiosname": hostname, "dns_timeout": 15, "verbose_logging": True, "enable": True } results = PUT("/activedirectory/", payload) assert results.status_code == 200, results.text job_status = wait_on_job(results.json()['job_id'], 180) assert job_status['state'] == 'SUCCESS', str(job_status['results']) results = GET('/activedirectory/get_state/') assert results.status_code == 200, results.text assert results.json() == 'HEALTHY', results.text results = PUT("/systemdataset/", {'pool': 'second_pool'}) assert results.status_code == 200, results.text assert isinstance(results.json(), int), results.text job_status = wait_on_job(results.json(), 120) assert job_status['state'] == 'FAILED', str(job_status['results']) results = GET("/systemdataset/") assert results.status_code == 200, results.text assert isinstance(results.json(), dict), results.text assert results.json()['pool'] == 'first_pool', results.text assert results.json()['basename'] == 'first_pool/.system', results.text leave_payload = { "username": ADUSERNAME, "password": ADPASSWORD } results = POST("/activedirectory/leave/", leave_payload) assert results.status_code == 200, results.text job_status = wait_on_job(results.json(), 180) assert job_status['state'] == 'SUCCESS', str(job_status['results']) results = PUT("/network/configuration/", {"nameserver1": nameserver1}) assert results.status_code == 200, results.text
27,242
def test_compile_toplevel(): t = ibis.table([('foo', 'double')], name='t0') # it works! expr = t.foo.sum() result = ibis.backends.bigquery.compile(expr) # FIXME: remove quotes because bigquery can't use anythig that needs # quoting? expected = """\ SELECT sum(`foo`) AS `sum` FROM t0""" # noqa assert str(result) == expected
def test_compile_toplevel(): t = ibis.table([('foo', 'double')], name='t0') # it works! expr = t.foo.sum() result = ibis.bigquery.compile(expr) # FIXME: remove quotes because bigquery can't use anythig that needs # quoting? expected = """\ SELECT sum(`foo`) AS `sum` FROM t0""" # noqa assert str(result) == expected
32,941
def is_single_span_sampled( span, # type: Span ): return span.get_metric("_dd.span_sampling.mechanism") == SamplingMechanism.SPAN_SAMPLING_RULE
def is_single_span_sampled(span): # type: (Span) -> bool return span.get_metric("_dd.span_sampling.mechanism") == SamplingMechanism.SPAN_SAMPLING_RULE
9,002
def clean_callable(func, config): """Compiles the regexes, moves commands into func.rule, fixes up docs and puts them in func._docs, and sets defaults""" nick = config.core.nick alias_nicks = config.core.alias_nicks prefix = config.core.prefix help_prefix = config.core.help_prefix func._docs = {} doc = trim_docstring(inspect.getdoc(func)) examples = [] func.thread = getattr(func, 'thread', True) if not is_triggerable(func): # Rate-limiting, priority, etc. doesn't apply to non-triggerable functions. # Adding the default attributes below is a waste of memory, as well as # potentially confusing to other code. return func.unblockable = getattr(func, 'unblockable', False) func.echo = getattr(func, 'echo', False) func.priority = getattr(func, 'priority', 'medium') func.rate = getattr(func, 'rate', 0) func.channel_rate = getattr(func, 'channel_rate', 0) func.global_rate = getattr(func, 'global_rate', 0) func.output_prefix = getattr(func, 'output_prefix', '') if not hasattr(func, 'event'): func.event = ['PRIVMSG'] else: if isinstance(func.event, basestring): func.event = [func.event.upper()] else: func.event = [event.upper() for event in func.event] if hasattr(func, 'rule'): if isinstance(func.rule, basestring): func.rule = [func.rule] func.rule = [compile_rule(nick, rule, alias_nicks) for rule in func.rule] if any(hasattr(func, attr) for attr in ['commands', 'nickname_commands', 'action_commands']): func.rule = getattr(func, 'rule', []) for command in getattr(func, 'commands', []): regexp = get_command_regexp(prefix, command) if regexp not in func.rule: func.rule.append(regexp) for command in getattr(func, 'nickname_commands', []): regexp = get_nickname_command_regexp(nick, command, alias_nicks) if regexp not in func.rule: func.rule.append(regexp) for command in getattr(func, 'action_commands', []): regexp = get_action_command_regexp(command) if regexp not in func.rule: func.rule.append(regexp) if hasattr(func, 'example'): # If no examples are flagged as user-facing, just show the first one like Sopel<7.0 did examples = [rec["example"] for rec in func.example if rec["help"]] or [func.example[0]["example"]] for i, example in enumerate(examples): example = example.replace('$nickname', nick) if example[0] != help_prefix and not example.startswith(nick): example = example.replace(default_prefix, help_prefix, 1) examples[i] = example if doc or examples: cmds = [] cmds.extend(getattr(func, 'commands', [])) cmds.extend(getattr(func, 'nickname_commands', [])) for command in cmds: func._docs[command] = (doc, examples) if hasattr(func, 'intents'): # Can be implementation-dependent _regex_type = type(re.compile('')) func.intents = [ (intent if isinstance(intent, _regex_type) else re.compile(intent, re.IGNORECASE)) for intent in func.intents ]
def clean_callable(func, config): """Compiles the regexes, moves commands into func.rule, fixes up docs and puts them in func._docs, and sets defaults""" nick = config.core.nick alias_nicks = config.core.alias_nicks prefix = config.core.prefix help_prefix = config.core.help_prefix func._docs = {} doc = inspect.getdoc(func) examples = [] func.thread = getattr(func, 'thread', True) if not is_triggerable(func): # Rate-limiting, priority, etc. doesn't apply to non-triggerable functions. # Adding the default attributes below is a waste of memory, as well as # potentially confusing to other code. return func.unblockable = getattr(func, 'unblockable', False) func.echo = getattr(func, 'echo', False) func.priority = getattr(func, 'priority', 'medium') func.rate = getattr(func, 'rate', 0) func.channel_rate = getattr(func, 'channel_rate', 0) func.global_rate = getattr(func, 'global_rate', 0) func.output_prefix = getattr(func, 'output_prefix', '') if not hasattr(func, 'event'): func.event = ['PRIVMSG'] else: if isinstance(func.event, basestring): func.event = [func.event.upper()] else: func.event = [event.upper() for event in func.event] if hasattr(func, 'rule'): if isinstance(func.rule, basestring): func.rule = [func.rule] func.rule = [compile_rule(nick, rule, alias_nicks) for rule in func.rule] if any(hasattr(func, attr) for attr in ['commands', 'nickname_commands', 'action_commands']): func.rule = getattr(func, 'rule', []) for command in getattr(func, 'commands', []): regexp = get_command_regexp(prefix, command) if regexp not in func.rule: func.rule.append(regexp) for command in getattr(func, 'nickname_commands', []): regexp = get_nickname_command_regexp(nick, command, alias_nicks) if regexp not in func.rule: func.rule.append(regexp) for command in getattr(func, 'action_commands', []): regexp = get_action_command_regexp(command) if regexp not in func.rule: func.rule.append(regexp) if hasattr(func, 'example'): # If no examples are flagged as user-facing, just show the first one like Sopel<7.0 did examples = [rec["example"] for rec in func.example if rec["help"]] or [func.example[0]["example"]] for i, example in enumerate(examples): example = example.replace('$nickname', nick) if example[0] != help_prefix and not example.startswith(nick): example = example.replace(default_prefix, help_prefix, 1) examples[i] = example if doc or examples: cmds = [] cmds.extend(getattr(func, 'commands', [])) cmds.extend(getattr(func, 'nickname_commands', [])) for command in cmds: func._docs[command] = (doc, examples) if hasattr(func, 'intents'): # Can be implementation-dependent _regex_type = type(re.compile('')) func.intents = [ (intent if isinstance(intent, _regex_type) else re.compile(intent, re.IGNORECASE)) for intent in func.intents ]
46,965
def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty." "Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN, ) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s", training_args) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub # # For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this # behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if extension == "txt": extension = "text" datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, ) else: logger.info("Training new model from scratch") model = AutoModelForCausalLM.from_config(config) model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: column_names = datasets["train"].column_names else: column_names = datasets["validation"].column_names text_column_name = "text" if "text" in column_names else column_names[0] def tokenize_function(examples): return tokenizer(examples[text_column_name]) tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, ) if data_args.block_size <= 0: block_size = tokenizer.max_len else: block_size = min(data_args.block_size, tokenizer.max_len) # Main function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower # to preprocess. lm_datasets = tokenized_datasets.map(group_texts, batched=True, load_from_cache_file=not data_args.overwrite_cache) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=lm_datasets["train"] if training_args.do_train else None, eval_dataset=lm_datasets["validation"] if training_args.do_eval else None, tokenizer=tokenizer, # Data collator will default to DataCollatorWithPadding, so we change it. 
data_collator=default_data_collator, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) trainer.save_model() # Saves the tokenizer too for easy upload # Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") eval_output = trainer.evaluate() perplexity = math.exp(eval_output["eval_loss"]) results["perplexity"] = perplexity output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt") if trainer.is_world_process_zero(): with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key, value in results.items(): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") return results
def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty." "Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN, ) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s", training_args) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ (the dataset will be downloaded automatically from the datasets Hub # # For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this # behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if extension == "txt": extension = "text" datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, ) else: logger.info("Training new model from scratch") model = AutoModelForCausalLM.from_config(config) model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: column_names = datasets["train"].column_names else: column_names = datasets["validation"].column_names text_column_name = "text" if "text" in column_names else column_names[0] def tokenize_function(examples): return tokenizer(examples[text_column_name]) tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, ) if data_args.block_size <= 0: block_size = tokenizer.max_len else: block_size = min(data_args.block_size, tokenizer.max_len) # Main function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower # to preprocess. lm_datasets = tokenized_datasets.map(group_texts, batched=True, load_from_cache_file=not data_args.overwrite_cache) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=lm_datasets["train"] if training_args.do_train else None, eval_dataset=lm_datasets["validation"] if training_args.do_eval else None, tokenizer=tokenizer, # Data collator will default to DataCollatorWithPadding, so we change it. 
data_collator=default_data_collator, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) trainer.save_model() # Saves the tokenizer too for easy upload # Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") eval_output = trainer.evaluate() perplexity = math.exp(eval_output["eval_loss"]) results["perplexity"] = perplexity output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt") if trainer.is_world_process_zero(): with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key, value in results.items(): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") return results
17,734
def base10_to_base62_alph_num(base10_No): '''Converst base 10 to base 62 so pdb/psf files can add may more than 9999 atoms and 999 residues.''' '''base10_No = the base-10 number that you want to convert to base-62)''' base62_No = 62 base10_No = int(base10_No) whole_no =1 remainder = changeDigit_base10_to_base62_alph_num(int(base10_No % base62_No)) base62_Values = str(remainder) power =1 while whole_no != 0: whole_no =int(base10_No / base62_No**power) if whole_no == base62_No : base62_Values = str(0)+base62_Values elif (whole_no != 0) and (whole_no > base62_No) : base62_Values = str(changeDigit_base10_to_base62_alph_num(int(whole_no % base62_No))) + base62_Values elif (whole_no != 0) and (whole_no < base62_No): base62_Values = str(changeDigit_base10_to_base62_alph_num(int(whole_no))) + base62_Values power =power+1 return base62_Values
def base10_to_base62_alph_num(base10_No): '''Converts base 10 to base 62 so pdb/psf files can add many more than 9999 atoms and 999 residues.''' '''base10_No = the base-10 number that you want to convert to base-62''' base62_No = 62 base10_No = int(base10_No) whole_no = 1 remainder = changeDigit_base10_to_base62_alph_num(int(base10_No % base62_No)) base62_Values = str(remainder) power =1 while whole_no != 0: whole_no =int(base10_No / base62_No**power) if whole_no == base62_No : base62_Values = str(0)+base62_Values elif (whole_no != 0) and (whole_no > base62_No) : base62_Values = str(changeDigit_base10_to_base62_alph_num(int(whole_no % base62_No))) + base62_Values elif (whole_no != 0) and (whole_no < base62_No): base62_Values = str(changeDigit_base10_to_base62_alph_num(int(whole_no))) + base62_Values power =power+1 return base62_Values
7,520
def test_uint_indexing(): """ Test that accessing a row with an unsigned integer works as with a signed integer. Similarly tests that printing such a row works. This is non-trivial: adding a signed and unsigned integer in numpy results in a float, which is an invalid slice index. Regression test for gh-7464. """ t = table.Table([[1., 2., 3.]], names='a') assert t['a'][1] == 2. assert t['a'][np.int_(1)] == 2. assert t['a'][np.uint(1)] == 2. assert t[np.uint(1)]['a'] == 2. trepr = ['<Row index=1>', ' a ', 'float64', '-------', ' 2.0'] assert repr(t[1]).splitlines() == trepr assert repr(t[np.int_(1)]).splitlines() == trepr assert repr(t[np.uint(1)]).splitlines() == trepr
def test_uint_indexing(): """ Test that accessing a row with an unsigned integer works as with a signed integer. Similarly tests that printing such a row works. This is non-trivial: adding a signed and unsigned integer in numpy results in a float, which is an invalid slice index. Regression test for gh-7464. """ t = table.Table([[1., 2., 3.]], names='a') assert t['a'][1] == 2. assert t['a'][int(1)] == 2. assert t['a'][np.uint(1)] == 2. assert t[np.uint(1)]['a'] == 2. trepr = ['<Row index=1>', ' a ', 'float64', '-------', ' 2.0'] assert repr(t[1]).splitlines() == trepr assert repr(t[np.int_(1)]).splitlines() == trepr assert repr(t[np.uint(1)]).splitlines() == trepr
6,642
def get_columns(): columns = [ { "label": _("Sales Order"), "fieldname": "name", "fieldtype": "Link", "options": "Sales Order", "read_only": 1, }, { "label": _("Submitted"), "fieldname": "submitted", "fieldtype": "Date", "read_only": 1 }, { "label": _("Payment Term"), "fieldname": "payment_term", "fieldtype": "Data", "read_only": 1 }, { "label": _("Description"), "fieldname": "description", "fieldtype": "Data", "read_only": 1 }, { "label": _("Due Date"), "fieldname": "due_date", "fieldtype": "Date", "read_only": 1 }, { "label": _("Invoice Portion"), "fieldname": "invoice_portion", "fieldtype": "Percent", "read_only": 1, }, { "label": _("Payment Amount"), "fieldname": "payment_amount", "fieldtype": "Currency", "read_only": 1, }, { "label": _("Paid Amount"), "fieldname": "paid_amount", "fieldtype": "Currency", "read_only": 1 }, { "label": _("Invoices"), "fieldname": "invoices", "fieldtype": "Link", "options": "Sales Invoice", "read_only": 1, }, { "label": _("Status"), "fieldname": "status", "fieldtype": "Data", "read_only": 1 } ] return columns
def get_columns(): columns = [ { "label": _("Sales Order"), "fieldname": "name", "fieldtype": "Link", "options": "Sales Order", "read_only": 1, }, { "label": _("Submitted"), "fieldname": "submitted", "fieldtype": "Date", "read_only": 1 }, { "label": _("Payment Terms Template"), "fieldname": "payment_term", "fieldtype": "Data", "read_only": 1 }, { "label": _("Description"), "fieldname": "description", "fieldtype": "Data", "read_only": 1 }, { "label": _("Due Date"), "fieldname": "due_date", "fieldtype": "Date", "read_only": 1 }, { "label": _("Invoice Portion"), "fieldname": "invoice_portion", "fieldtype": "Percent", "read_only": 1, }, { "label": _("Payment Amount"), "fieldname": "payment_amount", "fieldtype": "Currency", "read_only": 1, }, { "label": _("Paid Amount"), "fieldname": "paid_amount", "fieldtype": "Currency", "read_only": 1 }, { "label": _("Invoices"), "fieldname": "invoices", "fieldtype": "Link", "options": "Sales Invoice", "read_only": 1, }, { "label": _("Status"), "fieldname": "status", "fieldtype": "Data", "read_only": 1 } ] return columns
31,867
def penfield_assign(analyst_ids, category, created, id, name, severity): return demisto.executeCommand("PenfieldGetAssignee", { 'analyst_ids': analyst_ids, 'category': category, 'created': created, 'id': id, 'name': name, 'severity': severity })
def penfield_assign(analyst_ids, category, created, id, name, severity): return demisto.executeCommand("penfield-get-assignee", { 'analyst_ids': analyst_ids, 'category': category, 'created': created, 'id': id, 'name': name, 'severity': severity })
46,757
def _dict_key_lookup(_dict, key, path=[]): """Look up any uses of a key in a nested dictionary. Adapted from https://stackoverflow.com/a/60377584/2589328. """ results = [] if isinstance(_dict, (dict, Namespace)): if key in _dict: results.append((path + [key], _dict[key])) for k, v in _dict.items(): results.extend(_dict_key_lookup(v, key, path=path + [k])) elif isinstance(_dict, list): for index, item in enumerate(_dict): results.extend(_dict_key_lookup(item, key, path=path + [index])) return results
def _dict_key_lookup(_dict, key, path=[]): """Look up any uses of a key in a nested dictionary. Adapted from https://stackoverflow.com/a/60377584/2589328. """ results = [] if isinstance(_dict, Mapping): if key in _dict: results.append((path + [key], _dict[key])) for k, v in _dict.items(): results.extend(_dict_key_lookup(v, key, path=path + [k])) elif isinstance(_dict, list): for index, item in enumerate(_dict): results.extend(_dict_key_lookup(item, key, path=path + [index])) return results
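For context, `_dict_key_lookup` walks a nested mapping/list structure and returns every path at which a key occurs. A minimal, self-contained sketch of the same idea on plain dictionaries is below; the `Namespace` branch of the original is omitted, a safer `None` default is used for the path argument, and the example config is invented for illustration.

from collections.abc import Mapping


def dict_key_lookup(node, key, path=None):
    """Return (path, value) pairs for every occurrence of `key` in a nested structure."""
    path = [] if path is None else path
    results = []
    if isinstance(node, Mapping):
        if key in node:
            results.append((path + [key], node[key]))
        for k, v in node.items():
            results.extend(dict_key_lookup(v, key, path=path + [k]))
    elif isinstance(node, list):
        for index, item in enumerate(node):
            results.extend(dict_key_lookup(item, key, path=path + [index]))
    return results


config = {"gpus": 1, "trainer": {"gpus": 2}, "callbacks": [{"gpus": 0}]}
print(dict_key_lookup(config, "gpus"))
# [(['gpus'], 1), (['trainer', 'gpus'], 2), (['callbacks', 0, 'gpus'], 0)]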
26,293
def _get_core_msg_lines(installed, latest) -> Tuple[List[List[str]], str]: installed_s = installed.to_version_string(skip_matcher=True) installed_line = ["installed", installed_s, ""] update_info = "" if latest is None: update_info = ( "The latest version of dbt could not be determined!\n" "Make sure that the following URL is accessible:\n" f"{PYPI_VERSION_URL}" ) return [installed_line], update_info latest_s = latest.to_version_string(skip_matcher=True) latest_line = ["latest", latest_s, green("Up to date!")] if installed > latest: latest_line[2] = green("Ahead of latest version!") elif installed < latest: latest_line[2] = yellow("Update available!") update_info = ( "Your version of dbt is out of date! " "You can find instructions for upgrading here:\n" "https://docs.getdbt.com/docs/installation" ) return [ installed_line, latest_line, ], update_info
def _get_core_msg_lines(installed, latest) -> Tuple[List[List[str]], str]: installed_s = installed.to_version_string(skip_matcher=True) installed_line = ["installed", installed_s, ""] update_info = "" if latest is None: update_info = ( "The latest version of dbt-core could not be determined.\n" "Make sure that the following URL is accessible:\n" f"{PYPI_VERSION_URL}" ) return [installed_line], update_info latest_s = latest.to_version_string(skip_matcher=True) latest_line = ["latest", latest_s, green("Up to date!")] if installed > latest: latest_line[2] = green("Ahead of latest version!") elif installed < latest: latest_line[2] = yellow("Update available!") update_info = ( "Your version of dbt is out of date! " "You can find instructions for upgrading here:\n" "https://docs.getdbt.com/docs/installation" ) return [ installed_line, latest_line, ], update_info
20,062
def test_accent(): input_data = r"""\"o""" tex = TeX() tex.input(input_data) node = tex.parse()[0] assert node.source == input_data
def test_accent(): input_data = r'\"o' tex = TeX() tex.input(input_data) node = tex.parse()[0] assert node.source == input_data
28,153
def get_runid_from_guid(conn: SomeConnection, guid: str) -> Union[int, None]: """ Get the run_id of a run based on the guid Args: conn: connection to the database guid: the guid to look up Returns: The run_id if found, else -1. Raises: RuntimeError if more than one run with the given GUID exists """ query = """ SELECT run_id FROM runs WHERE guid = ? """ cursor = conn.cursor() cursor.execute(query, (guid,)) rows = cursor.fetchall() if len(rows) == 0: run_id = -1 elif len(rows) > 1: errormssg = ('Critical consistency error: multiple runs with' f' the same GUID found! {len(rows)} runs have GUID ' f'{guid}') log.critical(errormssg) raise RuntimeError(errormssg) else: run_id = int(rows[0]['run_id']) return run_id
def get_runid_from_guid(conn: SomeConnection, guid: str) -> Union[int, None]: """ Get the run_id of a run based on the guid Args: conn: connection to the database guid: the guid to look up Returns: The run_id if found, else -1. Raises: RuntimeError if more than one run with the given GUID exists """ query = """ SELECT run_id FROM runs WHERE guid = ? """ cursor = conn.cursor() cursor.execute(query, (guid,)) rows = cursor.fetchall() if len(rows) == 0: run_id = -1 elif len(rows) > 1: errormssg = ('Critical consistency error: multiple runs with' f' the same GUID found! {len(rows)} runs have GUID ' f'{guid}') log.critical(errormssg) raise RuntimeError(errormssg) else: run_id = int(rows[0]['run_id']) return run_id
1,107
def _resolve_path(value, cwd): if isinstance(value, list): return [_resolve_path(v, cwd) for v in value] try: value = Path(value) except TypeError: pass else: if not value.is_absolute(): value = Path(cwd) / value return value
def _resolve_path(value, cwd): if isinstance(value, list): return [_resolve_path(v, cwd) for v in value] try: value = Path(value) except TypeError: pass else: if not value.is_absolute(): value = Path(cwd).absolute() / value return value
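For context, `_resolve_path` turns relative path-like values (or lists of them) into absolute paths anchored at a working directory, while leaving non-path values untouched. A small, self-contained sketch follows; the file names and directories are illustrative only.

from pathlib import Path


def resolve_path(value, cwd):
    """Resolve value (or each item of a list) against cwd; return non-path-like values unchanged."""
    if isinstance(value, list):
        return [resolve_path(v, cwd) for v in value]
    try:
        value = Path(value)
    except TypeError:
        return value
    if not value.is_absolute():
        value = Path(cwd).absolute() / value
    return value


print(resolve_path("data/train.csv", "/tmp/project"))  # /tmp/project/data/train.csv
print(resolve_path(["a.txt", "/etc/hosts"], "/tmp"))   # [PosixPath('/tmp/a.txt'), PosixPath('/etc/hosts')]
print(resolve_path(3, "/tmp"))                         # 3 (not path-like, returned as-is)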
5,636
def approx_fprime(xk, f, epsilon, *args): """Finite-difference approximation of the gradient of a scalar function. Parameters ---------- xk : array_like The coordinate vector at which to determine the gradient of `f`. f : callable The function of which to determine the gradient (partial derivatives). Should take `xk` as first argument, other arguments to `f` can be supplied in ``*args``. Should return a scalar, the value of the function at `xk`. epsilon : array_like Increment to `xk` to use for determining the function gradient. If a scalar, uses the same finite difference delta for all partial derivatives. If an array, should contain one value per element of `xk`. \\*args : args, optional Any other arguments that are to be passed to `f`. Returns ------- grad : ndarray The partial derivatives of `f` to `xk`. See Also -------- check_grad : Check correctness of gradient function against approx_fprime. Notes ----- The function gradient is determined by the forward finite difference formula:: f(xk[i] + epsilon[i]) - f(xk[i]) f'[i] = --------------------------------- epsilon[i] The main use of `approx_fprime` is in scalar function optimizers like `fmin_bfgs`, to determine numerically the Jacobian of a function. Examples -------- >>> from scipy import optimize >>> def func(x, c0, c1): ... "Coordinate vector `x` should be an array of size two." ... return c0 * x[0]**2 + c1*x[1]**2 >>> x = np.ones(2) >>> c0, c1 = (1, 200) >>> eps = np.sqrt(np.finfo(float).eps) >>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1) array([ 2. , 400.00004198]) """ xk = np.asarray(xk, float) rel_step = _compute_relative_step(epsilon, xk, '2-point') f0 = f(*((xk,) + args)) if not np.isscalar(f0): try: f0 = f0.item() except (ValueError, AttributeError): raise ValueError("The user-provided " "objective function must " "return a scalar value.") return approx_derivative(f, xk, method='2-point', rel_step=rel_step, args=args, f0=f0)
def approx_fprime(xk, f, epsilon, *args): """Finite-difference approximation of the gradient of a scalar function. Parameters ---------- xk : array_like The coordinate vector at which to determine the gradient of `f`. f : callable The function of which to determine the gradient (partial derivatives). Should take `xk` as first argument, other arguments to `f` can be supplied in ``*args``. Should return a scalar, the value of the function at `xk`. epsilon : array_like Increment to `xk` to use for determining the function gradient. If a scalar, uses the same finite difference delta for all partial derivatives. If an array, should contain one value per element of `xk`. \\*args : args, optional Any other arguments that are to be passed to `f`. Returns ------- grad : ndarray The partial derivatives of `f` to `xk`. See Also -------- check_grad : Check correctness of gradient function against approx_fprime. Notes ----- The function gradient is determined by the forward finite difference formula:: f(xk[i] + epsilon[i]) - f(xk[i]) f'[i] = --------------------------------- epsilon[i] The main use of `approx_fprime` is in scalar function optimizers like `fmin_bfgs`, to determine numerically the Jacobian of a function. Examples -------- >>> from scipy import optimize >>> def func(x, c0, c1): ... "Coordinate vector `x` should be an array of size two." ... return c0 * x[0]**2 + c1*x[1]**2 >>> x = np.ones(2) >>> c0, c1 = (1, 200) >>> eps = np.sqrt(np.finfo(float).eps) >>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1) array([ 2. , 400.00004198]) """ xk = np.asarray(xk, float) rel_step = _compute_relative_step(epsilon, xk, '2-point') f0 = f(xk, *args) if not np.isscalar(f0): try: f0 = f0.item() except (ValueError, AttributeError): raise ValueError("The user-provided " "objective function must " "return a scalar value.") return approx_derivative(f, xk, method='2-point', rel_step=rel_step, args=args, f0=f0)
10,708
def _iterator_codegen(resty): """The common codegen for iterator intrinsic. Populates the iterator struct and incref. """ def codegen(context, builder, sig, args): [d] = args [td] = sig.args iterhelper = context.make_helper(builder, resty) iterhelper.parent = d iterhelper.state = iterhelper.state.type(None) return impl_ret_borrowed( context, builder, resty, iterhelper._getvalue(), ) return codegen
def _iterator_codegen(resty): """The common codegen for iterator intrinsics. Populates the iterator struct and incref. """ def codegen(context, builder, sig, args): [d] = args [td] = sig.args iterhelper = context.make_helper(builder, resty) iterhelper.parent = d iterhelper.state = iterhelper.state.type(None) return impl_ret_borrowed( context, builder, resty, iterhelper._getvalue(), ) return codegen
272
def assign_step_methods(model, step=None, methods=STEP_METHODS, step_kwargs=None): """Assign model variables to appropriate step methods. Passing a specified model will auto-assign its constituent stochastic variables to step methods based on the characteristics of the variables. This function is intended to be called automatically from `sample()`, but may be called manually. Each step method passed should have a `competence()` method that returns an ordinal competence value corresponding to the variable passed to it. This value quantifies the appropriateness of the step method for sampling the variable. Parameters ---------- model : Model object A fully-specified model object step : step function or vector of step functions One or more step functions that have been assigned to some subset of the model's parameters. Defaults to `None` (no assigned variables). methods : vector of step method classes The set of step methods from which the function may choose. Defaults to the main step methods provided by PyMC3. step_kwargs : dict Parameters for the samplers. Keys are the lower case names of the step method, values a dict of arguments. Returns ------- methods : list List of step methods associated with the model's variables. """ steps = [] assigned_vars = set() if step is not None: try: steps += list(step) except TypeError: steps.append(step) for step in steps: try: assigned_vars = assigned_vars.union(set(step.vars)) except AttributeError: for method in step.methods: assigned_vars = assigned_vars.union(set(method.vars)) # Use competence classmethods to select step methods for remaining # variables selected_steps = defaultdict(list) for var in model.free_RVs: if var not in assigned_vars: # determine if a gradient can be computed has_gradient = var.dtype not in discrete_types if has_gradient: try: tg.grad(model.logpt, var) except (AttributeError, NotImplementedError, tg.NullTypeGradError): has_gradient = False # select the best method selected = max(methods, key=lambda method, var=var, has_gradient=has_gradient: method._competence(var, has_gradient)) selected_steps[selected].append(var) return instantiate_steppers(model, steps, selected_steps, step_kwargs)
def assign_step_methods(model, step=None, methods=STEP_METHODS, step_kwargs=None): """Assign model variables to appropriate step methods. Passing a specified model will auto-assign its constituent stochastic variables to step methods based on the characteristics of the variables. This function is intended to be called automatically from `sample()`, but may be called manually. Each step method passed should have a `competence()` method that returns an ordinal competence value corresponding to the variable passed to it. This value quantifies the appropriateness of the step method for sampling the variable. Parameters ---------- model : Model object A fully-specified model object step : step function or vector of step functions One or more step functions that have been assigned to some subset of the model's parameters. Defaults to ``None`` (no assigned variables). methods : vector of step method classes The set of step methods from which the function may choose. Defaults to the main step methods provided by PyMC3. step_kwargs : dict Parameters for the samplers. Keys are the lower case names of the step method, values a dict of arguments. Returns ------- methods : list List of step methods associated with the model's variables. """ steps = [] assigned_vars = set() if step is not None: try: steps += list(step) except TypeError: steps.append(step) for step in steps: try: assigned_vars = assigned_vars.union(set(step.vars)) except AttributeError: for method in step.methods: assigned_vars = assigned_vars.union(set(method.vars)) # Use competence classmethods to select step methods for remaining # variables selected_steps = defaultdict(list) for var in model.free_RVs: if var not in assigned_vars: # determine if a gradient can be computed has_gradient = var.dtype not in discrete_types if has_gradient: try: tg.grad(model.logpt, var) except (AttributeError, NotImplementedError, tg.NullTypeGradError): has_gradient = False # select the best method selected = max(methods, key=lambda method, var=var, has_gradient=has_gradient: method._competence(var, has_gradient)) selected_steps[selected].append(var) return instantiate_steppers(model, steps, selected_steps, step_kwargs)
57,821
def url_command(client, args, url_suspicious_score_threshold, url_malicious_score_threshold, reliability): urls = argToList(args.get("url"), ",") results = [] for url in urls: url_encoded = urllib.parse.quote(url, safe="") result = client.get_url_reputation(url_encoded) result['url'] = url human_readable = tableToMarkdown(f"IPQualityScore Results for {url}", result, result.keys()) if result.get('fraud_score', 0) >= url_malicious_score_threshold: score = 3 result['Malicious'] = {'Vendor': 'IPQualityScore'} elif result.get('fraud_score', 0) >= url_suspicious_score_threshold: score = 2 else: score = 0 reputation = Common.DBotScore( indicator=url, indicator_type=DBotScoreType.URL, score=score, integration_name='IPQualityScore', reliability=get_reputation_reliability(reliability) ) ip_context = Common.URL( url=url, dbot_score=reputation ) results.append(CommandResults( readable_output=human_readable, indicator=ip_context, outputs_prefix='IPQualityScore.Url', outputs_key_field='url', outputs=result, raw_response=result)) return results
def url_command(client, args, url_suspicious_score_threshold, url_malicious_score_threshold, reliability): urls = argToList(args.get("url"), ",") results = [] for url in urls: url_encoded = urllib.parse.quote(url, safe="") result = client.get_url_reputation(url_encoded) result['url'] = url human_readable = tableToMarkdown(f"IPQualityScore Results for {url}", result, result.keys()) if result.get('fraud_score', 0) >= url_malicious_score_threshold: score = 3 result['Malicious'] = {'Vendor': 'IPQualityScore'} elif result.get('fraud_score', 0) >= url_suspicious_score_threshold: score = 2 else: score = 0 reputation = Common.DBotScore( indicator=url, indicator_type=DBotScoreType.URL, score=score, integration_name='IPQualityScore', reliability=get_reputation_reliability(reliability) ) url_context = Common.URL( url=url, dbot_score=reputation ) results.append(CommandResults( readable_output=human_readable, indicator=url_context, outputs_prefix='IPQualityScore.Url', outputs_key_field='url', outputs=result, raw_response=result)) return results
3,672
def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fft', parent_package, top_path) config.add_data_dir('tests') # AIX needs to be told to use large file support - at all times defs = [('_LARGE_FILES', None)] if sys.platform[:3] == "aix" else [] # Configure pocketfft_internal config.add_extension('_pocketfft_internal', sources=['_pocketfft.c'], define_macros=defs ) return config
def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fft', parent_package, top_path) config.add_data_dir('tests') # AIX needs to be told to use large file support - at all times defs = [('_LARGE_FILES', None)] if sys.platform[:3] == "aix" else [] # Configure pocketfft_internal config.add_extension('_pocketfft_internal', sources=['_pocketfft.c'], define_macros=defs, ) return config
56,385
def relu6(x): """Rectifier Unit function clipped at 6. It computes .. math:: \\text{ReLU6}(x) = \\min(\\max(0, x), 6). Args: x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable. A :math:`(s_1, s_2, ..., s_n)`-shaped float array. Returns: ~chainer.Variable: Output variable. A :math:`(s_1, s_2, ..., s_n)`-shaped float array. .. admonition:: Example >>> x = np.random.uniform(-100, 100, (10, 20)).astype(np.float32) >>> np.any(x < 0) True >>> np.any(x > 6) True >>> y = F.relu6(x) >>> np.any(y.array < 0) False >>> np.any(y.array > 6) False """ y, = ClippedReLU(6.0).apply((x,)) return y
def relu6(x): """Rectifier Unit function clipped at 6. It computes .. math:: \\text{ReLU6}(x) = \\min(\\max(0, x), 6). Args: x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable. A :math:`(s_1, s_2, ..., s_n)`-shaped float array. Returns: ~chainer.Variable: Output variable. A :math:`(s_1, s_2, ..., s_n)`-shaped float array. .. seealso:: :func:`chainer.functions.clipped_relu` .. admonition:: Example >>> x = np.random.uniform(-100, 100, (10, 20)).astype(np.float32) >>> np.any(x < 0) True >>> np.any(x > 6) True >>> y = F.relu6(x) >>> np.any(y.array < 0) False >>> np.any(y.array > 6) False """ y, = ClippedReLU(6.0).apply((x,)) return y
28,582
def plot_ppc( data, kind="kde", alpha=None, mean=True, observed=True, color=None, colors=None, grid=None, figsize=None, textsize=None, data_pairs=None, var_names=None, filter_vars=None, coords=None, flatten=None, flatten_pp=None, num_pp_samples=None, random_seed=None, jitter=None, animated=False, animation_kwargs=None, legend=True, labeller=None, ax=None, backend=None, backend_kwargs=None, group="posterior", show=None, ): """ Plot for posterior/prior predictive checks. Parameters ---------- data: :class:`arviz.InferenceData` object InferenceData object containing the observed and posterior/prior predictive data. kind: str Type of plot to display ("kde", "cumulative", or "scatter"). Defaults to `kde`. alpha: float Opacity of posterior/prior predictive density curves. Defaults to `0.2` for ``kind`` = kde and cumulative, for scatter defaults to `0.7`. mean: bool Whether or not to plot the mean posterior/prior predictive distribution. Defaults to ``True``. observed: bool, default ``True`` Whether or not to plot the observed data. color: str Valid matplotlib ``color``. Defaults to `C0`. color: list List with valid matplotlib colors corresponding to the posterior/prior predictive distribution, observed data and mean of the posterior/prior predictive distribution. Defaults to ["C0", "k", "C1"]. grid : tuple Number of rows and columns. Defaults to None, the rows and columns are automatically inferred. figsize: tuple Figure size. If None, it will be defined automatically. textsize: float Text size scaling factor for labels, titles and lines. If None, it will be autoscaled based on ``figsize``. data_pairs: dict Dictionary containing relations between observed data and posterior/prior predictive data. Dictionary structure: - key = data var_name - value = posterior/prior predictive var_name For example, ``data_pairs = {'y' : 'y_hat'}`` If None, it will assume that the observed data and the posterior/prior predictive data have the same variable name. var_names: list of variable names Variables to be plotted, if `None` all variable are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, "like", "regex"}, optional, default=None If `None` (default), interpret var_names as the real variables names. If "like", interpret var_names as substrings of the real variables names. If "regex", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. coords: dict Dictionary mapping dimensions to selected coordinates to be plotted. Dimensions without a mapping specified will include all coordinates for that dimension. Defaults to including all coordinates for all dimensions if None. flatten: list List of dimensions to flatten in observed_data. Only flattens across the coordinates specified in the ``coords`` argument. Defaults to flattening all of the dimensions. flatten_pp: list List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens across the coordinates specified in the ``coords`` argument. Defaults to flattening all of the dimensions. Dimensions should match flatten excluding dimensions for ``data_pairs`` parameters. If ``flatten`` is defined and ``flatten_pp`` is None, then ``flatten_pp`` = `flatten`. num_pp_samples: int The number of posterior/prior predictive samples to plot. For ``kind`` = 'scatter' and `animation` = ``False`` if defaults to a maximum of 5 samples and will set jitter to `0.7`. unless defined. Otherwise it defaults to all provided samples. 
random_seed: int Random number generator seed passed to ``numpy.random.seed`` to allow reproducibility of the plot. By default, no seed will be provided and the plot will change each call if a random sample is specified by ``num_pp_samples``. jitter: float If ``kind`` is "scatter", jitter will add random uniform noise to the height of the ppc samples and observed data. By default `0`. animated: bool Create an animation of one posterior/prior predictive sample per frame. Defaults to ``False``. Only works with matploblib backend. To run animations inside a notebook you have to use the `nbAgg` matplotlib's backend. Try with `%matplotlib notebook` or `%matplotlib nbAgg`. You can switch back to the default matplotlib's backend with `%matplotlib inline` or `%matplotlib auto`. If switching back and forth between matplotlib's backend, you may need to run twice the cell with the animation. If you experience problems rendering the animation try setting `animation_kwargs({'blit':False}) or changing the matplotlib's backend (e.g. to TkAgg) If you run the animation from a script write `ax, ani = az.plot_ppc(.)` animation_kwargs : dict Keywords passed to ``animation.FuncAnimation``. Ignored with matplotlib backend. legend : bool Add legend to figure. By default ``True``. labeller : labeller instance, optional Class providing the method ``make_pp_label`` to generate the labels in the plot titles. Read the :ref:`label_guide` for more details and usage examples. ax: numpy array-like of matplotlib axes or bokeh figures, optional A 2D array of locations into which to plot the densities. If not supplied, Arviz will create its own array of plot areas (and return it). backend: str, optional Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib". backend_kwargs: bool, optional These are kwargs specific to the backend being used, passed to :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`. For additional documentation check the plotting method of the backend. group: {"prior", "posterior"}, optional Specifies which InferenceData group should be plotted. Defaults to `'posterior'`. Other value can be `'prior'`. show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures See Also -------- plot_bvp: Plot Bayesian p-value for observed data and Posterior/Prior predictive. Examples -------- Plot the observed data KDE overlaid on posterior predictive KDEs. .. plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data('radon') >>> az.plot_ppc(data, data_pairs={"y":"y"}) Plot the overlay with empirical CDFs. .. plot:: :context: close-figs >>> az.plot_ppc(data, kind='cumulative') Use the ``coords`` and ``flatten`` parameters to plot selected variable dimensions across multiple plots. We will now modify the dimension `obs_id` to contain indicate the name of the county where the measure was taken. The change has to be done on both ``posterior_predictive`` and ``observed_data`` groups, which is why we will use :meth:`~arviz.InferenceData.map` to apply the same function to both groups. Afterwards, we will select the counties to be plotted with the ``coords`` arg. .. plot:: :context: close-figs >>> obs_county = data.posterior["County"][data.constant_data["county_idx"]] >>> data = data.assign_coords(obs_id=obs_county, groups="observed_vars") >>> az.plot_ppc(data, coords={'obs_id': ['ANOKA', 'BELTRAMI']}, flatten=[]) Plot the overlay using a stacked scatter plot that is particularly useful when the sample sizes are small. .. 
plot:: :context: close-figs >>> az.plot_ppc(data, kind='scatter', flatten=[], >>> coords={'obs_id': ['AITKIN', 'BELTRAMI']}) Plot random posterior predictive sub-samples. .. plot:: :context: close-figs >>> az.plot_ppc(data, num_pp_samples=30, random_seed=7) """ if group not in ("posterior", "prior"): raise TypeError("`group` argument must be either `posterior` or `prior`") for groups in (f"{group}_predictive", "observed_data"): if not hasattr(data, groups): raise TypeError(f'`data` argument must have the group "{groups}" for ppcplot') if kind.lower() not in ("kde", "cumulative", "scatter"): raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`") if colors is None: colors = ["C0", "k", "C1"] if isinstance(colors, str): raise TypeError("colors should be a list with 3 items.") if len(colors) != 3: raise ValueError("colors should be a list with 3 items.") if color is not None: warnings.warn("color has been deprecated in favor of colors", FutureWarning) colors[0] = color if data_pairs is None: data_pairs = {} if backend is None: backend = rcParams["plot.backend"] backend = backend.lower() if backend == "bokeh": if animated: raise TypeError("Animation option is only supported with matplotlib backend.") observed_data = data.observed_data if group == "posterior": predictive_dataset = data.posterior_predictive elif group == "prior": predictive_dataset = data.prior_predictive if var_names is None: var_names = list(observed_data.data_vars) var_names = _var_names(var_names, observed_data, filter_vars) pp_var_names = [data_pairs.get(var, var) for var in var_names] pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars) if flatten_pp is None and flatten is None: flatten_pp = list(predictive_dataset.dims.keys()) elif flatten_pp is None: flatten_pp = flatten if flatten is None: flatten = list(observed_data.dims.keys()) if coords is None: coords = {} if labeller is None: labeller = BaseLabeller() if random_seed is not None: np.random.seed(random_seed) total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"] if num_pp_samples is None: if kind == "scatter" and not animated: num_pp_samples = min(5, total_pp_samples) else: num_pp_samples = total_pp_samples if ( not isinstance(num_pp_samples, Integral) or num_pp_samples < 1 or num_pp_samples > total_pp_samples ): raise TypeError( "`num_pp_samples` must be an integer between 1 and " + f"{total_pp_samples}." 
) pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False) for key in coords.keys(): coords[key] = np.where(np.in1d(observed_data[key], coords[key]))[0] obs_plotters = filter_plotters_list( list( xarray_var_iter( observed_data.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True, ) ), "plot_ppc", ) length_plotters = len(obs_plotters) pp_plotters = [ tup for _, tup in zip( range(length_plotters), xarray_var_iter( predictive_dataset.isel(coords), var_names=pp_var_names, skip_dims=set(flatten_pp), combined=True, ), ) ] rows, cols = default_grid(length_plotters, grid=grid) ppcplot_kwargs = dict( ax=ax, length_plotters=length_plotters, rows=rows, cols=cols, figsize=figsize, animated=animated, obs_plotters=obs_plotters, pp_plotters=pp_plotters, predictive_dataset=predictive_dataset, pp_sample_ix=pp_sample_ix, kind=kind, alpha=alpha, colors=colors, jitter=jitter, textsize=textsize, mean=mean, observed=observed, total_pp_samples=total_pp_samples, legend=legend, labeller=labeller, group=group, animation_kwargs=animation_kwargs, num_pp_samples=num_pp_samples, backend_kwargs=backend_kwargs, show=show, ) # TODO: Add backend kwargs plot = get_plotting_function("plot_ppc", "ppcplot", backend) axes = plot(**ppcplot_kwargs) return axes
def plot_ppc( data, kind="kde", alpha=None, mean=True, observed=True, color=None, colors=None, grid=None, figsize=None, textsize=None, data_pairs=None, var_names=None, filter_vars=None, coords=None, flatten=None, flatten_pp=None, num_pp_samples=None, random_seed=None, jitter=None, animated=False, animation_kwargs=None, legend=True, labeller=None, ax=None, backend=None, backend_kwargs=None, group="posterior", show=None, ): """ Plot for posterior/prior predictive checks. Parameters ---------- data: :class:`arviz.InferenceData` object InferenceData object containing the observed and posterior/prior predictive data. kind: str Type of plot to display ("kde", "cumulative", or "scatter"). Defaults to `kde`. alpha: float Opacity of posterior/prior predictive density curves. Defaults to `0.2` for ``kind`` = kde and cumulative, for scatter defaults to `0.7`. mean: bool Whether or not to plot the mean posterior/prior predictive distribution. Defaults to ``True``. observed: bool, default True Whether or not to plot the observed data. color: str Valid matplotlib ``color``. Defaults to `C0`. color: list List with valid matplotlib colors corresponding to the posterior/prior predictive distribution, observed data and mean of the posterior/prior predictive distribution. Defaults to ["C0", "k", "C1"]. grid : tuple Number of rows and columns. Defaults to None, the rows and columns are automatically inferred. figsize: tuple Figure size. If None, it will be defined automatically. textsize: float Text size scaling factor for labels, titles and lines. If None, it will be autoscaled based on ``figsize``. data_pairs: dict Dictionary containing relations between observed data and posterior/prior predictive data. Dictionary structure: - key = data var_name - value = posterior/prior predictive var_name For example, ``data_pairs = {'y' : 'y_hat'}`` If None, it will assume that the observed data and the posterior/prior predictive data have the same variable name. var_names: list of variable names Variables to be plotted, if `None` all variable are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, "like", "regex"}, optional, default=None If `None` (default), interpret var_names as the real variables names. If "like", interpret var_names as substrings of the real variables names. If "regex", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. coords: dict Dictionary mapping dimensions to selected coordinates to be plotted. Dimensions without a mapping specified will include all coordinates for that dimension. Defaults to including all coordinates for all dimensions if None. flatten: list List of dimensions to flatten in observed_data. Only flattens across the coordinates specified in the ``coords`` argument. Defaults to flattening all of the dimensions. flatten_pp: list List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens across the coordinates specified in the ``coords`` argument. Defaults to flattening all of the dimensions. Dimensions should match flatten excluding dimensions for ``data_pairs`` parameters. If ``flatten`` is defined and ``flatten_pp`` is None, then ``flatten_pp`` = `flatten`. num_pp_samples: int The number of posterior/prior predictive samples to plot. For ``kind`` = 'scatter' and `animation` = ``False`` if defaults to a maximum of 5 samples and will set jitter to `0.7`. unless defined. Otherwise it defaults to all provided samples. 
random_seed: int Random number generator seed passed to ``numpy.random.seed`` to allow reproducibility of the plot. By default, no seed will be provided and the plot will change each call if a random sample is specified by ``num_pp_samples``. jitter: float If ``kind`` is "scatter", jitter will add random uniform noise to the height of the ppc samples and observed data. By default `0`. animated: bool Create an animation of one posterior/prior predictive sample per frame. Defaults to ``False``. Only works with matploblib backend. To run animations inside a notebook you have to use the `nbAgg` matplotlib's backend. Try with `%matplotlib notebook` or `%matplotlib nbAgg`. You can switch back to the default matplotlib's backend with `%matplotlib inline` or `%matplotlib auto`. If switching back and forth between matplotlib's backend, you may need to run twice the cell with the animation. If you experience problems rendering the animation try setting `animation_kwargs({'blit':False}) or changing the matplotlib's backend (e.g. to TkAgg) If you run the animation from a script write `ax, ani = az.plot_ppc(.)` animation_kwargs : dict Keywords passed to ``animation.FuncAnimation``. Ignored with matplotlib backend. legend : bool Add legend to figure. By default ``True``. labeller : labeller instance, optional Class providing the method ``make_pp_label`` to generate the labels in the plot titles. Read the :ref:`label_guide` for more details and usage examples. ax: numpy array-like of matplotlib axes or bokeh figures, optional A 2D array of locations into which to plot the densities. If not supplied, Arviz will create its own array of plot areas (and return it). backend: str, optional Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib". backend_kwargs: bool, optional These are kwargs specific to the backend being used, passed to :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`. For additional documentation check the plotting method of the backend. group: {"prior", "posterior"}, optional Specifies which InferenceData group should be plotted. Defaults to `'posterior'`. Other value can be `'prior'`. show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures See Also -------- plot_bvp: Plot Bayesian p-value for observed data and Posterior/Prior predictive. Examples -------- Plot the observed data KDE overlaid on posterior predictive KDEs. .. plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data('radon') >>> az.plot_ppc(data, data_pairs={"y":"y"}) Plot the overlay with empirical CDFs. .. plot:: :context: close-figs >>> az.plot_ppc(data, kind='cumulative') Use the ``coords`` and ``flatten`` parameters to plot selected variable dimensions across multiple plots. We will now modify the dimension `obs_id` to contain indicate the name of the county where the measure was taken. The change has to be done on both ``posterior_predictive`` and ``observed_data`` groups, which is why we will use :meth:`~arviz.InferenceData.map` to apply the same function to both groups. Afterwards, we will select the counties to be plotted with the ``coords`` arg. .. plot:: :context: close-figs >>> obs_county = data.posterior["County"][data.constant_data["county_idx"]] >>> data = data.assign_coords(obs_id=obs_county, groups="observed_vars") >>> az.plot_ppc(data, coords={'obs_id': ['ANOKA', 'BELTRAMI']}, flatten=[]) Plot the overlay using a stacked scatter plot that is particularly useful when the sample sizes are small. .. 
plot:: :context: close-figs >>> az.plot_ppc(data, kind='scatter', flatten=[], >>> coords={'obs_id': ['AITKIN', 'BELTRAMI']}) Plot random posterior predictive sub-samples. .. plot:: :context: close-figs >>> az.plot_ppc(data, num_pp_samples=30, random_seed=7) """ if group not in ("posterior", "prior"): raise TypeError("`group` argument must be either `posterior` or `prior`") for groups in (f"{group}_predictive", "observed_data"): if not hasattr(data, groups): raise TypeError(f'`data` argument must have the group "{groups}" for ppcplot') if kind.lower() not in ("kde", "cumulative", "scatter"): raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`") if colors is None: colors = ["C0", "k", "C1"] if isinstance(colors, str): raise TypeError("colors should be a list with 3 items.") if len(colors) != 3: raise ValueError("colors should be a list with 3 items.") if color is not None: warnings.warn("color has been deprecated in favor of colors", FutureWarning) colors[0] = color if data_pairs is None: data_pairs = {} if backend is None: backend = rcParams["plot.backend"] backend = backend.lower() if backend == "bokeh": if animated: raise TypeError("Animation option is only supported with matplotlib backend.") observed_data = data.observed_data if group == "posterior": predictive_dataset = data.posterior_predictive elif group == "prior": predictive_dataset = data.prior_predictive if var_names is None: var_names = list(observed_data.data_vars) var_names = _var_names(var_names, observed_data, filter_vars) pp_var_names = [data_pairs.get(var, var) for var in var_names] pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars) if flatten_pp is None and flatten is None: flatten_pp = list(predictive_dataset.dims.keys()) elif flatten_pp is None: flatten_pp = flatten if flatten is None: flatten = list(observed_data.dims.keys()) if coords is None: coords = {} if labeller is None: labeller = BaseLabeller() if random_seed is not None: np.random.seed(random_seed) total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"] if num_pp_samples is None: if kind == "scatter" and not animated: num_pp_samples = min(5, total_pp_samples) else: num_pp_samples = total_pp_samples if ( not isinstance(num_pp_samples, Integral) or num_pp_samples < 1 or num_pp_samples > total_pp_samples ): raise TypeError( "`num_pp_samples` must be an integer between 1 and " + f"{total_pp_samples}." 
) pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False) for key in coords.keys(): coords[key] = np.where(np.in1d(observed_data[key], coords[key]))[0] obs_plotters = filter_plotters_list( list( xarray_var_iter( observed_data.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True, ) ), "plot_ppc", ) length_plotters = len(obs_plotters) pp_plotters = [ tup for _, tup in zip( range(length_plotters), xarray_var_iter( predictive_dataset.isel(coords), var_names=pp_var_names, skip_dims=set(flatten_pp), combined=True, ), ) ] rows, cols = default_grid(length_plotters, grid=grid) ppcplot_kwargs = dict( ax=ax, length_plotters=length_plotters, rows=rows, cols=cols, figsize=figsize, animated=animated, obs_plotters=obs_plotters, pp_plotters=pp_plotters, predictive_dataset=predictive_dataset, pp_sample_ix=pp_sample_ix, kind=kind, alpha=alpha, colors=colors, jitter=jitter, textsize=textsize, mean=mean, observed=observed, total_pp_samples=total_pp_samples, legend=legend, labeller=labeller, group=group, animation_kwargs=animation_kwargs, num_pp_samples=num_pp_samples, backend_kwargs=backend_kwargs, show=show, ) # TODO: Add backend kwargs plot = get_plotting_function("plot_ppc", "ppcplot", backend) axes = plot(**ppcplot_kwargs) return axes
43,601
def decompose_hamiltonian(H): """Decomposes a hamiltonian into tensor product of pauli matrices Args: H (matrix): dimensions 2**n Yields: list: coefficients for every tensor product of pauli matrix combinations list: tensor product of pauli matrix combinations """ N = int(np.log2(len(H))) if len(H) - 2 ** N != 0: raise ValueError("Hamiltonian should be in the form (n^2 x n^2), for any n>=1") # paulis = [qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ] obs = [] coeffs = [] # for term in itertools.product(paulis, repeat=N): matrices = [i._matrix() for i in term] coeff = np.trace(functools.reduce(np.kron, matrices) @ H) / (2 ** N) # if not np.allclose(coeff, 0): coeffs.append(coeff) # if not all(t is qml.Identity for t in term): obs.append( functools.reduce( operator.matmul, [t(i) for i, t in enumerate(term) if t is not qml.Identity] ) ) else: obs.append(functools.reduce(operator.matmul, [t(i) for i, t in enumerate(term)])) # obs.append(functools.reduce(operator.matmul, [t(i) for i, t in enumerate(term)])) # return coeffs, obs
def decompose_hamiltonian(H): """Decomposes a hamiltonian into tensor product of pauli matrices Args: H (array[complex]): an Hermitian matrix of dimension :math:`2^N\times 2^N` Yields: list: coefficients for every tensor product of pauli matrix combinations list: tensor product of pauli matrix combinations """ N = int(np.log2(len(H))) if len(H) - 2 ** N != 0: raise ValueError("Hamiltonian should be in the form (n^2 x n^2), for any n>=1") # paulis = [qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ] obs = [] coeffs = [] # for term in itertools.product(paulis, repeat=N): matrices = [i._matrix() for i in term] coeff = np.trace(functools.reduce(np.kron, matrices) @ H) / (2 ** N) # if not np.allclose(coeff, 0): coeffs.append(coeff) # if not all(t is qml.Identity for t in term): obs.append( functools.reduce( operator.matmul, [t(i) for i, t in enumerate(term) if t is not qml.Identity] ) ) else: obs.append(functools.reduce(operator.matmul, [t(i) for i, t in enumerate(term)])) # obs.append(functools.reduce(operator.matmul, [t(i) for i, t in enumerate(term)])) # return coeffs, obs
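A minimal usage sketch for the pair above, assuming the decompose_hamiltonian helper (and the PennyLane, NumPy, itertools, functools and operator imports its module relies on, including the private Operation._matrix method) is in scope. The matrix H below is an illustrative example, not taken from the original code.

import numpy as np

# Illustrative 2-qubit Hermitian matrix: H = Z (x) Z + 0.5 * X (x) I
Z = np.array([[1.0, 0.0], [0.0, -1.0]])
X = np.array([[0.0, 1.0], [1.0, 0.0]])
I2 = np.eye(2)
H = np.kron(Z, Z) + 0.5 * np.kron(X, I2)

# decompose_hamiltonian (defined above) should report only two surviving terms:
# roughly 0.5 for PauliX on wire 0 and 1.0 for PauliZ(0) @ PauliZ(1).
coeffs, obs = decompose_hamiltonian(H)
for c, o in zip(coeffs, obs):
    print(round(float(np.real(c)), 6), o)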
12,315
def w_state(N=3, *, dtype=_data.Dense): """ Returns the N-qubit W-state: ``[ |100..0> + |010..0> + |001..0> + ... |000..1> ] / sqrt(n)`` Parameters ---------- N : int (default=3) Number of qubits in state dtype : type or str Storage representation. Any data-layer known to `qutip.data.to` is accepted. Returns ------- W : :obj:`~qobj` N-qubit W-state """ inds = np.zeros(N, dtype=int) inds[0] = 1 state = basis([2]*N, list(inds), dtype=dtype) for kk in range(1, N): state += basis([2]*N, list(np.roll(inds, kk)), dtype=dtype) return np.sqrt(1 / N) * state
def w_state(N=3, *, dtype=_data.Dense): """ Returns the N-qubit W-state: ``[ |100..0> + |010..0> + |001..0> + ... |000..1> ] / sqrt(n)`` Parameters ---------- N : int (default=3) Number of qubits in state dtype : type or str Storage representation. Any data-layer known to `qutip.data.to` is accepted. Returns ------- W : :obj:`~Qobj` N-qubit W-state """ inds = np.zeros(N, dtype=int) inds[0] = 1 state = basis([2]*N, list(inds), dtype=dtype) for kk in range(1, N): state += basis([2]*N, list(np.roll(inds, kk)), dtype=dtype) return np.sqrt(1 / N) * state
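A small usage sketch, assuming w_state as defined above (together with the qutip basis and _data imports its module uses) is available; the index check simply confirms which computational-basis states carry amplitude.

import numpy as np

# Build the 3-qubit W state and inspect it (w_state defined above).
w = w_state(3)
print(w.norm())  # expected: 1.0, the state is normalised

# Non-zero amplitudes should sit at basis indices 1, 2 and 4,
# i.e. |001>, |010> and |100>, each with magnitude 1/sqrt(3).
amps = w.full().ravel()
print(np.flatnonzero(np.abs(amps) > 1e-12))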
8,756
def etymology(word): # @@ <nsh> sbp, would it be possible to have a flag for .ety to get 2nd/etc # entries? - http://swhack.com/logs/2006-07-19#T15-05-29 if len(word) == 0: return 'No word added.' if len(word) > 25: return "Word too long: %s[…]" % word[:10] ety = get(ETYURI % web.quote(word)) if ety.status_code != 200: return None # Let's find it start = ety.text.find("word__defination") start = ety.text.find("<p>", start) stop = ety.text.find("</p>", start) sentence = ety.text[start + 3:stop] # Clean up sentence = unescape(sentence) sentence = sub('<[^<]+?>', '', sentence) maxlength = 275 if len(sentence) > maxlength: sentence = sentence[:maxlength] words = sentence[:-5].split(' ') words.pop() sentence = ' '.join(words) + ' […]' sentence = '"' + sentence.replace('"', "'") + '"' return sentence + ' - ' + (ETYURI % web.quote(word))
def etymology(word): # @@ <nsh> sbp, would it be possible to have a flag for .ety to get 2nd/etc # entries? - http://swhack.com/logs/2006-07-19#T15-05-29 if len(word) == 0: return 'No word added.' if len(word) > 25: raise ValueError('Word too long: %s[…]' % word[:10]) ety = get(ETYURI % web.quote(word)) if ety.status_code != 200: return None # Let's find it start = ety.text.find("word__defination") start = ety.text.find("<p>", start) stop = ety.text.find("</p>", start) sentence = ety.text[start + 3:stop] # Clean up sentence = unescape(sentence) sentence = sub('<[^<]+?>', '', sentence) maxlength = 275 if len(sentence) > maxlength: sentence = sentence[:maxlength] words = sentence[:-5].split(' ') words.pop() sentence = ' '.join(words) + ' […]' sentence = '"' + sentence.replace('"', "'") + '"' return sentence + ' - ' + (ETYURI % web.quote(word))
13,558
def QR_iteration(H, shifts):
    """Perform the QR iteration.

    Performs a QR step for each shift provided in `shifts`. `H` is assumed to be an
    unreduced upper Hessenberg matrix. If a complex shift occurs a double step is
    performed in order to avoid complex arithmetic.

    Parameters
    ----------
    H
        The |NumPy array| H which is an unreduced upper Hessenberg matrix.
    shifts
        A |NumPy array| which contains the shifts that are to be applied in the QR steps.

    Returns
    -------
    Hs
        A |NumPy array| in upper Hessenberg form such that :math:`H Q_s = Q_s H_s` holds.
    Qs
        The product of the orthogonal matrices computed in each QR step.
    """
    Qs = np.eye(len(H))

    i = 0
    while i < len(shifts) - 1:
        s = shifts[i]
        if shifts[i].imag != 0:
            Q, R = np.linalg.qr(H @ H - 2 * np.real(s) * H + np.abs(s)**2 * np.eye(len(H)))
            i = i + 2
        else:
            Q, R = np.linalg.qr(H - s * np.eye(len(H)))
            i = i + 1
        Qs = Qs @ Q
        H = Q.T @ H @ Q

    return H, Qs
def QR_iteration(H, shifts):
    """Perform the QR iteration.

    Performs a QR step for each shift provided in `shifts`. `H` is assumed to be an
    unreduced upper Hessenberg matrix. If a complex shift occurs a double step is
    performed in order to avoid complex arithmetic.

    Parameters
    ----------
    H
        The |NumPy array| H which is an unreduced upper Hessenberg matrix.
    shifts
        A |NumPy array| which contains the shifts that are to be applied in the QR steps.

    Returns
    -------
    Hs
        A |NumPy array| in upper Hessenberg form such that :math:`H Q_s = Q_s H_s` holds.
    Qs
        The product of the orthogonal matrices computed in each QR step.
    """
    Qs = np.eye(len(H))

    i = 0
    while i < len(shifts) - 1:
        s = shifts[i]
        if shifts[i].imag != 0:
            Q, _ = np.linalg.qr(H @ H - 2 * s.real * H + np.abs(s)**2 * np.eye(len(H)))
            i = i + 2
        else:
            Q, R = np.linalg.qr(H - s * np.eye(len(H)))
            i = i + 1
        Qs = Qs @ Q
        H = Q.T @ H @ Q

    return H, Qs
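A quick numerical check of the contract stated in the docstring above, written as a minimal sketch: it assumes the QR_iteration function defined above is in scope, and uses SciPy only to build a Hessenberg test matrix. The example values (matrix size, shifts, seed) are illustrative.

import numpy as np
from scipy.linalg import hessenberg  # only used to construct a Hessenberg test matrix

rng = np.random.default_rng(0)
H0 = hessenberg(rng.standard_normal((6, 6)))

# Pass a few real shifts (the loop above consumes them while i < len(shifts) - 1)
# and verify the similarity relation H0 @ Qs == Qs @ Hs and the orthogonality of Qs.
Hs, Qs = QR_iteration(H0, np.array([1.0, 2.0, 3.0]))
print(np.allclose(H0 @ Qs, Qs @ Hs))      # expected: True
print(np.allclose(Qs.T @ Qs, np.eye(6)))  # expected: True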
43,932
def _hermite_coulomb(t, u, v, n, p, dr): """Evaluate Hermite integral needed to compute the nuclear attraction and electron repulsion integrals. These integrals are computed recursively starting from the Boys function [`Helgaker (1995) p817 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]: .. math:: R_{000}^n = (-2p)^n F_n(pR_{CP}^2), where :math:`F_n` is the Boys function, :math:`p` is computed from the exponents of the two Gaussian functions as :math:`p = \alpha + \beta`, and :math:`R_{CP}` is the distance between the center of the composite Gaussian centered at :math:`P` and the electrostatic potential at :math:`C`. The following recursive equations are used to compute the evaluate the higher order Hermite integrals .. math:: R_{t+1, u, v}^n = t R_{t-1, u, v}^{n+1} + x R_{t, u, v}^{n+1} R_{t, u+1, v}^n = u R_{t, u-1, v}^{n+1} + y R_{t, u, v}^{n+1} R_{t, u, v+1}^n = v R_{t, u, v-1}^{n+1} + z R_{t, u, v}^{n+1} where :math:`x`, :math:`y` and :math:`z` are the Cartesian components of :math:`R_{CP}`. Args: t (integer): order of Hermite derivative in x u (integer): order of Hermite derivative in y v (float): order of Hermite derivative in z n (integer): order of the Boys function p (float): sum of the Gaussian exponents dr (array[float]): distance between the center of the composite Gaussian and the nucleus Returns: array[float]: value of the Hermite integral """ x, y, z = dr[0], dr[1], dr[2] T = p * (dr ** 2).sum(axis=0) r = 0 if t == u == v == 0: f = [] for term in T.flatten(): f.append(_boys(n, term)) return ((-2 * p) ** n) * anp.array(f).reshape(T.shape) if t == u == 0: if v > 1: r = r + (v - 1) * _hermite_coulomb(t, u, v - 2, n + 1, p, dr) r = r + z * _hermite_coulomb(t, u, v - 1, n + 1, p, dr) return r if t == 0: if u > 1: r = r + (u - 1) * _hermite_coulomb(t, u - 2, v, n + 1, p, dr) r = r + y * _hermite_coulomb(t, u - 1, v, n + 1, p, dr) return r if t > 1: r = r + (t - 1) * _hermite_coulomb(t - 2, u, v, n + 1, p, dr) r = r + x * _hermite_coulomb(t - 1, u, v, n + 1, p, dr) return r
def _hermite_coulomb(t, u, v, n, p, dr): """Evaluate Hermite integral needed to compute the nuclear attraction and electron repulsion integrals. These integrals are computed recursively starting from the Boys function [`Helgaker (1995) p817 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]: .. math:: R_{000}^n = (-2p)^n F_n(pR_{CP}^2), where :math:`F_n` is the Boys function, :math:`p` is computed from the exponents of the two Gaussian functions as :math:`p = \alpha + \beta`, and :math:`R_{CP}` is the distance between the center of the composite Gaussian centered at :math:`P` and the electrostatic potential at :math:`C`. The following recursive equations are used to compute the higher-order Hermite integrals .. math:: R_{t+1, u, v}^n = t R_{t-1, u, v}^{n+1} + x R_{t, u, v}^{n+1} R_{t, u+1, v}^n = u R_{t, u-1, v}^{n+1} + y R_{t, u, v}^{n+1} R_{t, u, v+1}^n = v R_{t, u, v-1}^{n+1} + z R_{t, u, v}^{n+1} where :math:`x`, :math:`y` and :math:`z` are the Cartesian components of :math:`R_{CP}`. Args: t (integer): order of Hermite derivative in x u (integer): order of Hermite derivative in y v (float): order of Hermite derivative in z n (integer): order of the Boys function p (float): sum of the Gaussian exponents dr (array[float]): distance between the center of the composite Gaussian and the nucleus Returns: array[float]: value of the Hermite integral """ x, y, z = dr[0], dr[1], dr[2] T = p * (dr ** 2).sum(axis=0) r = 0 if t == u == v == 0: f = [] for term in T.flatten(): f.append(_boys(n, term)) return ((-2 * p) ** n) * anp.array(f).reshape(T.shape) if t == u == 0: if v > 1: r = r + (v - 1) * _hermite_coulomb(t, u, v - 2, n + 1, p, dr) r = r + z * _hermite_coulomb(t, u, v - 1, n + 1, p, dr) return r if t == 0: if u > 1: r = r + (u - 1) * _hermite_coulomb(t, u - 2, v, n + 1, p, dr) r = r + y * _hermite_coulomb(t, u - 1, v, n + 1, p, dr) return r if t > 1: r = r + (t - 1) * _hermite_coulomb(t - 2, u, v, n + 1, p, dr) r = r + x * _hermite_coulomb(t - 1, u, v, n + 1, p, dr) return r
57,146
def get_package_file_contents(package: str, filepath: str) -> str: """Open file and return its contents. This needs to be used for files that are loaded by the Python code directly, like constants.ts or rich_text_components.json. This function is needed to make loading these files work even when Oppia is packaged. Args: package: str. The package where the file is located. For Oppia the package is usually the folder in the root folder, like 'core' or 'extensions'. filepath: str. The path to the file in the package. Returns: str. The contents of the file. """ try: with io.open( os.path.join(package, filepath), 'r', encoding='utf-8') as file: return file.read() except FileNotFoundError: return pkgutil.get_data(package, filepath).decode('utf-8')
def get_package_file_contents(package: str, filepath: str) -> str: """Open file and return its contents. This needs to be used for files that are loaded by the Python code directly, like constants.ts or rich_text_components.json. This function is needed to make loading these files work even when Oppia is packaged. Args: package: str. The package where the file is located. For Oppia the package is usually the folder in the root folder, like 'core' or 'extensions'. filepath: str. The path to the file in the package. Returns: str. The contents of the file. """ try: with io.open( os.path.join(package, filepath), 'r', encoding='utf-8' ) as file: return file.read() except FileNotFoundError: return pkgutil.get_data(package, filepath).decode('utf-8')
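A hypothetical usage sketch: 'assets' and 'constants.ts' below are illustrative arguments only (the actual package layout may differ), and the function above together with its io/os/pkgutil imports is assumed to be in scope.

# Hypothetical example arguments; adjust to a package/file that exists locally.
contents = get_package_file_contents('assets', 'constants.ts')
print(len(contents))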
40,131
def apt_install_packages(*packages: str): ''' Install packages on Ubuntu / Debian / Mint / Kali systems. :param packages: Iterable containing packages to install. ''' log_current_packages(packages) return _run_shell_command_raise_on_return_code(f"sudo apt-get install -y {' '.join(packages)}", f"Error in installation of package(s) {' '.join(packages)}", True)
def apt_install_packages(*packages: str):
    '''
    Install packages on Ubuntu / Debian / Mint / Kali systems.

    :param packages: Iterable containing packages to install.
    '''
    return _run_shell_command_raise_on_return_code(f'sudo apt-get install -y {" ".join(packages)}', f'Error in installation of package(s) {" ".join(packages)}', True)
12,705
def _validate_metadata(metadata: LockfileMetadata, request: PexRequest, python_setup: PythonSetup): if metadata.is_valid_for( request.requirements.lockfile_hex_digest, request.interpreter_constraints, python_setup.interpreter_universe, ): return None message_1 = f"Invalid lockfile for PEX request `{request.output_filename}`. " message_2 = ( "If your requirements or interpreter constraints have changed, follow the " "instructions in the header of the lockfile to regenerate it. Otherwise, ensure your interpreter " "constraints are compatible with the constraints specified in the lockfile." ) if python_setup.invalid_lockfile_behavior == InvalidLockfileBehavior.error: logger.error("%s %s", message_1, message_2) raise ValueError(message_1) elif python_setup.invalid_lockfile_behavior == InvalidLockfileBehavior.warn: logger.warning("%s %s", message_1, message_2)
def _validate_metadata(metadata: LockfileMetadata, request: PexRequest, python_setup: PythonSetup) -> None: if metadata.is_valid_for( request.requirements.lockfile_hex_digest, request.interpreter_constraints, python_setup.interpreter_universe, ): return None message_1 = f"Invalid lockfile for PEX request `{request.output_filename}`. " message_2 = ( "If your requirements or interpreter constraints have changed, follow the " "instructions in the header of the lockfile to regenerate it. Otherwise, ensure your interpreter " "constraints are compatible with the constraints specified in the lockfile." ) if python_setup.invalid_lockfile_behavior == InvalidLockfileBehavior.error: logger.error("%s %s", message_1, message_2) raise ValueError(message_1) elif python_setup.invalid_lockfile_behavior == InvalidLockfileBehavior.warn: logger.warning("%s %s", message_1, message_2)
58,034
def category_mod_command(client, args): uuid = args.get('uuid') original = client.category_getItem({'uuid': uuid}) modified = {"category": {}} # type: Dict[str, Any] for key in original['category'].keys(): newvalue = args.get(key) if newvalue is None: modified['category'][key] = original['category'][key] else: modified['category'][key] = newvalue result = client.category_setItem(uuid, modified) output = output_format(result, 'Category', 'Category uuid : ' + uuid + ' modified:') return output
def category_mod_command(client, args): uuid = args.get('uuid') original = client.category_getItem({'uuid': uuid}) modified = {"category": {}} # type: Dict[str, Any] for key in original['category'].keys(): newvalue = args.get(key, original['category'][key]) modified['category'][key] = newvalue result = client.category_setItem(uuid, modified) output = output_format(result, 'Category', 'Category uuid : ' + uuid + ' modified:') return output
48,456
def add_fragments(doc, filename, fragment_loader, is_module=False): fragments = doc.pop('extends_documentation_fragment', []) if isinstance(fragments, string_types): fragments = [fragments] unknown_fragments = [] # doc_fragments are allowed to specify a fragment var other than DOCUMENTATION # with a . separator; this is complicated by collections-hosted doc_fragments that # use the same separator. Assume it's collection-hosted normally first, try to load # as-specified. If failure, assume the right-most component is a var, split it off, # and retry the load. for fragment_slug in fragments: fragment_name = fragment_slug fragment_var = 'DOCUMENTATION' fragment_class = fragment_loader.get(fragment_name) if fragment_class is None and '.' in fragment_slug: splitname = fragment_slug.rsplit('.', 1) fragment_name = splitname[0] fragment_var = splitname[1].upper() fragment_class = fragment_loader.get(fragment_name) if fragment_class is None: unknown_fragments.append(fragment_slug) continue fragment_yaml = getattr(fragment_class, fragment_var, None) if fragment_yaml is None: if fragment_var != 'DOCUMENTATION': # if it's asking for something specific that's missing, that's an error unknown_fragments.append(fragment_slug) continue else: fragment_yaml = '{}' # TODO: this is still an error later since we require 'options' below... fragment = AnsibleLoader(fragment_yaml, file_name=filename).get_single_data() real_collection_name = 'ansible.builtin' real_fragment_name = getattr(fragment_class, '_load_name') if real_fragment_name.startswith('ansible_collections.'): real_collection_name = '.'.join(real_fragment_name.split('.')[1:3]) add_collection_to_versions_and_dates(fragment, real_collection_name, is_module=is_module) if 'notes' in fragment: notes = fragment.pop('notes') if notes: if 'notes' not in doc: doc['notes'] = [] doc['notes'].extend(notes) if 'seealso' in fragment: seealso = fragment.pop('seealso') if seealso: if 'seealso' not in doc: doc['seealso'] = [] doc['seealso'].extend(seealso) if 'options' not in fragment and 'attributes' not in fragment: raise Exception("missing options or attributes in fragment (%s), possibly misformatted?: %s" % (fragment_name, filename)) # ensure options themselves are directly merged if 'options' in fragment: if 'options' in doc: try: merge_fragment(doc['options'], fragment.pop('options')) except Exception as e: raise AnsibleError("%s options (%s) of unknown type: %s" % (to_native(e), fragment_name, filename)) else: doc['options'] = fragment.pop('options') # same with fragments as with options if 'attributes' in fragment: if 'attributes' in doc: try: merge_fragment(doc['attributes'], fragment.pop('attributes')) except Exception as e: raise AnsibleError("%s attributes (%s) of unknown type: %s" % (to_native(e), fragment_name, filename)) else: doc['attributes'] = fragment.pop('attributes') # merge rest of the sections try: merge_fragment(doc, fragment) except Exception as e: raise AnsibleError("%s (%s) of unknown type: %s" % (to_native(e), fragment_name, filename)) if unknown_fragments: raise AnsibleError('unknown doc_fragment(s) in file {0}: {1}'.format(filename, to_native(', '.join(unknown_fragments))))
def add_fragments(doc, filename, fragment_loader, is_module=False): fragments = doc.pop('extends_documentation_fragment', []) if isinstance(fragments, string_types): fragments = [fragments] unknown_fragments = [] # doc_fragments are allowed to specify a fragment var other than DOCUMENTATION # with a . separator; this is complicated by collections-hosted doc_fragments that # use the same separator. Assume it's collection-hosted normally first, try to load # as-specified. If failure, assume the right-most component is a var, split it off, # and retry the load. for fragment_slug in fragments: fragment_name = fragment_slug fragment_var = 'DOCUMENTATION' fragment_class = fragment_loader.get(fragment_name) if fragment_class is None and '.' in fragment_slug: splitname = fragment_slug.rsplit('.', 1) fragment_name = splitname[0] fragment_var = splitname[1].upper() fragment_class = fragment_loader.get(fragment_name) if fragment_class is None: unknown_fragments.append(fragment_slug) continue fragment_yaml = getattr(fragment_class, fragment_var, None) if fragment_yaml is None: if fragment_var != 'DOCUMENTATION': # if it's asking for something specific that's missing, that's an error unknown_fragments.append(fragment_slug) continue else: fragment_yaml = '{}' # TODO: this is still an error later since we require 'options' below... fragment = AnsibleLoader(fragment_yaml, file_name=filename).get_single_data() real_collection_name = 'ansible.builtin' real_fragment_name = getattr(fragment_class, '_load_name') if real_fragment_name.startswith('ansible_collections.'): real_collection_name = '.'.join(real_fragment_name.split('.')[1:3]) add_collection_to_versions_and_dates(fragment, real_collection_name, is_module=is_module) if 'notes' in fragment: notes = fragment.pop('notes') if notes: if 'notes' not in doc: doc['notes'] = [] doc['notes'].extend(notes) if 'seealso' in fragment: seealso = fragment.pop('seealso') if seealso: if 'seealso' not in doc: doc['seealso'] = [] doc['seealso'].extend(seealso) if 'options' not in fragment and 'attributes' not in fragment: raise Exception("missing options or attributes in fragment (%s), possibly misformatted?: %s" % (fragment_name, filename)) # ensure options themselves are directly merged for doc_key in ['options', 'attributes']: if doc_key in fragment: if doc_key in doc: try: merge_fragment(doc[doc_key], fragment.pop(doc_key)) except Exception as e: raise AnsibleError("%s %s (%s) of unknown type: %s" % (to_native(e), doc_key, fragment_name, filename)) else: doc[doc_key] = fragment.pop(doc_key) # merge rest of the sections try: merge_fragment(doc, fragment) except Exception as e: raise AnsibleError("%s (%s) of unknown type: %s" % (to_native(e), fragment_name, filename)) if unknown_fragments: raise AnsibleError('unknown doc_fragment(s) in file {0}: {1}'.format(filename, to_native(', '.join(unknown_fragments))))
21,170
def test_span_sents(doc, doc_not_parsed) : # Entire doc span = Span(doc, 0, len(doc)) sentences = list(span.sents) assert len(sentences) == 3 # Overlapping with 2 sentences span = Span(doc, 3, 6) sentences = list(span.sents) assert len(sentences) == 2 # Beginning of the Doc. Full sentence span = Span(doc, 0, 4) sentences = list(span.sents) assert len(sentences) == 1 # Beginning of the Doc. Part of a sentence span = Span(doc, 0, 3) sentences = list(span.sents) assert len(sentences) == 1 # End of the Doc. Overlapping with 2 senteces span = Span(doc, 9, 14) sentences = list(span.sents) assert len(sentences) == 2 # End of the Doc. Full sentence span = Span(doc, 10, 14) sentences = list(span.sents) assert len(sentences) == 1 # End of the Doc. Partial sentence span = Span(doc, 11, 14) sentences = list(span.sents) assert len(sentences) == 1 # Empty Span span = Span(doc, 0, 0) sentences = list(span.sents) assert len(sentences) == 1 span = Span(doc_not_parsed, 0, 3) with pytest.raises(ValueError) : sentences = list(span.sents) def user_hook(span) : yield span doc.user_hooks['sents'] = user_hook span = Span(doc, 0, 4) sentences = list(span.sents) assert len(sentences) == 1 assert sentences[0] == span
def test_span_sents(doc, doc_not_parsed) : # Entire doc span = Span(doc, 0, len(doc)) assert len(list(span.sents)) == 3 # Overlapping with 2 sentences span = Span(doc, 3, 6) sentences = list(span.sents) assert len(sentences) == 2 # Beginning of the Doc. Full sentence span = Span(doc, 0, 4) sentences = list(span.sents) assert len(sentences) == 1 # Beginning of the Doc. Part of a sentence span = Span(doc, 0, 3) sentences = list(span.sents) assert len(sentences) == 1 # End of the Doc. Overlapping with 2 senteces span = Span(doc, 9, 14) sentences = list(span.sents) assert len(sentences) == 2 # End of the Doc. Full sentence span = Span(doc, 10, 14) sentences = list(span.sents) assert len(sentences) == 1 # End of the Doc. Partial sentence span = Span(doc, 11, 14) sentences = list(span.sents) assert len(sentences) == 1 # Empty Span span = Span(doc, 0, 0) sentences = list(span.sents) assert len(sentences) == 1 span = Span(doc_not_parsed, 0, 3) with pytest.raises(ValueError) : sentences = list(span.sents) def user_hook(span) : yield span doc.user_hooks['sents'] = user_hook span = Span(doc, 0, 4) sentences = list(span.sents) assert len(sentences) == 1 assert sentences[0] == span
17,893
def build_formdata(form_object): """Convert HTML form data to GitHub API data. Summary -> title Version -> part of body URL -> part of body Category -> labels Details -> part of body Description -> part of body Browser -> part of body, labels OS -> part of body, labels Tested Elsewhere -> body Image Upload -> part of body We'll try to parse the Browser and come up with a browser label, as well as labels like mobile, desktop, tablet. Here's a description of what the Issues API expects to create an issue -------------------------------------------------------------------------- | title | string | The title of the issue. Required. | | body | string | The contents of the issue. | | labels | array of strings | Labels to associate with this issue. | | milestone| integer | Milestone to associate with this issue. | -------------------------------------------------------------------------- NOTE: Only users with push access can set labels for new issues. Labels are silently dropped otherwise. NOTE: intentionally leaving out `assignee`. NOTE: Add milestone "needstriage" when creating a new issue """ # Do domain extraction for adding to the summary/title # form_object always returns a unicode string url = form_object.get('url') normalized_url = normalize_url(url) domain = domain_name(normalized_url) problem_summary = get_problem_summary(form_object.get('problem_category')) switch_problem_subtype(form_object.get('problem_category')) if domain: summary = '{0} - {1}'.format(domain, problem_summary) else: summary = '{0} - {1}'.format(normalized_url, problem_summary) metadata_keys = ['browser', 'ua_header', 'reported_with'] extra_labels = form_object.get('extra_labels', None) if extra_labels: metadata_keys.append('extra_labels') clean = re.compile('<.*?>') problem_type_stripped = re.sub(clean, '', get_radio_button_label( form_object.get('problem_category'), problem_choices)) browser_tested_on = form_object.get('browser_test') if browser_tested_on is None: browser_tested_on = "Unknown" formdata = { 'metadata': get_metadata(metadata_keys, form_object), 'url': normalized_url, 'browser': normalize_metadata(form_object.get('browser')), 'os': normalize_metadata(form_object.get('os')), 'problem_type': problem_type_stripped, 'browser_test_type': browser_tested_on, 'description': form_object.get('description'), 'steps_reproduce': form_object.get('steps_reproduce'), } # Preparing the body body = """{metadata} **URL**: {url} **Browser / Version**: {browser} **Operating System**: {os} **Tested Another Browser**: {browser_test_type} **Problem type**: {problem_type} **Description**: {description} **Steps to Reproduce**: {steps_reproduce} """.format(**formdata) # Append details info, if any. details = form_object.get('details') if details: body += build_details(details) # Add the image, if there was one. if form_object.get('image_upload') is not None: body += '\n\n![Screenshot of the site issue]({image_url})'.format( image_url=form_object.get('image_upload').get('url')) # Append contact information if available contact = form_object.get('contact', '') # This probably deserves its own function. contact = contact.strip() contact = contact.replace('@', '') if contact and not g.user: body += '\n\nSubmitted in the name of `@{contact}`'.format( contact=contact) # Append "from webcompat.com" message to bottom (for GitHub issue viewers) body += '\n\n{0}'.format(GITHUB_HELP) rv = {'title': summary, 'body': body} return rv
def build_formdata(form_object): """Convert HTML form data to GitHub API data. Summary -> title Version -> part of body URL -> part of body Category -> labels Details -> part of body Description -> part of body Browser -> part of body, labels OS -> part of body, labels Tested Elsewhere -> body Image Upload -> part of body We'll try to parse the Browser and come up with a browser label, as well as labels like mobile, desktop, tablet. Here's a description of what the Issues API expects to create an issue -------------------------------------------------------------------------- | title | string | The title of the issue. Required. | | body | string | The contents of the issue. | | labels | array of strings | Labels to associate with this issue. | | milestone| integer | Milestone to associate with this issue. | -------------------------------------------------------------------------- NOTE: Only users with push access can set labels for new issues. Labels are silently dropped otherwise. NOTE: intentionally leaving out `assignee`. NOTE: Add milestone "needstriage" when creating a new issue """ # Do domain extraction for adding to the summary/title # form_object always returns a unicode string url = form_object.get('url') normalized_url = normalize_url(url) domain = domain_name(normalized_url) problem_summary = get_problem_summary(form_object.get('problem_category')) switch_problem_subtype(form_object.get('problem_category')) if domain: summary = '{0} - {1}'.format(domain, problem_summary) else: summary = '{0} - {1}'.format(normalized_url, problem_summary) metadata_keys = ['browser', 'ua_header', 'reported_with'] extra_labels = form_object.get('extra_labels', None) if extra_labels: metadata_keys.append('extra_labels') clean = re.compile('<.*?>') problem_type_stripped = re.sub(clean, '', get_radio_button_label( form_object.get('problem_category'), problem_choices)) browser_tested_on = form_object.get('browser_test', 'Unknown') if browser_tested_on is None: browser_tested_on = "Unknown" formdata = { 'metadata': get_metadata(metadata_keys, form_object), 'url': normalized_url, 'browser': normalize_metadata(form_object.get('browser')), 'os': normalize_metadata(form_object.get('os')), 'problem_type': problem_type_stripped, 'browser_test_type': browser_tested_on, 'description': form_object.get('description'), 'steps_reproduce': form_object.get('steps_reproduce'), } # Preparing the body body = """{metadata} **URL**: {url} **Browser / Version**: {browser} **Operating System**: {os} **Tested Another Browser**: {browser_test_type} **Problem type**: {problem_type} **Description**: {description} **Steps to Reproduce**: {steps_reproduce} """.format(**formdata) # Append details info, if any. details = form_object.get('details') if details: body += build_details(details) # Add the image, if there was one. if form_object.get('image_upload') is not None: body += '\n\n![Screenshot of the site issue]({image_url})'.format( image_url=form_object.get('image_upload').get('url')) # Append contact information if available contact = form_object.get('contact', '') # This probably deserves its own function. contact = contact.strip() contact = contact.replace('@', '') if contact and not g.user: body += '\n\nSubmitted in the name of `@{contact}`'.format( contact=contact) # Append "from webcompat.com" message to bottom (for GitHub issue viewers) body += '\n\n{0}'.format(GITHUB_HELP) rv = {'title': summary, 'body': body} return rv
10,844
def jit(signature_or_function=None, locals={}, cache=False, pipeline_class=None, boundscheck=False, **options): """ This decorator is used to compile a Python function into native code. Args ----- signature_or_function: The (optional) signature or list of signatures to be compiled. If not passed, required signatures will be compiled when the decorated function is called, depending on the argument values. As a convenience, you can directly pass the function to be compiled instead. locals: dict Mapping of local variable names to Numba types. Used to override the types deduced by Numba's type inference engine. target (deprecated): str Specifies the target platform to compile for. Valid targets are cpu, gpu, npyufunc, and cuda. Defaults to cpu. pipeline_class: type numba.compiler.CompilerBase The compiler pipeline type for customizing the compilation stages. options: For a cpu target, valid options are: nopython: bool Set to True to disable the use of PyObjects and Python API calls. The default behavior is to allow the use of PyObjects and Python API. Default value is False. forceobj: bool Set to True to force the use of PyObjects for every value. Default value is False. looplift: bool Set to True to enable jitting loops in nopython mode while leaving surrounding code in object mode. This allows functions to allocate NumPy arrays and use Python objects, while the tight loops in the function can still be compiled in nopython mode. Any arrays that the tight loop uses should be created before the loop is entered. Default value is True. error_model: str The error-model affects divide-by-zero behavior. Valid values are 'python' and 'numpy'. The 'python' model raises exception. The 'numpy' model sets the result to *+/-inf* or *nan*. Default value is 'python'. inline: str or callable The inline option will determine whether a function is inlined at into its caller if called. String options are 'never' (default) which will never inline, and 'always', which will always inline. If a callable is provided it will be called with the call expression node that is requesting inlining, the caller's IR and callee's IR as arguments, it is expected to return Truthy as to whether to inline. NOTE: This inlining is performed at the Numba IR level and is in no way related to LLVM inlining. boundscheck: bool Set to True to enable bounds checking for array indices. Out of bounds accesses will raise IndexError. The default is to not do bounds checking. If bounds checking is disabled, out of bounds accesses can produce garbage results or segfaults. However, enabling bounds checking will slow down typical functions, so it is recommended to only use this flag for debugging. You can also set the NUMBA_BOUNDSCHECK environment variable to 0 or 1 to globally override this flag. Returns -------- A callable usable as a compiled function. Actual compiling will be done lazily if no explicit signatures are passed. Examples -------- The function can be used in the following ways: 1) jit(signatures, target='cpu', **targetoptions) -> jit(function) Equivalent to: d = dispatcher(function, targetoptions) for signature in signatures: d.compile(signature) Create a dispatcher object for a python function. Then, compile the function with the given signature(s). Example: @jit("int32(int32, int32)") def foo(x, y): return x + y @jit(["int32(int32, int32)", "float32(float32, float32)"]) def bar(x, y): return x + y 2) jit(function, target='cpu', **targetoptions) -> dispatcher Create a dispatcher function object that specializes at call site. 
Examples: @jit def foo(x, y): return x + y @jit(target='cpu', nopython=True) def bar(x, y): return x + y """ if 'argtypes' in options: raise DeprecationError(_msg_deprecated_signature_arg.format('argtypes')) if 'restype' in options: raise DeprecationError(_msg_deprecated_signature_arg.format('restype')) if options.get('nopython', False) and options.get('forceobj', False): raise ValueError("Only one of 'nopython' or 'forceobj' can be True.") if 'target' in options: target = options.pop('target') warnings.warn("The 'target' keyword argument is deprecated for the numba.jit decorator.", NumbaDeprecationWarning) else: target = options.pop('_target', 'cpu') options['boundscheck'] = boundscheck # Handle signature if signature_or_function is None: # No signature, no function pyfunc = None sigs = None elif isinstance(signature_or_function, list): # A list of signatures is passed pyfunc = None sigs = signature_or_function elif sigutils.is_signature(signature_or_function): # A single signature is passed pyfunc = None sigs = [signature_or_function] else: # A function is passed pyfunc = signature_or_function sigs = None dispatcher_args = {} if pipeline_class is not None: dispatcher_args['pipeline_class'] = pipeline_class wrapper = _jit(sigs, locals=locals, target=target, cache=cache, targetoptions=options, **dispatcher_args) if pyfunc is not None: return wrapper(pyfunc) else: return wrapper
def jit(signature_or_function=None, locals={}, cache=False, pipeline_class=None, boundscheck=False, **options): """ This decorator is used to compile a Python function into native code. Args ----- signature_or_function: The (optional) signature or list of signatures to be compiled. If not passed, required signatures will be compiled when the decorated function is called, depending on the argument values. As a convenience, you can directly pass the function to be compiled instead. locals: dict Mapping of local variable names to Numba types. Used to override the types deduced by Numba's type inference engine. target (deprecated): str Specifies the target platform to compile for. Valid targets are cpu, gpu, npyufunc, and cuda. Defaults to cpu. pipeline_class: type numba.compiler.CompilerBase The compiler pipeline type for customizing the compilation stages. options: For a cpu target, valid options are: nopython: bool Set to True to disable the use of PyObjects and Python API calls. The default behavior is to allow the use of PyObjects and Python API. Default value is False. forceobj: bool Set to True to force the use of PyObjects for every value. Default value is False. looplift: bool Set to True to enable jitting loops in nopython mode while leaving surrounding code in object mode. This allows functions to allocate NumPy arrays and use Python objects, while the tight loops in the function can still be compiled in nopython mode. Any arrays that the tight loop uses should be created before the loop is entered. Default value is True. error_model: str The error-model affects divide-by-zero behavior. Valid values are 'python' and 'numpy'. The 'python' model raises exception. The 'numpy' model sets the result to *+/-inf* or *nan*. Default value is 'python'. inline: str or callable The inline option will determine whether a function is inlined at into its caller if called. String options are 'never' (default) which will never inline, and 'always', which will always inline. If a callable is provided it will be called with the call expression node that is requesting inlining, the caller's IR and callee's IR as arguments, it is expected to return Truthy as to whether to inline. NOTE: This inlining is performed at the Numba IR level and is in no way related to LLVM inlining. boundscheck: bool Set to True to enable bounds checking for array indices. Out of bounds accesses will raise IndexError. The default is to not do bounds checking. If bounds checking is disabled, out of bounds accesses can produce garbage results or segfaults. However, enabling bounds checking will slow down typical functions, so it is recommended to only use this flag for debugging. You can also set the NUMBA_BOUNDSCHECK environment variable to 0 or 1 to globally override this flag. Returns -------- A callable usable as a compiled function. Actual compiling will be done lazily if no explicit signatures are passed. Examples -------- The function can be used in the following ways: 1) jit(signatures, target='cpu', **targetoptions) -> jit(function) Equivalent to: d = dispatcher(function, targetoptions) for signature in signatures: d.compile(signature) Create a dispatcher object for a python function. Then, compile the function with the given signature(s). Example: @jit("int32(int32, int32)") def foo(x, y): return x + y @jit(["int32(int32, int32)", "float32(float32, float32)"]) def bar(x, y): return x + y 2) jit(function, target='cpu', **targetoptions) -> dispatcher Create a dispatcher function object that specializes at call site. 
Examples: @jit def foo(x, y): return x + y @jit(target='cpu', nopython=True) def bar(x, y): return x + y """ if 'argtypes' in options: raise DeprecationError(_msg_deprecated_signature_arg.format('argtypes')) if 'restype' in options: raise DeprecationError(_msg_deprecated_signature_arg.format('restype')) if options.get('nopython', False) and options.get('forceobj', False): raise ValueError("Only one of 'nopython' or 'forceobj' can be True.") if 'target' in options: target = options.pop('target') warnings.warn("The 'target' keyword argument is deprecated.", NumbaDeprecationWarning) else: target = options.pop('_target', 'cpu') options['boundscheck'] = boundscheck # Handle signature if signature_or_function is None: # No signature, no function pyfunc = None sigs = None elif isinstance(signature_or_function, list): # A list of signatures is passed pyfunc = None sigs = signature_or_function elif sigutils.is_signature(signature_or_function): # A single signature is passed pyfunc = None sigs = [signature_or_function] else: # A function is passed pyfunc = signature_or_function sigs = None dispatcher_args = {} if pipeline_class is not None: dispatcher_args['pipeline_class'] = pipeline_class wrapper = _jit(sigs, locals=locals, target=target, cache=cache, targetoptions=options, **dispatcher_args) if pyfunc is not None: return wrapper(pyfunc) else: return wrapper
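A small usage sketch of the two call styles the docstring describes, lazy compilation at call time and eager compilation for an explicit signature; it assumes numba is installed and uses only options named above.

# Usage sketch for the decorator documented above; assumes numba is available.
from numba import jit

@jit(nopython=True)            # compiled lazily, on first call, in nopython mode
def add(x, y):
    return x + y

@jit("int32(int32, int32)")    # compiled eagerly for the given signature
def add_i32(x, y):
    return x + y

print(add(1, 2), add_i32(3, 4))   # 3 7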
28,019
def get_user_dn(con, account_base_dn, account_pattern, scope=ldap.SCOPE_SUBTREE, user_dn_postfix_preference=None): """ Search for the user dn based on the account pattern. Return the full user dn, or None if the search failed. user_dn_postfix_preference User DN postfix preference value can be used to select one preferred user DN if multiple DN entries are found by the LDAP search. The configured value will be matched and the first match will be used. If only one DN was found, this postfix matching will not be used. """ with ldap_error_handler(): # Attribute values MAY contain any type of data. Before you use a # value, call 'bytes_to_str' helper function to convert it to text. user_data = con.search_s(account_base_dn, scope, account_pattern) user_dns = [] if user_data: # User found; use the user DN from the first result. if len(user_data) > 1: for user_info in user_data: user_dns.append(bytes_to_str(user_info[0])) else: user_dns.append(bytes_to_str(user_data[0][0])) LOG.debug("Found user dns: %s", ', '.join(user_dns)) if len(user_dns) > 1 and user_dn_postfix_preference: for user_dn in user_dns: if user_dn.endswith(user_dn_postfix_preference): LOG.debug("Selected user dn: %s", user_dn) return user_dn elif len(user_dns) >= 1: LOG.debug("Selected user dn: %s", user_dns[0]) return user_dns[0] LOG.debug("Searching for user failed with pattern: %s", account_pattern) LOG.debug("Account base DN: %s", account_base_dn) return None
def get_user_dn(con, account_base_dn, account_pattern, scope=ldap.SCOPE_SUBTREE, user_dn_postfix_preference=None): """ Search for the user dn based on the account pattern. Return the full user dn, or None if the search failed. user_dn_postfix_preference User DN postfix preference value can be used to select one preferred user DN if multiple DN entries are found by the LDAP search. The configured value will be matched and the first match will be used. If only one DN was found, this postfix matching will not be used. """ with ldap_error_handler(): # Attribute values MAY contain any type of data. Before you use a # value, call 'bytes_to_str' helper function to convert it to text. user_data = con.search_s(account_base_dn, scope, account_pattern) user_dns = [] if user_data: # User found; use the user DN from the first result. if len(user_data) > 1: for user_info in user_data: user_dns.append(bytes_to_str(user_info[0])) else: user_dns.append(bytes_to_str(user_data[0][0])) LOG.debug("Found user dns: %s", ', '.join(user_dns)) if len(user_dns) > 1 and user_dn_postfix_preference: for user_dn in user_dns: if user_dn.endswith(user_dn_postfix_preference): LOG.debug("Selected user dn: %s", user_dn) return user_dn elif len(user_dns) > 0: LOG.debug("Selected user dn: %s", user_dns[0]) return user_dns[0] LOG.debug("Searching for user failed with pattern: %s", account_pattern) LOG.debug("Account base DN: %s", account_base_dn) return None
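The postfix-preference selection above reduces to an endswith scan over the candidate DNs; this self-contained sketch reproduces just that branch with hypothetical DN strings, so no LDAP connection is needed.

# Sketch of the postfix-preference selection, mirroring the branches above:
# with multiple DNs and a preference, only a matching postfix is returned.
def pick_user_dn(user_dns, postfix_preference=None):
    if len(user_dns) > 1 and postfix_preference:
        for dn in user_dns:
            if dn.endswith(postfix_preference):
                return dn
    elif user_dns:
        return user_dns[0]
    return None

dns = ["cn=alice,ou=guests,dc=example,dc=org",
       "cn=alice,ou=staff,dc=example,dc=org"]
print(pick_user_dn(dns, "ou=staff,dc=example,dc=org"))  # the staff DN wins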
42,156
def validate_config(rules: Mapping = AntiSpamConfig.rules) -> dict: """Validates the antispam configs.""" validation_errors = {} for name, config in rules.items(): if name not in RULE_FUNCTION_MAPPING: log.error( f"Unrecognized antispam rule `{name}`. " f"Valid rules are: {', '.join(RULE_FUNCTION_MAPPING)}" ) validation_errors[name] = f"`{name}` is not recognized as an antispam rule." continue for required_key in ('interval', 'max'): if required_key not in config: log.error( f"`{required_key}` is required but was not " f"set in rule `{name}`'s configuration." ) validation_errors[name] = f"Key `{required_key}` is required but not set for rule `{name}`" return validation_errors
def validate_config(rules: Mapping = AntiSpamConfig.rules) -> Dict[str, str]: """Validates the antispam configs.""" validation_errors = {} for name, config in rules.items(): if name not in RULE_FUNCTION_MAPPING: log.error( f"Unrecognized antispam rule `{name}`. " f"Valid rules are: {', '.join(RULE_FUNCTION_MAPPING)}" ) validation_errors[name] = f"`{name}` is not recognized as an antispam rule." continue for required_key in ('interval', 'max'): if required_key not in config: log.error( f"`{required_key}` is required but was not " f"set in rule `{name}`'s configuration." ) validation_errors[name] = f"Key `{required_key}` is required but not set for rule `{name}`" return validation_errors
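A sketch of the rules mapping shape the validator checks: every rule needs an 'interval' and a 'max' key, and its name must appear in RULE_FUNCTION_MAPPING. The rule names below are hypothetical examples, and the loop only re-implements the required-key check for illustration.

# Hypothetical rules mapping in the shape validate_config() expects.
rules = {
    "duplicates": {"interval": 10, "max": 3},   # has both required keys
    "mentions": {"interval": 10},               # missing 'max' -> validation error
}
for name, config in rules.items():
    missing = [key for key in ("interval", "max") if key not in config]
    print(name, "ok" if not missing else f"missing: {missing}")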
3,831
def fastCopyAndTranspose(a): """ .. deprecated:: 1.24 fastCopyAndTranspose is deprecated and will be removed. Use the copy and transpose methods instead, e.g. ``arr.copy().T`` """ warnings.warn( "fastCopyAndTranspose is deprecated. Use ``arr.copy().T`` instead", DeprecationWarning, stacklevel=2, ) return _fastCopyAndTranspose(a)
def fastCopyAndTranspose(a): """ .. deprecated:: 1.24 fastCopyAndTranspose is deprecated and will be removed. Use the copy and transpose methods instead, e.g. ``arr.copy().T`` """ warnings.warn( "fastCopyAndTranspose is deprecated. Use ``arr.T.copy()`` instead", DeprecationWarning, stacklevel=2, ) return _fastCopyAndTranspose(a)
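The two suggested replacements produce the same values but differ in memory layout: `arr.copy().T` is a transposed view of a fresh copy, while `arr.T.copy()` materialises a C-contiguous copy of the transpose. A short NumPy check:

import numpy as np

a = np.arange(6).reshape(2, 3)
b = a.copy().T          # transposed view of a copy (not C-contiguous for 2x3)
c = a.T.copy()          # C-contiguous copy of the transpose
print(np.array_equal(b, c))                               # True: same values
print(b.flags['C_CONTIGUOUS'], c.flags['C_CONTIGUOUS'])   # False True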
32,387
def test_module(client: Client) -> str: """Tests API connectivity and authentication. Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful. Raises exceptions if something goes wrong. :type client: ``Client`` :param client: client to use :return: 'ok' if test passed, anything else will fail the test. :rtype: ``str`` """ try: client.test() message = 'ok' except DemistoException as e: if 'Forbidden' in str(e) or 'Authorization' in str(e): message = 'Authorization Error: make sure API Key is correctly set' else: raise e return message
def test_module(client: Client) -> str: """Tests API connectivity and authentication. Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful. Raises exceptions if something goes wrong. :type client: ``Client`` :param client: client to use :return: 'ok' if test passed, anything else will fail the test. :rtype: ``str`` """ try: client.test() message = 'ok' except DemistoException as e: if 'Forbidden' in str(e) or 'Authorization' in str(e): message = 'Authorization Error: make sure API Key is correctly set' else: raise return message
58,539
def get_deployment(name: str): """Retrieve RayServeHandle for service endpoint to invoke it from Python. Args: name(str): name of the deployment. This must have already been. deployed Returns: ServeDeployment """ try: backend_info, route = _get_global_client().get_deployment_info(name) except KeyError: raise KeyError(f"Deployment {name} was not found. " "Did you call Deployment.deploy()?") return make_deployment_cls( backend_info.replica_config.backend_def, name, backend_info.backend_config, version=backend_info.version, init_args=backend_info.replica_config.init_args, ray_actor_options=backend_info.replica_config.ray_actor_options)
def get_deployment(name: str): """Retrieve RayServeHandle for service endpoint to invoke it from Python. Args: name(str): name of the deployment. This must have already been deployed. Returns: ServeDeployment """ try: backend_info, route = _get_global_client().get_deployment_info(name) except KeyError: raise KeyError(f"Deployment {name} was not found. " "Did you call Deployment.deploy()?") return make_deployment_cls( backend_info.replica_config.backend_def, name, backend_info.backend_config, version=backend_info.version, init_args=backend_info.replica_config.init_args, ray_actor_options=backend_info.replica_config.ray_actor_options)
31,390
def raise_if_hash_not_valid(file_hash: str): """Raises an error if file_hash is not valid Args: file_hash: file hash Raises: ValueError: if hash is not sha256, sha1, md5 Examples: >>> raise_if_hash_not_valid('not a hash') Traceback (most recent call last): ... ValueError: Hash not a hash is not of type sha256, sha1 or md5 >>> raise_if_hash_not_valid('7e641f6b9706d860baf09fe418b6cc87') """ if get_hash_type(file_hash) not in ('sha256', 'sha1', 'md5'): raise ValueError(f'Hash {file_hash} is not of type sha256, sha1 or md5')
def raise_if_hash_not_valid(file_hash: str): """Raises an error if file_hash is not valid Args: file_hash: file hash Raises: ValueError: if hash is not sha256, sha1, md5 Examples: >>> raise_if_hash_not_valid('not a hash') Traceback (most recent call last): ... ValueError: Hash not a hash is not of type sha256, sha1 or md5 >>> raise_if_hash_not_valid('7e641f6b9706d860baf09fe418b6cc87') """ if get_hash_type(file_hash) not in ('sha256', 'sha1', 'md5'): raise ValueError(f'Hash {file_hash} is not of type SHA-256, SHA-1 or MD5')
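`get_hash_type` comes from the shared integration code and is not shown here; a minimal stand-in that classifies by hex-digest length is sketched below purely for illustration, since length-based classification is an assumption rather than the documented behaviour of the real helper.

# Hypothetical stand-in for get_hash_type(), classifying by hex length only.
def get_hash_type_sketch(file_hash: str) -> str:
    return {32: 'md5', 40: 'sha1', 64: 'sha256'}.get(len(file_hash), 'Unknown')

print(get_hash_type_sketch('7e641f6b9706d860baf09fe418b6cc87'))  # md5 (32 hex chars)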
24,793
def get_subscript_const_value(node: nodes.Subscript) -> nodes.Const: """ Returns the value 'subscript.slice' of a Subscript node. :param node: Subscript Node to extract value from :returns: Const Node containing subscript value :raises InferredTypeError: if the subscript node cannot be inferred as a Const """ inferred = safe_infer(node.slice) if not isinstance(inferred, nodes.Const): raise InferredTypeError("Subscript.slice cannot be inferred as an nodes.Const") return inferred
def get_subscript_const_value(node: nodes.Subscript) -> nodes.Const: """ Returns the value 'subscript.slice' of a Subscript node. :param node: Subscript Node to extract value from :returns: Const Node containing subscript value :raises InferredTypeError: if the subscript node cannot be inferred as a Const """ inferred = safe_infer(node.slice) if not isinstance(inferred, nodes.Const): raise InferredTypeError("Subscript.slice cannot be inferred as a nodes.Const") return inferred
28,236
def parse_dcv_measurement_response(response: str) -> dict: """ Extract status, channel number, value and accompanying metadata from the string and return them as a dictionary. Args: response: Response str to lrn_query for the MFCMU. """ match = re.match(_pattern_lrn, response) if match is None: raise ValueError(f"{response!r} didn't match {_pattern_lrn!r} pattern") dd = match.groupdict() d = cast(Dict[str, Union[str, float]], dd) return d
def parse_dcv_measurement_response(response: str) -> Dict[str, Union[str, float]]: """ Extract status, channel number, value and accompanying metadata from the string and return them as a dictionary. Args: response: Response str to lrn_query for the MFCMU. """ match = re.match(_pattern_lrn, response) if match is None: raise ValueError(f"{response!r} didn't match {_pattern_lrn!r} pattern") dd = match.groupdict() d = cast(Dict[str, Union[str, float]], dd) return d
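The function is essentially a named-group regex match plus a typing cast; the self-contained sketch below shows that pattern with a made-up regex and response string, since the real _pattern_lrn is defined elsewhere in the driver.

# Hypothetical pattern and response illustrating the groupdict + cast idiom.
import re
from typing import Dict, Union, cast

_pattern_sketch = re.compile(r"(?P<status>[A-Z])(?P<chnr>[0-9]+);(?P<value>[-+.0-9E]+)")
match = _pattern_sketch.match("N1;+1.5E-06")
if match is None:
    raise ValueError("response didn't match the sketch pattern")
parsed = cast(Dict[str, Union[str, float]], match.groupdict())
print(parsed)   # {'status': 'N', 'chnr': '1', 'value': '+1.5E-06'}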
34,169
def add_subparser( subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser] ): import rasa.nlu.convert as convert data_parser = subparsers.add_parser( "data", conflict_handler="resolve", formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=parents, help="Utils for the Rasa training files.", ) data_parser.set_defaults(func=lambda _: data_parser.print_help(None)) data_subparsers = data_parser.add_subparsers() convert_parser = data_subparsers.add_parser( "convert", formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=parents, help="Converts Rasa data between different formats.", ) convert_parser.set_defaults(func=lambda _: convert_parser.print_help(None)) convert_subparsers = convert_parser.add_subparsers() convert_nlu_parser = convert_subparsers.add_parser( "nlu", formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=parents, help="Converts NLU data between markdown and json.", ) convert_nlu_parser.set_defaults(func=convert.main) arguments.set_convert_arguments(convert_nlu_parser) split_parser = data_subparsers.add_parser( "split", formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=parents, help="Splits Rasa data in training and test data.", ) split_parser.set_defaults(func=lambda _: split_parser.print_help(None)) split_subparsers = split_parser.add_subparsers() nlu_split_parser = split_subparsers.add_parser( "nlu", parents=parents, formatter_class=argparse.ArgumentDefaultsHelpFormatter, help="Performs a split of your NLU data according to the specified " "percentages.", ) nlu_split_parser.set_defaults(func=split_nlu_data) arguments.set_split_arguments(nlu_split_parser)
def add_subparser( subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser] ): import rasa.nlu.convert as convert data_parser = subparsers.add_parser( "data", conflict_handler="resolve", formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=parents, help="Utils for the Rasa training files.", ) data_parser.set_defaults(func=lambda _: data_parser.print_help(None)) data_subparsers = data_parser.add_subparsers() convert_parser = data_subparsers.add_parser( "convert", formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=parents, help="Converts Rasa data between different formats.", ) convert_parser.set_defaults(func=lambda _: convert_parser.print_help(None)) convert_subparsers = convert_parser.add_subparsers() convert_nlu_parser = convert_subparsers.add_parser( "nlu", formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=parents, help="Converts NLU data between markdown and json.", ) convert_nlu_parser.set_defaults(func=convert.main) arguments.set_convert_arguments(convert_nlu_parser) split_parser = data_subparsers.add_parser( "split", formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=parents, help="Splits Rasa data in training and test data.", ) split_parser.set_defaults(func=lambda _: split_parser.print_help(None)) split_subparsers = split_parser.add_subparsers() nlu_split_parser = split_subparsers.add_parser( "nlu", parents=parents, formatter_class=argparse.ArgumentDefaultsHelpFormatter, help="Performs a split of your NLU data according to the specified " "percentages.", ) nlu_split_parser.set_defaults(func=split_nlu_data) arguments.set_split_arguments(nlu_split_parser)
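A self-contained sketch of the nested sub-parser tree this function builds ("data" -> "convert"/"split" -> "nlu"), with a hypothetical top-level parser and stand-in handlers, since the real handlers (convert.main, split_nlu_data) and argument helpers live in other Rasa modules.

# Hypothetical scaffolding showing the sub-command shape wired up above.
import argparse

parser = argparse.ArgumentParser(prog="rasa")
subparsers = parser.add_subparsers()
data = subparsers.add_parser("data", help="Utils for the Rasa training files.")
data_sub = data.add_subparsers()
split = data_sub.add_parser("split", help="Splits Rasa data in training and test data.")
split_sub = split.add_subparsers()
nlu = split_sub.add_parser("nlu", help="Performs a split of your NLU data.")
nlu.set_defaults(func=lambda args: print("would split NLU data"))

args = parser.parse_args(["data", "split", "nlu"])
args.func(args)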
5,628
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba', fs=None): """Complete IIR digital and analog filter design. Given passband and stopband frequencies and gains, construct an analog or digital IIR filter of minimum order for a given basic type. Return the output in numerator, denominator ('ba'), pole-zero ('zpk') or second order sections ('sos') form. Parameters ---------- wp, ws : float Passband and stopband edge frequencies. For digital filters, these are in the same units as `fs`. By default, `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, where 1 is the Nyquist frequency. For example: - Lowpass: wp = 0.2, ws = 0.3 - Highpass: wp = 0.3, ws = 0.2 - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s). gpass : float The maximum loss in the passband (dB). gstop : float The minimum attenuation in the stopband (dB). analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. ftype : str, optional The type of IIR filter to design: - Butterworth : 'butter' - Chebyshev I : 'cheby1' - Chebyshev II : 'cheby2' - Cauer/elliptic: 'ellip' - Bessel/Thomson: 'bessel' output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba', for backwards compatibility, but 'sos' should be used for general-purpose filtering. fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output=='sos'``. See Also -------- butter : Filter design using order and critical points cheby1, cheby2, ellip, bessel buttord : Find order and critical points from passband and stopband spec cheb1ord, cheb2ord, ellipord iirfilter : General filter design using order and critical frequencies Notes ----- The ``'sos'`` output parameter was added in 0.16.0. Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> import matplotlib.ticker >>> wp = 0.2 >>> ws = 0.3 >>> gpass = 1 >>> gstop = 40 >>> system = signal.iirdesign(wp, ws, gpass, gstop) >>> w, h = signal.freqz(*system) >>> fig, ax1 = plt.subplots() >>> ax1.set_title('Digital filter frequency response') >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b') >>> ax1.set_ylabel('Amplitude [dB]', color='b') >>> ax1.set_xlabel('Frequency [rad/sample]') >>> ax1.grid() >>> ax1.set_ylim([-120, 20]) >>> ax2 = ax1.twinx() >>> angles = np.unwrap(np.angle(h)) >>> ax2.plot(w, angles, 'g') >>> ax2.set_ylabel('Angle (radians)', color='g') >>> ax2.grid() >>> ax2.axis('tight') >>> ax2.set_ylim([-6, 1]) >>> nticks = 8 >>> ax1.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks)) >>> ax2.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks)) """ try: ordfunc = filter_dict[ftype][1] except KeyError: raise ValueError("Invalid IIR filter type: %s" % ftype) except IndexError: raise ValueError(("%s does not have order selection. 
Use " "iirfilter function.") % ftype) wp = atleast_1d(wp) ws = atleast_1d(ws) band_type = 2 * (len(wp) - 1) band_type += 1 if wp[0] >= ws[0]: band_type += 1 btype = {1: 'lowpass', 2: 'highpass', 3: 'bandstop', 4: 'bandpass'}[band_type] N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog, fs=fs) return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, ftype=ftype, output=output, fs=fs)
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba', fs=None): """Complete IIR digital and analog filter design. Given passband and stopband frequencies and gains, construct an analog or digital IIR filter of minimum order for a given basic type. Return the output in numerator, denominator ('ba'), pole-zero ('zpk') or second order sections ('sos') form. Parameters ---------- wp, ws : float Passband and stopband edge frequencies. For digital filters, these are in the same units as `fs`. By default, `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, where 1 is the Nyquist frequency. For example: - Lowpass: wp = 0.2, ws = 0.3 - Highpass: wp = 0.3, ws = 0.2 - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s). gpass : float The maximum loss in the passband (dB). gstop : float The minimum attenuation in the stopband (dB). analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. ftype : str, optional The type of IIR filter to design: - Butterworth : 'butter' - Chebyshev I : 'cheby1' - Chebyshev II : 'cheby2' - Cauer/elliptic: 'ellip' - Bessel/Thomson: 'bessel' output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba' for backwards compatibility, but 'sos' should be used for general-purpose filtering. fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output=='sos'``. See Also -------- butter : Filter design using order and critical points cheby1, cheby2, ellip, bessel buttord : Find order and critical points from passband and stopband spec cheb1ord, cheb2ord, ellipord iirfilter : General filter design using order and critical frequencies Notes ----- The ``'sos'`` output parameter was added in 0.16.0. Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> import matplotlib.ticker >>> wp = 0.2 >>> ws = 0.3 >>> gpass = 1 >>> gstop = 40 >>> system = signal.iirdesign(wp, ws, gpass, gstop) >>> w, h = signal.freqz(*system) >>> fig, ax1 = plt.subplots() >>> ax1.set_title('Digital filter frequency response') >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b') >>> ax1.set_ylabel('Amplitude [dB]', color='b') >>> ax1.set_xlabel('Frequency [rad/sample]') >>> ax1.grid() >>> ax1.set_ylim([-120, 20]) >>> ax2 = ax1.twinx() >>> angles = np.unwrap(np.angle(h)) >>> ax2.plot(w, angles, 'g') >>> ax2.set_ylabel('Angle (radians)', color='g') >>> ax2.grid() >>> ax2.axis('tight') >>> ax2.set_ylim([-6, 1]) >>> nticks = 8 >>> ax1.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks)) >>> ax2.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks)) """ try: ordfunc = filter_dict[ftype][1] except KeyError: raise ValueError("Invalid IIR filter type: %s" % ftype) except IndexError: raise ValueError(("%s does not have order selection. 
Use " "iirfilter function.") % ftype) wp = atleast_1d(wp) ws = atleast_1d(ws) band_type = 2 * (len(wp) - 1) band_type += 1 if wp[0] >= ws[0]: band_type += 1 btype = {1: 'lowpass', 2: 'highpass', 3: 'bandstop', 4: 'bandpass'}[band_type] N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog, fs=fs) return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, ftype=ftype, output=output, fs=fs)
45,202
def file_open(file_path, mode="rb", kwargs=None): if isinstance(file_path, str): match = S3_ADDRESS_REGEX.search(file_path) if match: import s3fs as S3FS from botocore.exceptions import NoCredentialsError s3fs = S3FS.S3FileSystem(anon=False) try: return s3fs.open(file_path) except NoCredentialsError: s3fs = S3FS.S3FileSystem(anon=True) return s3fs.open(file_path) elif "compression" in kwargs: if kwargs["compression"] == "gzip": import gzip return gzip.open(file_path, mode=mode) return open(file_path, mode=mode)
def file_open(file_path, mode="rb", compression="infer"): if isinstance(file_path, str): match = S3_ADDRESS_REGEX.search(file_path) if match: import s3fs as S3FS from botocore.exceptions import NoCredentialsError s3fs = S3FS.S3FileSystem(anon=False) try: return s3fs.open(file_path) except NoCredentialsError: s3fs = S3FS.S3FileSystem(anon=True) return s3fs.open(file_path) elif compression == "gzip": import gzip return gzip.open(file_path, mode=mode) return open(file_path, mode=mode)
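A usage sketch for file_open(): the S3 branch needs s3fs/botocore and a real bucket, so only the local gzip path is exercised, and it assumes the body dispatches on the `compression` argument as in the corrected form above (plus the module-level S3_ADDRESS_REGEX).

# Exercise the local gzip branch with a throwaway file.
import gzip
import os
import tempfile

tmp = tempfile.NamedTemporaryFile(suffix=".csv.gz", delete=False)
tmp.close()
with gzip.open(tmp.name, "wb") as fh:
    fh.write(b"a,b\n1,2\n")

with file_open(tmp.name, compression="gzip") as reader:
    print(reader.read())   # decompressed bytes: b'a,b\n1,2\n'
os.unlink(tmp.name)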
1,322
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8, method='lars', n_jobs=None, dict_init=None, code_init=None, callback=None, verbose=False, random_state=None, return_n_iter=False, positive_dict=False, positive_code=False): """Solves a dictionary learning matrix factorization problem. Finds the best dictionary and the corresponding sparse code for approximating the data matrix X by solving:: (U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1 (U,V) with || V_k ||_2 = 1 for all 0 <= k < n_components where V is the dictionary and U is the sparse code. Read more in the :ref:`User Guide <DictionaryLearning>`. Parameters ---------- X : array of shape (n_samples, n_features) Data matrix. n_components : int, Number of dictionary atoms to extract. alpha : int, Sparsity controlling parameter. max_iter : int, Maximum number of iterations to perform. tol : float, Tolerance for the stopping condition. method : {'lars', 'cd'} lars: uses the least angle regression method to solve the lasso problem (linear_model.lars_path) cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso). Lars will be faster if the estimated components are sparse. n_jobs : int or None, optional (default=None) Number of parallel jobs to run. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. dict_init : array of shape (n_components, n_features), Initial value for the dictionary for warm restart scenarios. code_init : array of shape (n_samples, n_components), Initial value for the sparse code for warm restart scenarios. callback : callable or None, optional (default: None) Callable that gets invoked every five iterations verbose : bool, optional (default: False) To control the verbosity of the procedure. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. return_n_iter : bool Whether or not to return the number of iterations. positive_dict : bool Whether to enforce positivity when finding the dictionary. .. versionadded:: 0.20 positive_code : bool Whether to enforce positivity when finding the code. .. versionadded:: 0.20 Returns ------- code : array of shape (n_samples, n_components) The sparse code factor in the matrix factorization. dictionary : array of shape (n_components, n_features), The dictionary factor in the matrix factorization. errors : array Vector of errors at each iteration. n_iter : int Number of iterations run. Returned only if `return_n_iter` is set to True. See also -------- dict_learning_online DictionaryLearning MiniBatchDictionaryLearning SparsePCA MiniBatchSparsePCA """ if method not in ('lars', 'cd'): raise ValueError('Coding method %r not supported as a fit algorithm.' % method) if method == 'lars' and positive_code: raise ValueError( "Positive constraint not supported for \"lars\" coding method." 
) method = 'lasso_' + method t0 = time.time() # Avoid integer division problems alpha = float(alpha) random_state = check_random_state(random_state) # Init the code and the dictionary with SVD of Y if code_init is not None and dict_init is not None: code = np.array(code_init, order='F') # Don't copy V, it will happen below dictionary = dict_init else: code, S, dictionary = linalg.svd(X, full_matrices=False) dictionary = S[:, np.newaxis] * dictionary r = len(dictionary) if n_components <= r: # True even if n_components=None code = code[:, :n_components] dictionary = dictionary[:n_components, :] else: code = np.c_[code, np.zeros((len(code), n_components - r))] dictionary = np.r_[dictionary, np.zeros((n_components - r, dictionary.shape[1]))] # Fortran-order dict, as we are going to access its row vectors dictionary = np.array(dictionary, order='F') residuals = 0 errors = [] current_cost = np.nan if verbose == 1: print('[dict_learning]', end=' ') # If max_iter is 0, number of iterations returned should be zero ii = -1 for ii in range(max_iter): dt = (time.time() - t0) if verbose == 1: sys.stdout.write(".") sys.stdout.flush() elif verbose: print("Iteration % 3i " "(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)" % (ii, dt, dt / 60, current_cost)) # Update code code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha, init=code, n_jobs=n_jobs, positive=positive_code) # Update dictionary dictionary, residuals = _update_dict(dictionary.T, X.T, code.T, verbose=verbose, return_r2=True, random_state=random_state, positive=positive_dict) dictionary = dictionary.T # Cost function current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code)) errors.append(current_cost) if ii > 0: dE = errors[-2] - errors[-1] # assert(dE >= -tol * errors[-1]) if dE < tol * errors[-1]: if verbose == 1: # A line return print("") elif verbose: print("--- Convergence reached after %d iterations" % ii) break if ii % 5 == 0 and callback is not None: callback(locals()) if return_n_iter: return code, dictionary, errors, ii + 1 else: return code, dictionary, errors
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8, method='lars', n_jobs=None, dict_init=None, code_init=None, callback=None, verbose=False, random_state=None, return_n_iter=False, positive_dict=False, positive_code=False): """Solves a dictionary learning matrix factorization problem. Finds the best dictionary and the corresponding sparse code for approximating the data matrix X by solving:: (U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1 (U,V) with || V_k ||_2 = 1 for all 0 <= k < n_components where V is the dictionary and U is the sparse code. Read more in the :ref:`User Guide <DictionaryLearning>`. Parameters ---------- X : array of shape (n_samples, n_features) Data matrix. n_components : int, Number of dictionary atoms to extract. alpha : int, Sparsity controlling parameter. max_iter : int, Maximum number of iterations to perform. tol : float, Tolerance for the stopping condition. method : {'lars', 'cd'} lars: uses the least angle regression method to solve the lasso problem (linear_model.lars_path) cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso). Lars will be faster if the estimated components are sparse. n_jobs : int or None, optional (default=None) Number of parallel jobs to run. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. dict_init : array of shape (n_components, n_features), Initial value for the dictionary for warm restart scenarios. code_init : array of shape (n_samples, n_components), Initial value for the sparse code for warm restart scenarios. callback : callable or None, optional (default: None) Callable that gets invoked every five iterations verbose : bool, optional (default: False) To control the verbosity of the procedure. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. return_n_iter : bool Whether or not to return the number of iterations. positive_dict : bool Whether to enforce positivity when finding the dictionary. .. versionadded:: 0.20 positive_code : bool Whether to enforce positivity when finding the code. .. versionadded:: 0.20 Returns ------- code : array of shape (n_samples, n_components) The sparse code factor in the matrix factorization. dictionary : array of shape (n_components, n_features), The dictionary factor in the matrix factorization. errors : array Vector of errors at each iteration. n_iter : int Number of iterations run. Returned only if `return_n_iter` is set to True. See also -------- dict_learning_online DictionaryLearning MiniBatchDictionaryLearning SparsePCA MiniBatchSparsePCA """ if method not in ('lars', 'cd'): raise ValueError('Coding method %r not supported as a fit algorithm.' % method) if method == 'lars' and positive_code: raise ValueError( "Positive constraint not supported for 'lars' coding method." 
) method = 'lasso_' + method t0 = time.time() # Avoid integer division problems alpha = float(alpha) random_state = check_random_state(random_state) # Init the code and the dictionary with SVD of Y if code_init is not None and dict_init is not None: code = np.array(code_init, order='F') # Don't copy V, it will happen below dictionary = dict_init else: code, S, dictionary = linalg.svd(X, full_matrices=False) dictionary = S[:, np.newaxis] * dictionary r = len(dictionary) if n_components <= r: # True even if n_components=None code = code[:, :n_components] dictionary = dictionary[:n_components, :] else: code = np.c_[code, np.zeros((len(code), n_components - r))] dictionary = np.r_[dictionary, np.zeros((n_components - r, dictionary.shape[1]))] # Fortran-order dict, as we are going to access its row vectors dictionary = np.array(dictionary, order='F') residuals = 0 errors = [] current_cost = np.nan if verbose == 1: print('[dict_learning]', end=' ') # If max_iter is 0, number of iterations returned should be zero ii = -1 for ii in range(max_iter): dt = (time.time() - t0) if verbose == 1: sys.stdout.write(".") sys.stdout.flush() elif verbose: print("Iteration % 3i " "(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)" % (ii, dt, dt / 60, current_cost)) # Update code code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha, init=code, n_jobs=n_jobs, positive=positive_code) # Update dictionary dictionary, residuals = _update_dict(dictionary.T, X.T, code.T, verbose=verbose, return_r2=True, random_state=random_state, positive=positive_dict) dictionary = dictionary.T # Cost function current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code)) errors.append(current_cost) if ii > 0: dE = errors[-2] - errors[-1] # assert(dE >= -tol * errors[-1]) if dE < tol * errors[-1]: if verbose == 1: # A line return print("") elif verbose: print("--- Convergence reached after %d iterations" % ii) break if ii % 5 == 0 and callback is not None: callback(locals()) if return_n_iter: return code, dictionary, errors, ii + 1 else: return code, dictionary, errors
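A small usage sketch on a random data matrix, assuming this is scikit-learn's public dict_learning reached through sklearn.decomposition; with the defaults it returns the sparse code, the dictionary and the per-iteration costs.

# Usage sketch; assumes scikit-learn is installed.
import numpy as np
from sklearn.decomposition import dict_learning

rng = np.random.RandomState(0)
X = rng.randn(30, 8)
code, dictionary, errors = dict_learning(X, n_components=5, alpha=1.0,
                                         max_iter=20, random_state=0)
print(code.shape, dictionary.shape, len(errors))   # (30, 5) (5, 8) <=20 iterations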
31,679
def copy_dict_value(source_dict: Dict[str, Any], dest_dict: Dict[str, Any], source_dict_key: str, dest_dict_key: str = None): if not source_dict_key: return None param_value = source_dict.get(source_dict_key) if param_value: if dest_dict_key: dest_dict[dest_dict_key] = param_value else: dest_dict[source_dict_key] = param_value
def copy_dict_value(source_dict: Dict[str, Any], dest_dict: Dict[str, Any], source_dict_key: str, dest_dict_key: str = None): if not source_dict_key: return param_value = source_dict.get(source_dict_key) if param_value: if dest_dict_key: dest_dict[dest_dict_key] = param_value else: dest_dict[source_dict_key] = param_value
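A tiny usage example of copy_dict_value(): copying a value under a new key, then under the same key when no destination key is given.

source = {'limit': 50, 'page': 2}
dest = {}
copy_dict_value(source, dest, 'limit', 'max_results')  # renamed on copy
copy_dict_value(source, dest, 'page')                  # same key on copy
print(dest)   # {'max_results': 50, 'page': 2}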
30,473
def fetch_indicators(client: Client, limit: int = -1) -> Tuple[List[Dict], List]: """Fetches indicators from the feed to the indicators tab. Args: client (Client): Client object configured according to instance arguments. limit (int): Maximum number of indicators to return. Returns: Tuple of: str. Information to be printed to war room. Dict. Data to be entered to context. Dict. The raw data of the indicators. """ iterator = client.build_iterator() indicators = [] raw_response = [] if limit != -1: iterator = iterator[:limit] for indicator in iterator: raw_data = { 'Value': indicator['value'], 'Type': indicator['type'], 'Azure_group_name': indicator['azure_name'], 'Azure_group_id': indicator['azure_id'], 'Azure_region': indicator['azure_region'], 'Azure_platform': indicator['azure_platform'], 'Azure_system_service': indicator['azure_system_service'] } indicators.append({ 'Value': indicator['value'], 'Type': indicator['type'], 'rawJSON': raw_data }) raw_response.append(raw_data) return indicators, raw_response
def fetch_indicators(client: Client, limit: int = -1) -> Tuple[List[Dict], List]: """Fetches indicators from the feed to the indicators tab. Args: client (Client): Client object configured according to instance arguments. limit (int): Maximum number of indicators to return. Returns: Tuple of: str. Information to be printed to war room. Dict. Data to be entered to context. Dict. The raw data of the indicators. """ iterator = client.build_iterator() indicators = [] raw_response = [] if limit != -1: iterator = iterator[:limit] for indicator in iterator: raw_data = { 'Value': indicator['value'], 'Type': indicator['type'], 'Azure_group_name': indicator['azure_name'], 'Azure_group_id': indicator['azure_id'], 'Azure_region': indicator['azure_region'], 'Azure_platform': indicator['azure_platform'], 'Azure_system_service': indicator['azure_system_service'] } indicators.append({ 'Value': indicator['value'], 'Type': indicator['type'], 'rawJSON': raw_data }) raw_response.append(indicator) return indicators, raw_response
47,495
def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Load dataset # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # TODO support datasets from local folders dataset = load_dataset(data_args.dataset_name, cache_dir=model_args.cache_dir) # Rename column names to standardized names (only "image" and "label" need to be present) if "pixel_values" in dataset["train"].column_names: dataset = dataset.rename_columns({"pixel_values": "image"}) if "annotation" in dataset["train"].column_names: dataset = dataset.rename_columns({"annotation": "label"}) # If we don't have a validation split, split off a percentage of train as validation. data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0: split = dataset["train"].train_test_split(data_args.train_val_split) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. 
if data_args.dataset_name == "scene_parse_150": repo_id = "datasets/huggingface/label-files" filename = "ade20k-id2label.json" else: repo_id = f"datasets/{data_args.dataset_name}" filename = "id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} # Load the mean IoU metric from the datasets package metric = datasets.load_metric("mean_iou") # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. @torch.no_grad() def compute_metrics(eval_pred): logits, labels = eval_pred logits_tensor = torch.from_numpy(logits) # scale the logits to the size of the label logits_tensor = nn.functional.interpolate( logits_tensor, size=labels.shape[-2:], mode="bilinear", align_corners=False, ).argmax(dim=1) pred_labels = logits_tensor.detach().cpu().numpy() metrics = metric.compute( predictions=pred_labels, references=labels, num_labels=len(id2label), ignore_index=0, reduce_labels=feature_extractor.reduce_labels, ) for key, value in metrics.items(): if type(value) is np.ndarray: metrics[key] = value.tolist() return metrics config = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path, label2id=label2id, id2label=id2label, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForSemanticSegmentation.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) feature_extractor = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Define torchvision transforms to be applied to each image + target. # Not that straightforward in torchvision: https://github.com/pytorch/vision/issues/9 # Currently based on official torchvision references: https://github.com/pytorch/vision/blob/main/references/segmentation/transforms.py train_transforms = Compose( [ ReduceLabels() if data_args.reduce_labels else Identity(), RandomCrop(size=feature_extractor.size), RandomHorizontalFlip(flip_prob=0.5), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std), ] ) # Define torchvision transform to be applied to each image. 
# jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1) val_transforms = Compose( [ ReduceLabels() if data_args.reduce_labels else Identity(), Resize(size=(feature_extractor.size, feature_extractor.size)), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std), ] ) def preprocess_train(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch["image"], example_batch["label"]): image, target = train_transforms(image.convert("RGB"), target) pixel_values.append(image) labels.append(target) encoding = dict() encoding["pixel_values"] = torch.stack(pixel_values) encoding["labels"] = torch.stack(labels) return encoding def preprocess_val(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch["image"], example_batch["label"]): image, target = val_transforms(image.convert("RGB"), target) pixel_values.append(image) labels.append(target) encoding = dict() encoding["pixel_values"] = torch.stack(pixel_values) encoding["labels"] = torch.stack(labels) return encoding if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset") if data_args.max_train_samples is not None: dataset["train"] = ( dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples)) ) # Set the training transforms dataset["train"].set_transform(preprocess_train) if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset") if data_args.max_eval_samples is not None: dataset["validation"] = ( dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples)) ) # Set the validation transforms dataset["validation"].set_transform(preprocess_val) # Initalize our trainer trainer = Trainer( model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor, data_collator=default_data_collator, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) trainer.save_state() # Evaluation if training_args.do_eval: metrics = trainer.evaluate() trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Write model card and (optionally) push to hub kwargs = { "finetuned_from": model_args.model_name_or_path, "dataset": data_args.dataset_name, "tags": ["image-segmentation", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs)
def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Load dataset # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # TODO support datasets from local folders dataset = load_dataset(data_args.dataset_name, cache_dir=model_args.cache_dir) # Rename column names to standardized names (only "image" and "label" need to be present) if "pixel_values" in dataset["train"].column_names: dataset = dataset.rename_columns({"pixel_values": "image"}) if "annotation" in dataset["train"].column_names: dataset = dataset.rename_columns({"annotation": "label"}) # If we don't have a validation split, split off a percentage of train as validation. data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0: split = dataset["train"].train_test_split(data_args.train_val_split) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. 
if data_args.dataset_name == "scene_parse_150": repo_id = "datasets/huggingface/label-files" filename = "ade20k-id2label.json" else: repo_id = f"datasets/{data_args.dataset_name}" filename = "id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: str(k) for k, v in id2label.items()} # Load the mean IoU metric from the datasets package metric = datasets.load_metric("mean_iou") # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. @torch.no_grad() def compute_metrics(eval_pred): logits, labels = eval_pred logits_tensor = torch.from_numpy(logits) # scale the logits to the size of the label logits_tensor = nn.functional.interpolate( logits_tensor, size=labels.shape[-2:], mode="bilinear", align_corners=False, ).argmax(dim=1) pred_labels = logits_tensor.detach().cpu().numpy() metrics = metric.compute( predictions=pred_labels, references=labels, num_labels=len(id2label), ignore_index=0, reduce_labels=feature_extractor.reduce_labels, ) for key, value in metrics.items(): if type(value) is np.ndarray: metrics[key] = value.tolist() return metrics config = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path, label2id=label2id, id2label=id2label, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForSemanticSegmentation.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) feature_extractor = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Define torchvision transforms to be applied to each image + target. # Not that straightforward in torchvision: https://github.com/pytorch/vision/issues/9 # Currently based on official torchvision references: https://github.com/pytorch/vision/blob/main/references/segmentation/transforms.py train_transforms = Compose( [ ReduceLabels() if data_args.reduce_labels else Identity(), RandomCrop(size=feature_extractor.size), RandomHorizontalFlip(flip_prob=0.5), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std), ] ) # Define torchvision transform to be applied to each image. 
# jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1) val_transforms = Compose( [ ReduceLabels() if data_args.reduce_labels else Identity(), Resize(size=(feature_extractor.size, feature_extractor.size)), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std), ] ) def preprocess_train(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch["image"], example_batch["label"]): image, target = train_transforms(image.convert("RGB"), target) pixel_values.append(image) labels.append(target) encoding = dict() encoding["pixel_values"] = torch.stack(pixel_values) encoding["labels"] = torch.stack(labels) return encoding def preprocess_val(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch["image"], example_batch["label"]): image, target = val_transforms(image.convert("RGB"), target) pixel_values.append(image) labels.append(target) encoding = dict() encoding["pixel_values"] = torch.stack(pixel_values) encoding["labels"] = torch.stack(labels) return encoding if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset") if data_args.max_train_samples is not None: dataset["train"] = ( dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples)) ) # Set the training transforms dataset["train"].set_transform(preprocess_train) if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset") if data_args.max_eval_samples is not None: dataset["validation"] = ( dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples)) ) # Set the validation transforms dataset["validation"].set_transform(preprocess_val) # Initalize our trainer trainer = Trainer( model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor, data_collator=default_data_collator, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) trainer.save_state() # Evaluation if training_args.do_eval: metrics = trainer.evaluate() trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Write model card and (optionally) push to hub kwargs = { "finetuned_from": model_args.model_name_or_path, "dataset": data_args.dataset_name, "tags": ["image-segmentation", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs)
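Both versions of the script above feed torchvision-style transforms into a Hugging Face datasets split through set_transform, so preprocessing runs lazily at access time. The following is a minimal, self-contained sketch of that pattern only; the dataset name ("beans") and the reduced transform pipeline are illustrative assumptions, not the script's actual configuration.

import torch
from datasets import load_dataset
from torchvision.transforms import Compose, ConvertImageDtype, PILToTensor, Resize

# Hypothetical small image dataset; any split with an "image" column works.
dataset = load_dataset("beans", split="train")

transforms = Compose([Resize((224, 224)), PILToTensor(), ConvertImageDtype(torch.float)])

def preprocess(example_batch):
    # Convert each PIL image to a float tensor and stack into a batch.
    pixel_values = [transforms(image.convert("RGB")) for image in example_batch["image"]]
    return {"pixel_values": torch.stack(pixel_values), "labels": example_batch["labels"]}

dataset.set_transform(preprocess)   # applied lazily on every access
batch = dataset[:2]
print(batch["pixel_values"].shape)  # e.g. torch.Size([2, 3, 224, 224])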
13,763
def _log_and_monitor_expected_errors(request, exception, caller): """ Adds logging and monitoring for expected errors as needed. Arguments: request: The request exception: The exception caller: Either 'middleware' or 'drf` """ expected_error_settings_dict = _get_expected_error_settings_dict() if not expected_error_settings_dict: return # 'module:class', for example, 'django.core.exceptions:PermissionDenied' # Note: `Exception` itself doesn't have a module. exception_module = exception.__module__ if hasattr(exception, '__module__') else '' module_and_class = f'{exception_module}:{exception.__class__.__name__}' # Set checked_error_expected_from custom attribute to potentially help find issues where errors are never processed. set_custom_attribute('checked_error_expected_from', caller) # check if we already added logging/monitoring from a different caller request_cache = RequestCache('openedx.core.lib.request_utils') cached_handled_exception = request_cache.get_cached_response('handled_exception') if cached_handled_exception.is_found: cached_module_and_class = cached_handled_exception.value # exception was already processed by a different caller if cached_handled_exception.value == module_and_class: set_custom_attribute('checked_error_expected_from', 'multiple') return # Currently, it seems unexpected that middleware and drf will both handle different uncaught exceptions. # However, since it is possible, we will add an additional attribute and log message and then continue. set_custom_attribute('unexpected_multiple_exceptions', cached_module_and_class) log.warning( "Unexpected scenario where different exceptions are handled by _log_and_monitor_expected_errors. " "See 'unexpected_multiple_exceptions' custom attribute." ) request_cache.set('handled_exception', module_and_class) if module_and_class not in expected_error_settings_dict: return module_and_class_with_message = f'{exception_module}:{repr(exception)}' set_custom_attribute('error_expected', module_and_class_with_message) expected_error_settings = expected_error_settings_dict[module_and_class] if expected_error_settings['is_ignored']: set_custom_attribute('error_ignored', True) if expected_error_settings['log_error'] or expected_error_settings['log_stack_trace']: print_stack = expected_error_settings['log_stack_trace'] request_path = request.path if hasattr(request, 'path') else 'request-path-unknown' log.info('Expected error seen for %s', request_path, exc_info=exception, stack_info=print_stack)
def _log_and_monitor_expected_errors(request, exception, caller): """ Adds logging and monitoring for expected errors as needed. Arguments: request: The request exception: The exception caller: Either 'middleware' or 'drf` """ expected_error_settings_dict = _get_expected_error_settings_dict() if not expected_error_settings_dict: return # 'module:class', for example, 'django.core.exceptions:PermissionDenied' # Note: `Exception` itself doesn't have a module. exception_module = getattr(exception, '__module__', '') module_and_class = f'{exception_module}:{exception.__class__.__name__}' # Set checked_error_expected_from custom attribute to potentially help find issues where errors are never processed. set_custom_attribute('checked_error_expected_from', caller) # check if we already added logging/monitoring from a different caller request_cache = RequestCache('openedx.core.lib.request_utils') cached_handled_exception = request_cache.get_cached_response('handled_exception') if cached_handled_exception.is_found: cached_module_and_class = cached_handled_exception.value # exception was already processed by a different caller if cached_handled_exception.value == module_and_class: set_custom_attribute('checked_error_expected_from', 'multiple') return # Currently, it seems unexpected that middleware and drf will both handle different uncaught exceptions. # However, since it is possible, we will add an additional attribute and log message and then continue. set_custom_attribute('unexpected_multiple_exceptions', cached_module_and_class) log.warning( "Unexpected scenario where different exceptions are handled by _log_and_monitor_expected_errors. " "See 'unexpected_multiple_exceptions' custom attribute." ) request_cache.set('handled_exception', module_and_class) if module_and_class not in expected_error_settings_dict: return module_and_class_with_message = f'{exception_module}:{repr(exception)}' set_custom_attribute('error_expected', module_and_class_with_message) expected_error_settings = expected_error_settings_dict[module_and_class] if expected_error_settings['is_ignored']: set_custom_attribute('error_ignored', True) if expected_error_settings['log_error'] or expected_error_settings['log_stack_trace']: print_stack = expected_error_settings['log_stack_trace'] request_path = request.path if hasattr(request, 'path') else 'request-path-unknown' log.info('Expected error seen for %s', request_path, exc_info=exception, stack_info=print_stack)
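The only change between the two versions above is swapping a hasattr check for getattr with a default when reading __module__. A tiny sketch of that pattern, using made-up exception classes:

class WeirdError(Exception):
    pass

for exc in (PermissionError("denied"), WeirdError("odd")):
    # getattr with a default replaces the hasattr/ternary combination.
    exception_module = getattr(exc, '__module__', '')
    print(f"{exception_module}:{exc.__class__.__name__}")
# builtins:PermissionError
# __main__:WeirdError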
59,658
def test_load_libgmt_with_broken_libraries(monkeypatch): """ Test load_libgmt still works when a broken library is found. """ # load the GMT library before mocking the ctypes.CDLL function loaded_libgmt = load_libgmt() def mock_ctypes_cdll_return(libname): """ Mock the return value of ctypes.CDLL. Parameters ---------- libname : str or FakedLibGMT or ctypes.CDLL Path to the GMT library, a faked GMT library or a working library loaded as ctypes.CDLL. Return ------ object Either the loaded GMT library or the faked GMT library. """ if isinstance(libname, FakedLibGMT): # libname is a faked GMT library, return the faked library return libname if isinstance(libname, str): # libname is an invalid library path in str type, # raise OSError like the original ctypes.CDLL raise OSError(f"Unable to find '{libname}'") # libname is a loaded GMT library return loaded_libgmt with monkeypatch.context() as mpatch: # pylint: disable=protected-access # mock the ctypes.CDLL using mock_ctypes_cdll_return() mpatch.setattr(ctypes, "CDLL", mock_ctypes_cdll_return) faked_libgmt1 = FakedLibGMT("/path/to/faked/libgmt1.so") faked_libgmt2 = FakedLibGMT("/path/to/faked/libgmt2.so") # case 1: two broken libraries # Raise the GMTCLibNotFoundError exception # The error message should contains information of both libraries lib_fullnames = [faked_libgmt1, faked_libgmt2] msg_regex = ( fr"Error loading the GMT shared library '{faked_libgmt1._name}'.\n" fr"Error loading '{faked_libgmt1._name}'. Couldn't access.*\n" fr"Error loading the GMT shared library '{faked_libgmt2._name}'.\n" fr"Error loading '{faked_libgmt2._name}'. Couldn't access.*" ) with pytest.raises(GMTCLibNotFoundError, match=msg_regex): load_libgmt(lib_fullnames=lib_fullnames) # case 2: broken library + invalid path lib_fullnames = [faked_libgmt1, "/invalid/path/to/libgmt.so"] msg_regex = ( fr"Error loading the GMT shared library '{faked_libgmt1._name}'.\n" fr"Error loading '{faked_libgmt1._name}'. Couldn't access.*\n" "Error loading the GMT shared library '/invalid/path/to/libgmt.so'.\n" "Unable to find '/invalid/path/to/libgmt.so'" ) with pytest.raises(GMTCLibNotFoundError, match=msg_regex): load_libgmt(lib_fullnames=lib_fullnames) # case 3: broken library + invalid path + working library lib_fullnames = [faked_libgmt1, "/invalid/path/to/libgmt.so", loaded_libgmt] assert check_libgmt(load_libgmt(lib_fullnames=lib_fullnames)) is None # case 4: invalid path + broken library + working library lib_fullnames = ["/invalid/path/to/libgmt.so", faked_libgmt1, loaded_libgmt] assert check_libgmt(load_libgmt(lib_fullnames=lib_fullnames)) is None # case 5: working library + broken library + invalid path lib_fullnames = [loaded_libgmt, faked_libgmt1, "/invalid/path/to/libgmt.so"] assert check_libgmt(load_libgmt(lib_fullnames=lib_fullnames)) is None # case 6: repeated broken library + working library lib_fullnames = [faked_libgmt1, faked_libgmt1, loaded_libgmt] assert check_libgmt(load_libgmt(lib_fullnames=lib_fullnames)) is None
def test_load_libgmt_with_broken_libraries(monkeypatch): """ Test load_libgmt still works when a broken library is found. """ # load the GMT library before mocking the ctypes.CDLL function loaded_libgmt = load_libgmt() def mock_ctypes_cdll_return(libname): """ Mock the return value of ctypes.CDLL. Parameters ---------- libname : str or FakedLibGMT or ctypes.CDLL Path to the GMT library, a faked GMT library or a working library loaded as ctypes.CDLL. Return ------ object Either the loaded GMT library or the faked GMT library. """ if isinstance(libname, FakedLibGMT): # libname is a faked GMT library, return the faked library return libname if isinstance(libname, str): # libname is an invalid library path in str type, # raise OSError like the original ctypes.CDLL raise OSError(f"Unable to find '{libname}'") # libname is a loaded GMT library return loaded_libgmt with monkeypatch.context() as mpatch: # pylint: disable=protected-access # mock the ctypes.CDLL using mock_ctypes_cdll_return() mpatch.setattr(ctypes, "CDLL", mock_ctypes_cdll_return) faked_libgmt1 = FakedLibGMT("/path/to/faked/libgmt1.so") faked_libgmt2 = FakedLibGMT("/path/to/faked/libgmt2.so") # case 1: two broken libraries # Raise the GMTCLibNotFoundError exception # The error message should contains information of both libraries lib_fullnames = [faked_libgmt1, faked_libgmt2] msg_regex = ( fr"Error loading the GMT shared library '{faked_libgmt1._name}'.\n" fr"Error loading '{faked_libgmt1._name}'. Couldn't access.*\n" fr"Error loading the GMT shared library '{faked_libgmt2._name}'.\n" fr"Error loading '{faked_libgmt2._name}'. Couldn't access.*" ) with pytest.raises(GMTCLibNotFoundError, match=msg_regex): load_libgmt(lib_fullnames=lib_fullnames) # case 2: broken library + invalid path lib_fullnames = [faked_libgmt1, "/invalid/path/to/libgmt.so"] msg_regex = ( fr"Error loading GMT shared library at '{faked_libgmt1._name}'.\n" fr"Error loading '{faked_libgmt1._name}'. Couldn't access.*\n" "Error loading GMT shared library at '/invalid/path/to/libgmt.so'.\n" "Unable to find '/invalid/path/to/libgmt.so'" ) with pytest.raises(GMTCLibNotFoundError, match=msg_regex): load_libgmt(lib_fullnames=lib_fullnames) # case 3: broken library + invalid path + working library lib_fullnames = [faked_libgmt1, "/invalid/path/to/libgmt.so", loaded_libgmt] assert check_libgmt(load_libgmt(lib_fullnames=lib_fullnames)) is None # case 4: invalid path + broken library + working library lib_fullnames = ["/invalid/path/to/libgmt.so", faked_libgmt1, loaded_libgmt] assert check_libgmt(load_libgmt(lib_fullnames=lib_fullnames)) is None # case 5: working library + broken library + invalid path lib_fullnames = [loaded_libgmt, faked_libgmt1, "/invalid/path/to/libgmt.so"] assert check_libgmt(load_libgmt(lib_fullnames=lib_fullnames)) is None # case 6: repeated broken library + working library lib_fullnames = [faked_libgmt1, faked_libgmt1, loaded_libgmt] assert check_libgmt(load_libgmt(lib_fullnames=lib_fullnames)) is None
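A stripped-down sketch of the test machinery used above: monkeypatch.context() temporarily swaps out ctypes.CDLL, and pytest.raises(match=...) checks the error message against a regex. The fake loader and path here are assumptions for illustration, not PyGMT internals.

import ctypes
import pytest

def fake_cdll(libname):
    # Stand-in loader that always fails, mimicking a missing library.
    raise OSError(f"Unable to find '{libname}'")

def test_fake_cdll(monkeypatch):
    with monkeypatch.context() as mpatch:
        mpatch.setattr(ctypes, "CDLL", fake_cdll)
        with pytest.raises(OSError, match=r"Unable to find '/no/such/libgmt\.so'"):
            ctypes.CDLL("/no/such/libgmt.so")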
57,488
def normalize_model_name(name: str) -> str:
    """Normalizes the given model name."""
    return re.sub(r'[^a-zA-Z0-9.\-_]', '_', name)
def normalize_model_name(name: str) -> str:
    """
    Normalizes the given model name.
    """
    return re.sub(r'[^a-zA-Z0-9.\-_]', '_', name)
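A quick usage sketch of the normalization above; the example inputs are made up.

import re

def normalize_model_name(name: str) -> str:
    """
    Normalizes the given model name.
    """
    return re.sub(r'[^a-zA-Z0-9.\-_]', '_', name)

print(normalize_model_name("resnet 50 (v1.5)"))   # resnet_50__v1.5_
print(normalize_model_name("org/model:latest"))   # org_model_latest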
36,265
def _rank_genes_groups_plot( adata: AnnData, plot_type: str = 'heatmap', groups: Union[str, Sequence[str]] = None, n_genes: int = 10, groupby: Optional[str] = None, values_to_plot: Optional[str] = None, gene_names: Optional[Union[Sequence[str], Mapping[str, Sequence[str]]]] = None, min_logfoldchange: Optional[float] = None, key: Optional[str] = None, show: Optional[bool] = None, save: Optional[bool] = None, return_fig: Optional[bool] = False, **kwds, ): """\ Common function to call the different rank_genes_groups_* plots """ if key is None: key = 'rank_genes_groups' if groupby is None: groupby = str(adata.uns[key]['params']['groupby']) group_names = adata.uns[key]['names'].dtype.names if groups is None else groups gene_symbols = kwds.get('gene_symbols', None) if gene_names is not None: var_names = gene_names if isinstance(var_names, Mapping): # get a single list of all gene names in the dictionary gene_names = sum([list(x) for x in var_names.values()], []) elif isinstance(var_names, str): gene_names = [var_names] else: gene_names = var_names else: # dict in which each group is the key and the n_genes are the values var_names = {} gene_names = [] for group in group_names: df = rank_genes_groups_df(adata, group, key=key, gene_symbols=gene_symbols) if min_logfoldchange is not None: # select genes with given log_fold change df = df[df.logfoldchanges > min_logfoldchange] if gene_symbols is not None: df['names'] = df['symbol'] genes_list = df.names.tolist() if len(genes_list) == 0: logg.warning(f'No genes found for group {group}') continue if n_genes < 0: genes_list = genes_list[n_genes:] else: genes_list = genes_list[:n_genes] var_names[group] = genes_list gene_names.extend(genes_list) # by default add dendrogram to plots kwds.setdefault('dendrogram', True) if plot_type in ['dotplot', 'matrixplot']: # these two types of plots can also # show score, logfoldchange and pvalues, in general any value from rank # genes groups title = None values_df = None if values_to_plot is not None: values_df = _get_values_to_plot( adata, values_to_plot, gene_names, key=key, gene_symbols=gene_symbols ) title = values_to_plot if values_to_plot == 'logfoldchanges': title = 'log fold change' else: title = values_to_plot.replace("_", " ").replace('pvals', 'p-value') if plot_type == 'dotplot': from .._dotplot import dotplot _pl = dotplot( adata, var_names, groupby, dot_color_df=values_df, return_fig=True, **kwds, ) if title is not None and 'colorbar_title' not in kwds: _pl.legend(colorbar_title=title) elif plot_type == 'matrixplot': from .._matrixplot import matrixplot _pl = matrixplot( adata, var_names, groupby, values_df=values_df, return_fig=True, **kwds ) if title is not None and 'colorbar_title' not in kwds: _pl.legend(title=title) return _fig_show_save_or_axes(_pl, return_fig, show, save) elif plot_type == 'stacked_violin': from .._stacked_violin import stacked_violin _pl = stacked_violin(adata, var_names, groupby, return_fig=True, **kwds) return _fig_show_save_or_axes(_pl, return_fig, show, save) elif plot_type == 'heatmap': from .._anndata import heatmap return heatmap(adata, var_names, groupby, show=show, save=save, **kwds) elif plot_type == 'tracksplot': from .._anndata import tracksplot return tracksplot(adata, var_names, groupby, show=show, save=save, **kwds)
def _rank_genes_groups_plot( adata: AnnData, plot_type: str = 'heatmap', groups: Union[str, Sequence[str]] = None, n_genes: int = 10, groupby: Optional[str] = None, values_to_plot: Optional[str] = None, gene_names: Optional[Union[Sequence[str], Mapping[str, Sequence[str]]]] = None, min_logfoldchange: Optional[float] = None, key: Optional[str] = None, show: Optional[bool] = None, save: Optional[bool] = None, return_fig: Optional[bool] = False, **kwds, ): """\ Common function to call the different rank_genes_groups_* plots """ if key is None: key = 'rank_genes_groups' if groupby is None: groupby = str(adata.uns[key]['params']['groupby']) group_names = adata.uns[key]['names'].dtype.names if groups is None else groups gene_symbols = kwds.get('gene_symbols', None) if gene_names is not None: var_names = gene_names if isinstance(var_names, Mapping): # get a single list of all gene names in the dictionary gene_names = sum([list(x) for x in var_names.values()], []) elif isinstance(var_names, str): gene_names = [var_names] else: gene_names = var_names else: # dict in which each group is the key and the n_genes are the values var_names = {} gene_names = [] for group in group_names: df = rank_genes_groups_df(adata, group, key=key, gene_symbols=gene_symbols) if min_logfoldchange is not None: # select genes with given log_fold change df = df[df.logfoldchanges > min_logfoldchange] if gene_symbols is not None: df['names'] = df[gene_symbols] genes_list = df.names.tolist() if len(genes_list) == 0: logg.warning(f'No genes found for group {group}') continue if n_genes < 0: genes_list = genes_list[n_genes:] else: genes_list = genes_list[:n_genes] var_names[group] = genes_list gene_names.extend(genes_list) # by default add dendrogram to plots kwds.setdefault('dendrogram', True) if plot_type in ['dotplot', 'matrixplot']: # these two types of plots can also # show score, logfoldchange and pvalues, in general any value from rank # genes groups title = None values_df = None if values_to_plot is not None: values_df = _get_values_to_plot( adata, values_to_plot, gene_names, key=key, gene_symbols=gene_symbols ) title = values_to_plot if values_to_plot == 'logfoldchanges': title = 'log fold change' else: title = values_to_plot.replace("_", " ").replace('pvals', 'p-value') if plot_type == 'dotplot': from .._dotplot import dotplot _pl = dotplot( adata, var_names, groupby, dot_color_df=values_df, return_fig=True, **kwds, ) if title is not None and 'colorbar_title' not in kwds: _pl.legend(colorbar_title=title) elif plot_type == 'matrixplot': from .._matrixplot import matrixplot _pl = matrixplot( adata, var_names, groupby, values_df=values_df, return_fig=True, **kwds ) if title is not None and 'colorbar_title' not in kwds: _pl.legend(title=title) return _fig_show_save_or_axes(_pl, return_fig, show, save) elif plot_type == 'stacked_violin': from .._stacked_violin import stacked_violin _pl = stacked_violin(adata, var_names, groupby, return_fig=True, **kwds) return _fig_show_save_or_axes(_pl, return_fig, show, save) elif plot_type == 'heatmap': from .._anndata import heatmap return heatmap(adata, var_names, groupby, show=show, save=save, **kwds) elif plot_type == 'tracksplot': from .._anndata import tracksplot return tracksplot(adata, var_names, groupby, show=show, save=save, **kwds)
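The per-group selection in both versions builds a dict mapping each group to its top genes after a log-fold-change filter. A minimal pandas-only sketch of that logic, with made-up column values and thresholds (this is not the scanpy API):

import pandas as pd

df = pd.DataFrame({
    "group": ["A", "A", "A", "B", "B"],
    "names": ["g1", "g2", "g3", "g4", "g5"],
    "logfoldchanges": [2.0, 0.1, 1.5, 3.0, -0.5],
})

min_logfoldchange, n_genes = 0.5, 2
var_names = {}
for group, sub in df.groupby("group"):
    sub = sub[sub.logfoldchanges > min_logfoldchange]  # keep strong changes only
    var_names[group] = sub.names.tolist()[:n_genes]    # top n per group

print(var_names)  # {'A': ['g1', 'g3'], 'B': ['g4']}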
32,745
def unpatch_cache():
    cache_backends = {cache['BACKEND'] for cache in django_settings.CACHES.values()}
    for cache_module in cache_backends:
        cache = import_from_string(cache_module, cache_module)
        for method in TRACED_METHODS:
            unpatch_method(cache, method)
def unpatch_cache():
    cache_backends = set([cache['BACKEND'] for cache in django_settings.CACHES.values()])
    for cache_module in cache_backends:
        cache = import_from_string(cache_module, cache_module)
        for method in TRACED_METHODS:
            unpatch_method(cache, method)
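The single difference above is a set comprehension versus set() over a list comprehension; both deduplicate the configured backend paths. A tiny standalone check, using a plain dict in place of Django settings:

CACHES = {
    "default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"},
    "sessions": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"},
}

comprehension = {cache["BACKEND"] for cache in CACHES.values()}
constructor = set([cache["BACKEND"] for cache in CACHES.values()])
assert comprehension == constructor == {"django.core.cache.backends.locmem.LocMemCache"}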
3,590
def map_host_to_project_slug(request): # pylint: disable=too-many-return-statements """ Take the request and map the host to the proper project slug. We check, in order: * The ``HTTP_X_RTD_SLUG`` host header for explicit Project mapping - This sets ``request.rtdheader`` True * The ``PUBLIC_DOMAIN`` where we can use the subdomain as the project name - This sets ``request.subdomain`` True * The hostname without port information, which maps to ``Domain`` objects - This sets ``request.cname`` True * The domain is the canonical one and using HTTPS if supported - This sets ``request.canonicalize`` with the value as the reason """ host = unresolver.get_domain_from_host(request.get_host()) public_domain = unresolver.get_domain_from_host(settings.PUBLIC_DOMAIN) external_domain = unresolver.get_domain_from_host( settings.RTD_EXTERNAL_VERSION_DOMAIN ) # Explicit Project slug being passed in. if "HTTP_X_RTD_SLUG" in request.META: project_slug = request.headers["X-Rtd-Slug"].lower() if Project.objects.filter(slug=project_slug).exists(): request.rtdheader = True log.info('Setting project based on X_RTD_SLUG header.', project_slug=project_slug) return project_slug project_slug, domain_object, external_version_slug = unresolver.unresolve_domain( host ) if not project_slug: # Block domains that look like ours, may be phishing. if external_domain in host or public_domain in host: log.warning("Weird variation on our hostname.", host=host) return render( request, "core/dns-404.html", context={"host": host}, status=400, ) # Some person is CNAMEing to us without configuring a domain - 404. log.debug("CNAME 404.", host=host) return render(request, "core/dns-404.html", context={"host": host}, status=404) # Custom domain. if domain_object: request.cname = True request.domain = domain_object log.debug('Proxito CNAME.', host=host) if domain_object.https and not request.is_secure(): # Redirect HTTP -> HTTPS (302) for this custom domain. log.debug('Proxito CNAME HTTPS Redirect.', host=host) request.canonicalize = constants.REDIRECT_HTTPS # NOTE: consider redirecting non-canonical custom domains to the canonical one # Whether that is another custom domain or the public domain return project_slug # Pull request previews. if external_version_slug: request.external_domain = True request.host_version_slug = external_version_slug log.debug("Proxito External Version Domain.", host=host) return project_slug # Normal doc serving. request.subdomain = True log.debug("Proxito Public Domain.", host=host) if ( Domain.objects.filter(project__slug=project_slug) .filter( canonical=True, https=True, ) .exists() ): log.debug("Proxito Public Domain -> Canonical Domain Redirect.", host=host) request.canonicalize = constants.REDIRECT_CANONICAL_CNAME elif ProjectRelationship.objects.filter(child__slug=project_slug).exists(): log.debug( "Proxito Public Domain -> Subproject Main Domain Redirect.", host=host ) request.canonicalize = constants.REDIRECT_SUBPROJECT_MAIN_DOMAIN return project_slug
def map_host_to_project_slug(request): # pylint: disable=too-many-return-statements """ Take the request and map the host to the proper project slug. We check, in order: * The ``HTTP_X_RTD_SLUG`` host header for explicit Project mapping - This sets ``request.rtdheader`` True * The ``PUBLIC_DOMAIN`` where we can use the subdomain as the project name - This sets ``request.subdomain`` True * The hostname without port information, which maps to ``Domain`` objects - This sets ``request.cname`` True * The domain is the canonical one and using HTTPS if supported - This sets ``request.canonicalize`` with the value as the reason """ host = unresolver.get_domain_from_host(request.get_host()) public_domain = unresolver.get_domain_from_host(settings.PUBLIC_DOMAIN) external_domain = unresolver.get_domain_from_host( settings.RTD_EXTERNAL_VERSION_DOMAIN ) # Explicit Project slug being passed in. if "HTTP_X_RTD_SLUG" in request.META: project_slug = request.headers["X-RTD-Slug"].lower() if Project.objects.filter(slug=project_slug).exists(): request.rtdheader = True log.info('Setting project based on X_RTD_SLUG header.', project_slug=project_slug) return project_slug project_slug, domain_object, external_version_slug = unresolver.unresolve_domain( host ) if not project_slug: # Block domains that look like ours, may be phishing. if external_domain in host or public_domain in host: log.warning("Weird variation on our hostname.", host=host) return render( request, "core/dns-404.html", context={"host": host}, status=400, ) # Some person is CNAMEing to us without configuring a domain - 404. log.debug("CNAME 404.", host=host) return render(request, "core/dns-404.html", context={"host": host}, status=404) # Custom domain. if domain_object: request.cname = True request.domain = domain_object log.debug('Proxito CNAME.', host=host) if domain_object.https and not request.is_secure(): # Redirect HTTP -> HTTPS (302) for this custom domain. log.debug('Proxito CNAME HTTPS Redirect.', host=host) request.canonicalize = constants.REDIRECT_HTTPS # NOTE: consider redirecting non-canonical custom domains to the canonical one # Whether that is another custom domain or the public domain return project_slug # Pull request previews. if external_version_slug: request.external_domain = True request.host_version_slug = external_version_slug log.debug("Proxito External Version Domain.", host=host) return project_slug # Normal doc serving. request.subdomain = True log.debug("Proxito Public Domain.", host=host) if ( Domain.objects.filter(project__slug=project_slug) .filter( canonical=True, https=True, ) .exists() ): log.debug("Proxito Public Domain -> Canonical Domain Redirect.", host=host) request.canonicalize = constants.REDIRECT_CANONICAL_CNAME elif ProjectRelationship.objects.filter(child__slug=project_slug).exists(): log.debug( "Proxito Public Domain -> Subproject Main Domain Redirect.", host=host ) request.canonicalize = constants.REDIRECT_SUBPROJECT_MAIN_DOMAIN return project_slug
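The header rename above (X-Rtd-Slug to X-RTD-Slug) is cosmetic because Django's request.headers mapping is case-insensitive over the underlying META key. A small sketch, assuming Django 2.2+ is installed:

from django.http.request import HttpHeaders

# HttpHeaders is the mapping behind request.headers; lookups ignore case.
headers = HttpHeaders({"HTTP_X_RTD_SLUG": "my-project"})
assert headers["X-Rtd-Slug"] == headers["X-RTD-Slug"] == "my-project"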
50,416
def test_enum_with_comma(): dbc = io.BytesIO(textwrap.dedent(u'''\ BA_DEF_ "example0" ENUM "Val1",","; BA_DEF_ BO_ "example1" ENUM "Val 1","vector_leerstring",""," ","'","(",")","[","]","/","-","|","{","}",";",":","<",">",".","?","!","@","#","$","%","^","&","=","`","~"; BA_DEF_ SG_ "example2" ENUM "Val1",","; BA_DEF_ EV_ "example3" ENUM "Val1",","; BA_DEF_ BU_ "example4" ENUM "Val1",","; BA_DEF_DEF_ "example0" ","; BA_DEF_DEF_ "example1" ","; BA_DEF_DEF_ "example2" ","; BA_DEF_DEF_ "example3" ","; BA_DEF_DEF_ "example4" ",";''').encode('utf-8')) matrix = canmatrix.dbc.load(dbc, dbcImportEncoding="utf8") assert matrix.frameDefines[u'example1'].values == ["Val 1"," ",""," ","'","(",")","[","]","/","-","|","{","}",";",":","<",">",".","?","!","@","#","$","%","^","&","=","`","~"] assert matrix.signalDefines[u'example2'].values == ['Val1', ','] assert matrix.buDefines[u'example4'].values == ['Val1', ',']
def test_enum_with_comma(): dbc = io.BytesIO(textwrap.dedent(u'''\ BA_DEF_ "example0" ENUM "Val1",","; BA_DEF_ BO_ "example1" ENUM "Val 1","vector_leerstring",""," ","'","(",")","[","]","/","-","|","{","}",";",":","<",">",".","?","!","@","#","$","%","^","&","=","`","~"; BA_DEF_ SG_ "example2" ENUM "Val1",","; BA_DEF_ EV_ "example3" ENUM "Val1",","; BA_DEF_ BU_ "example4" ENUM "Val1",","; BA_DEF_DEF_ "example0" ","; BA_DEF_DEF_ "example1" ","; BA_DEF_DEF_ "example2" ","; BA_DEF_DEF_ "example3" ","; BA_DEF_DEF_ "example4" ","; ''').encode('utf-8')) matrix = canmatrix.dbc.load(dbc, dbcImportEncoding="utf8") assert matrix.frameDefines[u'example1'].values == ["Val 1"," ",""," ","'","(",")","[","]","/","-","|","{","}",";",":","<",">",".","?","!","@","#","$","%","^","&","=","`","~"] assert matrix.signalDefines[u'example2'].values == ['Val1', ','] assert matrix.buDefines[u'example4'].values == ['Val1', ',']
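A small generic sketch of the fixture pattern used in both versions above: textwrap.dedent keeps the triple-quoted literal readable, and io.BytesIO wraps the encoded bytes as a file-like object for loaders that expect one (the CSV content here is a made-up stand-in for the DBC text).

import io
import textwrap

data = io.BytesIO(textwrap.dedent("""\
    key,value
    a,1
    b,2
    """).encode("utf-8"))

print(data.read().decode("utf-8").splitlines())  # ['key,value', 'a,1', 'b,2']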
39,170
def spectrogram( waveform: Tensor, pad: int, window: Tensor, n_fft: int, hop_length: int, win_length: int, power: Optional[float], normalized: bool, center: bool = True, pad_mode: str = "reflect", onesided: bool = True, return_complex: Optional[bool] = None, ) -> Tensor: r"""Create a spectrogram or a batch of spectrograms from a raw audio signal. The spectrogram can be either magnitude-only or complex. Args: waveform (Tensor): Tensor of audio of dimension `(..., time)` pad (int): Two sided padding of signal window (Tensor): Window tensor that is applied/multiplied to each frame/window n_fft (int): Size of FFT hop_length (int): Length of hop between STFT windows win_length (int): Window size power (float or None): Exponent for the magnitude spectrogram, (must be > 0) e.g., 1 for energy, 2 for power, etc. If None, then the complex spectrum is returned instead. normalized (bool): Whether to normalize by magnitude after stft center (bool, optional): whether to pad :attr:`waveform` on both sides so that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. Default: ``True`` pad_mode (string, optional): controls the padding method used when :attr:`center` is ``True``. Default: ``"reflect"`` onesided (bool, optional): controls whether to return half of results to avoid redundancy. Default: ``True`` return_complex (bool, optional): Deprecated and not used. Returns: Tensor: Dimension `(..., freq, time)`, freq is ``n_fft // 2 + 1`` and ``n_fft`` is the number of Fourier bins, and time is the number of window hops (n_frame). """ if return_complex is not None: warnings.warn( "`return_complex` argument is now deprecated and is not effective." "`torchaudio.functional.spectrogram(power=None)` always return tensor with " "complex dtype. Please remove the argument in the function call." ) if pad > 0: # TODO add "with torch.no_grad():" back when JIT supports it waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant") # pack batch shape = waveform.size() waveform = waveform.reshape(-1, shape[-1]) # default values are consistent with librosa.core.spectrum._spectrogram spec_f = torch.stft( input=waveform, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode, normalized=False, onesided=onesided, return_complex=True, ) # unpack batch spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:]) if normalized: spec_f /= window.pow(2.).sum().sqrt() if power is not None: if power == 1.0: return spec_f.abs() return spec_f.abs().pow(power) return spec_f
def spectrogram( waveform: Tensor, pad: int, window: Tensor, n_fft: int, hop_length: int, win_length: int, power: Optional[float], normalized: bool, center: bool = True, pad_mode: str = "reflect", onesided: bool = True, return_complex: Optional[bool] = None, ) -> Tensor: r"""Create a spectrogram or a batch of spectrograms from a raw audio signal. The spectrogram can be either magnitude-only or complex. Args: waveform (Tensor): Tensor of audio of dimension `(..., time)` pad (int): Two sided padding of signal window (Tensor): Window tensor that is applied/multiplied to each frame/window n_fft (int): Size of FFT hop_length (int): Length of hop between STFT windows win_length (int): Window size power (float or None): Exponent for the magnitude spectrogram, (must be > 0) e.g., 1 for energy, 2 for power, etc. If None, then the complex spectrum is returned instead. normalized (bool): Whether to normalize by magnitude after stft center (bool, optional): whether to pad :attr:`waveform` on both sides so that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. Default: ``True`` pad_mode (string, optional): controls the padding method used when :attr:`center` is ``True``. Default: ``"reflect"`` onesided (bool, optional): controls whether to return half of results to avoid redundancy. Default: ``True`` return_complex (bool or None, optional): Deprecated and not used. Returns: Tensor: Dimension `(..., freq, time)`, freq is ``n_fft // 2 + 1`` and ``n_fft`` is the number of Fourier bins, and time is the number of window hops (n_frame). """ if return_complex is not None: warnings.warn( "`return_complex` argument is now deprecated and is not effective." "`torchaudio.functional.spectrogram(power=None)` always return tensor with " "complex dtype. Please remove the argument in the function call." ) if pad > 0: # TODO add "with torch.no_grad():" back when JIT supports it waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant") # pack batch shape = waveform.size() waveform = waveform.reshape(-1, shape[-1]) # default values are consistent with librosa.core.spectrum._spectrogram spec_f = torch.stft( input=waveform, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode, normalized=False, onesided=onesided, return_complex=True, ) # unpack batch spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:]) if normalized: spec_f /= window.pow(2.).sum().sqrt() if power is not None: if power == 1.0: return spec_f.abs() return spec_f.abs().pow(power) return spec_f
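A minimal sketch of the core call in the function above: torch.stft with return_complex=True, followed by abs() for the power=1.0 branch. The waveform, window, and sizes are made-up values.

import torch

waveform = torch.randn(1, 16000)      # one second of fake audio at 16 kHz
n_fft, hop_length = 400, 200
window = torch.hann_window(n_fft)

spec = torch.stft(
    waveform,
    n_fft=n_fft,
    hop_length=hop_length,
    win_length=n_fft,
    window=window,
    center=True,
    return_complex=True,
)
magnitude = spec.abs()                # corresponds to power=1.0
print(magnitude.shape)                # torch.Size([1, 201, 81])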
3,898
def to_numpy_array( G, nodelist=None, dtype=None, order=None, multigraph_weight=sum, weight="weight", nonedge=0.0, ): """Returns the graph adjacency matrix as a NumPy array. Parameters ---------- G : graph The NetworkX graph used to construct the NumPy array. nodelist : list, optional The rows and columns are ordered according to the nodes in `nodelist`. If `nodelist` is None, then the ordering is produced by G.nodes(). dtype : NumPy data type, optional A valid single NumPy data type used to initialize the array. This must be a simple type such as int or numpy.float64 and not a compound data type (see to_numpy_recarray) If None, then the NumPy default is used. order : {'C', 'F'}, optional Whether to store multidimensional data in C- or Fortran-contiguous (row- or column-wise) order in memory. If None, then the NumPy default is used. multigraph_weight : {sum, min, max}, optional An operator that determines how weights in multigraphs are handled. The default is to sum the weights of the multiple edges. weight : string or None optional (default = 'weight') The edge attribute that holds the numerical value used for the edge weight. If an edge does not have that attribute, then the value 1 is used instead. nonedge : float (default = 0.0) The array values corresponding to nonedges are typically set to zero. However, this could be undesirable if there are array values corresponding to actual edges that also have the value zero. If so, one might prefer nonedges to have some other value, such as nan. Returns ------- A : NumPy ndarray Graph adjacency matrix See Also -------- from_numpy_array Notes ----- For directed graphs, entry i,j corresponds to an edge from i to j. Entries in the adjacency matrix are assigned to the weight edge attribute. When an edge does not have a weight attribute, the value of the entry is set to the number 1. For multiple (parallel) edges, the values of the entries are determined by the `multigraph_weight` parameter. The default is to sum the weight attributes for each of the parallel edges. When `nodelist` does not contain every node in `G`, the adjacency matrix is built from the subgraph of `G` that is induced by the nodes in `nodelist`. The convention used for self-loop edges in graphs is to assign the diagonal array entry value to the weight attribute of the edge (or the number 1 if the edge has no weight attribute). 
If the alternate convention of doubling the edge weight is desired the resulting NumPy array can be modified as follows: >>> import numpy as np >>> G = nx.Graph([(1, 1)]) >>> A = nx.to_numpy_array(G) >>> A array([[1.]]) >>> A[np.diag_indices_from(A)] *= 2 >>> A array([[2.]]) Examples -------- >>> G = nx.MultiDiGraph() >>> G.add_edge(0, 1, weight=2) 0 >>> G.add_edge(1, 0) 0 >>> G.add_edge(2, 2, weight=3) 0 >>> G.add_edge(2, 2) 1 >>> nx.to_numpy_array(G, nodelist=[0, 1, 2]) array([[0., 2., 0.], [1., 0., 0.], [0., 0., 4.]]) """ import numpy as np if nodelist is None: nodelist = list(G) nlen = len(nodelist) # Input validation nodeset = set(nodelist) if nodeset - set(G): raise nx.NetworkXError(f"Node {nodeset - set(G)} in nodelist is not in G") if len(nodeset) < nlen: raise nx.NetworkXError("nodelist contains duplicates.") A = np.full((nlen, nlen), fill_value=nonedge, dtype=dtype, order=order) # Corner cases: empty nodelist or graph without any edges if nlen == 0 or G.number_of_edges() == 0: return A # Map nodes to row/col in matrix idx = dict(zip(nodelist, range(nlen))) G = G.subgraph(nodelist) # TODO: Add separate code paths for graph/multigraphs to speed up # non-multigraph case d = defaultdict(list) for u, v, wt in G.edges(data=weight, default=1.0): d[(idx[u], idx[v])].append(wt) i, j = np.array(list(d.keys())).T # indices wts = [multigraph_weight(ws) for ws in d.values()] # reduced weights # Set array values with advanced indexing A[i, j] = wts if not G.is_directed(): A[j, i] = wts return A
def to_numpy_array( G, nodelist=None, dtype=None, order=None, multigraph_weight=sum, weight="weight", nonedge=0.0, ): """Returns the graph adjacency matrix as a NumPy array. Parameters ---------- G : graph The NetworkX graph used to construct the NumPy array. nodelist : list, optional The rows and columns are ordered according to the nodes in `nodelist`. If `nodelist` is None, then the ordering is produced by G.nodes(). dtype : NumPy data type, optional A valid single NumPy data type used to initialize the array. This must be a simple type such as int or numpy.float64 and not a compound data type (see to_numpy_recarray) If None, then the NumPy default is used. order : {'C', 'F'}, optional Whether to store multidimensional data in C- or Fortran-contiguous (row- or column-wise) order in memory. If None, then the NumPy default is used. multigraph_weight : {sum, min, max}, optional An operator that determines how weights in multigraphs are handled. The default is to sum the weights of the multiple edges. weight : string or None optional (default = 'weight') The edge attribute that holds the numerical value used for the edge weight. If an edge does not have that attribute, then the value 1 is used instead. nonedge : float (default = 0.0) The array values corresponding to nonedges are typically set to zero. However, this could be undesirable if there are array values corresponding to actual edges that also have the value zero. If so, one might prefer nonedges to have some other value, such as nan. Returns ------- A : NumPy ndarray Graph adjacency matrix See Also -------- from_numpy_array Notes ----- For directed graphs, entry i,j corresponds to an edge from i to j. Entries in the adjacency matrix are assigned to the weight edge attribute. When an edge does not have a weight attribute, the value of the entry is set to the number 1. For multiple (parallel) edges, the values of the entries are determined by the `multigraph_weight` parameter. The default is to sum the weight attributes for each of the parallel edges. When `nodelist` does not contain every node in `G`, the adjacency matrix is built from the subgraph of `G` that is induced by the nodes in `nodelist`. The convention used for self-loop edges in graphs is to assign the diagonal array entry value to the weight attribute of the edge (or the number 1 if the edge has no weight attribute). 
If the alternate convention of doubling the edge weight is desired the resulting NumPy array can be modified as follows: >>> import numpy as np >>> G = nx.Graph([(1, 1)]) >>> A = nx.to_numpy_array(G) >>> A array([[1.]]) >>> A[np.diag_indices_from(A)] *= 2 >>> A array([[2.]]) Examples -------- >>> G = nx.MultiDiGraph() >>> G.add_edge(0, 1, weight=2) 0 >>> G.add_edge(1, 0) 0 >>> G.add_edge(2, 2, weight=3) 0 >>> G.add_edge(2, 2) 1 >>> nx.to_numpy_array(G, nodelist=[0, 1, 2]) array([[0., 2., 0.], [1., 0., 0.], [0., 0., 4.]]) """ import numpy as np if nodelist is None: nodelist = list(G) nlen = len(nodelist) # Input validation nodeset = set(nodelist) if nodeset - set(G): raise nx.NetworkXError(f"Nodes {nodeset - set(G)} in nodelist is not in G") if len(nodeset) < nlen: raise nx.NetworkXError("nodelist contains duplicates.") A = np.full((nlen, nlen), fill_value=nonedge, dtype=dtype, order=order) # Corner cases: empty nodelist or graph without any edges if nlen == 0 or G.number_of_edges() == 0: return A # Map nodes to row/col in matrix idx = dict(zip(nodelist, range(nlen))) G = G.subgraph(nodelist) # TODO: Add separate code paths for graph/multigraphs to speed up # non-multigraph case d = defaultdict(list) for u, v, wt in G.edges(data=weight, default=1.0): d[(idx[u], idx[v])].append(wt) i, j = np.array(list(d.keys())).T # indices wts = [multigraph_weight(ws) for ws in d.values()] # reduced weights # Set array values with advanced indexing A[i, j] = wts if not G.is_directed(): A[j, i] = wts return A
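A short usage sketch of the nodelist and nonedge behavior described in the docstring above: the array is built from the induced subgraph, and missing edges receive the nonedge value.

import networkx as nx
import numpy as np

G = nx.path_graph(4)                  # edges: 0-1, 1-2, 2-3
A = nx.to_numpy_array(G, nodelist=[0, 1, 3], nonedge=np.nan)

# Only the 0-1 edge survives in the induced subgraph on {0, 1, 3}.
print(A[0, 1], A[0, 2])               # 1.0 nan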
30,552
def get_all_incidents(from_date):
    contents = demisto.executeCommand("getIncidents", {"fromdate": from_date})[0]['Contents']
    incidents = contents['data']
    size = len(incidents)
    total = contents['total']
    page = 1
    while total > size:
        contents = demisto.executeCommand("getIncidents", {"fromdate": from_date, "page": page})[0]['Contents']
        new_incidents = contents['data']
        incidents = incidents + new_incidents
        size = len(incidents)
        page = page + 1
    return incidents
def get_all_incidents(from_date):
    contents = demisto.executeCommand("getIncidents", {"fromdate": from_date})[0]['Contents']
    incidents = contents['data']
    size = len(incidents)
    total = contents['total']
    page = 1
    while total > size:
        contents = demisto.executeCommand("getIncidents", {"fromdate": from_date, "page": page})[0]['Contents']
        new_incidents = contents['data']
        incidents = incidents + new_incidents
        size = len(incidents)
        page += 1
    return incidents
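Both versions above implement the same page-until-total loop. A generic, self-contained sketch of that pattern with the backend call stubbed out (the demisto API itself is not reproduced here; fetch_page is a hypothetical stand-in):

def fetch_page(page, page_size=10):
    # Hypothetical stand-in for demisto.executeCommand("getIncidents", ...).
    all_items = list(range(23))
    data = all_items[page * page_size:(page + 1) * page_size]
    return {"data": data, "total": len(all_items)}

def get_all_items():
    contents = fetch_page(0)
    items, total, page = contents["data"], contents["total"], 1
    while total > len(items):
        items += fetch_page(page)["data"]
        page += 1
    return items

assert get_all_items() == list(range(23))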