Columns:
ext: string, 9 classes
sha: string, fixed length 40
content: string, 3 to 1.04M characters
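For orientation, a minimal sketch (not part of the dump itself) of how records with this schema might be consumed through the Hugging Face `datasets` library; the dataset identifier below is a placeholder assumption, not the real name.

from datasets import load_dataset

ds = load_dataset("org/code-corpus", split="train")  # placeholder dataset id (assumption)
for row in ds:
    if row["ext"] == "py":                    # one of the 9 extension classes
        print(row["sha"], len(row["content"]))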
py
b41786cba561f176a009308ce5140bc5039660c3
from pathlib import Path

from fastapi import APIRouter, responses
from fastapi.responses import FileResponse

CWD = Path(__file__).parent
WEB_PATH = CWD.parent.joinpath("dist")
BASE_HTML = WEB_PATH.joinpath("index.html")

router = APIRouter()


@router.get("/favicon.ico", include_in_schema=False)
def favicon():
    return responses.RedirectResponse(url="/mealie/favicon.ico")


@router.get("/", include_in_schema=False)
def root():
    return FileResponse(BASE_HTML)


@router.get("/{full_path:path}", include_in_schema=False)
def root_plus(full_path):
    # Catch-all route: any unmatched path falls back to the SPA entry point.
    print(full_path)
    return FileResponse(BASE_HTML)
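A minimal wiring sketch (illustrative, not part of the record) of how a router like the one above could be mounted in a FastAPI application; it assumes the `router` defined above is importable, and the commented import path is hypothetical.

from fastapi import FastAPI

# from yourapp.routes.static import router  # assumed import path for the router above

app = FastAPI()
app.include_router(router)  # "/" and any unmatched path then serve the SPA's index.html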
py
b41788feceb32b9c6d55b39bf50476382ce87fef
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution(object):
    def dfs(self, cur):
        # Returns the depth of the subtree rooted at `cur`, while tracking in
        # self.m the longest (left depth + right depth) path seen so far.
        if cur:
            ldepth = self.dfs(cur.left)
            rdepth = self.dfs(cur.right)
            self.m = max(self.m, ldepth + rdepth)
            return max(ldepth, rdepth) + 1
        else:
            return 0

    def diameterOfBinaryTree(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        self.m = 0
        self.dfs(root)
        return self.m
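A quick usage sketch (illustrative only) for the `Solution` class above, with a local `TreeNode` matching the commented-out definition; the diameter here is 3 edges.

class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
root.left.left, root.left.right = TreeNode(4), TreeNode(5)
print(Solution().diameterOfBinaryTree(root))  # 3, e.g. the path 4-2-1-3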
py
b4178913176b712db6baa0d7b4480e42c569b7cc
# -*- coding: utf-8 -*-
import sys

from runwith import main

if __name__ == '__main__':  # pragma: no cover
    sys.exit(main(sys.argv[1:]))
py
b41789aa92a5950b2b3440c44b3367b8d6663947
''' Spatial Error with Heteroskedasticity and Regimes family of models ''' __author__ = "Luc Anselin [email protected], Pedro V. Amaral [email protected]" import numpy as np import multiprocessing as mp from . import user_output as USER from . import summary_output as SUMMARY from . import utils as UTILS from . import regimes as REGI from .ols import BaseOLS from .twosls import BaseTSLS from .error_sp_het import BaseGM_Error_Het, BaseGM_Endog_Error_Het, get_psi_sigma, get_vc_het, get_vm_het, get_P_hat, get_a1a2, get_vc_het_tsls, get_Omega_GS2SLS from .utils import RegressionPropsY, spdot, set_endog, sphstack, set_warn, sp_att from scipy import sparse as SP from pysal.lib.weights.spatial_lag import lag_spatial from platform import system class GM_Error_Het_Regimes(RegressionPropsY, REGI.Regimes_Frame): """ GMM method for a spatial error model with heteroskedasticity and regimes; based on Arraiz et al :cite:`Arraiz2010`, following Anselin :cite:`Anselin2011`. Parameters ---------- y : array nx1 array for dependent variable x : array Two dimensional array with n rows and one column for each independent (exogenous) variable, excluding the constant regimes : list List of n values with the mapping of each observation to a regime. Assumed to be aligned with 'x'. w : pysal W object Spatial weights object constant_regi: ['one', 'many'] Switcher controlling the constant term setup. It may take the following values: * 'one': a vector of ones is appended to x and held constant across regimes * 'many': a vector of ones is appended to x and considered different per regime (default) cols2regi : list, 'all' Argument indicating whether each column of x should be considered as different per regime or held constant across regimes (False). If a list, k booleans indicating for each variable the option (True if one per regime, False to be held constant). If 'all' (default), all the variables vary by regime. regime_err_sep: boolean If True, a separate regression is run for each regime. regime_lag_sep : boolean Always False, kept for consistency, ignored. max_iter : int Maximum number of iterations of steps 2a and 2b from Arraiz et al. Note: epsilon provides an additional stop condition. epsilon : float Minimum change in lambda required to stop iterations of steps 2a and 2b from Arraiz et al. Note: max_iter provides an additional stop condition. step1c : boolean If True, then include Step 1c from Arraiz et al. vm : boolean If True, include variance-covariance matrix in summary results cores : boolean Specifies if multiprocessing is to be used Default: no multiprocessing, cores = False Note: Multiprocessing may not work on all platforms. 
name_y : string Name of dependent variable for use in output name_x : list of strings Names of independent variables for use in output name_w : string Name of weights matrix for use in output name_ds : string Name of dataset for use in output name_regimes : string Name of regime variable for use in the output Attributes ---------- summary : string Summary of regression results and diagnostics (note: use in conjunction with the print command) betas : array kx1 array of estimated coefficients u : array nx1 array of residuals e_filtered : array nx1 array of spatially filtered residuals predy : array nx1 array of predicted y values n : integer Number of observations k : integer Number of variables for which coefficients are estimated (including the constant) Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) y : array nx1 array for dependent variable x : array Two dimensional array with n rows and one column for each independent (exogenous) variable, including the constant Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) iter_stop : string Stop criterion reached during iteration of steps 2a and 2b from Arraiz et al. Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) iteration : integer Number of iterations of steps 2a and 2b from Arraiz et al. Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) mean_y : float Mean of dependent variable std_y : float Standard deviation of dependent variable pr2 : float Pseudo R squared (squared correlation between y and ypred) Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) vm : array Variance covariance matrix (kxk) sig2 : float Sigma squared used in computations Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) std_err : array 1xk array of standard errors of the betas Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) z_stat : list of tuples z statistic; each tuple contains the pair (statistic, p-value), where each is a float Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) name_y : string Name of dependent variable for use in output name_x : list of strings Names of independent variables for use in output name_w : string Name of weights matrix for use in output name_ds : string Name of dataset for use in output name_regimes : string Name of regime variable for use in the output title : string Name of the regression method used Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) regimes : list List of n values with the mapping of each observation to a regime. Assumed to be aligned with 'x'. constant_regi: ['one', 'many'] Ignored if regimes=False. Constant option for regimes. Switcher controlling the constant term setup. It may take the following values: * 'one': a vector of ones is appended to x and held constant across regimes * 'many': a vector of ones is appended to x and considered different per regime cols2regi : list, 'all' Ignored if regimes=False. Argument indicating whether each column of x should be considered as different per regime or held constant across regimes (False). If a list, k booleans indicating for each variable the option (True if one per regime, False to be held constant). If 'all', all the variables vary by regime. 
regime_err_sep : boolean If True, a separate regression is run for each regime. kr : int Number of variables/columns to be "regimized" or subject to change by regime. These will result in one parameter estimate by regime for each variable (i.e. nr parameters per variable) kf : int Number of variables/columns to be considered fixed or global across regimes and hence only obtain one parameter estimate nr : int Number of different regimes in the 'regimes' list multi : dictionary Only available when multiple regressions are estimated, i.e. when regime_err_sep=True and no variable is fixed across regimes. Contains all attributes of each individual regression Examples -------- We first need to import the needed modules, namely numpy to convert the data we read into arrays that ``spreg`` understands and ``pysal`` to perform all the analysis. >>> import numpy as np >>> import pysal.lib Open data on NCOVR US County Homicides (3085 areas) using pysal.lib.io.open(). This is the DBF associated with the NAT shapefile. Note that pysal.lib.io.open() also reads data in CSV format; since the actual class requires data to be passed in as numpy arrays, the user can read their data in using any method. >>> db = pysal.lib.io.open(pysal.lib.examples.get_path("NAT.dbf"),'r') Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the dependent variable for the regression. Note that PySAL requires this to be an numpy array of shape (n, 1) as opposed to the also common shape of (n, ) that other packages accept. >>> y_var = 'HR90' >>> y = np.array([db.by_col(y_var)]).reshape(3085,1) Extract UE90 (unemployment rate) and PS90 (population structure) vectors from the DBF to be used as independent variables in the regression. Other variables can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2','...] Note that PySAL requires this to be an nxj numpy array, where j is the number of independent variables (not including a constant). By default this model adds a vector of ones to the independent variables passed in. >>> x_var = ['PS90','UE90'] >>> x = np.array([db.by_col(name) for name in x_var]).T The different regimes in this data are given according to the North and South dummy (SOUTH). >>> r_var = 'SOUTH' >>> regimes = db.by_col(r_var) Since we want to run a spatial error model, we need to specify the spatial weights matrix that includes the spatial configuration of the observations. To do that, we can open an already existing gal file or create a new one. In this case, we will create one from ``NAT.shp``. >>> w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("NAT.shp")) Unless there is a good reason not to do it, the weights have to be row-standardized so every row of the matrix sums to one. Among other things, this allows to interpret the spatial lag of a variable as the average value of the neighboring observations. In PySAL, this can be easily performed in the following way: >>> w.transform = 'r' We are all set with the preliminaries, we are good to run the model. In this case, we will need the variables and the weights matrix. If we want to have the names of the variables printed in the output summary, we will have to pass them in as well, although this is optional. >>> reg = GM_Error_Het_Regimes(y, x, regimes, w=w, step1c=True, name_y=y_var, name_x=x_var, name_regimes=r_var, name_ds='NAT.dbf') Once we have run the model, we can explore a little bit the output. 
The regression object we have created has many attributes so take your time to discover them. This class offers an error model that explicitly accounts for heteroskedasticity and that unlike the models from ``spreg.error_sp``, it allows for inference on the spatial parameter. Alternatively, we can have a summary of the output by typing: model.summary >>> print reg.name_x ['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', 'lambda'] >>> np.around(reg.betas, decimals=6) array([[ 0.009121], [ 0.812973], [ 0.549355], [ 5.00279 ], [ 1.200929], [ 0.614681], [ 0.429277]]) >>> np.around(reg.std_err, decimals=6) array([ 0.355844, 0.221743, 0.059276, 0.686764, 0.35843 , 0.092788, 0.02524 ]) """ def __init__(self, y, x, regimes, w, max_iter=1, epsilon=0.00001, step1c=False, constant_regi='many', cols2regi='all', regime_err_sep=False, regime_lag_sep=False, cores=False, vm=False, name_y=None, name_x=None, name_w=None, name_ds=None, name_regimes=None): n = USER.check_arrays(y, x) USER.check_y(y, n) USER.check_weights(w, y, w_required=True) self.constant_regi = constant_regi self.cols2regi = cols2regi self.regime_err_sep = regime_err_sep self.name_ds = USER.set_name_ds(name_ds) self.name_y = USER.set_name_y(name_y) self.name_w = USER.set_name_w(name_w, w) self.name_regimes = USER.set_name_ds(name_regimes) self.n, self.step1c = n, step1c self.y = y x_constant = USER.check_constant(x) name_x = USER.set_name_x(name_x, x) self.name_x_r = name_x cols2regi = REGI.check_cols2regi(constant_regi, cols2regi, x) self.regimes_set = REGI._get_regimes_set(regimes) self.regimes = regimes USER.check_regimes(self.regimes_set, self.n, x.shape[1]) self.regime_err_sep = regime_err_sep if regime_err_sep == True: if set(cols2regi) == set([True]): self._error_regimes_multi(y, x, regimes, w, cores, max_iter, epsilon, step1c, cols2regi, vm, name_x) else: raise Exception("All coefficients must vary accross regimes if regime_err_sep = True.") else: self.x, self.name_x = REGI.Regimes_Frame.__init__(self, x_constant, regimes, constant_regi=None, cols2regi=cols2regi, names=name_x) ols = BaseOLS(y=y, x=self.x) self.k = ols.x.shape[1] wA1 = UTILS.get_A1_het(w.sparse) # 1b. GMM --> \tilde{\lambda1} moments = UTILS._moments2eqs(wA1, w.sparse, ols.u) lambda1 = UTILS.optim_moments(moments) if step1c: # 1c. GMM --> \tilde{\lambda2} sigma = get_psi_sigma(w.sparse, ols.u, lambda1) vc1 = get_vc_het(w.sparse, wA1, sigma) lambda2 = UTILS.optim_moments(moments, vc1) else: lambda2 = lambda1 lambda_old = lambda2 self.iteration, eps = 0, 1 while self.iteration < max_iter and eps > epsilon: # 2a. reg -->\hat{betas} xs = UTILS.get_spFilter(w, lambda_old, x_constant) ys = UTILS.get_spFilter(w, lambda_old, y) xs = REGI.Regimes_Frame.__init__(self, xs, regimes, constant_regi=None, cols2regi=cols2regi)[0] ols_s = BaseOLS(y=ys, x=xs) self.predy = spdot(self.x, ols_s.betas) self.u = self.y - self.predy # 2b. 
GMM --> \hat{\lambda} sigma_i = get_psi_sigma(w.sparse, self.u, lambda_old) vc_i = get_vc_het(w.sparse, wA1, sigma_i) moments_i = UTILS._moments2eqs(wA1, w.sparse, self.u) lambda3 = UTILS.optim_moments(moments_i, vc_i) eps = abs(lambda3 - lambda_old) lambda_old = lambda3 self.iteration += 1 self.iter_stop = UTILS.iter_msg(self.iteration, max_iter) sigma = get_psi_sigma(w.sparse, self.u, lambda3) vc3 = get_vc_het(w.sparse, wA1, sigma) self.vm = get_vm_het(moments_i[0], lambda3, self, w.sparse, vc3) self.betas = np.vstack((ols_s.betas, lambda3)) self.e_filtered = self.u - lambda3 * lag_spatial(w, self.u) self.title = "SPATIALLY WEIGHTED LEAST SQUARES (HET) - REGIMES" self.name_x.append('lambda') self.kf += 1 self.chow = REGI.Chow(self) self._cache = {} SUMMARY.GM_Error_Het(reg=self, w=w, vm=vm, regimes=True) def _error_regimes_multi(self, y, x, regimes, w, cores, max_iter, epsilon, step1c, cols2regi, vm, name_x): regi_ids = dict( (r, list(np.where(np.array(regimes) == r)[0])) for r in self.regimes_set) results_p = {} """ for r in self.regimes_set: if system() == 'Windows': is_win = True results_p[r] = _work_error(*(y,x,regi_ids,r,w,max_iter,epsilon,step1c,self.name_ds,self.name_y,name_x+['lambda'],self.name_w,self.name_regimes)) else: pool = mp.Pool(cores) results_p[r] = pool.apply_async(_work_error,args=(y,x,regi_ids,r,w,max_iter,epsilon,step1c,self.name_ds,self.name_y,name_x+['lambda'],self.name_w,self.name_regimes, )) is_win = False """ for r in self.regimes_set: if cores: pool = mp.Pool(None) results_p[r] = pool.apply_async(_work_error, args=( y, x, regi_ids, r, w, max_iter, epsilon, step1c, self.name_ds, self.name_y, name_x + ['lambda'], self.name_w, self.name_regimes, )) else: results_p[r] = _work_error(*(y, x, regi_ids, r, w, max_iter, epsilon, step1c, self.name_ds, self.name_y, name_x + ['lambda'], self.name_w, self.name_regimes)) self.kryd = 0 self.kr = len(cols2regi) + 1 self.kf = 0 self.nr = len(self.regimes_set) self.vm = np.zeros((self.nr * self.kr, self.nr * self.kr), float) self.betas = np.zeros((self.nr * self.kr, 1), float) self.u = np.zeros((self.n, 1), float) self.predy = np.zeros((self.n, 1), float) self.e_filtered = np.zeros((self.n, 1), float) """ if not is_win: pool.close() pool.join() """ if cores: pool.close() pool.join() results = {} self.name_y, self.name_x = [], [] counter = 0 for r in self.regimes_set: """ if is_win: results[r] = results_p[r] else: results[r] = results_p[r].get() """ if not cores: results[r] = results_p[r] else: results[r] = results_p[r].get() self.vm[(counter * self.kr):((counter + 1) * self.kr), (counter * self.kr):((counter + 1) * self.kr)] = results[r].vm self.betas[ (counter * self.kr):((counter + 1) * self.kr), ] = results[r].betas self.u[regi_ids[r], ] = results[r].u self.predy[regi_ids[r], ] = results[r].predy self.e_filtered[regi_ids[r], ] = results[r].e_filtered self.name_y += results[r].name_y self.name_x += results[r].name_x counter += 1 self.chow = REGI.Chow(self) self.multi = results SUMMARY.GM_Error_Het_multi( reg=self, multireg=self.multi, vm=vm, regimes=True) class GM_Endog_Error_Het_Regimes(RegressionPropsY, REGI.Regimes_Frame): """ GMM method for a spatial error model with heteroskedasticity, regimes and endogenous variables, with results and diagnostics; based on Arraiz et al :cite:`Arraiz2010`, following Anselin :cite:`Anselin2011`. 
Parameters ---------- y : array nx1 array for dependent variable x : array Two dimensional array with n rows and one column for each independent (exogenous) variable, excluding the constant yend : array Two dimensional array with n rows and one column for each endogenous variable q : array Two dimensional array with n rows and one column for each external exogenous variable to use as instruments (note: this should not contain any variables from x) regimes : list List of n values with the mapping of each observation to a regime. Assumed to be aligned with 'x'. w : pysal W object Spatial weights object constant_regi: ['one', 'many'] Switcher controlling the constant term setup. It may take the following values: * 'one': a vector of ones is appended to x and held constant across regimes * 'many': a vector of ones is appended to x and considered different per regime (default) cols2regi : list, 'all' Argument indicating whether each column of x should be considered as different per regime or held constant across regimes (False). If a list, k booleans indicating for each variable the option (True if one per regime, False to be held constant). If 'all' (default), all the variables vary by regime. regime_err_sep : boolean If True, a separate regression is run for each regime. regime_lag_sep : boolean Always False, kept for consistency, ignored. max_iter : int Maximum number of iterations of steps 2a and 2b from Arraiz et al. Note: epsilon provides an additional stop condition. epsilon : float Minimum change in lambda required to stop iterations of steps 2a and 2b from Arraiz et al. Note: max_iter provides an additional stop condition. step1c : boolean If True, then include Step 1c from Arraiz et al. inv_method : string If "power_exp", then compute inverse using the power expansion. If "true_inv", then compute the true inverse. Note that true_inv will fail for large n. vm : boolean If True, include variance-covariance matrix in summary results cores : boolean Specifies if multiprocessing is to be used Default: no multiprocessing, cores = False Note: Multiprocessing may not work on all platforms. 
name_y : string Name of dependent variable for use in output name_x : list of strings Names of independent variables for use in output name_yend : list of strings Names of endogenous variables for use in output name_q : list of strings Names of instruments for use in output name_w : string Name of weights matrix for use in output name_ds : string Name of dataset for use in output name_regimes : string Name of regime variable for use in the output Attributes ---------- summary : string Summary of regression results and diagnostics (note: use in conjunction with the print command) betas : array kx1 array of estimated coefficients u : array nx1 array of residuals e_filtered : array nx1 array of spatially filtered residuals predy : array nx1 array of predicted y values n : integer Number of observations k : integer Number of variables for which coefficients are estimated (including the constant) Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) y : array nx1 array for dependent variable x : array Two dimensional array with n rows and one column for each independent (exogenous) variable, including the constant Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) yend : array Two dimensional array with n rows and one column for each endogenous variable Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) q : array Two dimensional array with n rows and one column for each external exogenous variable used as instruments Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) z : array nxk array of variables (combination of x and yend) Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) h : array nxl array of instruments (combination of x and q) Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) iter_stop : string Stop criterion reached during iteration of steps 2a and 2b from Arraiz et al. Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) iteration : integer Number of iterations of steps 2a and 2b from Arraiz et al. 
Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) mean_y : float Mean of dependent variable std_y : float Standard deviation of dependent variable vm : array Variance covariance matrix (kxk) pr2 : float Pseudo R squared (squared correlation between y and ypred) Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) std_err : array 1xk array of standard errors of the betas Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) z_stat : list of tuples z statistic; each tuple contains the pair (statistic, p-value), where each is a float Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) name_y : string Name of dependent variable for use in output name_x : list of strings Names of independent variables for use in output name_yend : list of strings Names of endogenous variables for use in output name_z : list of strings Names of exogenous and endogenous variables for use in output name_q : list of strings Names of external instruments name_h : list of strings Names of all instruments used in ouput name_w : string Name of weights matrix for use in output name_ds : string Name of dataset for use in output name_regimes : string Name of regimes variable for use in output title : string Name of the regression method used Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) regimes : list List of n values with the mapping of each observation to a regime. Assumed to be aligned with 'x'. constant_regi : ['one', 'many'] Ignored if regimes=False. Constant option for regimes. Switcher controlling the constant term setup. It may take the following values: * 'one': a vector of ones is appended to x and held constant across regimes * 'many': a vector of ones is appended to x and considered different per regime cols2regi : list, 'all' Ignored if regimes=False. Argument indicating whether each column of x should be considered as different per regime or held constant across regimes (False). If a list, k booleans indicating for each variable the option (True if one per regime, False to be held constant). If 'all', all the variables vary by regime. regime_err_sep : boolean If True, a separate regression is run for each regime. kr : int Number of variables/columns to be "regimized" or subject to change by regime. These will result in one parameter estimate by regime for each variable (i.e. nr parameters per variable) kf : int Number of variables/columns to be considered fixed or global across regimes and hence only obtain one parameter estimate nr : int Number of different regimes in the 'regimes' list multi : dictionary Only available when multiple regressions are estimated, i.e. when regime_err_sep=True and no variable is fixed across regimes. Contains all attributes of each individual regression Examples -------- We first need to import the needed modules, namely numpy to convert the data we read into arrays that ``spreg`` understands and ``pysal`` to perform all the analysis. >>> import numpy as np >>> import pysal.lib Open data on NCOVR US County Homicides (3085 areas) using pysal.lib.io.open(). This is the DBF associated with the NAT shapefile. Note that pysal.lib.io.open() also reads data in CSV format; since the actual class requires data to be passed in as numpy arrays, the user can read their data in using any method. 
>>> db = pysal.lib.io.open(pysal.lib.examples.get_path("NAT.dbf"),'r') Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the dependent variable for the regression. Note that PySAL requires this to be an numpy array of shape (n, 1) as opposed to the also common shape of (n, ) that other packages accept. >>> y_var = 'HR90' >>> y = np.array([db.by_col(y_var)]).reshape(3085,1) Extract UE90 (unemployment rate) and PS90 (population structure) vectors from the DBF to be used as independent variables in the regression. Other variables can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2','...] Note that PySAL requires this to be an nxj numpy array, where j is the number of independent variables (not including a constant). By default this model adds a vector of ones to the independent variables passed in. >>> x_var = ['PS90','UE90'] >>> x = np.array([db.by_col(name) for name in x_var]).T For the endogenous models, we add the endogenous variable RD90 (resource deprivation) and we decide to instrument for it with FP89 (families below poverty): >>> yd_var = ['RD90'] >>> yend = np.array([db.by_col(name) for name in yd_var]).T >>> q_var = ['FP89'] >>> q = np.array([db.by_col(name) for name in q_var]).T The different regimes in this data are given according to the North and South dummy (SOUTH). >>> r_var = 'SOUTH' >>> regimes = db.by_col(r_var) Since we want to run a spatial error model, we need to specify the spatial weights matrix that includes the spatial configuration of the observations into the error component of the model. To do that, we can open an already existing gal file or create a new one. In this case, we will create one from ``NAT.shp``. >>> w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("NAT.shp")) Unless there is a good reason not to do it, the weights have to be row-standardized so every row of the matrix sums to one. Among other things, this allows to interpret the spatial lag of a variable as the average value of the neighboring observations. In PySAL, this can be easily performed in the following way: >>> w.transform = 'r' We are all set with the preliminaries, we are good to run the model. In this case, we will need the variables (exogenous and endogenous), the instruments and the weights matrix. If we want to have the names of the variables printed in the output summary, we will have to pass them in as well, although this is optional. >>> reg = GM_Endog_Error_Het_Regimes(y, x, yend, q, regimes, w=w, step1c=True, name_y=y_var, name_x=x_var, name_yend=yd_var, name_q=q_var, name_regimes=r_var, name_ds='NAT.dbf') Once we have run the model, we can explore a little bit the output. The regression object we have created has many attributes so take your time to discover them. This class offers an error model that explicitly accounts for heteroskedasticity and that unlike the models from ``spreg.error_sp``, it allows for inference on the spatial parameter. 
Hence, we find the same number of betas as of standard errors, which we calculate taking the square root of the diagonal of the variance-covariance matrix Alternatively, we can have a summary of the output by typing: model.summary >>> print reg.name_z ['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', '0_RD90', '1_RD90', 'lambda'] >>> print np.around(reg.betas,4) [[ 3.5944] [ 1.065 ] [ 0.1587] [ 9.184 ] [ 1.8784] [-0.2466] [ 2.4617] [ 3.5756] [ 0.2908]] >>> print np.around(np.sqrt(reg.vm.diagonal()),4) [ 0.5043 0.2132 0.0581 0.6681 0.3504 0.0999 0.3686 0.3402 0.028 ] """ def __init__(self, y, x, yend, q, regimes, w, max_iter=1, epsilon=0.00001, step1c=False, constant_regi='many', cols2regi='all', regime_err_sep=False, regime_lag_sep=False, inv_method='power_exp', cores=False, vm=False, name_y=None, name_x=None, name_yend=None, name_q=None, name_w=None, name_ds=None, name_regimes=None, summ=True, add_lag=False): n = USER.check_arrays(y, x, yend, q) USER.check_y(y, n) USER.check_weights(w, y, w_required=True) self.constant_regi = constant_regi self.cols2regi = cols2regi self.name_ds = USER.set_name_ds(name_ds) self.name_regimes = USER.set_name_ds(name_regimes) self.name_w = USER.set_name_w(name_w, w) self.n, self.step1c = n, step1c self.y = y name_x = USER.set_name_x(name_x, x) if summ: name_yend = USER.set_name_yend(name_yend, yend) self.name_y = USER.set_name_y(name_y) name_q = USER.set_name_q(name_q, q) self.name_x_r = name_x + name_yend cols2regi = REGI.check_cols2regi( constant_regi, cols2regi, x, yend=yend) self.regimes_set = REGI._get_regimes_set(regimes) self.regimes = regimes USER.check_regimes(self.regimes_set, self.n, x.shape[1]) self.regime_err_sep = regime_err_sep if regime_err_sep == True: if set(cols2regi) == set([True]): self._endog_error_regimes_multi(y, x, regimes, w, yend, q, cores, max_iter, epsilon, step1c, inv_method, cols2regi, vm, name_x, name_yend, name_q, add_lag) else: raise Exception("All coefficients must vary accross regimes if regime_err_sep = True.") else: x_constant = USER.check_constant(x) q, name_q = REGI.Regimes_Frame.__init__(self, q, regimes, constant_regi=None, cols2regi='all', names=name_q) x, name_x = REGI.Regimes_Frame.__init__(self, x_constant, regimes, constant_regi=None, cols2regi=cols2regi, names=name_x) yend2, name_yend = REGI.Regimes_Frame.__init__(self, yend, regimes, constant_regi=None, cols2regi=cols2regi, yend=True, names=name_yend) # 1a. S2SLS --> \tilde{\delta} tsls = BaseTSLS(y=y, x=x, yend=yend2, q=q) self.k = tsls.z.shape[1] self.x = tsls.x self.yend, self.z, self.h = tsls.yend, tsls.z, tsls.h wA1 = UTILS.get_A1_het(w.sparse) # 1b. GMM --> \tilde{\lambda1} moments = UTILS._moments2eqs(wA1, w.sparse, tsls.u) lambda1 = UTILS.optim_moments(moments) if step1c: # 1c. GMM --> \tilde{\lambda2} self.u = tsls.u zs = UTILS.get_spFilter(w, lambda1, self.z) vc1 = get_vc_het_tsls( w.sparse, wA1, self, lambda1, tsls.pfora1a2, zs, inv_method, filt=False) lambda2 = UTILS.optim_moments(moments, vc1) else: lambda2 = lambda1 lambda_old = lambda2 self.iteration, eps = 0, 1 while self.iteration < max_iter and eps > epsilon: # 2a. 
reg -->\hat{betas} xs = UTILS.get_spFilter(w, lambda1, x_constant) xs = REGI.Regimes_Frame.__init__(self, xs, regimes, constant_regi=None, cols2regi=cols2regi)[0] ys = UTILS.get_spFilter(w, lambda1, y) yend_s = UTILS.get_spFilter(w, lambda1, yend) yend_s = REGI.Regimes_Frame.__init__(self, yend_s, regimes, constant_regi=None, cols2regi=cols2regi, yend=True)[0] tsls_s = BaseTSLS(ys, xs, yend_s, h=tsls.h) self.predy = spdot(self.z, tsls_s.betas) self.u = self.y - self.predy # 2b. GMM --> \hat{\lambda} vc2 = get_vc_het_tsls( w.sparse, wA1, self, lambda_old, tsls_s.pfora1a2, sphstack(xs, yend_s), inv_method) moments_i = UTILS._moments2eqs(wA1, w.sparse, self.u) lambda3 = UTILS.optim_moments(moments_i, vc2) eps = abs(lambda3 - lambda_old) lambda_old = lambda3 self.iteration += 1 self.iter_stop = UTILS.iter_msg(self.iteration, max_iter) zs = UTILS.get_spFilter(w, lambda3, self.z) P = get_P_hat(self, tsls.hthi, zs) vc3 = get_vc_het_tsls( w.sparse, wA1, self, lambda3, P, zs, inv_method, save_a1a2=True) self.vm = get_Omega_GS2SLS( w.sparse, lambda3, self, moments_i[0], vc3, P) self.betas = np.vstack((tsls_s.betas, lambda3)) self.e_filtered = self.u - lambda3 * lag_spatial(w, self.u) self.name_x = USER.set_name_x(name_x, x, constant=True) self.name_yend = USER.set_name_yend(name_yend, yend) self.name_z = self.name_x + self.name_yend self.name_z.append('lambda') # listing lambda last self.name_q = USER.set_name_q(name_q, q) self.name_h = USER.set_name_h(self.name_x, self.name_q) self.kf += 1 self.chow = REGI.Chow(self) self._cache = {} if summ: self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES (HET) - REGIMES" SUMMARY.GM_Endog_Error_Het(reg=self, w=w, vm=vm, regimes=True) def _endog_error_regimes_multi(self, y, x, regimes, w, yend, q, cores, max_iter, epsilon, step1c, inv_method, cols2regi, vm, name_x, name_yend, name_q, add_lag): regi_ids = dict( (r, list(np.where(np.array(regimes) == r)[0])) for r in self.regimes_set) if add_lag != False: self.cols2regi += [True] cols2regi += [True] self.predy_e = np.zeros((self.n, 1), float) self.e_pred = np.zeros((self.n, 1), float) results_p = {} """ for r in self.regimes_set: if system() == 'Windows': is_win = True results_p[r] = _work_endog_error(*(y,x,yend,q,regi_ids,r,w,max_iter,epsilon,step1c,inv_method,self.name_ds,self.name_y,name_x,name_yend,name_q,self.name_w,self.name_regimes,add_lag)) else: pool = mp.Pool(cores) results_p[r] = pool.apply_async(_work_endog_error,args=(y,x,yend,q,regi_ids,r,w,max_iter,epsilon,step1c,inv_method,self.name_ds,self.name_y,name_x,name_yend,name_q,self.name_w,self.name_regimes,add_lag, )) is_win = False """ for r in self.regimes_set: if cores: pool = mp.Pool(None) results_p[r] = pool.apply_async(_work_endog_error, args=(y, x, yend, q, regi_ids, r, w, max_iter, epsilon, step1c, inv_method, self.name_ds, self.name_y, name_x, name_yend, name_q, self.name_w, self.name_regimes, add_lag, )) else: results_p[r] = _work_endog_error(*(y, x, yend, q, regi_ids, r, w, max_iter, epsilon, step1c, inv_method, self.name_ds, self.name_y, name_x, name_yend, name_q, self.name_w, self.name_regimes, add_lag)) self.kryd, self.kf = 0, 0 self.kr = len(cols2regi) + 1 self.nr = len(self.regimes_set) self.vm = np.zeros((self.nr * self.kr, self.nr * self.kr), float) self.betas = np.zeros((self.nr * self.kr, 1), float) self.u = np.zeros((self.n, 1), float) self.predy = np.zeros((self.n, 1), float) self.e_filtered = np.zeros((self.n, 1), float) """ if not is_win: pool.close() pool.join() """ if cores: pool.close() pool.join() results = {} 
self.name_y, self.name_x, self.name_yend, self.name_q, self.name_z, self.name_h = [ ], [], [], [], [], [] counter = 0 for r in self.regimes_set: """ if is_win: results[r] = results_p[r] else: results[r] = results_p[r].get() """ if not cores: results[r] = results_p[r] else: results[r] = results_p[r].get() self.vm[(counter * self.kr):((counter + 1) * self.kr), (counter * self.kr):((counter + 1) * self.kr)] = results[r].vm self.betas[ (counter * self.kr):((counter + 1) * self.kr), ] = results[r].betas self.u[regi_ids[r], ] = results[r].u self.predy[regi_ids[r], ] = results[r].predy self.e_filtered[regi_ids[r], ] = results[r].e_filtered self.name_y += results[r].name_y self.name_x += results[r].name_x self.name_yend += results[r].name_yend self.name_q += results[r].name_q self.name_z += results[r].name_z self.name_h += results[r].name_h if add_lag != False: self.predy_e[regi_ids[r], ] = results[r].predy_e self.e_pred[regi_ids[r], ] = results[r].e_pred counter += 1 self.chow = REGI.Chow(self) self.multi = results if add_lag != False: SUMMARY.GM_Combo_Het_multi( reg=self, multireg=self.multi, vm=vm, regimes=True) else: SUMMARY.GM_Endog_Error_Het_multi( reg=self, multireg=self.multi, vm=vm, regimes=True) class GM_Combo_Het_Regimes(GM_Endog_Error_Het_Regimes): """ GMM method for a spatial lag and error model with heteroskedasticity, regimes and endogenous variables, with results and diagnostics; based on Arraiz et al :cite:`Arraiz2010`, following Anselin :cite:`Anselin2011`. Parameters ---------- y : array nx1 array for dependent variable x : array Two dimensional array with n rows and one column for each independent (exogenous) variable, excluding the constant yend : array Two dimensional array with n rows and one column for each endogenous variable q : array Two dimensional array with n rows and one column for each external exogenous variable to use as instruments (note: this should not contain any variables from x) regimes : list List of n values with the mapping of each observation to a regime. Assumed to be aligned with 'x'. w : pysal W object Spatial weights object (always needed) constant_regi: ['one', 'many'] Switcher controlling the constant term setup. It may take the following values: * 'one': a vector of ones is appended to x and held constant across regimes * 'many': a vector of ones is appended to x and considered different per regime (default) cols2regi : list, 'all' Argument indicating whether each column of x should be considered as different per regime or held constant across regimes (False). If a list, k booleans indicating for each variable the option (True if one per regime, False to be held constant). If 'all' (default), all the variables vary by regime. regime_err_sep : boolean If True, a separate regression is run for each regime. regime_lag_sep : boolean If True, the spatial parameter for spatial lag is also computed according to different regimes. If False (default), the spatial parameter is fixed accross regimes. w_lags : integer Orders of W to include as instruments for the spatially lagged dependent variable. For example, w_lags=1, then instruments are WX; if w_lags=2, then WX, WWX; and so on. lag_q : boolean If True, then include spatial lags of the additional instruments (q). max_iter : int Maximum number of iterations of steps 2a and 2b from Arraiz et al. Note: epsilon provides an additional stop condition. epsilon : float Minimum change in lambda required to stop iterations of steps 2a and 2b from Arraiz et al. Note: max_iter provides an additional stop condition. 
step1c : boolean If True, then include Step 1c from Arraiz et al. inv_method : string If "power_exp", then compute inverse using the power expansion. If "true_inv", then compute the true inverse. Note that true_inv will fail for large n. vm : boolean If True, include variance-covariance matrix in summary results cores : boolean Specifies if multiprocessing is to be used Default: no multiprocessing, cores = False Note: Multiprocessing may not work on all platforms. name_y : string Name of dependent variable for use in output name_x : list of strings Names of independent variables for use in output name_yend : list of strings Names of endogenous variables for use in output name_q : list of strings Names of instruments for use in output name_w : string Name of weights matrix for use in output name_ds : string Name of dataset for use in output name_regimes : string Name of regime variable for use in the output Attributes ---------- summary : string Summary of regression results and diagnostics (note: use in conjunction with the print command) betas : array kx1 array of estimated coefficients u : array nx1 array of residuals e_filtered : array nx1 array of spatially filtered residuals e_pred : array nx1 array of residuals (using reduced form) predy : array nx1 array of predicted y values predy_e : array nx1 array of predicted y values (using reduced form) n : integer Number of observations k : integer Number of variables for which coefficients are estimated (including the constant) Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) y : array nx1 array for dependent variable x : array Two dimensional array with n rows and one column for each independent (exogenous) variable, including the constant Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) yend : array Two dimensional array with n rows and one column for each endogenous variable Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) q : array Two dimensional array with n rows and one column for each external exogenous variable used as instruments Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) z : array nxk array of variables (combination of x and yend) Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) h : array nxl array of instruments (combination of x and q) Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) iter_stop : string Stop criterion reached during iteration of steps 2a and 2b from Arraiz et al. Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) iteration : integer Number of iterations of steps 2a and 2b from Arraiz et al. 
Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) mean_y : float Mean of dependent variable std_y : float Standard deviation of dependent variable vm : array Variance covariance matrix (kxk) pr2 : float Pseudo R squared (squared correlation between y and ypred) Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) pr2_e : float Pseudo R squared (squared correlation between y and ypred_e (using reduced form)) Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) std_err : array 1xk array of standard errors of the betas Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) z_stat : list of tuples z statistic; each tuple contains the pair (statistic, p-value), where each is a float Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) name_y : string Name of dependent variable for use in output name_x : list of strings Names of independent variables for use in output name_yend : list of strings Names of endogenous variables for use in output name_z : list of strings Names of exogenous and endogenous variables for use in output name_q : list of strings Names of external instruments name_h : list of strings Names of all instruments used in ouput name_w : string Name of weights matrix for use in output name_ds : string Name of dataset for use in output name_regimes : string Name of regimes variable for use in output title : string Name of the regression method used Only available in dictionary 'multi' when multiple regressions (see 'multi' below for details) regimes : list List of n values with the mapping of each observation to a regime. Assumed to be aligned with 'x'. constant_regi : ['one', 'many'] Ignored if regimes=False. Constant option for regimes. Switcher controlling the constant term setup. It may take the following values: * 'one': a vector of ones is appended to x and held constant across regimes * 'many': a vector of ones is appended to x and considered different per regime cols2regi : list, 'all' Ignored if regimes=False. Argument indicating whether each column of x should be considered as different per regime or held constant across regimes (False). If a list, k booleans indicating for each variable the option (True if one per regime, False to be held constant). If 'all', all the variables vary by regime. regime_err_sep: boolean If True, a separate regression is run for each regime. regime_lag_sep: boolean If True, the spatial parameter for spatial lag is also computed according to different regimes. If False (default), the spatial parameter is fixed accross regimes. kr : int Number of variables/columns to be "regimized" or subject to change by regime. These will result in one parameter estimate by regime for each variable (i.e. nr parameters per variable) kf : int Number of variables/columns to be considered fixed or global across regimes and hence only obtain one parameter estimate nr : int Number of different regimes in the 'regimes' list multi : dictionary Only available when multiple regressions are estimated, i.e. when regime_err_sep=True and no variable is fixed across regimes. Contains all attributes of each individual regression Examples -------- We first need to import the needed modules, namely numpy to convert the data we read into arrays that ``spreg`` understands and ``pysal`` to perform all the analysis. 
>>> import numpy as np >>> import pysal.lib Open data on NCOVR US County Homicides (3085 areas) using pysal.lib.io.open(). This is the DBF associated with the NAT shapefile. Note that pysal.lib.io.open() also reads data in CSV format; since the actual class requires data to be passed in as numpy arrays, the user can read their data in using any method. >>> db = pysal.lib.io.open(pysal.lib.examples.get_path("NAT.dbf"),'r') Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the dependent variable for the regression. Note that PySAL requires this to be an numpy array of shape (n, 1) as opposed to the also common shape of (n, ) that other packages accept. >>> y_var = 'HR90' >>> y = np.array([db.by_col(y_var)]).reshape(3085,1) Extract UE90 (unemployment rate) and PS90 (population structure) vectors from the DBF to be used as independent variables in the regression. Other variables can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2','...] Note that PySAL requires this to be an nxj numpy array, where j is the number of independent variables (not including a constant). By default this model adds a vector of ones to the independent variables passed in. >>> x_var = ['PS90','UE90'] >>> x = np.array([db.by_col(name) for name in x_var]).T The different regimes in this data are given according to the North and South dummy (SOUTH). >>> r_var = 'SOUTH' >>> regimes = db.by_col(r_var) Since we want to run a spatial combo model, we need to specify the spatial weights matrix that includes the spatial configuration of the observations. To do that, we can open an already existing gal file or create a new one. In this case, we will create one from ``NAT.shp``. >>> w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("NAT.shp")) Unless there is a good reason not to do it, the weights have to be row-standardized so every row of the matrix sums to one. Among other things, this allows to interpret the spatial lag of a variable as the average value of the neighboring observations. In PySAL, this can be easily performed in the following way: >>> w.transform = 'r' We are all set with the preliminaries, we are good to run the model. In this case, we will need the variables and the weights matrix. If we want to have the names of the variables printed in the output summary, we will have to pass them in as well, although this is optional. Example only with spatial lag The Combo class runs an SARAR model, that is a spatial lag+error model. In this case we will run a simple version of that, where we have the spatial effects as well as exogenous variables. Since it is a spatial model, we have to pass in the weights matrix. If we want to have the names of the variables printed in the output summary, we will have to pass them in as well, although this is optional. We can have a summary of the output by typing: model.summary Alternatively, we can check the betas: >>> reg = GM_Combo_Het_Regimes(y, x, regimes, w=w, step1c=True, name_y=y_var, name_x=x_var, name_regimes=r_var, name_ds='NAT') >>> print reg.name_z ['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', '_Global_W_HR90', 'lambda'] >>> print np.around(reg.betas,4) [[ 1.4613] [ 0.9587] [ 0.5658] [ 9.1157] [ 1.1324] [ 0.6518] [-0.4587] [ 0.7174]] This class also allows the user to run a spatial lag+error model with the extra feature of including non-spatial endogenous regressors. 
This means that, in addition to the spatial lag and error, we consider some of the variables on the right-hand side of the equation as endogenous and we instrument for this. In this case we consider RD90 (resource deprivation) as an endogenous regressor. We use FP89 (families below poverty) for this and hence put it in the instruments parameter, 'q'. >>> yd_var = ['RD90'] >>> yd = np.array([db.by_col(name) for name in yd_var]).T >>> q_var = ['FP89'] >>> q = np.array([db.by_col(name) for name in q_var]).T And then we can run and explore the model analogously to the previous combo: >>> reg = GM_Combo_Het_Regimes(y, x, regimes, yd, q, w=w, step1c=True, name_y=y_var, name_x=x_var, name_yend=yd_var, name_q=q_var, name_regimes=r_var, name_ds='NAT') >>> print reg.name_z ['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', '0_RD90', '1_RD90', '_Global_W_HR90', 'lambda'] >>> print reg.betas [[ 3.41936197] [ 1.04071048] [ 0.16747219] [ 8.85820215] [ 1.847382 ] [-0.24545394] [ 2.43189808] [ 3.61328423] [ 0.03132164] [ 0.29544224]] >>> print np.sqrt(reg.vm.diagonal()) [ 0.53103804 0.20835827 0.05755679 1.00496234 0.34332131 0.10259525 0.3454436 0.37932794 0.07611667 0.07067059] >>> print 'lambda: ', np.around(reg.betas[-1], 4) lambda: [ 0.2954] """ def __init__(self, y, x, regimes, yend=None, q=None, w=None, w_lags=1, lag_q=True, max_iter=1, epsilon=0.00001, step1c=False, cores=False, inv_method='power_exp', constant_regi='many', cols2regi='all', regime_err_sep=False, regime_lag_sep=False, vm=False, name_y=None, name_x=None, name_yend=None, name_q=None, name_w=None, name_ds=None, name_regimes=None): n = USER.check_arrays(y, x) self.step1c = step1c USER.check_y(y, n) USER.check_weights(w, y, w_required=True) name_x = USER.set_name_x(name_x, x, constant=True) self.name_y = USER.set_name_y(name_y) name_yend = USER.set_name_yend(name_yend, yend) name_q = USER.set_name_q(name_q, q) name_q.extend( USER.set_name_q_sp(name_x, w_lags, name_q, lag_q, force_all=True)) cols2regi = REGI.check_cols2regi( constant_regi, cols2regi, x, yend=yend, add_cons=False) self.regimes_set = REGI._get_regimes_set(regimes) self.regimes = regimes USER.check_regimes(self.regimes_set, n, x.shape[1]) self.regime_err_sep = regime_err_sep self.regime_lag_sep = regime_lag_sep if regime_lag_sep == True: if regime_err_sep == False: raise Exception("For spatial combo models, if spatial lag is set by regimes (regime_lag_sep=True), spatial error must also be set by regimes (regime_err_sep=True).") add_lag = [w_lags, lag_q] else: cols2regi += [False] add_lag = False if regime_err_sep == True: raise Exception("For spatial combo models, if spatial error is set by regimes (regime_err_sep=True), all coefficients including lambda (regime_lag_sep=True) must be set by regimes.") yend, q = set_endog(y, x, w, yend, q, w_lags, lag_q) name_yend.append(USER.set_name_yend_sp(self.name_y)) GM_Endog_Error_Het_Regimes.__init__(self, y=y, x=x, yend=yend, q=q, regimes=regimes, w=w, constant_regi=constant_regi, cols2regi=cols2regi, regime_err_sep=regime_err_sep, max_iter=max_iter, epsilon=epsilon, step1c=step1c, inv_method=inv_method, cores=cores, vm=vm, name_y=name_y, name_x=name_x, name_yend=name_yend, name_q=name_q, name_w=name_w, name_ds=name_ds, name_regimes=name_regimes, summ=False, add_lag=add_lag) if regime_err_sep != True: self.rho = self.betas[-2] self.predy_e, self.e_pred, warn = UTILS.sp_att(w, self.y, self.predy, yend[:, -1].reshape(self.n, 1), self.rho) UTILS.set_warn(self, warn) self.regime_lag_sep = regime_lag_sep self.title = 
"SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES (HET) - REGIMES" SUMMARY.GM_Combo_Het(reg=self, w=w, vm=vm, regimes=True) def _work_error(y, x, regi_ids, r, w, max_iter, epsilon, step1c, name_ds, name_y, name_x, name_w, name_regimes): w_r, warn = REGI.w_regime(w, regi_ids[r], r, transform=True) y_r = y[regi_ids[r]] x_r = x[regi_ids[r]] x_constant = USER.check_constant(x_r) model = BaseGM_Error_Het( y_r, x_constant, w_r.sparse, max_iter=max_iter, epsilon=epsilon, step1c=step1c) set_warn(model, warn) model.w = w_r model.title = "SPATIALLY WEIGHTED LEAST SQUARES ESTIMATION (HET) - REGIME %s" % r model.name_ds = name_ds model.name_y = '%s_%s' % (str(r), name_y) model.name_x = ['%s_%s' % (str(r), i) for i in name_x] model.name_w = name_w model.name_regimes = name_regimes return model def _work_endog_error(y, x, yend, q, regi_ids, r, w, max_iter, epsilon, step1c, inv_method, name_ds, name_y, name_x, name_yend, name_q, name_w, name_regimes, add_lag): w_r, warn = REGI.w_regime(w, regi_ids[r], r, transform=True) y_r = y[regi_ids[r]] x_r = x[regi_ids[r]] if yend is not None: yend_r = yend[regi_ids[r]] q_r = q[regi_ids[r]] else: yend_r, q_r = None, None if add_lag != False: yend_r, q_r = set_endog( y_r, x_r, w_r, yend_r, q_r, add_lag[0], add_lag[1]) x_constant = USER.check_constant(x_r) model = BaseGM_Endog_Error_Het(y_r, x_constant, yend_r, q_r, w_r.sparse, max_iter=max_iter, epsilon=epsilon, step1c=step1c, inv_method=inv_method) set_warn(model, warn) if add_lag != False: model.rho = model.betas[-2] model.predy_e, model.e_pred, warn = sp_att(w_r, model.y, model.predy, model.yend[:, -1].reshape(model.n, 1), model.rho) set_warn(model, warn) model.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES (HET) - REGIME %s" % r model.name_ds = name_ds model.name_y = '%s_%s' % (str(r), name_y) model.name_x = ['%s_%s' % (str(r), i) for i in name_x] model.name_yend = ['%s_%s' % (str(r), i) for i in name_yend] model.name_z = model.name_x + model.name_yend + ['lambda'] model.name_q = ['%s_%s' % (str(r), i) for i in name_q] model.name_h = model.name_x + model.name_q model.name_w = name_w model.name_regimes = name_regimes return model def _test(): import doctest start_suppress = np.get_printoptions()['suppress'] np.set_printoptions(suppress=True) doctest.testmod() np.set_printoptions(suppress=start_suppress) if __name__ == '__main__': _test()
py
b4178a8ed1f3a34e7b96606453ccfffc1e5a3231
# Copyright (c) 2018. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ----------------------------------------------------------------------- # # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf import horovod.tensorflow as hvd def float32_variable_storage_getter(getter, name, shape=None, dtype=None, initializer=None, regularizer=None, trainable=True, *args, **kwargs): """ Custom variable getter that forces trainable variables to be stored in float32 precision and then casts them to the half-precision """ storage_dtype = tf.float32 if trainable else dtype variable = getter(name, shape, dtype=storage_dtype, initializer=initializer, regularizer=regularizer, trainable=trainable, *args, **kwargs) if trainable and dtype != tf.float32: variable = tf.cast(variable, dtype) return variable def neural_mf(users, items, model_dtype, nb_users, nb_items, mf_dim, mf_reg, mlp_layer_sizes, mlp_layer_regs, dropout_rate, sigmoid=False): """ Constructs the model graph """ # Check params if len(mlp_layer_sizes) != len(mlp_layer_regs): raise RuntimeError('u dummy, layer_sized != layer_regs') if mlp_layer_sizes[0] % 2 != 0: raise RuntimeError('u dummy, mlp_layer_sizes[0] % 2 != 0') nb_mlp_layers = len(mlp_layer_sizes) # Embeddings user_embed = tf.get_variable( "user_embeddings", shape=[nb_users, mf_dim + mlp_layer_sizes[0] // 2], initializer=tf.initializers.random_normal(mean=0.0, stddev=0.01)) item_embed = tf.get_variable( "item_embeddings", shape=[nb_items, mf_dim + mlp_layer_sizes[0] // 2], initializer=tf.initializers.random_normal(mean=0.0, stddev=0.01)) # Matrix Factorization Embeddings xmfu = tf.nn.embedding_lookup(user_embed[:, :mf_dim], users, partition_strategy='div') xmfi = tf.nn.embedding_lookup(item_embed[:, :mf_dim], items, partition_strategy='div') # MLP Network Embeddings xmlpu = tf.nn.embedding_lookup(user_embed[:, mf_dim:], users, partition_strategy='div') xmlpi = tf.nn.embedding_lookup(item_embed[:, mf_dim:], items, partition_strategy='div') # Enforce model to use fp16 data types when manually enabling mixed precision # (Tensorfow ops will use automatically use the data type of the first input) if model_dtype == tf.float16: xmfu = tf.cast(xmfu, model_dtype) xmfi = tf.cast(xmfi, model_dtype) xmlpu = tf.cast(xmlpu, model_dtype) xmlpi = tf.cast(xmlpi, model_dtype) # Matrix Factorization xmf = tf.math.multiply(xmfu, xmfi) # MLP Layers xmlp = tf.concat((xmlpu, xmlpi), 1) for i in range(1, nb_mlp_layers): xmlp = 
tf.layers.Dense( mlp_layer_sizes[i], activation=tf.nn.relu, kernel_initializer=tf.glorot_uniform_initializer() ).apply(xmlp) xmlp = tf.layers.Dropout(rate=dropout_rate).apply(xmlp) # Final fully-connected layer logits = tf.concat((xmf, xmlp), 1) logits = tf.layers.Dense( 1, kernel_initializer=tf.keras.initializers.lecun_uniform() ).apply(logits) if sigmoid: logits = tf.math.sigmoid(logits) # Cast model outputs back to float32 if manually enabling mixed precision for loss calculation if model_dtype == tf.float16: logits = tf.cast(logits, tf.float32) return logits def compute_eval_metrics(logits, dup_mask, val_batch_size, K): """ Constructs the graph to compute Hit Rate and NDCG """ # Replace duplicate (uid, iid) pairs with -inf logits = logits * (1. - dup_mask) logits = logits + (dup_mask * logits.dtype.min) # Reshape tensors so that each row corresponds with a user logits_by_user = tf.reshape(logits, [-1, val_batch_size]) dup_mask_by_user = tf.cast(tf.reshape(logits, [-1, val_batch_size]), tf.bool) # Get the topk items for each user top_item_indices = tf.math.top_k(logits_by_user, K)[1] # Check that the positive sample (last index) is in the top K is_positive = tf.cast(tf.equal(top_item_indices, val_batch_size-1), tf.int32) found_positive = tf.reduce_sum(is_positive, axis=1) # Extract the rankings of the positive samples positive_ranks = tf.reduce_sum(is_positive * tf.expand_dims(tf.range(K), 0), axis=1) dcg = tf.log(2.) / tf.log(tf.cast(positive_ranks, tf.float32) + 2) dcg *= tf.cast(found_positive, dcg.dtype) return found_positive, dcg def ncf_model_ops(users, items, labels, dup_mask, params, mode='TRAIN'): """ Constructs the training and evaluation graphs """ # Validation params val_batch_size = params['val_batch_size'] K = params['top_k'] # Training params learning_rate = params['learning_rate'] beta_1 = params['beta_1'] beta_2 = params['beta_2'] epsilon = params['epsilon'] # Model params fp16 = params['fp16'] nb_users = params['num_users'] nb_items = params['num_items'] mf_dim = params['num_factors'] mf_reg = params['mf_reg'] mlp_layer_sizes = params['layer_sizes'] mlp_layer_regs = params['layer_regs'] dropout = params['dropout'] sigmoid = False #params['sigmoid'] loss_scale = params['loss_scale'] model_dtype = tf.float16 if fp16 else tf.float32 # If manually enabling mixed precision, use the custom variable getter custom_getter = None if not fp16 else float32_variable_storage_getter # Allow soft device placement with tf.device(None), \ tf.variable_scope('neumf', custom_getter=custom_getter): # Model graph logits = neural_mf( users, items, model_dtype, nb_users, nb_items, mf_dim, mf_reg, mlp_layer_sizes, mlp_layer_regs, dropout, sigmoid ) logits = tf.squeeze(logits) if mode == 'INFERENCE': return logits # Evaluation Ops found_positive, dcg = compute_eval_metrics(logits, dup_mask, val_batch_size, K) # Metrics hit_rate = tf.metrics.mean(found_positive, name='hit_rate') ndcg = tf.metrics.mean(dcg, name='ndcg') eval_op = tf.group(hit_rate[1], ndcg[1]) if mode == 'EVAL': return hit_rate[0], ndcg[0], eval_op, None # Labels labels = tf.reshape(labels, [-1, 1]) logits = tf.reshape(logits, [-1, 1]) # Use adaptive momentum optimizer optimizer = tf.train.AdamOptimizer( learning_rate=learning_rate, beta1=beta_1, beta2=beta_2, epsilon=epsilon) loss = tf.losses.sigmoid_cross_entropy( labels, logits, reduction=tf.losses.Reduction.MEAN) # Apply loss scaling if manually enabling mixed precision if fp16: if loss_scale is None: loss_scale_manager = 
tf.contrib.mixed_precision.ExponentialUpdateLossScaleManager(2**32, 1000) else: loss_scale_manager = tf.contrib.mixed_precision.FixedLossScaleManager(loss_scale) optimizer = tf.contrib.mixed_precision.LossScaleOptimizer(optimizer, loss_scale_manager) # Horovod wrapper for distributed training optimizer = hvd.DistributedOptimizer(optimizer) # Update ops global_step = tf.train.get_global_step() train_op = optimizer.minimize(loss, global_step=global_step) return hit_rate[0], ndcg[0], eval_op, train_op
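

# Usage sketch (illustrative, not part of the original file): one way the graph
# builders above could be wired together with TF1-style placeholders. The
# placeholder names and every hyperparameter value below are assumptions made
# for this sketch; only the params keys are the ones actually read by
# ncf_model_ops.
def _example_build_training_graph():
    hvd.init()
    users = tf.placeholder(tf.int32, shape=[None])
    items = tf.placeholder(tf.int32, shape=[None])
    labels = tf.placeholder(tf.float32, shape=[None])
    dup_mask = tf.placeholder(tf.float32, shape=[None])
    params = {
        'val_batch_size': 100,   # negatives + 1 positive per user (assumed)
        'top_k': 10,
        'learning_rate': 0.0045,
        'beta_1': 0.25,
        'beta_2': 0.5,
        'epsilon': 1e-8,
        'fp16': False,
        'num_users': 138493,
        'num_items': 26744,
        'num_factors': 64,
        'mf_reg': 0.,
        'layer_sizes': [256, 256, 128, 64],
        'layer_regs': [0., 0., 0., 0.],
        'dropout': 0.5,
        'loss_scale': None,
    }
    hit_rate, ndcg, eval_op, train_op = ncf_model_ops(
        users, items, labels, dup_mask, params, mode='TRAIN')
    return (users, items, labels, dup_mask), (hit_rate, ndcg, eval_op, train_op)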
py
b4178b255cbfc9844ae78f2f2f4d40fd53b133c9
from firewall import Firewall


def version():
    VERSION = '1.0.0'
    return "Assimilator Tools - Version: {0}".format(VERSION)
py
b4178b7a1f4ddc761a146a15cf003f4f088e76b9
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License" # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from abc import abstractmethod import paddle import paddle.nn as nn import paddle.nn.functional as F from ..builder import build_loss from paddlevideo.utils import get_logger, get_dist_info logger = get_logger("paddlevideo") class BaseHead(nn.Layer): """Base class for head part. All head should subclass it. All subclass should overwrite: - Methods: ```init_weights```, initializing weights. - Methods: ```forward```, forward function. Args: num_classes (int): The number of classes to be classified. in_channels (int): The number of channels in input feature. loss_cfg (dict): Config for building loss. Default: dict(type='CrossEntropyLoss'). ls_eps (float): label smoothing epsilon. Default: 0. . """ def __init__( self, num_classes, in_channels, loss_cfg=dict( name="CrossEntropyLoss" ), #TODO(shipping): only pass a name or standard build cfg format. #multi_class=False, NOTE(shipping): not supported now. ls_eps=0.): super().__init__() self.num_classes = num_classes self.in_channels = in_channels self.loss_func = build_loss(loss_cfg) #self.multi_class = multi_class NOTE(shipping): not supported now self.ls_eps = ls_eps @abstractmethod def init_weights(self): """Initiate the parameters. """ raise NotImplemented @abstractmethod def forward(self, x): """Define how the head is going to run. """ raise NotImplemented def loss(self, scores, labels, valid_mode=False, **kwargs): """Calculate the loss accroding to the model output ```scores```, and the target ```labels```. Args: scores (paddle.Tensor): The output of the model. labels (paddle.Tensor): The target output of the model. Returns: losses (dict): A dict containing field 'loss'(mandatory) and 'top1_acc', 'top5_acc'(optional). """ if len(labels) == 1: #commonly case labels = labels[0] losses = dict() if self.ls_eps != 0. 
and not valid_mode: # label_smooth loss = self.label_smooth_loss(scores, labels, **kwargs) else: loss = self.loss_func(scores, labels, **kwargs) top1, top5 = self.get_acc(scores, labels, valid_mode) losses['top1'] = top1 losses['top5'] = top5 losses['loss'] = loss return losses elif len(labels) == 3: # mix_up labels_a, labels_b, lam = labels lam = lam[0] # get lam value losses = dict() if self.ls_eps != 0: loss_a = self.label_smooth_loss(scores, labels_a, **kwargs) loss_b = self.label_smooth_loss(scores, labels_b, **kwargs) else: loss_a = self.loss_func(scores, labels_a, **kwargs) loss_b = self.loss_func(scores, labels_b, **kwargs) loss = lam * loss_a + (1 - lam) * loss_b top1a, top5a = self.get_acc(scores, labels_a, valid_mode) top1b, top5b = self.get_acc(scores, labels_b, valid_mode) top1 = lam * top1a + (1 - lam) * top1b top5 = lam * top5a + (1 - lam) * top5b losses['top1'] = top1 losses['top5'] = top5 losses['loss'] = loss return losses else: raise NotImplemented def label_smooth_loss(self, scores, labels, **kwargs): labels = F.one_hot(labels, self.num_classes) labels = F.label_smooth(labels, epsilon=self.ls_eps) labels = paddle.squeeze(labels, axis=1) loss = self.loss_func(scores, labels, soft_label=True, **kwargs) return loss def get_acc(self, scores, labels, valid_mode): top1 = paddle.metric.accuracy(input=scores, label=labels, k=1) top5 = paddle.metric.accuracy(input=scores, label=labels, k=5) _, world_size = get_dist_info() #NOTE(shipping): deal with multi cards validate if world_size > 1 and valid_mode: #reduce sum when valid top1 = paddle.distributed.all_reduce( top1, op=paddle.distributed.ReduceOp.SUM) / world_size top5 = paddle.distributed.all_reduce( top5, op=paddle.distributed.ReduceOp.SUM) / world_size return top1, top5
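

# Illustrative sketch (not part of the original file): a minimal concrete head
# showing how BaseHead is meant to be subclassed. The class name, the single
# Linear layer and the init strategy are assumptions for the example; only the
# constructor signature and the abstract init_weights/forward contract come
# from the code above.
class ExampleLinearHead(BaseHead):
    def __init__(self, num_classes, in_channels, **kwargs):
        super().__init__(num_classes, in_channels, **kwargs)
        self.fc = nn.Linear(self.in_channels, self.num_classes)

    def init_weights(self):
        # Re-initialize the classifier weights with a small normal distribution.
        self.fc.weight.set_value(
            paddle.normal(mean=0.0, std=0.01, shape=self.fc.weight.shape))

    def forward(self, x):
        # x: feature tensor of shape [N, in_channels] -> class scores [N, num_classes]
        return self.fc(x)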
py
b4178ba0b4cacd150e39ef00cc57de8baca7db9b
import csv import logging import sys from collections import OrderedDict from pathlib import Path from typing import Any, Dict, List import rapidjson from colorama import Fore, Style from colorama import init as colorama_init from tabulate import tabulate from finrl.config import setup_utils_configuration from finrl.constants import USERPATH_HYPEROPTS, USERPATH_STRATEGIES from finrl.exceptions import OperationalException from finrl.exchange import available_exchanges, ccxt_exchanges, market_is_active from finrl.misc import plural from finrl.resolvers import ExchangeResolver from finrl.state import RunMode logger = logging.getLogger(__name__) """ TODO MAKE LIST AGENTS, LIST MODELS, LIST ENVIRONMENTS ARGS_LIST_EXCHANGES = ["print_one_column", "list_exchanges_all"] ARGS_LIST_TIMEFRAMES = ["exchange", "print_one_column"] ARGS_LIST_PAIRS = ["exchange", "print_list", "list_pairs_print_json", "print_one_column", "print_csv", "base_currencies", "quote_currencies", "list_pairs_all"] ARGS_TEST_PAIRLIST = ["config", "quote_currencies", "print_one_column", "list_pairs_print_json"] """ def start_list_exchanges(args: Dict[str, Any]) -> None: """ Print available exchanges param args: Cli args from Arguments() return: None """ exchanges = ccxt_exchanges() if args['list_exchanges_all'] else available_exchanges() if args['print_one_column']: print('\n'.join(exchanges)) else: if args['list_exchanges_all']: print(f"All exchanges supported by the ccxt library: {', '.join(exchanges)}") else: print(f"Exchanges available for Freqtrade: {', '.join(exchanges)}") def _print_objs_tabular(objs: List, print_colorized: bool) -> None: if print_colorized: colorama_init(autoreset=True) red = Fore.RED yellow = Fore.YELLOW reset = Style.RESET_ALL else: red = ''; yellow = ''; reset = '' names = [s['name'] for s in objs] objss_to_print = [{'name': s['name'] if s['name'] else "--", 'location': s['location'].name, 'status': (red + "LOAD FAILED" + reset if s['class'] is None else "OK" if names.count(s['name']) == 1 else yellow + "DUPLICATE NAME" + reset) } for s in objs] print(tabulate(objss_to_print, headers='keys', tablefmt='psql', stralign='right')) def start_list_timeframes(args: Dict[str, Any]) -> None: """ Print ticker intervals (timeframes) available on Exchange """ config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE) # Do not use timeframe set in the config config['timeframe'] = None # Init exchange exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False) if args['print_one_column']: print('\n'.join(exchange.timeframes)) else: print(f"Timeframes available for the exchange `{exchange.name}`: " f"{', '.join(exchange.timeframes)}") def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None: """ Print pairs/markets on the exchange param args: Cli args from Arguments() param pairs_only: if True print only pairs, otherwise print all instruments (markets) return: None """ config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE) # Init exchange exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False) # By default only active pairs/markets are to be shown active_only = not args.get('list_pairs_all', False) base_currencies = args.get('base_currencies', []) quote_currencies = args.get('quote_currencies', []) try: pairs = exchange.get_markets(base_currencies=base_currencies, quote_currencies=quote_currencies, pairs_only=pairs_only, active_only=active_only) # Sort the pairs/markets by symbol pairs = 
OrderedDict(sorted(pairs.items())) except Exception as e: raise OperationalException(f"Cannot get markets. Reason: {e}") from e else: summary_str = ((f"Exchange {exchange.name} has {len(pairs)}") + (" active " if active_only else "") + (plural(len(pairs), "pair" if pairs_only else "market")) + (f" with {', '.join(base_currencies)} as base " f"{plural(len(base_currencies), 'currency', 'currencies')}" if base_currencies else "") + (" and" if base_currencies and quote_currencies else "") + (f" with {', '.join(quote_currencies)} as quote " f"{plural(len(quote_currencies), 'currency', 'currencies')}" if quote_currencies else "") ) headers = ["Id", "Symbol", "Base", "Quote", "Active", *(['Is pair'] if not pairs_only else [])] tabular_data = [] for _, v in pairs.items(): tabular_data.append({'Id': v['id'], 'Symbol': v['symbol'], 'Base': v['base'], 'Quote': v['quote'], 'Active': market_is_active(v), **({'Is pair': exchange.market_is_tradable(v)} if not pairs_only else {}) }) if (args.get('print_one_column', False) or args.get('list_pairs_print_json', False) or args.get('print_csv', False)): # Print summary string in the log in case of machine-readable # regular formats. logger.info(f"{summary_str}.") else: # Print empty string separating leading logs and output in case of # human-readable formats. print() if len(pairs): if args.get('print_list', False): # print data as a list, with human-readable summary print(f"{summary_str}: {', '.join(pairs.keys())}.") elif args.get('print_one_column', False): print('\n'.join(pairs.keys())) elif args.get('list_pairs_print_json', False): print(rapidjson.dumps(list(pairs.keys()), default=str)) elif args.get('print_csv', False): writer = csv.DictWriter(sys.stdout, fieldnames=headers) writer.writeheader() writer.writerows(tabular_data) else: # print data as a table, with the human-readable summary print(f"{summary_str}:") print(tabulate(tabular_data, headers='keys', tablefmt='psql', stralign='right')) elif not (args.get('print_one_column', False) or args.get('list_pairs_print_json', False) or args.get('print_csv', False)): print(f"{summary_str}.") def start_show_trades(args: Dict[str, Any]) -> None: """ Show trades """ import json from freqtrade.persistence import Trade, init_db config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE) if 'db_url' not in config: raise OperationalException("--db-url is required for this command.") logger.info(f'Using DB: "{config["db_url"]}"') init_db(config['db_url'], clean_open_orders=False) tfilter = [] if config.get('trade_ids'): tfilter.append(Trade.id.in_(config['trade_ids'])) trades = Trade.get_trades(tfilter).all() logger.info(f"Printing {len(trades)} Trades: ") if config.get('print_json', False): print(json.dumps([trade.to_json() for trade in trades], indent=4)) else: for trade in trades: print(trade)
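

# Illustrative sketch (not part of the original module): _print_objs_tabular only
# needs dicts with 'name', 'location' (anything exposing a .name attribute) and
# 'class' keys, so it can be exercised directly. The sample entries below are
# made up for the example.
def _example_print_objs_tabular():
    sample_objs = [
        {'name': 'SampleStrategy', 'location': Path('user_data/strategies/sample_strategy.py'), 'class': object},
        {'name': 'BrokenStrategy', 'location': Path('user_data/strategies/broken_strategy.py'), 'class': None},
    ]
    _print_objs_tabular(sample_objs, print_colorized=False)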
py
b4178c033fadaaa54263262f1f9ebfd5506fa5e6
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Initializing lists

There are several ways to initialize lists.
"""

# A list for f(x) = x^2 can be created in several ways.

# Using the append method
squares = []
for x in range(10):
    squares.append(x**2)
print(squares)

# With lambda
squares = list(map(lambda x: x**2, range(10)))
print(squares)

# Or, more readably, with a list comprehension
squares = [x**2 for x in range(10)]
print(squares)

# Loops can be nested
tuples = [(x, y) for x in [1, 3, 5] for y in [3, 4, 5] if x != y]
print(tuples)
py
b4178d1d53725ca12fda2007070d3aac0ff53748
# Copyright 2017 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains code related to lifecycle management of Kubernetes Pods.""" import json, yaml import logging import posixpath import os from perfkitbenchmarker import context from perfkitbenchmarker import disk from perfkitbenchmarker import errors from perfkitbenchmarker import flags from perfkitbenchmarker import kubernetes_helper from perfkitbenchmarker import providers from perfkitbenchmarker import virtual_machine, linux_virtual_machine from perfkitbenchmarker import vm_util from perfkitbenchmarker.providers.aws import aws_virtual_machine from perfkitbenchmarker.providers.azure import azure_virtual_machine from perfkitbenchmarker.providers.gcp import gce_virtual_machine from perfkitbenchmarker.providers.kubernetes import kubernetes_disk from perfkitbenchmarker.vm_util import OUTPUT_STDOUT as STDOUT FLAGS = flags.FLAGS UBUNTU_IMAGE = 'ubuntu-upstart' SELECTOR_PREFIX = 'pkb' class KubernetesVirtualMachine(virtual_machine.BaseVirtualMachine): """ Object representing a Kubernetes POD. """ CLOUD = providers.KUBERNETES DEFAULT_IMAGE = None CONTAINER_COMMAND = None HOME_DIR = '/root' def __init__(self, vm_spec): """Initialize a Kubernetes virtual machine. Args: vm_spec: KubernetesPodSpec object of the vm. """ super(KubernetesVirtualMachine, self).__init__(vm_spec) self.num_scratch_disks = 0 self.name = self.name.replace('_', '-') self.user_name = FLAGS.username self.image = self.image or self.DEFAULT_IMAGE self.resource_limits = vm_spec.resource_limits self.resource_requests = vm_spec.resource_requests self.instances = [] self.deleteResource = False def GetResourceMetadata(self): metadata = super(KubernetesVirtualMachine, self).GetResourceMetadata() if self.resource_limits: metadata.update({ 'pod_cpu_limit': self.resource_limits.cpus, 'pod_memory_limit_mb': self.resource_limits.memory, }) if self.resource_requests: metadata.update({ 'pod_cpu_request': self.resource_requests.cpus, 'pod_memory_request_mb': self.resource_requests.memory, }) return metadata def _CreateDependencies(self): self._CheckPrerequisites() self._CreateVolumes() def _DeleteDependencies(self): self._DeleteVolumes() def _Create(self): try: self._CreatePod() except Exception as e: logging.info("Create Failed") self._WaitForPodBootCompletion() @vm_util.Retry() def _PostCreate(self): self._GetInternalIp() self._ConfigureProxy() self._SetupDevicesPaths() def _Delete(self): self.deleteResource = True self._DeletePod() def _CheckPrerequisites(self): """ Exits if any of the prerequisites is not met. """ if not FLAGS.kubectl: raise Exception('Please provide path to kubectl tool using --kubectl ' 'flag. Exiting.') if not FLAGS.kubeconfig: raise Exception('Please provide path to kubeconfig using --kubeconfig ' 'flag. 
Exiting.') if self.disk_specs and self.disk_specs[0].disk_type == disk.STANDARD: if not FLAGS.ceph_monitors: raise Exception('Please provide a list of Ceph Monitors using ' '--ceph_monitors flag.') def _CreatePod(self): """ Creates a POD (Docker container with optional volumes). """ #create_rc_body = self._BuildPodBody() #logging.info('About to create a pod with the following configuration:') try: logging.info("Name: "+self.name) ''' files = [] for r, d, f in os.walk(FLAGS.kube_config_dir): for file in f: if '.yml' in file: files.append(os.path.join(r, file)) for f in files: logging.info("LOAD: "+f) content = open(f).read() kubernetes_helper.CreateResource(content) ''' create_cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'apply', '-k', FLAGS.kube_config_dir] pod_info, _, _w = vm_util.IssueCommand(create_cmd, suppress_warning=True) ''' logging.info("LOAD: "+FLAGS.kube_db_controller) dbctrl = open(FLAGS.kube_db_controller).read() kubernetes_helper.CreateResource(dbctrl) logging.info("LOAD: "+FLAGS.kube_db_service) dbserv = open(FLAGS.kube_db_service).read() kubernetes_helper.CreateResource(dbserv) logging.info("LOAD: "+FLAGS.kube_web_controller) webctrl = open(FLAGS.kube_web_controller).read() kubernetes_helper.CreateResource(webctrl) logging.info("LOAD: "+FLAGS.kube_web_service) webserv = open(FLAGS.kube_web_service).read() kubernetes_helper.CreateResource(webserv) ''' except Exception as e: logging.info("EE"+str(e)) @vm_util.Retry(poll_interval=10, max_retries=100, log_errors=False) def _WaitForPodBootCompletion(self): """ Need to wait for the PODs to get up - PODs are created with a little delay. """ exists_cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'get', 'pod', "-o", "json"] logging.info('Waiting for POD %s' % self.name) pod_info, _, _w = vm_util.IssueCommand(exists_cmd, suppress_warning=True) if pod_info: pod_info = json.loads(pod_info) all_ready = True self.instances = pod_info["items"] for item in pod_info["items"]: pod_status = item['status']['phase'] logging.info("POD STATUS: "+pod_status) if pod_status != "Running": all_ready = False break #TODO implement to support multiple clients name = item["metadata"]["name"] if name.startswith(FLAGS.kube_ctrl_name): self.name = name if all_ready: logging.info('PODs are up and running.') return #containers = pod_info['spec']['containers'] #print containers #if len(containers) == 1: # pod_status = pod_info['status']['phase'] # if (containers[0]['name'].startswith(self.name) # and pod_status == 'Running'): # logging.info('POD is up and running.') # return raise Exception('POD %s is not running. Retrying to check status.' % self.name) def _DeletePod(self): """ Deletes a POD. """ delete_pod = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'delete', 'pod', self.name] #TODO uncomment # output = vm_util.IssueCommand(delete_pod) #logging.info(output[STDOUT].rstrip()) @vm_util.Retry(poll_interval=10, max_retries=20) def _Exists(self): """ POD should have been already created but this is a double check. """ exists_cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'get', 'pod', '-o=json', self.name] pod_info, _, _ = vm_util.IssueCommand(exists_cmd, suppress_warning=True) # TODO delete this mockup result if self.deleteResource: return False if pod_info: return True return False def _CreateVolumes(self): """ Creates volumes for scratch disks. These volumes have to be created BEFORE containers creation because Kubernetes doesn't allow to attach volume to currently running containers. 
""" self.scratch_disks = kubernetes_disk.CreateDisks(self.disk_specs, self.name) @vm_util.Retry(poll_interval=10, max_retries=20, log_errors=False) def _DeleteVolumes(self): """ Deletes volumes. """ for scratch_disk in self.scratch_disks[:]: scratch_disk.Delete() self.scratch_disks.remove(scratch_disk) def DeleteScratchDisks(self): pass def _GetInternalIp(self): """ Gets the POD's internal ip address. """ ext_ip = kubernetes_helper.Get('services', FLAGS.kube_service_name, '', '.status.loadBalancer.ingress[0].ip') pod_ip = kubernetes_helper.Get('services', FLAGS.kube_service_name, '', '.spec.clusterIP') if not pod_ip: raise Exception('Internal POD IP address not found. Retrying.') if not ext_ip: raise Exception("External IP address not found") self.ip_address = ext_ip self.internal_ip = pod_ip def _ConfigureProxy(self): """ In Docker containers environment variables from /etc/environment are not sourced - this results in connection problems when running behind proxy. Prepending proxy environment variables to bashrc solves the problem. Note: APPENDING to bashrc will not work because the script exits when it is NOT executed in interactive shell. """ if FLAGS.http_proxy: http_proxy = 'sed -i \'1i export http_proxy=%s\' /etc/bash.bashrc' self.RemoteCommand(http_proxy % FLAGS.http_proxy) if FLAGS.https_proxy: https_proxy = 'sed -i \'1i export https_proxy=%s\' /etc/bash.bashrc' self.RemoteCommand(https_proxy % FLAGS.http_proxy) if FLAGS.ftp_proxy: ftp_proxy = 'sed -i \'1i export ftp_proxy=%s\' /etc/bash.bashrc' self.RemoteCommand(ftp_proxy % FLAGS.ftp_proxy) def _SetupDevicesPaths(self): """ Sets the path to each scratch disk device. """ for scratch_disk in self.scratch_disks: scratch_disk.SetDevicePath(self) def _BuildPodBody(self): """ Builds a JSON which will be passed as a body of POST request to Kuberneres API in order to create a POD. """ container = self._BuildContainerBody() volumes = self._BuildVolumesBody() template = { 'kind': 'Pod', 'apiVersion': 'v1', 'metadata': { 'name': self.name, 'labels': { SELECTOR_PREFIX: self.name } }, 'spec': { 'volumes': volumes, 'containers': [container], 'dnsPolicy': 'ClusterFirst', } } if FLAGS.kubernetes_anti_affinity: template['spec']['affinity'] = { 'podAntiAffinity': { 'requiredDuringSchedulingIgnoredDuringExecution': [{ 'labelSelector': { 'matchExpressions': [{ 'key': 'pkb_anti_affinity', 'operator': 'In', 'values': [''], }], }, 'topologyKey': 'kubernetes.io/hostname', }], }, } template['metadata']['labels']['pkb_anti_affinity'] = '' return json.dumps(template) def _BuildVolumesBody(self): """ Constructs volumes-related part of POST request to create POD. """ volumes = [] for scratch_disk in self.scratch_disks: scratch_disk.AttachVolumeInfo(volumes) return volumes def _BuildContainerBody(self): """ Constructs containers-related part of POST request to create POD. 
""" registry = getattr(context.GetThreadBenchmarkSpec(), 'registry', None) if (not FLAGS.static_container_image and registry is not None): image = registry.GetFullRegistryTag(self.image) else: image = self.image container = { 'image': image, 'name': self.name, 'workingDir': self.HOME_DIR, 'securityContext': { 'privileged': FLAGS.docker_in_privileged_mode }, 'volumeMounts': [ ] } for scratch_disk in self.scratch_disks: scratch_disk.AttachVolumeMountInfo(container['volumeMounts']) resource_body = self._BuildResourceBody() if resource_body: container['resources'] = resource_body if self.CONTAINER_COMMAND: container['command'] = self.CONTAINER_COMMAND return container def _BuildResourceBody(self): """Constructs a dictionary that specifies resource limits and requests. The syntax for including GPUs is specific to GKE and is likely to change in the future. See https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus Returns: kubernetes pod resource body containing pod limits and requests. """ resources = { 'limits': {}, 'requests': {}, } if self.resource_requests: resources['requests'].update({ 'cpu': str(self.resource_requests.cpus), 'memory': '{0}Mi'.format(self.resource_requests.memory), }) if self.resource_limits: resources['limits'].update({ 'cpu': str(self.resource_limits.cpus), 'memory': '{0}Mi'.format(self.resource_limits.memory), }) if self.gpu_count: gpu_dict = { 'nvidia.com/gpu': str(self.gpu_count) } resources['limits'].update(gpu_dict) resources['requests'].update(gpu_dict) result_with_empty_values_removed = ( {k: v for k, v in resources.iteritems() if v}) return result_with_empty_values_removed class DebianBasedKubernetesVirtualMachine(KubernetesVirtualMachine, linux_virtual_machine.DebianMixin): DEFAULT_IMAGE = UBUNTU_IMAGE def RemoteHostCommandWithReturnCode(self, command, should_log=False, retries=None, ignore_failure=False, login_shell=False, suppress_warning=False, timeout=None, on_host=False): """Runs a command in the Kubernetes container.""" cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'exec', '-i', self.name, '--', '/bin/bash', '-c', command] if on_host: cmd = ["/bin/bash", "-c", command] ignore_failure = True stdout, stderr, retcode = vm_util.IssueCommand( cmd, force_info_log=should_log, suppress_warning=suppress_warning, timeout=timeout) if not ignore_failure and retcode: error_text = ('Got non-zero return code (%s) executing %s\n' 'Full command: %s\nSTDOUT: %sSTDERR: %s' % (retcode, command, ' '.join(cmd), stdout, stderr)) raise errors.VirtualMachine.RemoteCommandError(error_text) return stdout, stderr, retcode def MoveHostFile(self, target, source_path, remote_path=''): """Copies a file from one VM to a target VM. Args: target: The target BaseVirtualMachine object. source_path: The location of the file on the REMOTE machine. remote_path: The destination of the file on the TARGET machine, default is the home directory. """ file_name = vm_util.PrependTempDir(posixpath.basename(source_path)) self.RemoteHostCopy(file_name, source_path, copy_to=False) target.RemoteHostCopy(file_name, remote_path) def RemoteHostCopy(self, file_path, remote_path='', copy_to=True): """Copies a file to or from the VM. Args: file_path: Local path to file. remote_path: Optional path of where to copy file on remote host. copy_to: True to copy to vm, False to copy from vm. Raises: RemoteCommandError: If there was a problem copying the file. 
""" if copy_to: file_name = posixpath.basename(file_path) src_spec, dest_spec = file_path, '%s:%s' % (self.name, file_name) else: remote_path, _ = self.RemoteCommand('readlink -f %s' % remote_path) remote_path = remote_path.strip() src_spec, dest_spec = '%s:%s' % (self.name, remote_path), file_path cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'cp', src_spec, dest_spec] stdout, stderr, retcode = vm_util.IssueCommand(cmd) if retcode: error_text = ('Got non-zero return code (%s) executing %s\n' 'STDOUT: %sSTDERR: %s' % (retcode, ' '.join(cmd), stdout, stderr)) raise errors.VirtualMachine.RemoteCommandError(error_text) if copy_to: file_name = posixpath.basename(file_path) remote_path = remote_path or file_name self.RemoteCommand('mv %s %s; chmod 777 %s' % (file_name, remote_path, remote_path)) @vm_util.Retry(log_errors=False, poll_interval=1) def PrepareVMEnvironment(self): super(DebianBasedKubernetesVirtualMachine, self).PrepareVMEnvironment() # Don't rely on SSH being installed in Kubernetes containers, # so install it and restart the service so that it is ready to go. # Although ssh is not required to connect to the container, MPI # benchmarks require it. self.InstallPackages('ssh') self.RemoteCommand('sudo /etc/init.d/ssh restart', ignore_failure=True) self.RemoteCommand('mkdir -p ~/.ssh') with open(self.ssh_public_key) as f: key = f.read() self.RemoteCommand('echo "%s" >> ~/.ssh/authorized_keys' % key) # Don't assume the relevant CLI is installed in the Kubernetes environment. if FLAGS.container_cluster_cloud == 'GCP': self.InstallGcloudCli() elif FLAGS.container_cluster_cloud == 'AWS': self.InstallAwsCli() elif FLAGS.container_cluster_cloud == 'Azure': self.InstallAzureCli() def InstallAwsCli(self): """Installs the AWS CLI; used for downloading preprovisioned data.""" self.Install('aws_credentials') self.Install('awscli') def InstallAzureCli(self): """Installs the Azure CLI; used for downloading preprovisioned data.""" self.Install('azure_cli') self.Install('azure_credentials') # TODO(ferneyhough): Consider making this a package. def InstallGcloudCli(self): """Installs the Gcloud CLI; used for downloading preprovisioned data.""" self.InstallPackages('curl') self.RemoteCommand('echo "deb http://packages.cloud.google.com/apt ' 'cloud-sdk-$(lsb_release -c -s) main" | sudo tee -a ' '/etc/apt/sources.list.d/google-cloud-sdk.list') self.RemoteCommand('curl https://packages.cloud.google.com/apt/doc/' 'apt-key.gpg | sudo apt-key add -') self.RemoteCommand('sudo apt-get update && sudo apt-get install ' '-y google-cloud-sdk') def DownloadPreprovisionedData(self, install_path, module_name, filename): """Downloads a preprovisioned data file. This function works by looking up the VirtualMachine class which matches the cloud we are running on (defined by FLAGS.container_cluster_cloud). Then we look for a module-level function defined in the same module as the VirtualMachine class which generates a string used to download preprovisioned data for the given cloud. Note that this implementation is specific to debian os types. Windows support will need to be handled in WindowsBasedKubernetesVirtualMachine. Args: install_path: The install path on this VM. module_name: Name of the module associated with this data file. filename: The name of the file that was downloaded. Raises: NotImplementedError: if this method does not support the specified cloud. AttributeError: if the VirtualMachine class does not implement GenerateDownloadPreprovisionedDataCommand. 
""" cloud = FLAGS.container_cluster_cloud if cloud == 'GCP': download_function = (gce_virtual_machine. GenerateDownloadPreprovisionedDataCommand) elif cloud == 'AWS': download_function = (aws_virtual_machine. GenerateDownloadPreprovisionedDataCommand) elif cloud == 'Azure': download_function = (azure_virtual_machine. GenerateDownloadPreprovisionedDataCommand) else: raise NotImplementedError( 'Cloud {0} does not support downloading preprovisioned ' 'data on Kubernetes VMs.'.format(cloud)) self.RemoteCommand( download_function(install_path, module_name, filename)) def _install_sudo_command(): """Return a bash command that installs sudo and runs tail indefinitely. This is useful for some docker images that don't have sudo installed. Returns: a sequence of arguments that use bash to install sudo and never run tail indefinitely. """ # The canonical ubuntu images as well as the nvidia/cuda # image do not have sudo installed so install it and configure # the sudoers file such that the root user's environment is # preserved when running as sudo. Then run tail indefinitely so that # the container does not exit. container_command = ' && '.join([ 'apt-get update', 'apt-get install -y sudo', 'sed -i \'/env_reset/d\' /etc/sudoers', 'sed -i \'/secure_path/d\' /etc/sudoers', 'sudo ldconfig', 'tail -f /dev/null', ]) return ['bash', '-c', container_command] class Ubuntu1404BasedKubernetesVirtualMachine( DebianBasedKubernetesVirtualMachine, linux_virtual_machine.Ubuntu1404Mixin): # All Ubuntu images below are from https://hub.docker.com/_/ubuntu/ # Note that they do not include all packages that are typically # included with Ubuntu. For example, sudo is not installed. # KubernetesVirtualMachine takes care of this by installing # sudo in the container startup script. DEFAULT_IMAGE = 'ubuntu:14.04' CONTAINER_COMMAND = _install_sudo_command() class Ubuntu1604BasedKubernetesVirtualMachine( DebianBasedKubernetesVirtualMachine, linux_virtual_machine.Ubuntu1604Mixin): DEFAULT_IMAGE = 'ubuntu:16.04' CONTAINER_COMMAND = _install_sudo_command() class Ubuntu1710BasedKubernetesVirtualMachine( DebianBasedKubernetesVirtualMachine, linux_virtual_machine.Ubuntu1710Mixin): DEFAULT_IMAGE = 'ubuntu:17.10' CONTAINER_COMMAND = _install_sudo_command() class Ubuntu1604Cuda9BasedKubernetesVirtualMachine( DebianBasedKubernetesVirtualMachine, linux_virtual_machine.Ubuntu1604Cuda9Mixin): # Image is from https://hub.docker.com/r/nvidia/cuda/ DEFAULT_IMAGE = 'nvidia/cuda:9.0-devel-ubuntu16.04' CONTAINER_COMMAND = _install_sudo_command()
py
b4178da2fcf1133deab06801241bf16de518abcd
'''
Queue data structure
by Saadkhalid913
'''


class Queue(list):
    def __init__(self):
        super().__init__()

    def push(self, i):
        self.append(i)

    def deque(self):
        return self.pop(0)

    def peek(self):
        return self[0]

    def isempty(self):
        return len(self) == 0


if __name__ == "__main__":
    q = Queue()
    q.push(1)
    q.push(2)
    q.push(3)
    print(q)
    print(q.deque())
    print(q)
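

# Illustrative alternative (not part of the original file): list.pop(0) is O(n),
# so the same push/deque/peek/isempty interface can also be backed by
# collections.deque, whose popleft() is O(1). This standard-library sketch is
# independent of the Queue class above.
from collections import deque


class DequeQueue:
    def __init__(self):
        self._items = deque()

    def push(self, i):
        self._items.append(i)

    def deque(self):
        return self._items.popleft()

    def peek(self):
        return self._items[0]

    def isempty(self):
        return len(self._items) == 0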
py
b4178e3c7abe52a3ffc425264d1b0ed7b01c1f76
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Boolean, Column, DateTime, BigInteger
from sqlalchemy import MetaData, Integer, String, Table

from nova.openstack.common import log as logging

LOG = logging.getLogger(__name__)


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # add column:
    bw_usage_cache = Table('bw_usage_cache', meta, autoload=True)
    last_ctr_in = Column('last_ctr_in', BigInteger())
    last_ctr_out = Column('last_ctr_out', BigInteger())
    bw_usage_cache.create_column(last_ctr_in)
    bw_usage_cache.create_column(last_ctr_out)


def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # drop column:
    bw_usage_cache = Table('bw_usage_cache', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('id', Integer(), primary_key=True, nullable=False),
            Column('mac', String(255)),
            Column('uuid', String(36)),
            Column('start_period', DateTime(timezone=False), nullable=False),
            Column('last_refreshed', DateTime(timezone=False)),
            Column('bw_in', BigInteger()),
            Column('bw_out', BigInteger()),
            Column('last_ctr_in', BigInteger()),
            Column('last_ctr_out', BigInteger()),
            extend_existing=True)
    bw_usage_cache.drop_column('last_ctr_in')
    bw_usage_cache.drop_column('last_ctr_out')
py
b4178e5612b78d5c4a747f1d3a90c248c5622199
from PyPDF2 import PdfFileWriter, PdfFileReader

print("The file should be in same FOLDER as this script")
pdfNameInput = input("Enter EXACT name of the PDF in this FOLDER: ")
pdfName = pdfNameInput + ".pdf"

# reading the pdf
pdf = PdfFileReader(pdfName)

# object for writing the file
write_obj = PdfFileWriter()

# Getting the number of pages and writing each page in the writer object
for i in range(pdf.getNumPages()):
    page = pdf.getPage(i)
    write_obj.addPage(page)

# Encrypting by a password
password = input("Enter Password for the Encryption to PDF: ")
write_obj.encrypt(user_pwd=password, owner_pwd=None, use_128bit=True)

new_PDF_Name_Input = input("Enter new PDF name: ")
new_PDF_Name = new_PDF_Name_Input + '.pdf'

# Write out the encrypted pages and close the file so the PDF is fully flushed to disk
with open(new_PDF_Name, 'wb') as encrypted_PDF:
    write_obj.write(encrypted_PDF)
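
# Optional verification sketch (illustrative, not part of the original script):
# re-open the freshly written file and unlock it with the same password to
# confirm that encryption took effect. PdfFileReader.decrypt returns 0 when the
# password does not match.
check = PdfFileReader(new_PDF_Name)
if check.isEncrypted and check.decrypt(password):
    print("Encrypted PDF written and verified:", new_PDF_Name)
else:
    print("Warning: could not verify the encrypted PDF")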
py
b4178eaccd74838f6fabe68d030bbd31402ce07c
import helpers


class Measure:
    def __init__(self, magnitude, unit, conversionUnit):
        self.magnitude = magnitude
        self.unit = unit
        self.conversionUnit = conversionUnit

    def convert(self):
        currentMeasureValue = helpers.getUnit(self.unit)
        futureMeasureValue = helpers.getUnit(self.conversionUnit)
        self.targetMeasure = helpers.convert(self.magnitude, currentMeasureValue, futureMeasureValue)
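

# Usage sketch (illustrative, not part of the original file): the helpers module
# is not shown here, so the unit names "km" and "m" are assumptions about what
# helpers.getUnit accepts.
if __name__ == "__main__":
    distance = Measure(5, "km", "m")
    distance.convert()
    print(distance.targetMeasure)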
py
b417911a80b663b3071f3af495d6b913eb2681e7
import argparse
import os
import sys
import pickle
import time
import pandas as pd
import re
from src.utils import np
from src.problem import Problem
from src.controller import Parallel_Controller
from src.MMAS_solver import MMAS_Solver
from pathlib import Path


def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--method',
        choices=['DGA', 'RGA', 'MMAS'],
        default='MMAS',
        type=str,
        required=True,
        help='algorithm')
    parser.add_argument(
        '--idx',
        type=int,
        required=True,
        help='Index of slurm')
    parser.add_argument(
        '--instance',
        type=str,
        required=True,
        help='instance to apply the algorithm to it')
    # parser.add_argument(
    #     '--batch',
    #     # choices=[101],
    #     # default=31,
    #     type=int,
    #     required=True,
    #     help='size of batch of run for RGA')
    # if 0 : it takes the seed from input
    # if 2 : it generates a random seed
    # if 1 : it goes for 31 runs.
    parser.add_argument(
        '--ls',
        choices=['none', '1'],
        default='none',
        type=str,
        required=False,
        help='Local search method')
    parser.add_argument(
        '--n-ants',
        default=10,
        type=int,
        required=False,
        help='number of ants for population size')
    parser.add_argument(
        '--maxiter',
        default=1000,
        type=int,
        required=False,
        help='Maximum number of iterations')

    try:
        args = parser.parse_args()
    except:
        parser.print_help()
        import sys
        sys.exit(0)

    return args


def parse_instance(instance_name):
    find_all = np.asarray(re.findall(r'\b\d+\b', instance_name), dtype=int)
    return tuple(find_all[-3:])


if __name__ == '__main__':
    df = pd.read_csv('mmas-irace-configs-found.csv')
    kwargs = {}
    # Parse command line arguments
    args = parse_arguments()
    (n_demand, n_machine, direction) = parse_instance(args.instance)
    conf_params = df.loc[df['Instance'] == '({}, {}, {})'.format(n_demand, n_machine, direction)]
    problem = Problem(number_machine=n_machine,
                      number_demand=n_demand,
                      parcel=True,
                      local='none',
                      method='MMAS',
                      direction=direction)
    # str_instance = '({},{},{},{})'.format(args.n_machine, args.n_demand, int(args.parcel), args.dir)
    obj_batch = []
    # mkdir
    Path('./raw_mmas').mkdir(parents=True, exist_ok=True)
    Path('./raw_mmas_local').mkdir(parents=True, exist_ok=True)
    # run
    if args.method == 'MMAS':
        Path('./mmas_data').mkdir(parents=True, exist_ok=True)
        # str_alg = '({},{})'.format(args.greedy_param, args.sp)
        # if args.batch == 101:
        #     batch_range = np.arange(101)
        # else:
        #     batch_range = np.arange(1)
        # seeds for replication
        preseed = np.array([226024, 894631, 118599, 802361, 23414, 976405, 798742, 647772, 82428, 566941,
                            175144, 435676, 331388, 428582, 873627, 41918, 7806, 562734, 523424, 609150,
                            93564, 209194, 220472, 63488, 570335, 153744, 543934, 625362, 84325, 636283,
                            464398, 529193, 318544, 205037, 852066, 988015, 15880, 665647, 658019, 690671,
                            362619, 803845, 868070, 394902, 161626, 636900, 332690, 442120, 113993, 276401,
                            942972, 134143, 137052, 921830, 727872, 61800, 943104, 108918, 233229, 936444,
                            689071, 862780, 944836, 552032, 357025, 92066, 869317, 216829, 493700, 51734,
                            691270, 146044, 728563, 471856, 132138, 736886, 77208, 443348, 224069, 656098,
                            990195, 516716, 854011, 698891, 184790, 161487, 336484, 22868, 246949, 410368,
                            194817, 318576, 98816, 312131, 22585, 889346, 900289, 789335, 25676, 591257,
                            839707])
        # for idx_batch in batch_range:
        # solver = Parallel_Controller(problem=problem, greedy_param=args.greedy_param, selection_pressure=args.sp, ant_kw=None)
        if args.ls == 'none':
            local_search = 0
        else:
            local_search = 1
        # batch_range = np.arange(int(args.batch))
        if int(conf_params['bestant']) == 0:
            tba = 'BSFA'
        else:
            tba = 'IBA'
        # print(conf_params)
        solver = MMAS_Solver(problem=problem,
                             alpha=int(conf_params['alpha']),
                             beta=int(conf_params['beta']),
                             rho=float(conf_params['rho']),
                             tau0=1,
                             population_size=args.n_ants,
                             iteration_max=args.maxiter,
                             selection_pressure=float(conf_params['sp']),
                             type_best_ant=tba,
                             local_search=local_search)
        print(conf_params)
        try:
            seed = preseed[args.idx]
            # print(seed)
            solver.problem.initialise_seed(seed=seed)
        except:
            # else:
            raise Exception('Problem in seed initialisation')

        solver.run()
        print('Best-solution: {}'.format(solver.total_obj))
        obj_batch.append(solver.total_obj)
        # str_alg = '({},{})'.format(args.greedy_param, args.sp)
        # solver.export_best_solution_simple()
        # solver.export()
py
b4179161cc25d52bc7d1e5db4989579d98f5cf0a
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates.  All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.

import oci  # noqa: F401
from oci.util import WAIT_RESOURCE_NOT_FOUND  # noqa: F401


class LinkClientCompositeOperations(object):
    """
    This class provides a wrapper around :py:class:`~oci.tenant_manager_control_plane.LinkClient` and offers convenience methods
    for operations that would otherwise need to be chained together. For example, instead of performing an action
    on a resource (e.g. launching an instance, creating a load balancer) and then using a waiter to wait for the resource
    to enter a given state, you can call a single method in this class to accomplish the same functionality
    """

    def __init__(self, client, **kwargs):
        """
        Creates a new LinkClientCompositeOperations object

        :param LinkClient client:
            The service client which will be wrapped by this object
        """
        self.client = client

    def delete_link_and_wait_for_state(self, link_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.tenant_manager_control_plane.LinkClient.delete_link` and waits for the :py:class:`~oci.tenant_manager_control_plane.models.WorkRequest`
        to enter the given state(s).

        :param str link_id: (required)
            OCID of the link to terminate.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.tenant_manager_control_plane.models.WorkRequest.status`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.tenant_manager_control_plane.LinkClient.delete_link`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = None
        try:
            operation_result = self.client.delete_link(link_id, **operation_kwargs)
        except oci.exceptions.ServiceError as e:
            if e.status == 404:
                return WAIT_RESOURCE_NOT_FOUND
            else:
                raise e

        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.headers['opc-work-request-id']

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_work_request(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
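

# Usage sketch (illustrative, not part of the generated file): the composite class
# wraps an already-constructed LinkClient. Loading the default CLI config and the
# example OCID below are assumptions made for this sketch.
if __name__ == "__main__":
    from oci.tenant_manager_control_plane import LinkClient

    config = oci.config.from_file()
    link_client = LinkClient(config)
    composite = LinkClientCompositeOperations(link_client)
    composite.delete_link_and_wait_for_state(
        link_id="ocid1.link.oc1..exampleuniqueID",
        wait_for_states=["SUCCEEDED", "FAILED"])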
py
b417916be3747d969e6c9ca883b52b71195ba2f7
# coding: utf-8 # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2019-Present Datadog, Inc. import re # noqa: F401 import sys # noqa: F401 from datadog_api_client.v1.api_client import ApiClient, Endpoint from datadog_api_client.v1.model_utils import ( # noqa: F401 check_allowed_values, check_validations, date, datetime, file_type, none_type, validate_and_convert_types ) from datadog_api_client.v1.model.api_error_response import APIErrorResponse from datadog_api_client.v1.model.synthetics_api_test_result_full import SyntheticsAPITestResultFull from datadog_api_client.v1.model.synthetics_browser_test_result_full import SyntheticsBrowserTestResultFull from datadog_api_client.v1.model.synthetics_ci_test_body import SyntheticsCITestBody from datadog_api_client.v1.model.synthetics_delete_tests_payload import SyntheticsDeleteTestsPayload from datadog_api_client.v1.model.synthetics_delete_tests_response import SyntheticsDeleteTestsResponse from datadog_api_client.v1.model.synthetics_get_api_test_latest_results_response import SyntheticsGetAPITestLatestResultsResponse from datadog_api_client.v1.model.synthetics_get_browser_test_latest_results_response import SyntheticsGetBrowserTestLatestResultsResponse from datadog_api_client.v1.model.synthetics_global_variable import SyntheticsGlobalVariable from datadog_api_client.v1.model.synthetics_list_tests_response import SyntheticsListTestsResponse from datadog_api_client.v1.model.synthetics_locations import SyntheticsLocations from datadog_api_client.v1.model.synthetics_test_details import SyntheticsTestDetails from datadog_api_client.v1.model.synthetics_trigger_ci_tests_response import SyntheticsTriggerCITestsResponse from datadog_api_client.v1.model.synthetics_update_test_pause_status_payload import SyntheticsUpdateTestPauseStatusPayload class SyntheticsApi(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def __create_global_variable( self, body, **kwargs ): """Create a global variable # noqa: E501 Create a Synthetics global variable. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_global_variable(body, async_req=True) >>> result = thread.get() Args: body (SyntheticsGlobalVariable): Details of the global variable to create. Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. 
async_req (bool): execute request asynchronously Returns: SyntheticsGlobalVariable If the method is called asynchronously, returns the request thread. """ kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['body'] = \ body return self.call_with_http_info(**kwargs) self.create_global_variable = Endpoint( settings={ 'response_type': (SyntheticsGlobalVariable,), 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/variables', 'operation_id': 'create_global_variable', 'http_method': 'POST', 'servers': None, }, params_map={ 'all': [ 'body', ], 'required': [ 'body', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'body': (SyntheticsGlobalVariable,), }, 'attribute_map': { }, 'location_map': { 'body': 'body', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [ 'application/json' ] }, api_client=api_client, callable=__create_global_variable ) def __create_test( self, body, **kwargs ): """Create a test # noqa: E501 Create a Synthetic test. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_test(body, async_req=True) >>> result = thread.get() Args: body (SyntheticsTestDetails): Details of the test to create. Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: SyntheticsTestDetails If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['body'] = \ body return self.call_with_http_info(**kwargs) self.create_test = Endpoint( settings={ 'response_type': (SyntheticsTestDetails,), 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/tests', 'operation_id': 'create_test', 'http_method': 'POST', 'servers': None, }, params_map={ 'all': [ 'body', ], 'required': [ 'body', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'body': (SyntheticsTestDetails,), }, 'attribute_map': { }, 'location_map': { 'body': 'body', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [ 'application/json' ] }, api_client=api_client, callable=__create_test ) def __delete_global_variable( self, variable_id, **kwargs ): """Delete a global variable # noqa: E501 Delete a Synthetics global variable. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_global_variable(variable_id, async_req=True) >>> result = thread.get() Args: variable_id (str): The ID of the global variable. Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: None If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['variable_id'] = \ variable_id return self.call_with_http_info(**kwargs) self.delete_global_variable = Endpoint( settings={ 'response_type': None, 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/variables/{variable_id}', 'operation_id': 'delete_global_variable', 'http_method': 'DELETE', 'servers': None, }, params_map={ 'all': [ 'variable_id', ], 'required': [ 'variable_id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'variable_id': (str,), }, 'attribute_map': { 'variable_id': 'variable_id', }, 'location_map': { 'variable_id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__delete_global_variable ) def __delete_tests( self, body, **kwargs ): """Delete tests # noqa: E501 Delete multiple Synthetic tests by ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_tests(body, async_req=True) >>> result = thread.get() Args: body (SyntheticsDeleteTestsPayload): Public ID list of the Synthetic tests to be deleted. Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: SyntheticsDeleteTestsResponse If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['body'] = \ body return self.call_with_http_info(**kwargs) self.delete_tests = Endpoint( settings={ 'response_type': (SyntheticsDeleteTestsResponse,), 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/tests/delete', 'operation_id': 'delete_tests', 'http_method': 'POST', 'servers': None, }, params_map={ 'all': [ 'body', ], 'required': [ 'body', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'body': (SyntheticsDeleteTestsPayload,), }, 'attribute_map': { }, 'location_map': { 'body': 'body', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [ 'application/json' ] }, api_client=api_client, callable=__delete_tests ) def __edit_global_variable( self, variable_id, body, **kwargs ): """Edit a global variable # noqa: E501 Edit a Synthetics global variable. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.edit_global_variable(variable_id, body, async_req=True) >>> result = thread.get() Args: variable_id (str): The ID of the global variable. body (SyntheticsGlobalVariable): Details of the global variable to update. Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: SyntheticsGlobalVariable If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['variable_id'] = \ variable_id kwargs['body'] = \ body return self.call_with_http_info(**kwargs) self.edit_global_variable = Endpoint( settings={ 'response_type': (SyntheticsGlobalVariable,), 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/variables/{variable_id}', 'operation_id': 'edit_global_variable', 'http_method': 'PUT', 'servers': None, }, params_map={ 'all': [ 'variable_id', 'body', ], 'required': [ 'variable_id', 'body', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'variable_id': (str,), 'body': (SyntheticsGlobalVariable,), }, 'attribute_map': { 'variable_id': 'variable_id', }, 'location_map': { 'variable_id': 'path', 'body': 'body', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [ 'application/json' ] }, api_client=api_client, callable=__edit_global_variable ) def __get_api_test_latest_results( self, public_id, **kwargs ): """Get the test's latest results summaries (API) # noqa: E501 Get the last 50 test results summaries for a given Synthetics API test. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_test_latest_results(public_id, async_req=True) >>> result = thread.get() Args: public_id (str): The public ID of the test for which to search results for. Keyword Args: from_ts (int): Timestamp from which to start querying results.. [optional] to_ts (int): Timestamp up to which to query results.. [optional] probe_dc ([str]): Locations for which to query results.. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: SyntheticsGetAPITestLatestResultsResponse If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['public_id'] = \ public_id return self.call_with_http_info(**kwargs) self.get_api_test_latest_results = Endpoint( settings={ 'response_type': (SyntheticsGetAPITestLatestResultsResponse,), 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/tests/{public_id}/results', 'operation_id': 'get_api_test_latest_results', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'public_id', 'from_ts', 'to_ts', 'probe_dc', ], 'required': [ 'public_id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'public_id': (str,), 'from_ts': (int,), 'to_ts': (int,), 'probe_dc': ([str],), }, 'attribute_map': { 'public_id': 'public_id', 'from_ts': 'from_ts', 'to_ts': 'to_ts', 'probe_dc': 'probe_dc', }, 'location_map': { 'public_id': 'path', 'from_ts': 'query', 'to_ts': 'query', 'probe_dc': 'query', }, 'collection_format_map': { 'probe_dc': 'multi', } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__get_api_test_latest_results ) def __get_api_test_result( self, public_id, result_id, **kwargs ): """Get a test result (API) # noqa: E501 Get a specific full result from a given (API) Synthetic test. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_test_result(public_id, result_id, async_req=True) >>> result = thread.get() Args: public_id (str): The public ID of the API test to which the target result belongs. result_id (str): The ID of the result to get. Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: SyntheticsAPITestResultFull If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['public_id'] = \ public_id kwargs['result_id'] = \ result_id return self.call_with_http_info(**kwargs) self.get_api_test_result = Endpoint( settings={ 'response_type': (SyntheticsAPITestResultFull,), 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/tests/{public_id}/results/{result_id}', 'operation_id': 'get_api_test_result', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'public_id', 'result_id', ], 'required': [ 'public_id', 'result_id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'public_id': (str,), 'result_id': (str,), }, 'attribute_map': { 'public_id': 'public_id', 'result_id': 'result_id', }, 'location_map': { 'public_id': 'path', 'result_id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__get_api_test_result ) def __get_browser_test( self, public_id, **kwargs ): """Get a test configuration (browser) # noqa: E501 Get the detailed configuration (including steps) associated with a Synthetics browser test. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_browser_test(public_id, async_req=True) >>> result = thread.get() Args: public_id (str): The public ID of the test to get details from. Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: SyntheticsTestDetails If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['public_id'] = \ public_id return self.call_with_http_info(**kwargs) self.get_browser_test = Endpoint( settings={ 'response_type': (SyntheticsTestDetails,), 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/tests/browser/{public_id}', 'operation_id': 'get_browser_test', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'public_id', ], 'required': [ 'public_id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'public_id': (str,), }, 'attribute_map': { 'public_id': 'public_id', }, 'location_map': { 'public_id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__get_browser_test ) def __get_browser_test_latest_results( self, public_id, **kwargs ): """Get the test's latest results summaries (browser) # noqa: E501 Get the last 50 test results summaries for a given Synthetics Browser test. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_browser_test_latest_results(public_id, async_req=True) >>> result = thread.get() Args: public_id (str): The public ID of the browser test for which to search results for. Keyword Args: from_ts (int): Timestamp from which to start querying results.. [optional] to_ts (int): Timestamp up to which to query results.. [optional] probe_dc ([str]): Locations for which to query results.. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: SyntheticsGetBrowserTestLatestResultsResponse If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['public_id'] = \ public_id return self.call_with_http_info(**kwargs) self.get_browser_test_latest_results = Endpoint( settings={ 'response_type': (SyntheticsGetBrowserTestLatestResultsResponse,), 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/tests/browser/{public_id}/results', 'operation_id': 'get_browser_test_latest_results', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'public_id', 'from_ts', 'to_ts', 'probe_dc', ], 'required': [ 'public_id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'public_id': (str,), 'from_ts': (int,), 'to_ts': (int,), 'probe_dc': ([str],), }, 'attribute_map': { 'public_id': 'public_id', 'from_ts': 'from_ts', 'to_ts': 'to_ts', 'probe_dc': 'probe_dc', }, 'location_map': { 'public_id': 'path', 'from_ts': 'query', 'to_ts': 'query', 'probe_dc': 'query', }, 'collection_format_map': { 'probe_dc': 'multi', } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__get_browser_test_latest_results ) def __get_browser_test_result( self, public_id, result_id, **kwargs ): """Get a test result (browser) # noqa: E501 Get a specific full result from a given (browser) Synthetic test. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_browser_test_result(public_id, result_id, async_req=True) >>> result = thread.get() Args: public_id (str): The public ID of the browser test to which the target result belongs. result_id (str): The ID of the result to get. Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: SyntheticsBrowserTestResultFull If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['public_id'] = \ public_id kwargs['result_id'] = \ result_id return self.call_with_http_info(**kwargs) self.get_browser_test_result = Endpoint( settings={ 'response_type': (SyntheticsBrowserTestResultFull,), 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/tests/browser/{public_id}/results/{result_id}', 'operation_id': 'get_browser_test_result', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'public_id', 'result_id', ], 'required': [ 'public_id', 'result_id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'public_id': (str,), 'result_id': (str,), }, 'attribute_map': { 'public_id': 'public_id', 'result_id': 'result_id', }, 'location_map': { 'public_id': 'path', 'result_id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__get_browser_test_result ) def __get_test( self, public_id, **kwargs ): """Get a test configuration (API) # noqa: E501 Get the detailed configuration associated with a Synthetics test. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_test(public_id, async_req=True) >>> result = thread.get() Args: public_id (str): The public ID of the test to get details from. Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: SyntheticsTestDetails If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['public_id'] = \ public_id return self.call_with_http_info(**kwargs) self.get_test = Endpoint( settings={ 'response_type': (SyntheticsTestDetails,), 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/tests/{public_id}', 'operation_id': 'get_test', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'public_id', ], 'required': [ 'public_id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'public_id': (str,), }, 'attribute_map': { 'public_id': 'public_id', }, 'location_map': { 'public_id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__get_test ) def __list_locations( self, **kwargs ): """Get all locations (public and private) # noqa: E501 Get the list of public and private locations available for Synthetics tests. No arguments required. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_locations(async_req=True) >>> result = thread.get() Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: SyntheticsLocations If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') return self.call_with_http_info(**kwargs) self.list_locations = Endpoint( settings={ 'response_type': (SyntheticsLocations,), 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/locations', 'operation_id': 'list_locations', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ ], 'required': [], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { }, 'attribute_map': { }, 'location_map': { }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__list_locations ) def __list_tests( self, **kwargs ): """Get a list of tests # noqa: E501 Get the list of all Synthetic tests (can be filtered by type). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_tests(async_req=True) >>> result = thread.get() Keyword Args: check_type (str): API or browser to filter the list by test type, undefined to get the unfiltered list.. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: SyntheticsListTestsResponse If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') return self.call_with_http_info(**kwargs) self.list_tests = Endpoint( settings={ 'response_type': (SyntheticsListTestsResponse,), 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/tests', 'operation_id': 'list_tests', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'check_type', ], 'required': [], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'check_type': (str,), }, 'attribute_map': { 'check_type': 'check_type', }, 'location_map': { 'check_type': 'query', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__list_tests ) def __trigger_ci_tests( self, body, **kwargs ): """Trigger some Synthetics tests for CI # noqa: E501 Trigger a set of Synthetics tests for continuous integration # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.trigger_ci_tests(body, async_req=True) >>> result = thread.get() Args: body (SyntheticsCITestBody): Details of the test to trigger. Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: SyntheticsTriggerCITestsResponse If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['body'] = \ body return self.call_with_http_info(**kwargs) self.trigger_ci_tests = Endpoint( settings={ 'response_type': (SyntheticsTriggerCITestsResponse,), 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/tests/trigger/ci', 'operation_id': 'trigger_ci_tests', 'http_method': 'POST', 'servers': None, }, params_map={ 'all': [ 'body', ], 'required': [ 'body', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'body': (SyntheticsCITestBody,), }, 'attribute_map': { }, 'location_map': { 'body': 'body', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [ 'application/json' ] }, api_client=api_client, callable=__trigger_ci_tests ) def __update_test( self, public_id, body, **kwargs ): """Edit a test # noqa: E501 Edit the configuration of a Synthetic test. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_test(public_id, body, async_req=True) >>> result = thread.get() Args: public_id (str): The public ID of the test to get details from. body (SyntheticsTestDetails): New test details to be saved. Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: SyntheticsTestDetails If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['public_id'] = \ public_id kwargs['body'] = \ body return self.call_with_http_info(**kwargs) self.update_test = Endpoint( settings={ 'response_type': (SyntheticsTestDetails,), 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/tests/{public_id}', 'operation_id': 'update_test', 'http_method': 'PUT', 'servers': None, }, params_map={ 'all': [ 'public_id', 'body', ], 'required': [ 'public_id', 'body', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'public_id': (str,), 'body': (SyntheticsTestDetails,), }, 'attribute_map': { 'public_id': 'public_id', }, 'location_map': { 'public_id': 'path', 'body': 'body', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [ 'application/json' ] }, api_client=api_client, callable=__update_test ) def __update_test_pause_status( self, public_id, body, **kwargs ): """Pause or start a test # noqa: E501 Pause or start a Synthetics test by changing the status. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_test_pause_status(public_id, body, async_req=True) >>> result = thread.get() Args: public_id (str): The public ID of the Synthetic test to update. body (SyntheticsUpdateTestPauseStatusPayload): Status to set the given Synthetic test to. Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: bool If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['public_id'] = \ public_id kwargs['body'] = \ body return self.call_with_http_info(**kwargs) self.update_test_pause_status = Endpoint( settings={ 'response_type': (bool,), 'auth': [ 'apiKeyAuth', 'appKeyAuth' ], 'endpoint_path': '/api/v1/synthetics/tests/{public_id}/status', 'operation_id': 'update_test_pause_status', 'http_method': 'PUT', 'servers': None, }, params_map={ 'all': [ 'public_id', 'body', ], 'required': [ 'public_id', 'body', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'public_id': (str,), 'body': (SyntheticsUpdateTestPauseStatusPayload,), }, 'attribute_map': { 'public_id': 'public_id', }, 'location_map': { 'public_id': 'path', 'body': 'body', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [ 'application/json' ] }, api_client=api_client, callable=__update_test_pause_status )
py
b41792218d2e9c2443ee5b5ddafeff1ff69af8ce
import torch import torch.nn.functional as F import torch.nn as nn from models.gcn_layers import ResidualGatedGCNLayer, MLP from utils.model_utils import * class ResidualGatedGCNModel(nn.Module): """Residual Gated GCN Model for outputting predictions as edge adjacency matrices. References: Paper: https://arxiv.org/pdf/1711.07553v2.pdf Code: https://github.com/xbresson/spatial_graph_convnets """ def __init__(self, config, dtypeFloat, dtypeLong): super(ResidualGatedGCNModel, self).__init__() self.dtypeFloat = dtypeFloat self.dtypeLong = dtypeLong # Define net parameters self.num_nodes = config.num_nodes self.node_dim = config.node_dim self.voc_nodes_in = config['voc_nodes_in'] self.voc_nodes_out = config['num_nodes'] # config['voc_nodes_out'] self.voc_edges_in = config['voc_edges_in'] self.voc_edges_out = config['voc_edges_out'] self.hidden_dim = config['hidden_dim'] self.num_layers = config['num_layers'] self.mlp_layers = config['mlp_layers'] self.aggregation = config['aggregation'] # Node and edge embedding layers/lookups self.nodes_coord_embedding = nn.Linear(self.node_dim, self.hidden_dim, bias=False) self.edges_values_embedding = nn.Linear(1, self.hidden_dim//2, bias=False) self.edges_embedding = nn.Embedding(self.voc_edges_in, self.hidden_dim//2) # Define GCN Layers gcn_layers = [] for layer in range(self.num_layers): gcn_layers.append(ResidualGatedGCNLayer(self.hidden_dim, self.aggregation)) self.gcn_layers = nn.ModuleList(gcn_layers) # Define MLP classifiers self.mlp_edges = MLP(self.hidden_dim, self.voc_edges_out, self.mlp_layers) # self.mlp_nodes = MLP(self.hidden_dim, self.voc_nodes_out, self.mlp_layers) def forward(self, x_edges, x_edges_values, x_nodes, x_nodes_coord, y_edges, edge_cw, num_neg = 4, loss_type = "CE", gamma = 1): """ Args: x_edges: Input edge adjacency matrix (batch_size, num_nodes, num_nodes) x_edges_values: Input edge distance matrix (batch_size, num_nodes, num_nodes) x_nodes: Input nodes (batch_size, num_nodes) x_nodes_coord: Input node coordinates (batch_size, num_nodes, node_dim) y_edges: Targets for edges (batch_size, num_nodes, num_nodes) edge_cw: Class weights for edges loss # y_nodes: Targets for nodes (batch_size, num_nodes, num_nodes) # node_cw: Class weights for nodes loss Returns: y_pred_edges: Predictions for edges (batch_size, num_nodes, num_nodes) # y_pred_nodes: Predictions for nodes (batch_size, num_nodes) loss: Value of loss function """ # Node and edge embedding x = self.nodes_coord_embedding(x_nodes_coord) # B x V x H e_vals = self.edges_values_embedding(x_edges_values.unsqueeze(3)) # B x V x V x H e_tags = self.edges_embedding(x_edges) # B x V x V x H e = torch.cat((e_vals, e_tags), dim=3) # permute kaibin QIU x = x.permute(0, 2, 1) # B x H x V e = e.permute(0, 3, 1, 2) # B x H x V x V # GCN layers for layer in range(self.num_layers): x, e = self.gcn_layers[layer](x, e) # B x V x H, B x V x V x H # MLP classifier y_pred_edges = self.mlp_edges(e) # B x V x V x voc_edges_out # y_pred_nodes = self.mlp_nodes(x) # B x V x voc_nodes_out # Compute loss edge_cw = torch.Tensor(edge_cw).type(self.dtypeFloat) # Convert to tensors loss = loss_edges(y_pred_edges, y_edges, edge_cw, loss_type = loss_type, gamma = gamma) return y_pred_edges, loss
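

# ---------------------------------------------------------------------------
# Shape sketch (illustration only; it runs inside this repository only, since
# ResidualGatedGCNLayer, MLP and loss_edges come from the repo's own modules).
# The config values below are placeholders.  Note that the model reads some
# hyper-parameters as attributes (e.g. config.num_nodes) and others as keys
# (e.g. config['hidden_dim']), so the helper dict supports both styles.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class _Config(dict):
        """Minimal config stub allowing both attribute and item access."""
        __getattr__ = dict.__getitem__

    config = _Config(num_nodes=20, node_dim=2, voc_nodes_in=2,
                     voc_edges_in=3, voc_edges_out=2, hidden_dim=64,
                     num_layers=5, mlp_layers=3, aggregation="mean")
    net = ResidualGatedGCNModel(config, torch.FloatTensor, torch.LongTensor)

    batch_size, n = 4, config.num_nodes
    x_edges = torch.randint(0, config.voc_edges_in, (batch_size, n, n))
    x_edges_values = torch.rand(batch_size, n, n)
    x_nodes = torch.zeros(batch_size, n, dtype=torch.long)
    x_nodes_coord = torch.rand(batch_size, n, config.node_dim)
    y_edges = torch.randint(0, 2, (batch_size, n, n))
    edge_cw = [1.0, 1.0]  # one weight per edge class

    y_pred_edges, loss = net(x_edges, x_edges_values, x_nodes, x_nodes_coord,
                             y_edges, edge_cw)
    print(y_pred_edges.shape)  # (batch_size, n, n, voc_edges_out)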
py
b41792f576a2e906f7944feb1e81553efe7525e9
import logging import multiprocessing as mp import time from crankycoin.models.block import Block from crankycoin.models.transaction import Transaction from crankycoin.models.enums import MessageType, TransactionType from crankycoin.services.queue import Queue from crankycoin import config, logger class Miner(object): HOST = config['user']['ip'] REWARD_ADDRESS = config['user']['public_key'] MAX_TRANSACTIONS_PER_BLOCK = config['network']['max_transactions_per_block'] miner_process = None def __init__(self, blockchain, mempool): mp.log_to_stderr() mp_logger = mp.get_logger() mp_logger.setLevel(logging.DEBUG) self.blockchain = blockchain self.mempool = mempool def start(self): logger.debug("mining process starting with reward address %s...", self.REWARD_ADDRESS) # run parallel self.miner_process = mp.Process(target=self.mine) self.miner_process.start() # run alone # self.mine() def shutdown(self): logger.debug("mining process with reward address %s shutting down...", self.REWARD_ADDRESS) self.miner_process.terminate() def mine(self): while True: block = self.mine_block() if not block: continue logger.info("Block {} found at height {} and nonce {}" .format(block.block_header.hash, block.height, block.block_header.nonce)) if self.blockchain.add_block(block): self.mempool.remove_unconfirmed_transactions(block.transactions[1:]) message = {"host": self.HOST, "type": MessageType.BLOCK_HEADER.value, "data": block.block_header.to_json()} Queue.enqueue(message) return def mine_block(self): latest_block = self.blockchain.get_tallest_block_header() if latest_block is not None: latest_block_header = latest_block[0] latest_block_height = latest_block[2] new_block_height = latest_block_height + 1 previous_hash = latest_block_header.hash else: new_block_height = 1 previous_hash = "" transactions = self.mempool.get_unconfirmed_transactions_chunk(self.MAX_TRANSACTIONS_PER_BLOCK) print("[mine_block] len(transactions):{}".format(len(transactions))) if transactions is None or len(transactions) == 0: fees = 0 else: fees = sum(t.fee for t in transactions) coinbase_prev_hash = "0" if new_block_height == 1 \ else self.blockchain.get_coinbase_hash_by_block_hash(previous_hash) # coinbase coinbase = Transaction( "0", self.REWARD_ADDRESS, self.blockchain.get_reward(new_block_height) + fees, 0, prev_hash=coinbase_prev_hash, tx_type=TransactionType.COINBASE.value, signature="" ) print("[mine_block] new_block_height: {}".format(new_block_height)) for t in range(len(transactions)): print("B: {} {}".format(t, transactions[t].prev_hash)) transactions[t].prev_hash = transactions[t].tx_hash print("A: {} {}".format(t, transactions[t].prev_hash)) transactions.insert(0, coinbase) timestamp = int(time.time()) i = 0 block = Block(new_block_height, transactions, previous_hash, timestamp) print("[mine_block] new_block_height: {}".format(new_block_height)) print("[mine_block] block.block_header.hash_difficulty: {}".format(block.block_header.hash_difficulty)) acc = self.blockchain.calculate_hash_difficulty() while block.block_header.hash_difficulty < acc: print("[mine_block] self.blockchain.calculate_hash_difficulty(): {}".format(acc)) latest_block = self.blockchain.get_tallest_block_header() if latest_block is not None: latest_block_header = latest_block[0] latest_block_height = latest_block[2] if latest_block_height >= new_block_height or latest_block_header.hash != previous_hash: # Next block in sequence was mined by another node. Stop mining current block. 
return None i += 1 block.block_header.nonce = i acc = self.blockchain.calculate_hash_difficulty() # print("[mine_block] block.block_header.nonce: {}".format(block.block_header.nonce)) return block
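

# ---------------------------------------------------------------------------
# Standalone illustration (not part of crankycoin): the nonce search that
# ``mine_block`` performs above, reduced to its essence.  Here the target is a
# simple count of leading zero bits, standing in for the chain's
# ``calculate_hash_difficulty`` value.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import hashlib

    def leading_zero_bits(digest):
        value = int.from_bytes(digest, "big")
        return 256 - value.bit_length()

    header = b"previous-hash|transactions-root|timestamp"
    difficulty = 16  # required leading zero bits (hypothetical target)
    nonce = 0
    while leading_zero_bits(hashlib.sha256(header + str(nonce).encode()).digest()) < difficulty:
        nonce += 1
    print("found nonce:", nonce)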
py
b417934b64fa1c745263fb32a89bce8fe9ce3941
from menu import Menu, MenuItem from coffee_maker import CoffeeMaker from money_machine import MoneyMachine money_machine = MoneyMachine() coffee_maker = CoffeeMaker() menu = Menu() is_on = True while is_on: options = menu.get_items() choice = input(f"What would you like? {options}: ") if choice == "off": is_on = False elif choice == "report": coffee_maker.report() money_machine.report() else: drink = menu.find_drink(choice) if coffee_maker.is_resource_sufficient(drink) and money_machine.make_payment(drink.cost): coffee_maker.make_coffee(drink)
py
b417944f9761cf7994abffc822eb9a003e8a57d4
from django.shortcuts import render from django.views.generic import ListView from pipetaxon.settings import VALID_RANKS from taxonomy.models import Taxonomy, Division, ALL_RANKS class Index(ListView): template_name = 'taxonomy/index.html' model = Taxonomy paginate_by = 19 def get_queryset(self): division = self.request.GET.get('division', None) rank = self.request.GET.get('rank', None) search_string = self.request.GET.get('search_string', None) filters = {} if division: filters['division'] = division if rank: filters['rank'] = rank if search_string: filters['name__contains'] = search_string queryset = Taxonomy.objects.filter(**filters) return queryset @property def filters(self): return {'rank': self.request.GET.get('rank', None), 'division': self.request.GET.get('division', None)} @property def division_list(self): return Division.objects.all().order_by('id') @property def rank_list(self): return VALID_RANKS if VALID_RANKS else ALL_RANKS def api(request): return render(request, 'taxonomy/api.html', {})
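

# Example (illustration only): for a request such as
#     /?division=2&rank=species&search_string=homo
# Index.get_queryset above builds the equivalent of
#     Taxonomy.objects.filter(division=2, rank="species", name__contains="homo")
# and the resulting list is paginated 19 rows at a time.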
py
b41795100ba0a1beb001b1bbe362f673cfd3373e
from e2cnn.nn import GeometricTensor
from e2cnn.nn import FieldType
from e2cnn.gspaces import *

from .equivariant_module import EquivariantModule
from .branching_module import BranchingModule
from .merge_module import MergeModule

from typing import List, Tuple, Union, Any

import torch
import numpy as np

__all__ = ["MultipleModule"]


class MultipleModule(EquivariantModule):

    def __init__(self,
                 in_type: FieldType,
                 labels: List[str],
                 modules: List[Tuple[EquivariantModule, Union[str, List[str]]]],
                 reshuffle: int = 0
                 ):
        r"""

        Split the input tensor in multiple branches identified by the input ``labels`` and apply to each of them the
        corresponding module in ``modules``

        A label is associated to each field in the input type, while ``modules`` assigns a module to apply to each
        label (or set of labels).
        ``modules`` should be a list of pairs, each containing an :class:`~e2cnn.nn.EquivariantModule` and a label (or
        a list of labels).

        During forward, fields are grouped by the labels and the input tensor is split accordingly.
        Then, each subtensor is passed to the corresponding module in ``modules``.

        If ``reshuffle`` is set to a positive integer, a copy of the input tensor is first built sorting the fields
        according to the value set:

        - 1: fields are sorted by their labels
        - 2: fields are sorted by their labels and, then, by their size
        - 3: fields are sorted by their labels, by their size and, then, by their type

        In this way, fields that need to be retrieved together are contiguous and it is possible to exploit slicing to
        split the tensor.
        By default, ``reshuffle = 0`` which means that no sorting is performed and, so, if input fields are not
        contiguous this layer will use indexing to retrieve sub-tensors.

        This module wraps a :class:`~e2cnn.nn.BranchingModule` followed by a :class:`~e2cnn.nn.MergeModule`.

        Args:
            in_type (FieldType): the input field type
            labels (list): the list of labels to group the fields
            modules (list): list of modules to apply to the labeled fields
            reshuffle (int, optional): set how to reshuffle the input fields before splitting the tensor.
                    By default (``0``) no reshuffling is done

        """
        assert isinstance(in_type.gspace, GeneralOnR2)

        super(MultipleModule, self).__init__()

        self.gspace = in_type.gspace
        self.in_type = in_type

        all_labels = set(labels)

        modules_labels = []
        for _, l in modules:
            if isinstance(l, list):
                modules_labels += l
            else:
                modules_labels.append(l)
        modules_labels = set(modules_labels)

        # every label assigned to a module has to appear among the fields' labels (subset check)
        assert modules_labels <= all_labels, \
            "Error! Some labels assigned to the modules don't appear among the channels labels"

        # print(labels)
        reshuffle_level = int(reshuffle)

        self.branching = BranchingModule(in_type, labels, reshuffle=reshuffle_level)

        for module, l in modules:
            if isinstance(l, str):
                assert module.in_type == self.branching.out_type[l], \
                    f"Label {l}, branch class and module ({module}) class don't match:\n [{module.in_type}] \n [{self.branching.out_type[l]}]\n"
            else:
                for i, lb in enumerate(l):
                    assert module.in_type[i] == self.branching.out_type[lb], \
                        f"Label {lb}, branch class and module ({module}) class [{i}] don't match: \n [{module.in_type[i]}] \n [{self.branching.out_type[lb]}]\n"

        self.merging = MergeModule(modules)

        self.out_type = self.merging.out_type

    def forward(self, input: GeometricTensor) -> GeometricTensor:
        r"""
        Split the input tensor according to the labels, apply each module to the corresponding input sub-tensors and
        stack the results.

Args: input (GeometricTensor): the input GeometricTensor Returns: the concatenation of the output of each module """ assert input.type == self.in_type sub_tensors = self.branching(input) return self.merging(sub_tensors) def evaluate_output_shape(self, input_shape: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]: assert len(input_shape) == 4 assert input_shape[1] == self.in_type.size branches_shapes = self.branching.evaluate_output_shape(input_shape) out_shape = self.merging.evaluate_output_shape(branches_shapes) return out_shape def check_equivariance(self, atol: float = 2e-6, rtol: float = 1e-5, full_space_action: bool = True) -> List[Tuple[Any, float]]: if full_space_action: return super(MultipleModule, self).check_equivariance(atol=atol, rtol=rtol) else: c = self.in_type.size x = torch.randn(10, c, 9, 9) print(c, self.out_type.size) print([r.name for r in self.in_type.representations]) print([r.name for r in self.out_type.representations]) x = GeometricTensor(x, self.in_type) errors = [] for el in self.gspace.testing_elements: out1 = self(x).transform_fibers(el) out2 = self(x.transform_fibers(el)) errs = (out1.tensor - out2.tensor).detach().numpy() errs = np.abs(errs).reshape(-1) print(el, errs.max(), errs.mean(), errs.var()) if not torch.allclose(out1.tensor, out2.tensor, atol=atol, rtol=rtol): tmp = np.abs((out1.tensor - out2.tensor).detach().numpy()) tmp = tmp.reshape(out1.tensor.shape[0], out1.tensor.shape[1], -1).max(axis=2)#.mean(axis=0) np.set_printoptions(precision=2, threshold=200000000, suppress=True, linewidth=500) print(tmp.shape) print(tmp) assert torch.allclose(out1.tensor, out2.tensor, atol=atol, rtol=rtol), \ 'The error found during equivariance check with element "{}" is too high: max = {}, mean = {} var ={}' \ .format(el, errs.max(), errs.mean(), errs.var()) errors.append((el, errs.mean())) return errors
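

# ---------------------------------------------------------------------------
# Usage sketch (illustration only, assuming the installed package exposes this
# class as ``e2cnn.nn.MultipleModule``): split a 4-field input into two
# labelled branches and apply a different nonlinearity to each.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from e2cnn import gspaces
    from e2cnn import nn as enn

    gspace = gspaces.Rot2dOnR2(N=4)
    in_type = enn.FieldType(gspace, [gspace.trivial_repr] * 4)

    # one label per input field; fields sharing a label form one branch
    labels = ["a", "a", "b", "b"]
    branch_type = enn.FieldType(gspace, [gspace.trivial_repr] * 2)

    module = enn.MultipleModule(
        in_type,
        labels,
        [(enn.ReLU(branch_type), "a"), (enn.ELU(branch_type), "b")],
    )

    x = enn.GeometricTensor(torch.randn(8, in_type.size, 9, 9), in_type)
    y = module(x)       # branch outputs, merged back into a single tensor
    print(y.type.size)  # 4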
py
b417951cc3e5ae443940662455845cb5e610f47b
# Copyright (c) 2009-2022 The Regents of the University of Michigan. # Part of HOOMD-blue, released under the BSD 3-Clause License. """HPMC updaters. HPMC updaters work with the `hpmc.integrate.HPMCIntegrator` to apply changes to the system consistent with the particle shape and defined interaction energies. The `BoxMC`, `Clusters`, and `MuVT` updaters apply trial moves that enable enhanced sampling or the equilibration of different ensembles. `QuickCompress` helps prepare non-overlapping configurations of particles in a given box shape. """ from . import _hpmc from . import integrate from hoomd import _hoomd from hoomd.logging import log from hoomd.data.parameterdicts import TypeParameterDict, ParameterDict from hoomd.data.typeparam import TypeParameter import hoomd.data.typeconverter from hoomd.operation import Updater import hoomd class BoxMC(Updater): r"""Apply box updates to sample isobaric and related ensembles. Args: betaP (`float` or :py:mod:`hoomd.variant.Variant`): :math:`\frac{p}{k_{\mathrm{B}}T}` :math:`[\mathrm{length}^{-2}]` in 2D or :math:`[\mathrm{length}^{-3}]` in 3D. trigger (hoomd.trigger.Trigger): Select the timesteps to perform box trial moves. Use `BoxMC` in conjunction with an HPMC integrator to allow the simulation box to undergo random fluctuations at constant pressure, or random deformations at constant volume. `BoxMC` supports both isotropic and anisotropic volume change moves as well as shearing of the simulation box. A single `BoxMC` instance may apply multiple types of box moves during a simulation run. .. rubric:: Box move types By default, no moves are applied (the *weight* values for all move types default to 0). In a given timestep, the type of move is selected randomly with probability: .. math:: p = \frac{w_k}{\sum_k w_k} where :math:`w_k` is the weight of the move type. A given box move proposes a trial simulation box :math:`(L_x^t, L_y^t, L_z^t, xy^t, xz^t, yz^t)` as a change from the current box: :math:`(L_x, L_y, L_z, xy, xz, yz)`. The form of the change depends on the selected move type: * `volume` (``mode='standard'``): Change the volume (or area in 2D) of the simulation box while maining fixed aspect ratios :math:`Lx/Ly`, :math:`Lx/Lz`. In 3D: .. math:: V^t &= V + u \\ L_x^t &= \left( \frac{Lx}{Ly} \frac{Lx}{Lz} V^t \right)^{1/3} \\ L_y^t &= L_x^t \frac{Ly}{Lx} \\ L_z^t &= L_x^t \frac{Lz}{Lx} \\ xy^t &= xy \\ xz^t &= xz \\ yz^t &= yz \\ where :math:`u` is a random value uniformly distributed in the interval :math:`[-\delta_\mathrm{volume}, \delta_\mathrm{volume}]`. In 2D: .. math:: V^t &= V + u \\ L_x^t &= \left( \frac{Lx}{Ly} V^t \right)^{1/2} \\ L_y^t &= L_x^t \frac{Ly}{Lx} \\ xy^t &= xy \\ * `volume` (``mode='ln'``): Change the volume (or area in 2D) of the simulation box while maining fixed aspect ratios :math:`Lx/Ly`, :math:`Lx/Lz`. In 3D: .. math:: V^t &= V e^u \\ L_x^t &= \left( \frac{Lx}{Ly} \frac{Lx}{Lz} V^t \right)^{1/3} \\ L_y^t &= L_x^t \frac{Ly}{Lx} \\ L_z^t &= L_x^t \frac{Lz}{Lx} \\ xy^t &= xy \\ xz^t &= xz \\ yz^t &= yz \\ where :math:`u` is a random value uniformly distributed in the interval :math:`[-\delta_\mathrm{volume}, \delta_\mathrm{volume}]`. In 2D: .. math:: V^t &= V e^u \\ L_x^t &= \left( \frac{Lx}{Ly} V^t \right)^{1/2} \\ L_y^t &= L_x^t \frac{Ly}{Lx} \\ xy^t &= xy \\ * `aspect`: Change the aspect ratio of the simulation box while maintaining a fixed volume. In 3D: .. 
math:: L_k^t & = \begin{cases} L_k(1 + a) & u < 0.5 \\ L_k \frac{1}{1+a} & u \ge 0.5 \end{cases} \\ L_{m \ne k}^t & = L_m \sqrt{\frac{L_k}{L_k^t}} & xy^t &= xy \\ xz^t &= xz \\ yz^t &= yz \\ where :math:`u` is a random value uniformly distributed in the interval :math:`[0, 1]`, :math:`a` is a random value uniformly distributed in the interval :math:`[0, \delta_\mathrm{aspect}]` and :math:`k` is randomly chosen uniformly from the set :math:`\{x, y, z\}`. In 2D: .. math:: L_k^t & = \begin{cases} L_k(1 + a) & u < 0.5 \\ L_k \frac{1}{1+a} & u \ge 0.5 \end{cases} \\ L_{m \ne k}^t & = L_m \frac{L_k}{L_k^t} \\ xy^t &= xy \\ * `length`: Change the box lengths: .. math:: L_k^t = L_k + u where :math:`u` is a random value uniformly distributed in the interval :math:`[-\delta_{\mathrm{length},k}, -\delta_{\mathrm{length},k}]`, and :math:`k` is randomly chosen uniformly from the set :math:`\{a : a \in \{x, y, z\}, \delta_{\mathrm{length},a} \ne 0 \}`. * `shear`: Change the box shear parameters. In 3D: .. math:: (xy^t, xz^t, yz^t) = \begin{cases} \left(xy + s_{xy}, \enspace xz, \enspace yz \right) & u < \frac{1}{3} \\ \left( xy^t = xy, \enspace xz + s_{xz}, \enspace yz \right) & \frac{1}{3} \le u < \frac{2}{3} \\ \left( xy^t = xy, \enspace xz, \enspace yz + s_{yz} \right) & \frac{2}{3} \le u \le 1 \\ \end{cases} \\ where :math:`u` is a random value uniformly distributed in the interval :math:`[0, 1]` and :math:`s_k` is a random value uniformly distributed in the interval :math:`[-\delta_{\mathrm{shear},k}, \delta_{\mathrm{shear},k}]`. `BoxMC` attempts and records trial moves for shear parameters even when :math:`\delta_{\mathrm{shear},k}=0`. In 2D: .. math:: xy^t = xy + s_{xy} .. rubric:: Acceptance All particle particle positions are scaled into the trial box to form the trial configuration :math:`C^t`: .. math:: \vec{r}_i^t = s_x \vec{a}_1^t + s_y \vec{a}_2^t + s_z \vec{a}_3^t - \frac{\vec{a}_1^t + \vec{a}_2^t + \vec{a}_3^t}{2} where :math:`\vec{a}_k^t` are the new box vectors determined by :math:`(L_x^t, L_y^t, L_z^t, xy^t, xz^t, yz^t)` and the scale factors are determined by the current particle position :math:`\vec{r}_i` and the box vectors :math:`\vec{a}_k`: .. math:: \vec{r}_i = s_x \vec{a}_1 + s_y \vec{a}_2 + s_z \vec{a}_3 - \frac{\vec{a}_1 + \vec{a}_2 + \vec{a}_3}{2} The trial move is accepted with the probability: .. math:: p_\mathrm{accept} = \begin{cases} \exp(-(\beta \Delta H + \beta \Delta U)) & \beta \Delta H + \beta \Delta U > 0 \\ 1 & \beta \Delta H + \beta \Delta U \le 0 \\ \end{cases} where :math:`\Delta U = U^t - U` is the difference in potential energy, :math:`\beta \Delta H = \beta P (V^t - V) - N_\mathrm{particles} \cdot \ln(V^t / V)` for most move types. It is :math:`\beta P (V^t - V) - (N_\mathrm{particles}+1) \cdot \ln(V^t / V)` for ln volume moves. When the trial move is accepted, the system state is set to the the trial configuration. When it is not accepted, the move is rejected and the state is not modified. .. rubric:: Mixed precision `BoxMC` uses reduced precision floating point arithmetic when checking for particle overlaps in the local particle reference frame. Attributes: volume (dict): Parameters for isobaric volume moves that scale the box lengths uniformly. The dictionary has the following keys: * ``weight`` (float) - Relative weight of volume box moves. * ``mode`` (str) - ``standard`` proposes changes to the box volume and ``ln`` proposes changes to the logarithm of the volume. Initially starts off in 'standard' mode. 
* ``delta`` (float) - Maximum change in **V** or **ln(V)** where V is box area (2D) or volume (3D) :math:`\delta_\mathrm{volume}`. aspect (dict): Parameters for isovolume aspect ratio moves. The dictionary has the following keys: * ``weight`` (float) - Relative weight of aspect box moves. * ``delta`` (float) - Maximum relative change of box aspect ratio :math:`\delta_\mathrm{aspect} [\mathrm{dimensionless}]`. length (dict): Parameters for isobaric box length moves that change box lengths independently. The dictionary has the following keys: * ``weight`` (float) - Maximum change of HOOMD-blue box parameters Lx, Ly, and Lz. * ``delta`` (tuple[float, float, float]) - Maximum change of the box lengths :math:`(\delta_{\mathrm{length},x}, \delta_{\mathrm{length},y}, \delta_{\mathrm{length},z}) [\mathrm{length}]`. shear (dict): Parameters for isovolume box shear moves. The dictionary has the following keys: * ``weight`` (float) - Relative weight of shear box moves. * ``delta`` (tuple[float, float, float]) - maximum change of the box tilt factor :math:`(\delta_{\mathrm{shear},xy}, \delta_{\mathrm{shear},xz}, \delta_{\mathrm{shear},yz}) [\mathrm{dimensionless}]`. * ``reduce`` (float) - Maximum number of lattice vectors of shear to allow before applying lattice reduction. Values less than 0.5 disable shear reduction. instance (int): When using multiple `BoxMC` updaters in a single simulation, give each a unique value for `instance` so they generate different streams of random numbers. """ def __init__(self, trigger, betaP): super().__init__(trigger) _default_dict = dict(weight=0.0, delta=0.0) param_dict = ParameterDict( volume={ "mode": hoomd.data.typeconverter.OnlyFrom(['standard', 'ln']), **_default_dict }, aspect=_default_dict, length=dict(weight=0.0, delta=(0.0,) * 3), shear=dict(weight=0.0, delta=(0.0,) * 3, reduce=0.0), betaP=hoomd.variant.Variant, instance=int, ) self._param_dict.update(param_dict) self.volume["mode"] = "standard" self.betaP = betaP self.instance = 0 def _add(self, simulation): """Add the operation to a simulation. HPMC uses RNGs. Warn the user if they did not set the seed. """ if isinstance(simulation, hoomd.Simulation): simulation._warn_if_seed_unset() super()._add(simulation) def _attach(self): integrator = self._simulation.operations.integrator if not isinstance(integrator, integrate.HPMCIntegrator): raise RuntimeError("The integrator must be a HPMC integrator.") if not integrator._attached: raise RuntimeError("Integrator is not attached yet.") self._cpp_obj = _hpmc.UpdaterBoxMC(self._simulation.state._cpp_sys_def, integrator._cpp_obj, self.betaP) super()._attach() @property def counter(self): """Trial move counters. The counter object has the following attributes: * ``volume``: `tuple` [`int`, `int`] - Number of accepted and rejected volume and length moves. * ``shear``: `tuple` [`int`, `int`] - Number of accepted and rejected shear moves. * ``aspect``: `tuple` [`int`, `int`] - Number of accepted and rejected aspect moves. Note: The counts are reset to 0 at the start of each call to `hoomd.Simulation.run`. Before the first call to `Simulation.run`, `counter` is `None`. """ if not self._attached: return None else: return self._cpp_obj.getCounters(1) @log(category="sequence") def volume_moves(self): """tuple[int, int]: The accepted and rejected volume and length moves. (0, 0) before the first call to `Simulation.run`. 
""" counter = self.counter if counter is None: return (0, 0) else: if self.volume["mode"] == "standard": attr = "volume" else: attr = "ln_volume" return getattr(counter, attr) @log(category="sequence") def shear_moves(self): """tuple[int, int]: The accepted and rejected shear moves. (0, 0) before the first call to `Simulation.run`. """ counter = self.counter if counter is None: return (0, 0) else: return counter.shear @log(category="sequence") def aspect_moves(self): """tuple[int, int]: The accepted and rejected aspect moves. (0, 0) before the first call to `Simulation.run`. """ counter = self.counter if counter is None: return (0, 0) else: return counter.aspect class MuVT(Updater): r"""Insert and remove particles in the muVT ensemble. Args: trigger (int): Number of timesteps between grand canonical insertions transfer_types (list): List of type names that are being transferred from/to the reservoir or between boxes ngibbs (int): The number of partitions to use in Gibbs ensemble simulations (if == 1, perform grand canonical muVT) max_volume_rescale (float): maximum step size in ln(V) (applies to Gibbs ensemble) move_ratio (float): (if set) Set the ratio between volume and exchange/transfer moves (applies to Gibbs ensemble) The muVT (or grand-canonical) ensemble simulates a system at constant fugacity. Gibbs ensemble simulations are also supported, where particles and volume are swapped between two or more boxes. Every box correspond to one MPI partition, and can therefore run on multiple ranks. Use the ``ranks_per_partition`` argument of `hoomd.communicator.Communicator` to enable partitioned simulations. .. rubric:: Mixed precision `MuVT` uses reduced precision floating point arithmetic when checking for particle overlaps in the local particle reference frame. Note: Multiple Gibbs ensembles are also supported in a single parallel job, with the ``ngibbs`` option to update.muvt(), where the number of partitions can be a multiple of ``ngibbs``. Attributes: trigger (int): Select the timesteps on which to perform cluster moves. 
transfer_types (list): List of type names that are being transferred from/to the reservoir or between boxes max_volume_rescale (float): Maximum step size in ln(V) (applies to Gibbs ensemble) move_ratio (float): The ratio between volume and exchange/transfer moves (applies to Gibbs ensemble) ntrial (float): (**default**: 1) Number of configurational bias attempts to swap depletants """
def __init__(self, transfer_types, ngibbs=1, max_volume_rescale=0.1, volume_move_probability=0.5, trigger=1): super().__init__(trigger) self.ngibbs = int(ngibbs) _default_dict = dict(ntrial=1) param_dict = ParameterDict( transfer_types=list(transfer_types), max_volume_rescale=float(max_volume_rescale), volume_move_probability=float(volume_move_probability), **_default_dict) self._param_dict.update(param_dict) typeparam_fugacity = TypeParameter( 'fugacity', type_kind='particle_types', param_dict=TypeParameterDict(hoomd.variant.Variant, len_keys=1, _defaults=hoomd.variant.Constant(0.0))) self._extend_typeparam([typeparam_fugacity])
def _attach(self): integrator = self._simulation.operations.integrator if not isinstance(integrator, integrate.HPMCIntegrator): raise RuntimeError("The integrator must be a HPMC integrator.") cpp_cls_name = "UpdaterMuVT" cpp_cls_name += integrator.__class__.__name__ cpp_cls = getattr(_hpmc, cpp_cls_name) self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, integrator._cpp_obj, self.ngibbs) super()._attach()
@log(category='sequence', requires_run=True) def insert_moves(self): """tuple[int, int]: Count of the accepted and rejected particle \ insertion moves. None when not attached """ counter = self._cpp_obj.getCounters(1) return counter.insert
@log(category='sequence', requires_run=True) def remove_moves(self): """tuple[int, int]: Count of the accepted and rejected particle removal \ moves. None when not attached """ counter = self._cpp_obj.getCounters(1) return counter.remove
@log(category='sequence', requires_run=True) def exchange_moves(self): """tuple[int, int]: Count of the accepted and rejected particle \ exchange moves. None when not attached """ counter = self._cpp_obj.getCounters(1) return counter.exchange
@log(category='sequence', requires_run=True) def volume_moves(self): """tuple[int, int]: Count of the accepted and rejected particle volume \ moves. None when not attached """ counter = self._cpp_obj.getCounters(1) return counter.volume
@log(category='object') def N(self): # noqa: N802 - allow N as a function name """dict: Map of number of particles per type. None when not attached. """ N_dict = None if self._attached: N_dict = self._cpp_obj.N return N_dict
class Clusters(Updater): """Apply geometric cluster algorithm (GCA) moves. Args: pivot_move_probability (float): Set the probability for attempting a pivot move. flip_probability (float): Set the probability for transforming an individual cluster. trigger (Trigger): Select the timesteps on which to perform cluster moves. The GCA as described in Liu and Luijten (2004), http://doi.org/10.1103/PhysRevLett.92.035504 is used for hard shape, patch interactions and depletants. Implicit depletants are supported and simulated on-the-fly, as if they were present in the actual system. Supported moves include pivot moves (point reflection) and line reflections (pi rotation around an axis). With anisotropic particles, the pivot move cannot be used because it would create a chiral mirror image of the particle, and only line reflections are employed.
In general, line reflections are not rejection free because of periodic boundary conditions, as discussed in Sinkovits et al. (2012), http://doi.org/10.1063/1.3694271 . However, we restrict the line reflections to axes parallel to the box axis, which makes those moves rejection-free for anisotropic particles, but the algorithm is then no longer ergodic for those and needs to be combined with local moves. .. rubric:: Mixed precision `Clusters` uses reduced precision floating point arithmetic when checking for particle overlaps in the local particle reference frame. Attributes: pivot_move_probability (float): Set the probability for attempting a pivot move. flip_probability (float): Set the probability for transforming an individual cluster. trigger (Trigger): Select the timesteps on which to perform cluster moves. """ _remove_for_pickling = Updater._remove_for_pickling + ('_cpp_cell',) _skip_for_equality = Updater._skip_for_equality | {'_cpp_cell'} def __init__(self, pivot_move_probability=0.5, flip_probability=0.5, trigger=1): super().__init__(trigger) param_dict = ParameterDict( pivot_move_probability=float(pivot_move_probability), flip_probability=float(flip_probability)) self._param_dict.update(param_dict) self.instance = 0 def _add(self, simulation): """Add the operation to a simulation. HPMC uses RNGs. Warn the user if they did not set the seed. """ if isinstance(simulation, hoomd.Simulation): simulation._warn_if_seed_unset() super()._add(simulation) def _attach(self): integrator = self._simulation.operations.integrator if not isinstance(integrator, integrate.HPMCIntegrator): raise RuntimeError("The integrator must be a HPMC integrator.") cpp_cls_name = "UpdaterClusters" cpp_cls_name += integrator.__class__.__name__ cpp_cls = getattr(_hpmc, cpp_cls_name) use_gpu = (isinstance(self._simulation.device, hoomd.device.GPU) and (cpp_cls_name + 'GPU') in _hpmc.__dict__) if use_gpu: cpp_cls_name += "GPU" cpp_cls = getattr(_hpmc, cpp_cls_name) if not integrator._attached: raise RuntimeError("Integrator is not attached yet.") if use_gpu: sys_def = self._simulation.state._cpp_sys_def self._cpp_cell = _hoomd.CellListGPU(sys_def) self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, integrator._cpp_obj, self._cpp_cell) else: self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, integrator._cpp_obj) super()._attach() @log(requires_run=True) def avg_cluster_size(self): """float: the typical size of clusters. None when not attached. """ counter = self._cpp_obj.getCounters(1) return counter.average_cluster_size class QuickCompress(Updater): r"""Quickly compress a hard particle system to a target box. Args: trigger (Trigger): Update the box dimensions on triggered time steps. target_box (Box): Dimensions of the target box. max_overlaps_per_particle (float): The maximum number of overlaps to allow per particle (may be less than 1 - e.g. up to 250 overlaps would be allowed when in a system of 1000 particles when max_overlaps_per_particle=0.25). min_scale (float): The minimum scale factor to apply to box dimensions. Use `QuickCompress` in conjunction with an HPMC integrator to scale the system to a target box size. `QuickCompress` can typically compress dilute systems to near random close packing densities in tens of thousands of time steps. It operates by making small changes toward the `target_box`, but only when there are no particle overlaps in the current simulation state. In 3D: .. 
math:: L_x' &= \begin{cases} \max( L_x \cdot s, L_{\mathrm{target},x} ) & L_{\mathrm{target},x} < L_x \\ \min( L_x / s, L_{\mathrm{target},x} ) & L_{\mathrm{target},x} \ge L_x \end{cases} \\ L_y' &= \begin{cases} \max( L_y \cdot s, L_{\mathrm{target},y} ) & L_{\mathrm{target},y} < L_y \\ \min( L_y / s, L_{\mathrm{target},y} ) & L_{\mathrm{target},y} \ge L_y \end{cases} \\ L_z' &= \begin{cases} \max( L_z \cdot s, L_{\mathrm{target},z} ) & L_{\mathrm{target},z} < L_z \\ \min( L_z / s, L_{\mathrm{target},z} ) & L_{\mathrm{target},z} \ge L_z \end{cases} \\ xy' &= \begin{cases} \max( xy \cdot s, xy_\mathrm{target} ) & xy_\mathrm{target} < xy \\ \min( xy / s, xy_\mathrm{target} ) & xy_\mathrm{target} \ge xy \end{cases} \\ xz' &= \begin{cases} \max( xz \cdot s, xz_\mathrm{target} ) & xz_\mathrm{target} < xz \\ \min( xz / s, xz_\mathrm{target} ) & xz_\mathrm{target} \ge xz \end{cases} \\ yz' &= \begin{cases} \max( yz \cdot s, yz_\mathrm{target} ) & yz_\mathrm{target} < yz \\ \min( yz / s, yz_\mathrm{target} ) & yz_\mathrm{target} \ge yz \end{cases} \\ and in 2D: .. math:: L_x' &= \begin{cases} \max( L_x \cdot s, L_{\mathrm{target},x} ) & L_{\mathrm{target},x} < L_x \\ \min( L_x / s, L_{\mathrm{target},x} ) & L_{\mathrm{target},x} \ge L_x \end{cases} \\ L_y' &= \begin{cases} \max( L_y \cdot s, L_{\mathrm{target},y} ) & L_{\mathrm{target},y} < L_y \\ \min( L_y / s, L_{\mathrm{target},y} ) & L_{\mathrm{target},y} \ge L_y \end{cases} \\ L_z' &= L_z \\ xy' &= \begin{cases} \max( xy \cdot s, xy_\mathrm{target} ) & xy_\mathrm{target} < xy \\ \min( xy / s, xy_\mathrm{target} ) & xy_\mathrm{target} \ge xy \end{cases} \\ xz' &= xz \\ yz' &= yz \\ where the current simulation box is :math:`(L_x, L_y, L_z, xy, xz, yz)`, the target is :math:`(L_{\mathrm{target},x}, L_{\mathrm{target},y}, L_{\mathrm{target},z}, xy_\mathrm{target}, xz_\mathrm{target}, yz_\mathrm{target})`, the new simulation box set is :math:`(L_x', L_y', L_z', xy', xz', yz')` and :math:`s` is the scale factor chosen for this step (see below). `QuickCompress` scales particle coordinates (see `BoxMC` for details) when it sets a new box. When there are more than ``max_overlaps_per_particle * N_particles`` hard particle overlaps in the system in the new box, the box move is rejected. Otherwise, the small number of overlaps remain when the new box is set. `QuickCompress` then waits until `hoomd.hpmc.integrate.HPMCIntegrator` makes local MC trial moves that remove all overlaps. `QuickCompress` adjusts the value of :math:`s` based on the particle and translational trial move sizes to ensure that the trial moves will be able to remove the overlaps. It randomly chooses a value of :math:`s` uniformly distributed between ``max(min_scale, 1.0 - min_move_size / max_diameter)`` and 1.0 where ``min_move_size`` is the smallest MC translational move size adjusted by the acceptance ratio and ``max_diameter`` is the circumsphere diameter of the largest particle type. Tip: Use the `hoomd.hpmc.tune.MoveSize` in conjunction with `QuickCompress` to adjust the move sizes to maintain a constant acceptance ratio as the density of the system increases. Warning: When the smallest MC translational move size is 0, `QuickCompress` will scale the box by 1.0 and not progress toward the target box. Warning: Use `QuickCompress` *OR* `BoxMC`. Do not use both at the same time. .. rubric:: Mixed precision `QuickCompress` uses reduced precision floating point arithmetic when checking for particle overlaps in the local particle reference frame. 
Attributes: trigger (Trigger): Update the box dimensions on triggered time steps. target_box (Box): Dimensions of the target box. max_overlaps_per_particle (float): The maximum number of overlaps to allow per particle (may be less than 1 - e.g. up to 250 overlaps would be allowed when in a system of 1000 particles when max_overlaps_per_particle=0.25). min_scale (float): The minimum scale factor to apply to box dimensions. instance (int): When using multiple `QuickCompress` updaters in a single simulation, give each a unique value for `instance` so that they generate different streams of random numbers. """ def __init__(self, trigger, target_box, max_overlaps_per_particle=0.25, min_scale=0.99): super().__init__(trigger) param_dict = ParameterDict(max_overlaps_per_particle=float, min_scale=float, target_box=hoomd.Box, instance=int) param_dict['max_overlaps_per_particle'] = max_overlaps_per_particle param_dict['min_scale'] = min_scale param_dict['target_box'] = target_box self._param_dict.update(param_dict) self.instance = 0 def _add(self, simulation): """Add the operation to a simulation. HPMC uses RNGs. Warn the user if they did not set the seed. """ if isinstance(simulation, hoomd.Simulation): simulation._warn_if_seed_unset() super()._add(simulation) def _attach(self): integrator = self._simulation.operations.integrator if not isinstance(integrator, integrate.HPMCIntegrator): raise RuntimeError("The integrator must be a HPMC integrator.") if not integrator._attached: raise RuntimeError("Integrator is not attached yet.") self._cpp_obj = _hpmc.UpdaterQuickCompress( self._simulation.state._cpp_sys_def, integrator._cpp_obj, self.max_overlaps_per_particle, self.min_scale, self.target_box._cpp_obj) super()._attach() @property def complete(self): """True when the box has achieved the target.""" if not self._attached: return False return self._cpp_obj.isComplete()
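# ---------------------------------------------------------------------------
# A minimal usage sketch for the updaters defined above (BoxMC and
# QuickCompress), assuming a HOOMD v3-style workflow. The file name
# 'init.gsd', the particle type 'A', the pressure, the trigger periods, and
# the target box are illustrative placeholders, not values taken from this
# module.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    sim = hoomd.Simulation(device=hoomd.device.CPU(), seed=7)
    sim.create_state_from_gsd(filename='init.gsd')  # hypothetical input file

    mc = integrate.Sphere()
    mc.shape['A'] = dict(diameter=1.0)
    sim.operations.integrator = mc

    # Sample an isobaric ensemble at reduced pressure betaP = 5.0 with
    # uniform volume moves attempted every 10 steps.
    boxmc = BoxMC(trigger=hoomd.trigger.Periodic(10), betaP=5.0)
    boxmc.volume = dict(weight=1.0, mode='standard', delta=0.5)
    sim.operations.updaters.append(boxmc)

    # Alternatively, compress toward a target box before a production run:
    # qc = QuickCompress(trigger=hoomd.trigger.Periodic(10),
    #                    target_box=hoomd.Box.cube(6.0))
    # sim.operations.updaters.append(qc)

    sim.run(10_000)
    print(boxmc.volume_moves)  # (accepted, rejected) counts for this run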
py
b417961563b050291efc6fe6cd998189171db4d9
from fractions import Fraction

numerator_a, denominator_a = map(int, input().split())
numerator_b, denominator_b = map(int, input().split())

# Fraction reduces to lowest terms by default, so the private _normalize
# flag is unnecessary (and not part of the public API).
numerator_result, denominator_result = Fraction(
    numerator=numerator_a * denominator_b + numerator_b * denominator_a,
    denominator=denominator_a * denominator_b,
).as_integer_ratio()

print(numerator_result, denominator_result)
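# A dependency-light sketch of the same arithmetic, for reference: add
# a/b + c/d by cross-multiplying and reduce with the greatest common divisor
# instead of relying on Fraction. Illustrative alternative only.
from math import gcd


def add_fractions(a, b, c, d):
    """Return (numerator, denominator) of a/b + c/d in lowest terms."""
    num = a * d + c * b
    den = b * d
    g = gcd(num, den)
    return num // g, den // g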
py
b41796289c0dd0a0d3bab85069e58157e985cfbb
from __future__ import division from __future__ import print_function from __future__ import absolute_import from __future__ import unicode_literals import os import sys import argparse import json import shutil from collections import defaultdict import numpy as np import pandas as pd from sklearn import linear_model, preprocessing, cluster, metrics, svm, model_selection import matplotlib.pyplot as plt import seaborn as sns import scipy.linalg as slin import scipy.sparse.linalg as sparselin import scipy.sparse as sparse import scipy.io as sio import IPython import data_utils as data import datasets import defenses import defense_testers import upper_bounds from upper_bounds import hinge_loss, hinge_grad ### Parameters verbose = True use_bias = True tol = 1e-5 num_iter_to_throw_out = 0 learning_rate = 0.1 print_interval = 1000 percentile = 70 dataset_num_iter_after_burnin = { 'imdb': 6000, #12000, 'enron': 8000, 'dogfish': 15000, 'mnist_17': 8000 } dataset_learning_rates = { 'imdb': 0.001, 'enron': 0.1, 'dogfish': 0.05, 'mnist_17': 0.1 } ### # By default, we generate upper and lower bounds for the given dataset, # assuming a fixed oracle sphere+slab defense without any integrity constraints. # If we pass in any of the boolean flags, # it switches instead to processing already-generated upper bounds # and attacks (lower bounds) to put them in the same format as the default # upper and lower bounds generated here, so that we can easily compare the results. # Load and make sure parameters match parser = argparse.ArgumentParser() parser.add_argument('dataset_name', help='One of: imdb, enron, dogfish, mnist_17') parser.add_argument('--slab', action='store_true', help='Data-dependent attack') parser.add_argument('--grad', action='store_true', help='Gradient-based attack baseline') parser.add_argument('--labelflip', action='store_true', help='Label flip attack baseline') parser.add_argument('--int', action='store_true', help='Integer-constrained attack') parser.add_argument('--percentile', type=float) args = parser.parse_args() if args.percentile is not None: percentile = args.percentile dataset_name = args.dataset_name process_slab = args.slab process_grad = args.grad process_labelflip = args.labelflip process_int = args.int assert (process_slab + process_grad + process_labelflip + process_int) <= 1 if process_slab + process_grad + process_labelflip + process_int == 0: no_process = True else: no_process = False X_train, Y_train, X_test, Y_test = datasets.load_dataset(dataset_name) assert dataset_name in datasets.DATASET_WEIGHT_DECAYS epsilons = datasets.DATASET_EPSILONS[dataset_name] norm_sq_constraint = datasets.DATASET_NORM_SQ_CONSTRAINTS[dataset_name] learning_rate = dataset_learning_rates[dataset_name] num_iter_after_burnin = dataset_num_iter_after_burnin[dataset_name] upper_total_losses = np.zeros_like(epsilons) upper_good_losses = np.zeros_like(epsilons) upper_bad_losses = np.zeros_like(epsilons) upper_good_acc = np.zeros_like(epsilons) upper_bad_acc = np.zeros_like(epsilons) upper_params_norm_sq = np.zeros_like(epsilons) lower_total_train_losses = np.zeros_like(epsilons) lower_avg_good_train_losses = np.zeros_like(epsilons) lower_avg_bad_train_losses = np.zeros_like(epsilons) lower_test_losses = np.zeros_like(epsilons) lower_overall_train_acc = np.zeros_like(epsilons) lower_good_train_acc = np.zeros_like(epsilons) lower_bad_train_acc = np.zeros_like(epsilons) lower_test_acc = np.zeros_like(epsilons) lower_params_norm_sq = np.zeros_like(epsilons) lower_weight_decays = 
np.zeros_like(epsilons)
### Initial training on clean data
print('=== Training on clean data ===')
# Special case for the imdb dataset: # We set the initial guess for the correct weight_decay # to avoid unnecessary computation, since it takes a bit of time # to binary search to find this
if (no_process) and (dataset_name == 'imdb'): clean_weight_decay = 0.0102181799337 elif not no_process: standard_f = np.load(datasets.get_bounds_path(dataset_name, norm_sq_constraint)) clean_weight_decay = standard_f['lower_weight_decays'][0] assert np.all(epsilons == standard_f['epsilons']) else: clean_weight_decay = None
train_loss, train_acc, test_loss, test_acc, \ params_norm_sq, weight_decay, orig_params, orig_bias = \ upper_bounds.svm_with_rho_squared( X_train, Y_train, X_test, Y_test, norm_sq_constraint, use_bias=use_bias, weight_decay=clean_weight_decay)
if epsilons[0] == 0: upper_total_losses[0] = train_loss upper_good_losses[0] = train_loss upper_bad_losses[0] = 0 upper_good_acc[0] = train_acc upper_bad_acc[0] = 0 upper_params_norm_sq[0] = params_norm_sq lower_total_train_losses[0] = train_loss lower_avg_good_train_losses[0] = train_loss lower_avg_bad_train_losses[0] = 0 lower_test_losses[0] = test_loss lower_overall_train_acc[0] = train_acc lower_good_train_acc[0] = train_acc lower_bad_train_acc[0] = 0 lower_test_acc[0] = test_acc lower_params_norm_sq[0] = params_norm_sq lower_weight_decays[0] = weight_decay
# Init guess lower_weight_decay = weight_decay
if no_process: # If we want to add ignore_slab, here's the place to do it minimizer = upper_bounds.Minimizer() # This is a hack that's needed because we subsequently # do randomized rounding on the attack points, which # pushes stuff out of the feasible set, so we need to # set the percentile to be some conservative low amount if (dataset_name == 'imdb') and (percentile == 70): class_map, centroids, centroid_vec, sphere_radii, _ = data.get_data_params( X_train, Y_train, percentile=15) _, _, _, _, slab_radii = data.get_data_params( X_train, Y_train, percentile=65) else: class_map, centroids, centroid_vec, sphere_radii, slab_radii = data.get_data_params(X_train, Y_train, percentile=percentile) max_iter = num_iter_after_burnin + num_iter_to_throw_out needed_iter = int(np.round(np.max(epsilons) * X_train.shape[0]) + num_iter_to_throw_out) assert max_iter >= needed_iter, 'Not enough samples; increase max_iter to at least %s.'
% needed_iter for epsilon_idx, epsilon in enumerate(epsilons): if epsilon == 0: continue print('=== epsilon = %s ===' % epsilon) # Generate our normal upper/lower bound if no_process: init_w = np.zeros_like(orig_params) init_b = 0 X_modified, Y_modified, idx_train, idx_poison = generate_upper_and_lower_bounds( X_train, Y_train, norm_sq_constraint, epsilon, max_iter, num_iter_to_throw_out, learning_rate, init_w, init_b, class_map, centroids, centroid_vec, sphere_radii, slab_radii, minimizer, verbose=verbose, print_interval=print_interval) elif process_slab: if dataset_name == 'dogfish': f = sio.loadmat(datasets.get_slab_mat_path(dataset_name, epsilon, percentile=50)) elif dataset_name == 'mnist_17': f = sio.loadmat(datasets.get_slab_mat_path(dataset_name, epsilon)) metadata_final = f['metadata_final'] int_upper_good_loss = metadata_final[0][0][0][0][0] int_upper_bad_loss = metadata_final[0][0][1][0][0] int_upper_good_acc = metadata_final[0][0][2][0][0] int_upper_bad_acc = None int_upper_norm_theta = metadata_final[0][0][3][0][0] int_upper_bias = metadata_final[0][0][4][0][0] assert f['epsilon'][0][0] == epsilon int_upper_params_norm_sq = int_upper_norm_theta ** 2 + int_upper_bias ** 2 int_upper_total_loss = int_upper_good_loss + epsilon * int_upper_bad_loss upper_total_losses[epsilon_idx] = int_upper_total_loss upper_good_losses[epsilon_idx] = int_upper_good_loss upper_bad_losses[epsilon_idx] = int_upper_bad_loss upper_good_acc[epsilon_idx] = int_upper_good_acc upper_bad_acc[epsilon_idx] = int_upper_bad_acc upper_params_norm_sq[epsilon_idx] = int_upper_params_norm_sq print('Final upper bound:') print(" Total loss (w/o reg) : %s" % int_upper_total_loss) if int_upper_params_norm_sq > norm_sq_constraint: print('*********************************************************') print('* WARNING: params_norm_sq (%s) > norm_sq_constraint (%s)' % (int_upper_params_norm_sq, norm_sq_constraint)) print('*********************************************************') X_poison = f['bestX'][0, ...] Y_poison = f['bestY'][0, ...].reshape(-1) X_modified, Y_modified, idx_train, idx_poison = process_matlab_train_test( f, X_train, Y_train, X_test, Y_train, X_poison, Y_poison) elif process_grad: # Upper bound is not valid for grad attack weight_decay = standard_f['lower_weight_decays'][epsilon_idx] print(standard_f['lower_weight_decays']) # This is super hacky # and a consequence of weight_decay changing slightly for mnist before we started running gradient descent... 
# Actual weight decays are # [ 0.05666385 0.03622478 0.04557456 0.06812952 0.08532804 0.08444606 0.08047717 0.07430335 0.06812952] # TODO: Fix after deadline if dataset_name == 'mnist_17': weight_decay = [None, 0.0347366333008, 0.0455780029297, 0.068100810051, 0.0852910876274, 0.0848503112793, 0.0804425477982, 0.0742716789246, 0.068100810051][epsilon_idx] # Actual weight decays are # [ 0.00100763 0.0091314 0.03627361 0.08111179 0.10680558 0.11184358 0.10075999 0.09471439 0.08967639] elif dataset_name == 'dogfish': weight_decay = [None, 0.00815894421645, 0.0363878186312, 0.0813030943666, 0.106897273836, 0.105893580523, 0.100875113961, 0.0943511074294, 0.0888307942105][epsilon_idx] X_modified, Y_modified, X_test2, Y_test2, idx_train, idx_poison = datasets.load_attack_npz( dataset_name, datasets.get_grad_attack_wd_npz_path(dataset_name, epsilon, weight_decay), take_path=True) assert np.all(np.isclose(X_test2, X_test)) assert np.all(Y_test2 == Y_test) assert np.all(np.isclose(X_modified[idx_train, :], X_train)) assert np.all(Y_train == Y_modified[idx_train]) elif process_labelflip: X_modified, Y_modified, X_test2, Y_test2, idx_train, idx_poison = datasets.load_attack_npz( dataset_name, datasets.get_labelflip_attack_npz_filename(dataset_name, epsilon, norm_sq_constraint=None)) datasets.check_poisoned_data(X_train, Y_train, X_modified[idx_poison, :], Y_modified[idx_poison], X_modified, Y_modified) else: # Upper bound is not valid for feasible attack # int_upper_good_loss = metadata_final[0][0][0][0][0] # int_upper_bad_loss = metadata_final[0][0][1][0][0] # int_upper_good_acc = metadata_final[0][0][2][0][0] # int_upper_bad_acc = metadata_final[0][0][3][0][0] # int_upper_norm_theta = metadata_final[0][0][4][0][0] # int_upper_bias = metadata_final[0][0][5][0][0] # HARDCODE WARNING if dataset_name == 'imdb': X_modified, Y_modified, X_test2, Y_test2, idx_train, idx_poison = datasets.load_attack_npz( dataset_name, datasets.get_int_attack_npz_filename(dataset_name, epsilon, norm_sq_constraint, percentile=15.0)) assert (X_test2 - X_test).nnz == 0 assert np.all(Y_test2 == Y_test) assert (X_modified[idx_train, :] - X_train).nnz == 0 assert np.all(Y_train == Y_modified[idx_train]) elif dataset_name == 'enron': f = sio.loadmat(datasets.get_int_mat_path(dataset_name, epsilon)) assert f['epsilon'][0][0] == epsilon if sparse.issparse(f['X_train']): assert np.all(f['X_train'].toarray() == X_train) assert np.all(f['X_test'].toarray() == X_test) else: assert np.all(f['X_train'] == X_train) assert np.all(f['X_test'] == X_test) assert np.all(f['y_train'].reshape(-1) == Y_train) assert np.all(f['y_test'].reshape(-1) == Y_test) X_poison = f['X_pert'] # This is not stored as a sparse matrix Y_poison = f['y_pert'].reshape(-1) X_modified, Y_modified, idx_train, idx_poison = process_matlab_train_test( f, X_train, Y_train, X_test, Y_train, X_poison, Y_poison) else: raise ValueError, 'invalid dataset' total_train_loss, avg_good_train_loss, avg_bad_train_loss, test_loss, \ overall_train_acc, good_train_acc, bad_train_acc, test_acc, \ params_norm_sq, lower_weight_decay = upper_bounds.evaluate_attack( X_modified, Y_modified, X_test, Y_test, idx_train, idx_poison, epsilon, lower_weight_decay, norm_sq_constraint, use_bias) lower_total_train_losses[epsilon_idx] = total_train_loss lower_avg_good_train_losses[epsilon_idx] = avg_good_train_loss lower_avg_bad_train_losses[epsilon_idx] = avg_bad_train_loss lower_test_losses[epsilon_idx] = test_loss lower_overall_train_acc[epsilon_idx] = overall_train_acc 
lower_good_train_acc[epsilon_idx] = good_train_acc lower_bad_train_acc[epsilon_idx] = bad_train_acc lower_test_acc[epsilon_idx] = test_acc lower_params_norm_sq[epsilon_idx] = params_norm_sq lower_weight_decays[epsilon_idx] = lower_weight_decay print('** WARNING: Only looking at top one...') # if process_slab: # for k in range(1, f['bestX'].shape[0]): # X_poison = f['bestX'][k, ...] # Y_poison = f['bestY'][k, ...].reshape(-1) # X_modified = np.concatenate((X_train, X_poison), axis=0) # Y_modified = np.concatenate((Y_train, Y_poison), axis=0) # idx_train = slice(0, X_train.shape[0]) # idx_poison = slice(X_train.shape[0], X_modified.shape[0]) # total_train_loss, avg_good_train_loss, avg_bad_train_loss, test_loss, \ # overall_train_acc, good_train_acc, bad_train_acc, test_acc, \ # params_norm_sq, lower_weight_decay = upper_bounds.evaluate_attack( # X_modified, Y_modified, # X_test, Y_test, # idx_train, idx_poison, # epsilon, # lower_weight_decay, # norm_sq_constraint, # use_bias) # if lower_avg_good_train_losses[epsilon_idx] < avg_good_train_loss: # lower_total_train_losses[epsilon_idx] = total_train_loss # lower_avg_good_train_losses[epsilon_idx] = avg_good_train_loss # lower_avg_bad_train_losses[epsilon_idx] = avg_bad_train_loss # lower_test_losses[epsilon_idx] = test_loss # lower_overall_train_acc[epsilon_idx] = overall_train_acc # lower_good_train_acc[epsilon_idx] = good_train_acc # lower_bad_train_acc[epsilon_idx] = bad_train_acc # lower_test_acc[epsilon_idx] = test_acc # lower_params_norm_sq[epsilon_idx] = params_norm_sq # lower_weight_decays[epsilon_idx] = lower_weight_decay attack_save_path = None if dataset_name in ['dogfish', 'mnist_17', 'enron']: if no_process: attack_save_path = datasets.get_attack_npz_path(dataset_name, epsilon, norm_sq_constraint, percentile) elif process_slab: attack_save_path = datasets.get_slab_attack_npz_path(dataset_name, epsilon, norm_sq_constraint) elif process_grad: attack_save_path = datasets.get_grad_attack_npz_path(dataset_name, epsilon, norm_sq_constraint) elif process_labelflip: attack_save_path = datasets.get_labelflip_attack_npz_path(dataset_name, epsilon, norm_sq_constraint) elif process_int: attack_save_path = datasets.get_int_attack_npz_path(dataset_name, epsilon, norm_sq_constraint) # We generate the imdb data without integrity constraints # and then do the randomized rounding after # so we need a separate call to this script with the --int flag # to fully process its results. # To save space, we don't save it to disk if it's processing slab/grad/etc. 
elif (dataset_name in ['imdb']) and (no_process): X_poison_sparse = sparse.csr_matrix(data.rround(data.threshold(X_modified[idx_poison, :]))) X_modified = sparse.vstack((X_train, X_poison_sparse)) attack_save_path = datasets.get_int_attack_npz_path(dataset_name, epsilon, norm_sq_constraint, percentile) if attack_save_path is not None: np.savez( attack_save_path, X_modified=X_modified, Y_modified=Y_modified, X_test=X_test, Y_test=Y_test, idx_train=idx_train, idx_poison=idx_poison ) if no_process: bounds_save_path = datasets.get_bounds_path(dataset_name, norm_sq_constraint, percentile) elif process_slab: bounds_save_path = datasets.get_slab_bounds_path(dataset_name, norm_sq_constraint) elif process_grad: bounds_save_path = datasets.get_grad_bounds_path(dataset_name, norm_sq_constraint) elif process_labelflip: bounds_save_path = datasets.get_labelflip_bounds_path(dataset_name, norm_sq_constraint) elif process_int: bounds_save_path = datasets.get_int_bounds_path(dataset_name, norm_sq_constraint) np.savez( bounds_save_path, percentile=percentile, weight_decay=weight_decay, epsilons=epsilons, upper_total_losses=upper_total_losses, upper_good_losses=upper_good_losses, upper_bad_losses=upper_bad_losses, upper_good_acc=upper_good_acc, upper_bad_acc=upper_bad_acc, upper_params_norm_sq=upper_params_norm_sq, lower_total_train_losses=lower_total_train_losses, lower_avg_good_train_losses=lower_avg_good_train_losses, lower_avg_bad_train_losses=lower_avg_bad_train_losses, lower_test_losses=lower_test_losses, lower_overall_train_acc=lower_overall_train_acc, lower_good_train_acc=lower_good_train_acc, lower_bad_train_acc=lower_bad_train_acc, lower_test_acc=lower_test_acc, lower_params_norm_sq=lower_params_norm_sq, lower_weight_decays=lower_weight_decays )
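# ---------------------------------------------------------------------------
# For reference, a self-contained sketch of the sphere+slab feasibility test
# that the oracle defense assumed above is built around: a point x with class
# index y_idx is kept only if it lies inside a ball around its class centroid
# and inside a slab along the vector joining the class centroids. The exact
# conventions (e.g. how labels map to rows of `centroids`) live in
# data.get_data_params and may differ from this illustration.
# ---------------------------------------------------------------------------
def feasible_under_sphere_and_slab(x, y_idx, centroids, centroid_vec,
                                   sphere_radii, slab_radii):
    mu = centroids[y_idx, :]
    in_sphere = np.linalg.norm(x - mu) <= sphere_radii[y_idx]
    in_slab = np.abs(np.dot(x - mu, centroid_vec)) <= slab_radii[y_idx]
    return in_sphere and in_slab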
py
b41796c425a0963f8f615c5bfb0a17817970c8aa
import requests from agavepy.util import clients_url, random_client_name __all__ = ['ClientCommands'] class ClientCommands(object): def clients_create(self, client_name=None, description=None, tenant_url=None, username=None, password=None, verify_ssl=None, quiet=False): """ Create an Oauth client Make a request to the API to create an Oauth client. Returns the client API key and secret as a tuple. KEYWORD ARGUMENTS ----------------- client_name: string Name for Oauth2 client. description: string Description of the Oauth2 client tenant_url: string URL of the API tenant to interact with username: string The user's username. password: string The user's password verify_ssl: bool Whether to verify SSL connections RETURNS ------- api_key: string api_secret: string """ # Set request endpoint. if tenant_url is None: tenant_url = getattr(self, 'api_server') endpoint = clients_url(tenant_url) # User credentials if username is None: username = getattr(self, 'username') if password is None: password = getattr(self, 'password') if verify_ssl is None: verify_ssl = getattr(self, 'verify', True) # Make sure client_name is not empty if client_name == '' or client_name is None: client_name = random_client_name(words=2, hostname=True) # Make request. try: data = { 'clientName': client_name, 'description': description, 'tier': 'Unlimited', 'callbackUrl': '', } response = requests.post(endpoint, data=data, auth=(username, password), verify=verify_ssl) del password except Exception: del password raise # Parse the request's response and return api key and secret. result = response.json().get('result', {}) api_key = result.get('consumerKey') api_secret = result.get('consumerSecret') if api_key == '' or api_secret == '': raise requests.exceptions.HTTPError( 'Failed to create client {0}'.format(client_name)) return { 'api_key': api_key, 'api_secret': api_secret, 'client_name': client_name }
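# ---------------------------------------------------------------------------
# A hedged usage sketch for ClientCommands.clients_create above. In agavepy
# this mixin is typically part of a full client object; here a tiny stand-in
# namespace supplies the attributes the method falls back to (api_server,
# username, password, verify). The tenant URL and credentials are
# placeholders, and the call performs a real HTTP POST, so run it only
# against a tenant you control.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from types import SimpleNamespace

    stand_in = SimpleNamespace(api_server='https://api.tenant.example.org/',
                               username='jdoe', password='changeme',
                               verify=True)
    keys = ClientCommands.clients_create(stand_in,
                                         client_name='demo-client',
                                         description='example Oauth client')
    print(keys['client_name'], keys['api_key'])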
py
b4179744bbee1627beac27444e74b638fcfaeeb4
#!python from linkedlist import LinkedList class HashTable(object): def __init__(self, init_size=8): """Initialize this hash table with the given initial size.""" self.buckets = [LinkedList() for i in range(init_size)] self.size = 0 # Number of key-value entries def __str__(self): """Return a formatted string representation of this hash table.""" items = ['{!r}: {!r}'.format(key, val) for key, val in self.items()] return '{' + ', '.join(items) + '}' def __repr__(self): """Return a string representation of this hash table.""" return 'HashTable({!r})'.format(self.items()) def _bucket_index(self, key): """Return the bucket index where the given key would be stored.""" return hash(key) % len(self.buckets) def load_factor(self): """Return the load factor, the ratio of number of entries to buckets. Best and worst case running time: ??? under what conditions? [TODO]""" # TODO: Calculate load factor return self.size / len(self.buckets) # return ... def keys(self): """Return a list of all keys in this hash table. Best and worst case running time: ??? under what conditions? [TODO]""" # Collect all keys in each of the buckets all_keys = [] for bucket in self.buckets: for key, value in bucket.items(): all_keys.append(key) return all_keys def values(self): """Return a list of all values in this hash table. Best and worst case running time: ??? under what conditions? [TODO]""" # Collect all values in each of the buckets all_values = [] for bucket in self.buckets: for key, value in bucket.items(): all_values.append(value) return all_values def items(self): """Return a list of all entries (key-value pairs) in this hash table. Best and worst case running time: ??? under what conditions? [TODO]""" # Collect all pairs of key-value entries in each of the buckets all_items = [] for bucket in self.buckets: all_items.extend(bucket.items()) return all_items def length(self): """Return the number of key-value entries by traversing its buckets. Best and worst case running time: ??? under what conditions? [TODO]""" # Count number of key-value entries in each of the buckets item_count = 0 for bucket in self.buckets: item_count += bucket.length() return item_count # Equivalent to this list comprehension: return sum(bucket.length() for bucket in self.buckets) def contains(self, key): """Return True if this hash table contains the given key, or False. Best case running time: ??? under what conditions? [TODO] Worst case running time: ??? under what conditions? [TODO]""" # Find the bucket the given key belongs in index = self._bucket_index(key) bucket = self.buckets[index] # Check if an entry with the given key exists in that bucket entry = bucket.find(lambda key_value: key_value[0] == key) return entry is not None # True or False def get(self, key): """Return the value associated with the given key, or raise KeyError. Best case running time: ??? under what conditions? [TODO] Worst case running time: ??? under what conditions? [TODO]""" # Find the bucket the given key belongs in index = self._bucket_index(key) bucket = self.buckets[index] # Find the entry with the given key in that bucket, if one exists entry = bucket.find(lambda key_value: key_value[0] == key) if entry is not None: # Found # Return the given key's associated value assert isinstance(entry, tuple) assert len(entry) == 2 return entry[1] else: # Not found raise KeyError('Key not found: {}'.format(key)) def set(self, key, value): """Insert or update the given key with its associated value. Best case running time: ??? under what conditions? 
[TODO] Worst case running time: ??? under what conditions? [TODO]""" # Find the bucket the given key belongs in index = self._bucket_index(key) bucket = self.buckets[index] # Find the entry with the given key in that bucket, if one exists # Check if an entry with the given key exists in that bucket entry = bucket.find(lambda key_value: key_value[0] == key) if entry is not None: # Found # In this case, the given key's value is being updated # Remove the old key-value entry from the bucket first bucket.delete(entry) self.size -= 1 # Insert the new key-value entry into the bucket in either case bucket.append((key, value)) self.size += 1 if self.load_factor() > 0.75: self._resize()
def delete(self, key): """Delete the given key and its associated value, or raise KeyError. Best case running time: ??? under what conditions? [TODO] Worst case running time: ??? under what conditions? [TODO]""" # Find the bucket the given key belongs in index = self._bucket_index(key) bucket = self.buckets[index] # Find the entry with the given key in that bucket, if one exists entry = bucket.find(lambda key_value: key_value[0] == key) if entry is not None: # Found # Remove the key-value entry from the bucket and keep size in sync bucket.delete(entry) self.size -= 1 else: # Not found raise KeyError('Key not found: {}'.format(key))
def _resize(self, new_size=None): """Resize this hash table's buckets and rehash all key-value entries. Should be called automatically when load factor exceeds a threshold such as 0.75 after an insertion (when set is called with a new key). Best and worst case running time: ??? under what conditions? [TODO] Best and worst case space usage: ??? what uses this memory? [TODO]""" # If unspecified, choose new size dynamically based on current size if new_size is None: new_size = len(self.buckets) * 2 # Double size # Option to reduce size if buckets are sparsely filled (low load factor) elif new_size == 0: new_size = len(self.buckets) // 2 # Half size (integer division so range() gets an int) # TODO: Get a list to temporarily hold all current key-value entries # ... copy_of_table = self.items() # TODO: Create a new list of new_size total empty linked list buckets # ... self.buckets = [LinkedList() for i in range(new_size)] self.size = 0 # TODO: Insert each key-value entry into the new list of buckets, # which will rehash them into a new bucket index based on the new size # ...
for key, value in copy_of_table: self.set(key, value) def test_hash_table(): ht = HashTable(4) print('HashTable: ' + str(ht)) print('Setting entries:') ht.set('I', 1) print('set(I, 1): ' + str(ht)) ht.set('V', 5) print('set(V, 5): ' + str(ht)) print('size: ' + str(ht.size)) print('length: ' + str(ht.length())) print('buckets: ' + str(len(ht.buckets))) print('load_factor: ' + str(ht.load_factor())) ht.set('X', 10) print('set(X, 10): ' + str(ht)) ht.set('L', 50) # Should trigger resize print('set(L, 50): ' + str(ht)) print('size: ' + str(ht.size)) print('length: ' + str(ht.length())) print('buckets: ' + str(len(ht.buckets))) print('load_factor: ' + str(ht.load_factor())) print('Getting entries:') print('get(I): ' + str(ht.get('I'))) print('get(V): ' + str(ht.get('V'))) print('get(X): ' + str(ht.get('X'))) print('get(L): ' + str(ht.get('L'))) print('contains(X): ' + str(ht.contains('X'))) print('contains(Z): ' + str(ht.contains('Z'))) print('Deleting entries:') ht.delete('I') print('delete(I): ' + str(ht)) ht.delete('V') print('delete(V): ' + str(ht)) ht.delete('X') print('delete(X): ' + str(ht)) ht.delete('L') print('delete(L): ' + str(ht)) print('contains(X): ' + str(ht.contains('X'))) print('size: ' + str(ht.size)) print('length: ' + str(ht.length())) print('buckets: ' + str(len(ht.buckets))) print('load_factor: ' + str(ht.load_factor())) if __name__ == '__main__': test_hash_table()
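# A small, optional demonstration of the resizing policy implemented above:
# once the load factor exceeds 0.75, _resize() doubles the bucket count, so
# the average bucket length (and the expected lookup cost) stays roughly
# constant. Illustrative only; the values below are arbitrary.
def demo_resize(num_items=100):
    ht = HashTable(8)
    for i in range(num_items):
        ht.set('key-{}'.format(i), i)
    print('entries: {}, buckets: {}, load factor: {:.2f}'.format(
        ht.size, len(ht.buckets), ht.load_factor()))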
py
b417978aa76fcd4395673a678988088bdf54ae62
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import functools import re from typing import Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.datafusion_v1beta1.services.data_fusion import pagers from google.cloud.datafusion_v1beta1.types import v1beta1 from google.protobuf import empty_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import DataFusionTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import DataFusionGrpcAsyncIOTransport from .client import DataFusionClient class DataFusionAsyncClient: """Service for creating and managing Data Fusion instances. Data Fusion enables ETL developers to build code-free, data integration pipelines via a point-and-click UI. """ _client: DataFusionClient DEFAULT_ENDPOINT = DataFusionClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = DataFusionClient.DEFAULT_MTLS_ENDPOINT instance_path = staticmethod(DataFusionClient.instance_path) parse_instance_path = staticmethod(DataFusionClient.parse_instance_path) namespace_path = staticmethod(DataFusionClient.namespace_path) parse_namespace_path = staticmethod(DataFusionClient.parse_namespace_path) common_billing_account_path = staticmethod(DataFusionClient.common_billing_account_path) parse_common_billing_account_path = staticmethod(DataFusionClient.parse_common_billing_account_path) common_folder_path = staticmethod(DataFusionClient.common_folder_path) parse_common_folder_path = staticmethod(DataFusionClient.parse_common_folder_path) common_organization_path = staticmethod(DataFusionClient.common_organization_path) parse_common_organization_path = staticmethod(DataFusionClient.parse_common_organization_path) common_project_path = staticmethod(DataFusionClient.common_project_path) parse_common_project_path = staticmethod(DataFusionClient.parse_common_project_path) common_location_path = staticmethod(DataFusionClient.common_location_path) parse_common_location_path = staticmethod(DataFusionClient.parse_common_location_path) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: DataFusionAsyncClient: The constructed client. 
""" return DataFusionClient.from_service_account_info.__func__(DataFusionAsyncClient, info, *args, **kwargs) # type: ignore @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: DataFusionAsyncClient: The constructed client. """ return DataFusionClient.from_service_account_file.__func__(DataFusionAsyncClient, filename, *args, **kwargs) # type: ignore from_service_account_json = from_service_account_file @property def transport(self) -> DataFusionTransport: """Returns the transport used by the client instance. Returns: DataFusionTransport: The transport used by the client instance. """ return self._client.transport get_transport_class = functools.partial(type(DataFusionClient).get_transport_class, type(DataFusionClient)) def __init__(self, *, credentials: ga_credentials.Credentials = None, transport: Union[str, DataFusionTransport] = "grpc_asyncio", client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the data fusion client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.DataFusionTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. """ self._client = DataFusionClient( credentials=credentials, transport=transport, client_options=client_options, client_info=client_info, ) async def list_available_versions(self, request: v1beta1.ListAvailableVersionsRequest = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListAvailableVersionsAsyncPager: r"""Lists possible versions for Data Fusion instances in the specified project and location. Args: request (:class:`google.cloud.datafusion_v1beta1.types.ListAvailableVersionsRequest`): The request object. Request message for the list available versions request. parent (:class:`str`): Required. 
The project and location for which to retrieve instance information in the format projects/{project}/locations/{location}. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.datafusion_v1beta1.services.data_fusion.pagers.ListAvailableVersionsAsyncPager: Response message for the list available versions request. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError("If the `request` argument is set, then none of " "the individual field arguments should be set.") request = v1beta1.ListAvailableVersionsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_available_versions, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("parent", request.parent), )), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListAvailableVersionsAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response async def list_instances(self, request: v1beta1.ListInstancesRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListInstancesAsyncPager: r"""Lists Data Fusion instances in the specified project and location. Args: request (:class:`google.cloud.datafusion_v1beta1.types.ListInstancesRequest`): The request object. Request message for listing Data Fusion instances. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.datafusion_v1beta1.services.data_fusion.pagers.ListInstancesAsyncPager: Response message for the list instance request. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. request = v1beta1.ListInstancesRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_instances, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("parent", request.parent), )), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListInstancesAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response async def get_instance(self, request: v1beta1.GetInstanceRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> v1beta1.Instance: r"""Gets details of a single Data Fusion instance. Args: request (:class:`google.cloud.datafusion_v1beta1.types.GetInstanceRequest`): The request object. Request message for getting details about a Data Fusion instance. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.datafusion_v1beta1.types.Instance: Represents a Data Fusion instance. """ # Create or coerce a protobuf request object. request = v1beta1.GetInstanceRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_instance, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("name", request.name), )), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response async def create_instance(self, request: v1beta1.CreateInstanceRequest = None, *, parent: str = None, instance: v1beta1.Instance = None, instance_id: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Creates a new Data Fusion instance in the specified project and location. Args: request (:class:`google.cloud.datafusion_v1beta1.types.CreateInstanceRequest`): The request object. Request message for creating a Data Fusion instance. parent (:class:`str`): The instance's project and location in the format projects/{project}/locations/{location}. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. instance (:class:`google.cloud.datafusion_v1beta1.types.Instance`): An instance resource. This corresponds to the ``instance`` field on the ``request`` instance; if ``request`` is provided, this should not be set. instance_id (:class:`str`): The name of the instance to create. This corresponds to the ``instance_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. 
The result type for the operation will be :class:`google.cloud.datafusion_v1beta1.types.Instance` Represents a Data Fusion instance. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, instance, instance_id]) if request is not None and has_flattened_params: raise ValueError("If the `request` argument is set, then none of " "the individual field arguments should be set.") request = v1beta1.CreateInstanceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if instance is not None: request.instance = instance if instance_id is not None: request.instance_id = instance_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_instance, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("parent", request.parent), )), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, v1beta1.Instance, metadata_type=v1beta1.OperationMetadata, ) # Done; return the response. return response async def delete_instance(self, request: v1beta1.DeleteInstanceRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Deletes a single Data Fusion instance. Args: request (:class:`google.cloud.datafusion_v1beta1.types.DeleteInstanceRequest`): The request object. Request message for deleting a Data Fusion instance. name (:class:`str`): The instance resource name in the format projects/{project}/locations/{location}/instances/{instance} This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError("If the `request` argument is set, then none of " "the individual field arguments should be set.") request = v1beta1.DeleteInstanceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_instance, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("name", request.name), )), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, empty_pb2.Empty, metadata_type=v1beta1.OperationMetadata, ) # Done; return the response. return response async def update_instance(self, request: v1beta1.UpdateInstanceRequest = None, *, instance: v1beta1.Instance = None, update_mask: field_mask_pb2.FieldMask = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Updates a single Data Fusion instance. Args: request (:class:`google.cloud.datafusion_v1beta1.types.UpdateInstanceRequest`): The request object. Request message for updating a Data Fusion instance. Data Fusion only allows updating the labels, options, and stack driver settings. instance (:class:`google.cloud.datafusion_v1beta1.types.Instance`): The instance resource that replaces the resource on the server. Currently, Data Fusion only allows replacing labels, options, and stack driver settings. All other fields will be ignored. This corresponds to the ``instance`` field on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Field mask is used to specify the fields that the update will overwrite in an instance resource. The fields specified in the update_mask are relative to the resource, not the full request. A field will be overwritten if it is in the mask. If the user does not provide a mask, all the supported fields (labels and options currently) will be overwritten. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.datafusion_v1beta1.types.Instance` Represents a Data Fusion instance. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([instance, update_mask]) if request is not None and has_flattened_params: raise ValueError("If the `request` argument is set, then none of " "the individual field arguments should be set.") request = v1beta1.UpdateInstanceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if instance is not None: request.instance = instance if update_mask is not None: request.update_mask = update_mask # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_instance, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("instance.name", request.instance.name), )), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, v1beta1.Instance, metadata_type=v1beta1.OperationMetadata, ) # Done; return the response. return response async def restart_instance(self, request: v1beta1.RestartInstanceRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Restart a single Data Fusion instance. At the end of an operation instance is fully restarted. Args: request (:class:`google.cloud.datafusion_v1beta1.types.RestartInstanceRequest`): The request object. Request message for restarting a Data Fusion instance. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.datafusion_v1beta1.types.Instance` Represents a Data Fusion instance. """ # Create or coerce a protobuf request object. request = v1beta1.RestartInstanceRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.restart_instance, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("name", request.name), )), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, v1beta1.Instance, metadata_type=v1beta1.OperationMetadata, ) # Done; return the response. return response async def upgrade_instance(self, request: v1beta1.UpgradeInstanceRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Upgrade a single Data Fusion instance. At the end of an operation instance is fully upgraded. Args: request (:class:`google.cloud.datafusion_v1beta1.types.UpgradeInstanceRequest`): The request object. 
Request message for upgrading a Data Fusion instance. To change the instance properties, instance update should be used. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.datafusion_v1beta1.types.Instance` Represents a Data Fusion instance. """ # Create or coerce a protobuf request object. request = v1beta1.UpgradeInstanceRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.upgrade_instance, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("name", request.name), )), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, v1beta1.Instance, metadata_type=v1beta1.OperationMetadata, ) # Done; return the response. return response async def remove_iam_policy(self, request: v1beta1.RemoveIamPolicyRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> v1beta1.RemoveIamPolicyResponse: r"""Remove IAM policy that is currently set on the given resource. Args: request (:class:`google.cloud.datafusion_v1beta1.types.RemoveIamPolicyRequest`): The request object. Request message for RemoveIamPolicy method. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.datafusion_v1beta1.types.RemoveIamPolicyResponse: Response message for RemoveIamPolicy method. """ # Create or coerce a protobuf request object. request = v1beta1.RemoveIamPolicyRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.remove_iam_policy, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("resource", request.resource), )), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response async def list_namespaces(self, request: v1beta1.ListNamespacesRequest = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListNamespacesAsyncPager: r"""List namespaces in a given instance Args: request (:class:`google.cloud.datafusion_v1beta1.types.ListNamespacesRequest`): The request object. List namespaces request. parent (:class:`str`): Required. The instance to list its namespaces. 
This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.datafusion_v1beta1.services.data_fusion.pagers.ListNamespacesAsyncPager: List namespaces response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError("If the `request` argument is set, then none of " "the individual field arguments should be set.") request = v1beta1.ListNamespacesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_namespaces, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("parent", request.parent), )), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListNamespacesAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response async def add_dns_peering(self, request: v1beta1.AddDnsPeeringRequest = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> v1beta1.AddDnsPeeringResponse: r"""Add DNS peering on the given resource. Args: request (:class:`google.cloud.datafusion_v1beta1.types.AddDnsPeeringRequest`): The request object. Request message to create dns peering. parent (:class:`str`): The resource on which DNS peering will be created. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.datafusion_v1beta1.types.AddDnsPeeringResponse: Response message for set dns peering method. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError("If the `request` argument is set, then none of " "the individual field arguments should be set.") request = v1beta1.AddDnsPeeringRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.add_dns_peering, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("parent", request.parent), )), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response async def remove_dns_peering(self, request: v1beta1.RemoveDnsPeeringRequest = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> v1beta1.RemoveDnsPeeringResponse: r"""Remove DNS peering on the given resource. Args: request (:class:`google.cloud.datafusion_v1beta1.types.RemoveDnsPeeringRequest`): The request object. Request message to remove dns peering. parent (:class:`str`): The resource on which DNS peering will be removed. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.datafusion_v1beta1.types.RemoveDnsPeeringResponse: Response message for set dns peering method. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError("If the `request` argument is set, then none of " "the individual field arguments should be set.") request = v1beta1.RemoveDnsPeeringRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.remove_dns_peering, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("parent", request.parent), )), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response async def list_dns_peerings(self, request: v1beta1.ListDnsPeeringsRequest = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListDnsPeeringsAsyncPager: r"""List DNS peering for a given resource. Args: request (:class:`google.cloud.datafusion_v1beta1.types.ListDnsPeeringsRequest`): The request object. List dns peering request. parent (:class:`str`): Required. The resource on which dns peering will be listed. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.datafusion_v1beta1.services.data_fusion.pagers.ListDnsPeeringsAsyncPager: List dns peering response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError("If the `request` argument is set, then none of " "the individual field arguments should be set.") request = v1beta1.ListDnsPeeringsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_dns_peerings, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("parent", request.parent), )), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListDnsPeeringsAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response async def __aenter__(self): return self async def __aexit__(self, exc_type, exc, tb): await self.transport.close() try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution( "google-cloud-datafusion", ).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ( "DataFusionAsyncClient", )
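# --- Illustrative usage sketch (added for clarity; not part of the generated
# client). It assumes Application Default Credentials are configured and uses
# a hypothetical project id; `DataFusionAsyncClient`, `v1beta1.ListInstancesRequest`
# and the async pager are the ones defined in this package.
if __name__ == "__main__":
    import asyncio

    async def _list_example():
        client = DataFusionAsyncClient()
        pager = await client.list_instances(
            request=v1beta1.ListInstancesRequest(
                parent="projects/my-project/locations/-",  # hypothetical parent
            )
        )
        async for instance in pager:  # the async pager resolves extra pages lazily
            print(instance.name)

    asyncio.run(_list_example())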
py
b41799e881a66a8ef586c38e9d0efec9589ae733
import pytest @pytest.fixture def faq_category_valid_args(): return {'name': 'General'}
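# Illustrative only: a hypothetical test consuming the fixture above. Pytest
# injects it by argument name; `FaqCategory` is an assumed model, not defined here.
#
#   def test_faq_category_creation(faq_category_valid_args):
#       category = FaqCategory(**faq_category_valid_args)
#       assert category.name == 'General'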
py
b4179c1f04a340bdf6c9071333fc3e1eff3a20c9
# python3 # Copyright 2018 DeepMind Technologies Limited. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A MCTS actor.""" from typing import Tuple import acme from acme import adders from acme import specs from acme.agents.mcts import models from acme.agents.mcts import search from acme.agents.mcts import types from acme.utils import tf2_variable_utils import dm_env import numpy as np from scipy import special import sonnet as snt import tensorflow as tf class MCTSActor(acme.Actor): """Executes a policy- and value-network guided MCTS search.""" _prev_timestep: dm_env.TimeStep def __init__( self, environment_spec: specs.EnvironmentSpec, model: models.Model, network: snt.Module, discount: float, num_simulations: int, adder: adders.Adder = None, variable_client: tf2_variable_utils.VariableClient = None, ): # Internalize components: model, network, data sink and variable source. self._model = model self._network = tf.function(network) self._variable_client = variable_client self._adder = adder # Internalize hyperparameters. self._num_actions = environment_spec.actions.num_values self._num_simulations = num_simulations self._actions = list(range(self._num_actions)) self._discount = discount # We need to save the policy so as to add it to replay on the next step. self._probs = np.ones( shape=(self._num_actions,), dtype=np.float32) / self._num_actions def _forward( self, observation: types.Observation) -> Tuple[types.Probs, types.Value]: """Performs a forward pass of the policy-value network.""" logits, value = self._network(tf.expand_dims(observation, axis=0)) # Convert to numpy & take softmax. logits = logits.numpy().squeeze(axis=0) value = value.numpy().item() probs = special.softmax(logits) return probs, value def select_action(self, observation: types.Observation) -> types.Action: """Computes the agent's policy via MCTS.""" if self._model.needs_reset: self._model.reset(observation) # Compute a fresh MCTS plan. root = search.mcts( observation, model=self._model, search_policy=search.puct, evaluation=self._forward, num_simulations=self._num_simulations, num_actions=self._num_actions, discount=self._discount, ) # The agent's policy is softmax w.r.t. the *visit counts* as in AlphaZero. probs = search.visit_count_policy(root) action = np.int32(np.random.choice(self._actions, p=probs)) # Save the policy probs so that we can add them to replay in `observe()`. 
self._probs = probs.astype(np.float32) return action def update(self): """Fetches the latest variables from the variable source, if needed.""" if self._variable_client: self._variable_client.update() def observe_first(self, timestep: dm_env.TimeStep): self._prev_timestep = timestep if self._adder: self._adder.add_first(timestep) def observe(self, action: types.Action, next_timestep: dm_env.TimeStep): """Updates the agent's internal model and adds the transition to replay.""" self._model.update(self._prev_timestep, action, next_timestep) self._prev_timestep = next_timestep if self._adder: self._adder.add(action, next_timestep, extras={'pi': self._probs})
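# Illustrative wiring sketch (an assumption, not part of this module): the
# actor is typically driven by an acme EnvironmentLoop, e.g.
#
#   actor = MCTSActor(
#       environment_spec=environment_spec,
#       model=model,                # a models.Model that simulates the environment
#       network=network,            # snt.Module returning (logits, value)
#       discount=0.99,
#       num_simulations=50,
#       adder=adder,                # optional replay adder
#   )
#   loop = acme.EnvironmentLoop(environment, actor)
#   loop.run(num_episodes=10)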
py
b4179c43f8d80c2784de970ef59d30aaddc53ff0
from .fixtures.aws import aws_session, aws_s3_bucket
from .fixtures.test_data import (
    parquet_file,
    parquet_file_s3_1,
    parquet_file_s3_2,
)
py
b4179cc4d68f54785688ac4874dcf5eeb9c7f18c
#coding: utf-8
import socket, sys, argparse, data_saver
from _thread import *

max_conn = 5
buffer_size = 4096

def main(listening_port):
    #start the proxy server
    try:
        print("[*] Initializing sockets ...")
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #open the socket
        print("[*] Binding socket ...")
        s.bind(('', listening_port)) #bind the socket to the port
        print("[*] Listening on socket [ %d ]" % listening_port)
        s.listen(max_conn) #start listening on the socket
        print("[*] Socket initialized!")
    except Exception as e:
        print("[*] Error initializing the socket") #on error, bail out
        sys.exit(2)

    while 1: #keep listening
        try:
            conn, addr = s.accept() #accept the client connection
            start_new_thread(conn_string, (conn, addr)) #spawn a thread to handle the connection
        except KeyboardInterrupt: #close the proxy on Ctrl+C, but do it gracefully
            s.close()
            print("\n[*] Shutting down proxy server ...")
            sys.exit(1)
    s.close()

def conn_string(conn, addr):
    try:
        request = conn.recv(buffer_size) #receive the data
        #print "[*] request:", request
        data_saver.save_data(request.decode('utf-8'))
        conn.close()
        print("[*] Data inserted :)!")
    except Exception as e: #failed to pass the data along; do nothing and keep listening
        print(e)
        conn.close()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Listen for incoming data on the given port')
    parser.add_argument('port', metavar='PORT', type=int, help='Port to listen on (default=8001)', nargs='?', default=8001)
    args = parser.parse_args()
    main(args.port)
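# Quick way to exercise the listener above (illustrative sketch only): push a
# UTF-8 payload at it from another process; data_saver.save_data receives the
# decoded text.
#
#   import socket
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(('127.0.0.1', 8001))
#   client.sendall('hello'.encode('utf-8'))
#   client.close()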
py
b4179d16a4504b7fe3cf1c8e58a694d3deaa03dd
minimal_monochrome = { "LV_COLOR_DEPTH":1, "LV_MEM_SIZE":64 * 1024, "LV_DPI_DEF":40, "LV_DRAW_COMPLEX":0, "LV_USE_LOG":1, "LV_USE_ASSERT_NULL":0, "LV_USE_ASSERT_MALLOC":0, "LV_USE_ASSERT_MEM_INTEGRITY":0, "LV_USE_ASSERT_OBJ":0, "LV_USE_ASSERT_STYLE":0, "LV_USE_USER_DATA": 0, "LV_FONT_UNSCII_8":1, "LV_USE_BIDI": 0, "LV_USE_ARABIC_PERSIAN_CHARS":0, "LV_BUILD_EXAMPLES":1, "LV_FONT_DEFAULT":"\\\"&lv_font_montserrat_14\\\"", } minimal_16bit = { "LV_COLOR_DEPTH":16, "LV_MEM_CUSTOM":1, "LV_DPI_DEF":40, "LV_DRAW_COMPLEX":0, "LV_USE_LOG":1, "LV_USE_ASSERT_NULL":0, "LV_USE_ASSERT_MALLOC":0, "LV_USE_ASSERT_MEM_INTEGRITY":0, "LV_USE_ASSERT_OBJ":0, "LV_USE_ASSERT_STYLE":0, "LV_USE_USER_DATA": 0, "LV_FONT_UNSCII_8":1, "LV_USE_BIDI": 0, "LV_USE_ARABIC_PERSIAN_CHARS":0, "LV_BUILD_EXAMPLES":1, "LV_FONT_DEFAULT":"\\\"&lv_font_montserrat_14\\\"", } normal_16bit_swap = { "LV_COLOR_DEPTH":16, "LV_COLOR_16_SWAP":1, "LV_MEM_SIZE":64 * 1024, "LV_DPI_DEF":40, "LV_DRAW_COMPLEX":1, "LV_USE_LOG":1, "LV_USE_ASSERT_NULL":0, "LV_USE_ASSERT_MALLOC":0, "LV_USE_ASSERT_MEM_INTEGRITY":0, "LV_USE_ASSERT_OBJ":0, "LV_USE_ASSERT_STYLE":0, "LV_USE_USER_DATA": 0, "LV_FONT_UNSCII_8":1, "LV_USE_FONT_SUBPX": 1, "LV_USE_BIDI": 0, "LV_USE_ARABIC_PERSIAN_CHARS":0, "LV_BUILD_EXAMPLES":1, "LV_FONT_DEFAULT":"\\\"&lv_font_montserrat_14\\\"", } full_32bit = { "LV_COLOR_DEPTH":32, "LV_MEM_SIZE":8 * 1024 * 1024, "LV_DPI_DEF":160, "LV_DRAW_COMPLEX":1, "LV_SHADOW_CACHE_SIZE":1, "LV_IMG_CACHE_DEF_SIZE":32, "LV_USE_LOG":1, "LV_USE_LOG_LEVEL":"LV_LOG_LEVEL_TRACE", "LV_LOG_PRINTF":1, "LV_USE_FONT_SUBPX": 1, "LV_FONT_SUBPX_BGR":1, "LV_USE_PERF_MONITOR":1, "LV_USE_ASSERT_NULL":1, "LV_USE_ASSERT_MALLOC":1, "LV_USE_ASSERT_MEM_INTEGRITY":1, "LV_USE_ASSERT_OBJ":1, "LV_USE_ASSERT_STYLE":1, "LV_USE_USER_DATA": 1, "LV_USE_LARGE_COORD": 1, "LV_FONT_MONTSERRAT_8":1, "LV_FONT_MONTSERRAT_10":1, "LV_FONT_MONTSERRAT_12":1, "LV_FONT_MONTSERRAT_14":1, "LV_FONT_MONTSERRAT_16":1, "LV_FONT_MONTSERRAT_18":1, "LV_FONT_MONTSERRAT_20":1, "LV_FONT_MONTSERRAT_22":1, "LV_FONT_MONTSERRAT_24":1, "LV_FONT_MONTSERRAT_26":1, "LV_FONT_MONTSERRAT_28":1, "LV_FONT_MONTSERRAT_30":1, "LV_FONT_MONTSERRAT_32":1, "LV_FONT_MONTSERRAT_34":1, "LV_FONT_MONTSERRAT_36":1, "LV_FONT_MONTSERRAT_38":1, "LV_FONT_MONTSERRAT_40":1, "LV_FONT_MONTSERRAT_42":1, "LV_FONT_MONTSERRAT_44":1, "LV_FONT_MONTSERRAT_46":1, "LV_FONT_MONTSERRAT_48":1, "LV_FONT_MONTSERRAT_12_SUBPX":1, "LV_FONT_MONTSERRAT_28_COMPRESSED":1, "LV_FONT_DEJAVU_16_PERSIAN_HEBREW":1, "LV_FONT_SIMSUN_16_CJK":1, "LV_FONT_UNSCII_8":1, "LV_FONT_UNSCII_16":1, "LV_FONT_FMT_TXT_LARGE":1, "LV_USE_FONT_COMPRESSED":1, "LV_USE_BIDI": 1, "LV_USE_ARABIC_PERSIAN_CHARS":1, "LV_USE_PERF_MONITOR":1, "LV_USE_MEM_MONITOR":1, "LV_LABEL_TEXT_SELECTION":1, "LV_BUILD_EXAMPLES":1, "LV_FONT_DEFAULT":"\\\"&lv_font_montserrat_24\\\"", } test = { "LV_COLOR_DEPTH":32, "LV_MEM_SIZE":2 * 1024 * 1024, "LV_SHADOW_CACHE_SIZE":10*1024, "LV_IMG_CACHE_DEF_SIZE":32, "LV_USE_LOG":1, "LV_LOG_PRINTF":1, "LV_USE_FONT_SUBPX": 1, "LV_FONT_SUBPX_BGR":1, "LV_USE_ASSERT_NULL":0, "LV_USE_ASSERT_MALLOC":0, "LV_USE_ASSERT_MEM_INTEGRITY":0, "LV_USE_ASSERT_OBJ":0, "LV_USE_ASSERT_STYLE":0, "LV_USE_USER_DATA": 1, "LV_USE_LARGE_COORD": 1, "LV_FONT_MONTSERRAT_14":1, "LV_FONT_MONTSERRAT_16":1, "LV_FONT_MONTSERRAT_18":1, "LV_FONT_MONTSERRAT_24":1, "LV_FONT_MONTSERRAT_48":1, "LV_FONT_MONTSERRAT_12_SUBPX":1, "LV_FONT_MONTSERRAT_28_COMPRESSED":1, "LV_FONT_DEJAVU_16_PERSIAN_HEBREW":1, "LV_FONT_SIMSUN_16_CJK":1, "LV_FONT_UNSCII_8":1, "LV_FONT_UNSCII_16":1, "LV_FONT_FMT_TXT_LARGE":1, 
"LV_USE_FONT_COMPRESSED":1, "LV_USE_BIDI": 1, "LV_USE_ARABIC_PERSIAN_CHARS":1, "LV_LABEL_TEXT_SELECTION":1, "LV_BUILD_EXAMPLES":1, "LV_FONT_DEFAULT":"\\\"&lv_font_montserrat_14\\\"", }
py
b4179d50281509e281624de5c04b3627b6cfad5e
from datetime import datetime from django.contrib import messages from django.core.exceptions import ValidationError from django.http.response import ( HttpResponseBadRequest, HttpResponseForbidden, JsonResponse, ) from django.shortcuts import redirect from django.utils.decorators import method_decorator from django.utils.functional import cached_property from django.utils.translation import ( ugettext as _, ugettext_lazy, ) from django.views.decorators.http import require_POST from corehq import toggles from corehq.apps.app_manager.dbaccessors import ( get_brief_apps_in_domain, get_build_doc_by_version, ) from corehq.apps.app_manager.decorators import require_can_edit_apps from corehq.apps.app_manager.models import ( AppReleaseByLocation, LatestEnabledBuildProfiles, ) from corehq.apps.domain.forms import ( ManageReleasesByAppProfileForm, ManageReleasesByLocationForm, ) from corehq.apps.domain.views import BaseProjectSettingsView from corehq.apps.locations.models import SQLLocation from corehq.apps.users.permissions import can_manage_releases @method_decorator([toggles.MANAGE_RELEASES_PER_LOCATION.required_decorator(), require_can_edit_apps], name='dispatch') class ManageReleasesByLocation(BaseProjectSettingsView): template_name = 'domain/manage_releases_by_location.html' urlname = 'manage_releases_by_location' page_title = ugettext_lazy("Manage Releases By Location") @cached_property def form(self): return ManageReleasesByLocationForm( self.request, self.domain, data=self.request.POST if self.request.method == "POST" else None, ) @staticmethod def _location_path_display(location_id): return SQLLocation.active_objects.get(location_id=location_id).get_path_display() @property def page_context(self): app_names = {app.id: app.name for app in get_brief_apps_in_domain(self.domain, include_remote=True)} q = AppReleaseByLocation.objects.filter(domain=self.domain) location_id_slug = self.request.GET.get('location_id') location_id = None if location_id_slug: location_id = self.form.extract_location_id(location_id_slug) if location_id: q = q.filter(location_id=location_id) if self.request.GET.get('app_id'): q = q.filter(app_id=self.request.GET.get('app_id')) version = self.request.GET.get('version') if version: q = q.filter(version=version) status = self.request.GET.get('status') if status: if status == 'active': q = q.filter(active=True) elif status == 'inactive': q = q.filter(active=False) app_releases_by_location = [release.to_json() for release in q.order_by('-version')] for r in app_releases_by_location: r['app'] = app_names.get(r['app'], r['app']) return { 'manage_releases_by_location_form': self.form, 'app_releases_by_location': app_releases_by_location, 'selected_build_details': ({'id': version, 'text': version} if version else None), 'selected_location_details': ({'id': location_id_slug, 'text': self._location_path_display(location_id)} if location_id else None), } def post(self, request, *args, **kwargs): if self.form.is_valid(): success, error_message = self.form.save() if success: return redirect(self.urlname, self.domain) else: messages.error(request, error_message) return self.get(request, *args, **kwargs) else: return self.get(request, *args, **kwargs) @method_decorator([toggles.RELEASE_BUILDS_PER_PROFILE.required_decorator(), require_can_edit_apps], name='dispatch') class ManageReleasesByAppProfile(BaseProjectSettingsView): template_name = 'domain/manage_releases_by_app_profile.html' urlname = 'manage_releases_by_app_profile' page_title = ugettext_lazy("Manage Releases By App 
Profile") @cached_property def form(self): return ManageReleasesByAppProfileForm( self.request, self.domain, data=self.request.POST if self.request.method == "POST" else None, ) @staticmethod def _get_initial_app_profile_details(domain, version, app_id, build_profile_id): # only need to set when performing search to populate with initial values in view if app_id and version: build_doc = get_build_doc_by_version(domain, app_id, version) if build_doc: return [{ 'id': _id, 'text': details['name'], 'selected': build_profile_id == _id } for _id, details in build_doc['build_profiles'].items()] @property def page_context(self): app_names = {app.id: app.name for app in get_brief_apps_in_domain(self.domain, include_remote=True)} query = LatestEnabledBuildProfiles.objects app_id = self.request.GET.get('app_id') if app_id: query = query.filter(app_id=app_id) else: query = query.filter(app_id__in=app_names.keys()) version = self.request.GET.get('version') if version: query = query.filter(version=version) build_profile_id = self.request.GET.get('build_profile_id') if build_profile_id: query = query.filter(build_profile_id=build_profile_id) status = self.request.GET.get('status') if status: if status == 'active': query = query.filter(active=True) elif status == 'inactive': query = query.filter(active=False) app_releases_by_app_profile = [release.to_json(app_names) for release in query.order_by('-version')] return { 'manage_releases_by_app_profile_form': self.form, 'app_releases_by_app_profile': app_releases_by_app_profile, 'selected_build_details': ({'id': version, 'text': version} if version else None), 'initial_app_profile_details': self._get_initial_app_profile_details(self.domain, version, app_id, build_profile_id), } def post(self, request, *args, **kwargs): if self.form.is_valid(): success, error_message = self.form.save() if success: return redirect(self.urlname, self.domain) else: messages.error(request, error_message) return self.get(request, *args, **kwargs) else: return self.get(request, *args, **kwargs) @require_can_edit_apps @require_POST def deactivate_release_restriction(request, domain, restriction_id): return _update_release_restriction(request, domain, restriction_id, active=False) @require_can_edit_apps @require_POST def activate_release_restriction(request, domain, restriction_id): return _update_release_restriction(request, domain, restriction_id, active=True) def _update_release_restriction(request, domain, restriction_id, active): if not toggles.MANAGE_RELEASES_PER_LOCATION.enabled_for_request(request): return HttpResponseForbidden() release = AppReleaseByLocation.objects.get(id=restriction_id, domain=domain) try: release.activate() if active else release.deactivate() except ValidationError as e: response_content = { 'message': ','.join(e.messages) } else: response_content = { 'id': restriction_id, 'success': True, 'activated_on': (datetime.strftime(release.activated_on, '%Y-%m-%d %H:%M:%S') if release.activated_on else None), 'deactivated_on': (datetime.strftime(release.deactivated_on, '%Y-%m-%d %H:%M:%S') if release.deactivated_on else None), } return JsonResponse(data=response_content) @require_can_edit_apps @require_POST def toggle_release_restriction_by_app_profile(request, domain, restriction_id): if not toggles.RELEASE_BUILDS_PER_PROFILE.enabled_for_request(request): return HttpResponseForbidden() release = LatestEnabledBuildProfiles.objects.get(id=restriction_id) if not release: return HttpResponseBadRequest() if not can_manage_releases(request.couch_user, domain, 
release.app_id): return JsonResponse(data={ 'message': _("You don't have permission to set restriction for this application")}) if request.POST.get('active') == 'false': return _update_release_restriction_by_app_profile(release, restriction_id, active=False) elif request.POST.get('active') == 'true': return _update_release_restriction_by_app_profile(release, restriction_id, active=True) def _update_release_restriction_by_app_profile(release, restriction_id, active): try: release.activate() if active else release.deactivate() except ValidationError as e: response_content = { 'message': ','.join(e.messages) } else: response_content = { 'id': restriction_id, 'success': True, } return JsonResponse(data=response_content)
py
b4179e48e94d2df8b9be7d67d89d129fb24f7f10
df.loc[df['Sex'] == 'male', 'Age'].mean()
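# Equivalent view of the same question (illustrative, assuming the same
# Titanic-style DataFrame): report the mean age for every Sex value at once.
df.groupby('Sex')['Age'].mean()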
py
b4179e57de55c95f0aba2dbad254b2603f1024ac
# # Test base current collector submodel # import pybamm import tests import unittest class TestBaseModel(unittest.TestCase): def test_public_functions(self): param = pybamm.LithiumIonParameters() variables = { "Positive current collector potential": pybamm.PrimaryBroadcast( 0, "current collector" ), "Total current density": 0, } submodel = pybamm.current_collector.PotentialPair1plus1D(param) std_tests = tests.StandardSubModelTests(submodel, variables) std_tests.test_all() submodel = pybamm.current_collector.PotentialPair2plus1D(param) std_tests = tests.StandardSubModelTests(submodel, variables) std_tests.test_all() if __name__ == "__main__": print("Add -v for more debug output") import sys if "-v" in sys.argv: debug = True pybamm.settings.debug_mode = True unittest.main()
py
b4179f6b360eb561298ef2616cef3c903de66b2a
"""Development tasks.""" import os import re from itertools import chain from pathlib import Path from shutil import which from typing import List, Optional, Pattern import httpx import toml from duty import duty from git_changelog.build import Changelog, Version from jinja2 import StrictUndefined from jinja2.sandbox import SandboxedEnvironment from pip._internal.commands.show import search_packages_info # noqa: WPS436 (no other way?) PY_SRC_PATHS = (Path(_) for _ in ("src", "tests", "duties.py")) PY_SRC_LIST = tuple(str(_) for _ in PY_SRC_PATHS) PY_SRC = " ".join(PY_SRC_LIST) TESTING = os.environ.get("TESTING", "0") in {"1", "true"} CI = os.environ.get("CI", "0") in {"1", "true"} WINDOWS = os.name == "nt" PTY = not WINDOWS def latest(lines: List[str], regex: Pattern) -> Optional[str]: """ Return the last released version. Arguments: lines: Lines of the changelog file. regex: A compiled regex to find version numbers. Returns: The last version. """ for line in lines: match = regex.search(line) if match: return match.groupdict()["version"] return None def unreleased(versions: List[Version], last_release: str) -> List[Version]: """ Return the most recent versions down to latest release. Arguments: versions: All the versions (released and unreleased). last_release: The latest release. Returns: A list of versions. """ for index, version in enumerate(versions): if version.tag == last_release: return versions[:index] return versions def read_changelog(filepath: str) -> List[str]: """ Read the changelog file. Arguments: filepath: The path to the changelog file. Returns: The changelog lines. """ with open(filepath, "r") as changelog_file: return changelog_file.read().splitlines() def write_changelog(filepath: str, lines: List[str]) -> None: """ Write the changelog file. Arguments: filepath: The path to the changelog file. lines: The lines to write to the file. """ with open(filepath, "w") as changelog_file: changelog_file.write("\n".join(lines).rstrip("\n") + "\n") def update_changelog( inplace_file: str, marker: str, version_regex: str, template_url: str, commit_style: str, ) -> None: """ Update the given changelog file in place. Arguments: inplace_file: The file to update in-place. marker: The line after which to insert new contents. version_regex: A regular expression to find currently documented versions in the file. template_url: The URL to the Jinja template used to render contents. commit_style: The style of commit messages to parse. """ env = SandboxedEnvironment(autoescape=True) template = env.from_string(httpx.get(template_url).text) changelog = Changelog(".", style=commit_style) # noqa: W0621 (shadowing changelog) if len(changelog.versions_list) == 1: last_version = changelog.versions_list[0] if last_version.planned_tag is None: planned_tag = "0.1.0" last_version.tag = planned_tag last_version.url += planned_tag last_version.compare_url = last_version.compare_url.replace("HEAD", planned_tag) lines = read_changelog(inplace_file) last_released = latest(lines, re.compile(version_regex)) if last_released: changelog.versions_list = unreleased(changelog.versions_list, last_released) rendered = template.render(changelog=changelog, inplace=True) lines[lines.index(marker)] = rendered write_changelog(inplace_file, lines) @duty def changelog(ctx): """ Update the changelog in-place with latest commits. Arguments: ctx: The context instance (passed automatically). 
""" ctx.run( update_changelog, kwargs={ "inplace_file": "CHANGELOG.md", "marker": "<!-- insertion marker -->", "version_regex": r"^## \[v?(?P<version>[^\]]+)", "template_url": "https://raw.githubusercontent.com/pawamoy/jinja-templates/master/keepachangelog.md", "commit_style": "angular", }, title="Updating changelog", pty=PTY, ) @duty(pre=["check_code_quality", "check_types", "check_docs", "check_dependencies"]) def check(ctx): # noqa: W0613 (no use for the context argument) """ Check it all! Arguments: ctx: The context instance (passed automatically). """ # noqa: D400 (exclamation mark is funnier) @duty def check_code_quality(ctx, files=PY_SRC): """ Check the code quality. Arguments: ctx: The context instance (passed automatically). files: The files to check. """ ctx.run(f"flakehell lint {files}", title="Checking code quality", pty=PTY) @duty def check_dependencies(ctx): """ Check for vulnerabilities in dependencies. Arguments: ctx: The context instance (passed automatically). """ nofail = False safety = which("safety") if not safety: pipx = which("pipx") if pipx: safety = f"{pipx} run safety" else: safety = "safety" nofail = True ctx.run( f"poetry export -f requirements.txt --without-hashes | {safety} check --stdin --full-report", title="Checking dependencies", pty=PTY, nofail=nofail, ) @duty def check_docs(ctx): """ Check if the documentation builds correctly. Arguments: ctx: The context instance (passed automatically). """ ctx.run("mkdocs build -s", title="Building documentation", pty=PTY) @duty def check_types(ctx): """ Check that the code is correctly typed. Arguments: ctx: The context instance (passed automatically). """ ctx.run(f"mypy --config-file config/mypy.ini {PY_SRC}", title="Type-checking", pty=PTY) @duty(silent=True) def clean(ctx): """ Delete temporary files. Arguments: ctx: The context instance (passed automatically). """ ctx.run("rm -rf .coverage*") ctx.run("rm -rf .mypy_cache") ctx.run("rm -rf .pytest_cache") ctx.run("rm -rf build") ctx.run("rm -rf dist") ctx.run("rm -rf pip-wheel-metadata") ctx.run("rm -rf site") ctx.run("find . -type d -name __pycache__ | xargs rm -rf") ctx.run("find . -name '*.rej' -delete") def get_credits_data() -> dict: """ Return data used to generate the credits file. Returns: Data required to render the credits template. 
""" project_dir = Path(__file__).parent.parent metadata = toml.load(project_dir / "pyproject.toml")["tool"]["poetry"] lock_data = toml.load(project_dir / "poetry.lock") project_name = metadata["name"] poetry_dependencies = chain(metadata["dependencies"].keys(), metadata["dev-dependencies"].keys()) direct_dependencies = {dep.lower() for dep in poetry_dependencies} direct_dependencies.remove("python") indirect_dependencies = {pkg["name"].lower() for pkg in lock_data["package"]} indirect_dependencies -= direct_dependencies dependencies = direct_dependencies | indirect_dependencies packages = {} for pkg in search_packages_info(dependencies): pkg = {_: pkg[_] for _ in ("name", "home-page")} packages[pkg["name"].lower()] = pkg for dependency in dependencies: if dependency not in packages: pkg_data = httpx.get(f"https://pypi.python.org/pypi/{dependency}/json").json()["info"] home_page = pkg_data["home_page"] or pkg_data["project_url"] or pkg_data["package_url"] pkg_name = pkg_data["name"] package = {"name": pkg_name, "home-page": home_page} packages.update({pkg_name.lower(): package}) return { "project_name": project_name, "direct_dependencies": sorted(direct_dependencies), "indirect_dependencies": sorted(indirect_dependencies), "package_info": packages, } @duty def docs_regen(ctx): """ Regenerate some documentation pages. Arguments: ctx: The context instance (passed automatically). """ url_prefix = "https://raw.githubusercontent.com/pawamoy/jinja-templates/master/" regen_list = (("CREDITS.md", get_credits_data, url_prefix + "credits.md"),) def regen() -> int: # noqa: WPS430 (nested function) """ Regenerate pages listed in global `REGEN` list. Returns: An exit code. """ env = SandboxedEnvironment(undefined=StrictUndefined) for target, get_data, template in regen_list: print("Regenerating", target) # noqa: WPS421 (print) template_data = get_data() template_text = httpx.get(template).text rendered = env.from_string(template_text).render(**template_data) with open(target, "w") as stream: stream.write(rendered) return 0 ctx.run(regen, title="Regenerating docfiles", pty=PTY) @duty(pre=[docs_regen]) def docs(ctx): """ Build the documentation locally. Arguments: ctx: The context instance (passed automatically). """ ctx.run("mkdocs build", title="Building documentation") @duty(pre=[docs_regen]) def docs_serve(ctx, host="127.0.0.1", port=8000): """ Serve the documentation (localhost:8000). Arguments: ctx: The context instance (passed automatically). host: The host to serve the docs from. port: The port to serve the docs on. """ ctx.run(f"mkdocs serve -a {host}:{port}", title="Serving documentation", capture=False) @duty(pre=[docs_regen]) def docs_deploy(ctx): """ Deploy the documentation on GitHub pages. Arguments: ctx: The context instance (passed automatically). """ ctx.run("mkdocs gh-deploy", title="Deploying documentation") @duty def format(ctx): # noqa: W0622 (we don't mind shadowing the format builtin) """ Run formatting tools on the code. Arguments: ctx: The context instance (passed automatically). """ ctx.run( f"autoflake -ir --exclude tests/fixtures --remove-all-unused-imports {PY_SRC}", title="Removing unused imports", pty=PTY, ) ctx.run(f"isort -y -rc {PY_SRC}", title="Ordering imports", pty=PTY) ctx.run(f"black {PY_SRC}", title="Formatting code", pty=PTY) @duty def release(ctx, version): """ Release a new Python package. Arguments: ctx: The context instance (passed automatically). version: The new version number to use. 
""" ctx.run(f"poetry version {version}", title=f"Bumping version in pyproject.toml to {version}", pty=PTY) ctx.run("git add pyproject.toml CHANGELOG.md", title="Staging files", pty=PTY) ctx.run(["git", "commit", "-m", f"chore: Prepare release {version}"], title="Committing changes", pty=PTY) ctx.run(f"git tag {version}", title="Tagging commit", pty=PTY) if not TESTING: ctx.run("git push", title="Pushing commits", pty=False) ctx.run("git push --tags", title="Pushing tags", pty=False) ctx.run("poetry build", title="Building dist/wheel", pty=PTY) ctx.run("poetry publish", title="Publishing version", pty=PTY) ctx.run("mkdocs gh-deploy", title="Deploying documentation", pty=PTY) @duty(silent=True) def coverage(ctx): """ Report coverage as text and HTML. Arguments: ctx: The context instance (passed automatically). """ ctx.run("coverage report --rcfile=config/coverage.ini", capture=False) ctx.run("coverage html --rcfile=config/coverage.ini") @duty(pre=[duty(lambda ctx: ctx.run("rm -f .coverage", silent=True))]) def test(ctx, match=""): """ Run the test suite. Arguments: ctx: The context instance (passed automatically). match: A pytest expression to filter selected tests. """ ctx.run( ["pytest", "-c", "config/pytest.ini", "-n", "auto", "-k", match, "tests"], title="Running tests", pty=PTY, )
py
b417a0a4326bd01af060818bff22abf9efcfa135
import os import sys import tempfile from plotly.io import to_html import plotly.graph_objs as go from PyQt5 import QtCore, QtGui, QtWidgets, sip from PyQt5.QtCore import Qt import logging logger = logging.getLogger(__name__) # https://stackoverflow.com/a/64743807/3620725 os.environ["QTWEBENGINE_CHROMIUM_FLAGS"] = "--enable-logging --log-level=3" # Since pandasgui might be imported after other packages already created a QApplication, # we need to hack around this import restriction on QtWebEngineWidgets # https://stackoverflow.com/a/57436077/3620725 try: from PyQt5 import QtWebEngineWidgets except ImportError as e: if e.msg == "QtWebEngineWidgets must be imported before a QCoreApplication instance is created": logger.info("Reinitialized existing QApplication instance to allow import of QtWebEngineWidgets.") app = QtWidgets.QApplication.instance() app.quit() sip.delete(app) from PyQt5 import QtWebEngineWidgets app.__init__(sys.argv) else: raise e class PlotlyViewer(QtWebEngineWidgets.QWebEngineView): def __init__(self, fig=None, store=None): super().__init__() self.store = store self.page().profile().downloadRequested.connect(self.on_downloadRequested) # Fix scrollbar sometimes disappearing after Plotly autosizes and leaving behind white space self.settings().setAttribute(self.settings().ShowScrollBars, False) # https://stackoverflow.com/a/8577226/3620725 self.temp_file = tempfile.NamedTemporaryFile(mode="w", suffix=".html", delete=False) self.set_figure(fig) self.resize(700, 600) self.setWindowTitle("Plotly Viewer") def set_figure(self, fig=None): self.temp_file.seek(0) if fig is None: fig = go.Figure() dark = self.store is not None and self.store.settings.theme.value == "dark" if dark: fig.update_layout(template="plotly_dark", autosize=True) html = to_html(fig, config={"responsive": True}) html += "\n<style>body{margin: 0;}" \ "\n.plot-container,.main-svg,.svg-container{width:100% !important; height:100% !important;}</style>" self.temp_file.write(html) self.temp_file.truncate() self.temp_file.seek(0) self.load(QtCore.QUrl.fromLocalFile(self.temp_file.name)) def closeEvent(self, event: QtGui.QCloseEvent) -> None: self.temp_file.close() os.unlink(self.temp_file.name) super().closeEvent(event) def sizeHint(self) -> QtCore.QSize: return QtCore.QSize(400, 400) # https://stackoverflow.com/questions/55963931/how-to-download-csv-file-with-qwebengineview-and-qurl def on_downloadRequested(self, download): dialog = QtWidgets.QFileDialog() dialog.setDefaultSuffix(".png") path, _ = dialog.getSaveFileName(self, "Save File", os.path.join(os.getcwd(), "newplot.png"), "*.png") if path: download.setPath(path) download.accept() if __name__ == "__main__": # Create a QtWidgets.QApplication instance or use the existing one if it exists app = QtWidgets.QApplication(sys.argv) import numpy as np import plotly.graph_objs as go from pandasgui.utility import fix_ipython, fix_pyqt fix_ipython() fix_pyqt() fig = go.Figure() fig.add_scatter( x=np.random.rand(100), y=np.random.rand(100), mode="markers", marker={ "size": 30, "color": np.random.rand(100), "opacity": 0.6, "colorscale": "Viridis", }, ) pv = PlotlyViewer(fig) pv.show() app.exec_()
py
b417a129f1ffede8fb6a67d1fefe4069d18ed0c7
from django.apps import AppConfig class GasConfig(AppConfig): name = 'gas'
py
b417a15c0b758c2a4947a9cb3097ca9752ed4ecd
VERSION = (0, 12, 0) __version__ = '.'.join(map(str, VERSION)) default_app_config = 'form_designer.apps.FormDesignerConfig' # Do not use Django settings at module level as recommended try: from django.utils.functional import LazyObject except ImportError: pass else: class LazySettings(LazyObject): def _setup(self): from form_designer import default_settings self._wrapped = Settings(default_settings) class Settings(object): def __init__(self, settings_module): for setting in dir(settings_module): if setting == setting.upper(): setattr(self, setting, getattr(settings_module, setting)) settings = LazySettings()
py
b417a173ee2a3faa9f197256ffc3131144fefbf3
"""shufflenetv2 in pytorch [1] Ningning Ma, Xiangyu Zhang, Hai-Tao Zheng, Jian Sun ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design https://arxiv.org/abs/1807.11164 """ import torch import torch.nn as nn import torch.nn.functional as F def channel_split(x, split): """split a tensor into two pieces along channel dimension Args: x: input tensor split:(int) channel size for each pieces """ assert x.size(1) == split * 2 return torch.split(x, split, dim=1) def channel_shuffle(x, groups): """channel shuffle operation Args: x: input tensor groups: input branch number """ batch_size, channels, height, width = x.size() channels_per_group = int(channels / groups) x = x.view(batch_size, groups, channels_per_group, height, width) x = x.transpose(1, 2).contiguous() x = x.view(batch_size, -1, height, width) return x class ShuffleUnit(nn.Module): def __init__(self, in_channels, out_channels, stride): super().__init__() self.stride = stride self.in_channels = in_channels self.out_channels = out_channels if stride != 1 or in_channels != out_channels: self.residual = nn.Sequential( nn.Conv2d(in_channels, in_channels, 1), nn.BatchNorm2d(in_channels), nn.ReLU(inplace=True), nn.Conv2d(in_channels, in_channels, 3, stride=stride, padding=1, groups=in_channels), nn.BatchNorm2d(in_channels), nn.Conv2d(in_channels, int(out_channels / 2), 1), nn.BatchNorm2d(int(out_channels / 2)), nn.ReLU(inplace=True) ) self.shortcut = nn.Sequential( nn.Conv2d(in_channels, in_channels, 3, stride=stride, padding=1, groups=in_channels), nn.BatchNorm2d(in_channels), nn.Conv2d(in_channels, int(out_channels / 2), 1), nn.BatchNorm2d(int(out_channels / 2)), nn.ReLU(inplace=True) ) else: self.shortcut = nn.Sequential() in_channels = int(in_channels / 2) self.residual = nn.Sequential( nn.Conv2d(in_channels, in_channels, 1), nn.BatchNorm2d(in_channels), nn.ReLU(inplace=True), nn.Conv2d(in_channels, in_channels, 3, stride=stride, padding=1, groups=in_channels), nn.BatchNorm2d(in_channels), nn.Conv2d(in_channels, in_channels, 1), nn.BatchNorm2d(in_channels), nn.ReLU(inplace=True) ) def forward(self, x): if self.stride == 1 and self.out_channels == self.in_channels: shortcut, residual = channel_split(x, int(self.in_channels / 2)) else: shortcut = x residual = x shortcut = self.shortcut(shortcut) residual = self.residual(residual) x = torch.cat([shortcut, residual], dim=1) x = channel_shuffle(x, 2) return x class ShuffleNetV2(nn.Module): def __init__(self, ratio=1, class_num=100): super().__init__() if ratio == 0.5: out_channels = [48, 96, 192, 1024] elif ratio == 1: out_channels = [116, 232, 464, 1024] elif ratio == 1.5: out_channels = [176, 352, 704, 1024] elif ratio == 2: out_channels = [244, 488, 976, 2048] else: ValueError('unsupported ratio number') self.pre = nn.Sequential( nn.Conv2d(3, 24, 3, padding=1), nn.BatchNorm2d(24) ) self.stage2 = self._make_stage(24, out_channels[0], 3) self.stage3 = self._make_stage(out_channels[0], out_channels[1], 7) self.stage4 = self._make_stage(out_channels[1], out_channels[2], 3) self.conv5 = nn.Sequential( nn.Conv2d(out_channels[2], out_channels[3], 1), nn.BatchNorm2d(out_channels[3]), nn.ReLU(inplace=True) ) self.fc = nn.Linear(out_channels[3], class_num) def forward(self, x): x = self.pre(x) x = self.stage2(x) x = self.stage3(x) x = self.stage4(x) x = self.conv5(x) x = F.adaptive_avg_pool2d(x, 1) x = x.view(x.size(0), -1) x = self.fc(x) return x def _make_stage(self, in_channels, out_channels, repeat): layers = [] layers.append(ShuffleUnit(in_channels, out_channels, 2)) while 
repeat: layers.append(ShuffleUnit(out_channels, out_channels, 1)) repeat -= 1 return nn.Sequential(*layers) def shufflenetv2(**kwargs): return ShuffleNetV2(**kwargs)
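# Minimal smoke test (illustrative addition, not part of the original file):
# build the default 1.0x network and push a random CIFAR-100-sized batch through it.
if __name__ == '__main__':
    net = shufflenetv2(ratio=1, class_num=100)
    out = net(torch.randn(2, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([2, 100])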
py
b417a21721578914280f604a618c4eba164f6395
import numpy as np import matplotlib.pyplot as plt from hydroDL import utils n = 10000 # x = np.random.random(n) x1 = np.random.normal(loc=0, scale=1.0, size=5000) x2 = np.random.normal(loc=0, scale=2.0, size=500) x = np.concatenate([x1, x2]) fig, ax = plt.subplots(1, 1) ax.hist(x, bins=50) fig.show()
py
b417a47c1cae635fe22160a349f13a834e137658
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Xgamma(AutotoolsPackage): """xgamma allows X users to query and alter the gamma correction of a monitor via the X video mode extension (XFree86-VidModeExtension).""" homepage = "http://cgit.freedesktop.org/xorg/app/xgamma" url = "https://www.x.org/archive/individual/app/xgamma-1.0.6.tar.gz" version('1.0.6', 'ac4f91bf1d9aa0433152ba6196288cc6') depends_on('libx11') depends_on('libxxf86vm') depends_on('[email protected]:', type='build') depends_on('[email protected]:', type='build') depends_on('util-macros', type='build')
py
b417a4bde4c373c456d3342bbc2fa22293d33b86
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Aug 6 21:02:27 2018 @author: kazuki.onodera nohup python -u 913_predict_807-2.py 0 > LOG/log_913_predict_807-2_s0.py.txt & nohup python -u 913_predict_807-2.py 1 > LOG/log_913_predict_807-2_s1.py.txt & nohup python -u 913_predict_807-2.py 2 > LOG/log_913_predict_807-2_s2.py.txt & nohup python -u 913_predict_807-2.py 3 > LOG/log_913_predict_807-2_s3.py.txt & nohup python -u 913_predict_807-2.py 4 > LOG/log_913_predict_807-2_s4.py.txt & nohup python -u 913_predict_807-2.py 5 > LOG/log_913_predict_807-2_s5.py.txt & nohup python -u 913_predict_807-2.py 6 > LOG/log_913_predict_807-2_s6.py.txt & nohup python -u 913_predict_807-2.py 7 > LOG/log_913_predict_807-2_s7.py.txt & nohup python -u 913_predict_807-2.py 8 > LOG/log_913_predict_807-2_s8.py.txt & nohup python -u 913_predict_807-2.py 9 > LOG/log_913_predict_807-2_s9.py.txt & """ import numpy as np import pandas as pd from tqdm import tqdm import gc, os from collections import defaultdict import sys sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary') import lgbextension as ex import lightgbm as lgb from multiprocessing import cpu_count from glob import glob import utils, utils_cat, utils_best utils.start(__file__) #============================================================================== print(sys.argv) SEED = int(sys.argv[1]) LOOP = 20 NROUND = 5040 SUBMIT_FILE_PATH = f'../output/807-2_{SEED}.csv.gz' COMMENT = f'CV805_LB803 (seed87 loop100)' EXE_SUBMIT = False param = { 'objective': 'binary', 'metric': 'auc', 'learning_rate': 0.01, 'max_depth': 6, 'num_leaves': 63, 'max_bin': 255, 'min_child_weight': 10, 'min_data_in_leaf': 150, 'reg_lambda': 0.5, # L2 regularization term on weights. 'reg_alpha': 0.5, # L1 regularization term on weights. 
'colsample_bytree': 0.9, 'subsample': 0.9, # 'nthread': 32, 'nthread': cpu_count(), 'bagging_freq': 1, 'verbose':-1, # 'seed': SEED } np.random.seed(SEED) loader = utils_best.Loader('CV805_LB803') # ============================================================================= # load # ============================================================================= X_train = loader.train() y_train = utils.read_pickles('../data/label').TARGET if X_train.columns.duplicated().sum()>0: raise Exception(f'duplicated!: { X_train.columns[X_train.columns.duplicated()] }') print('no dup :) ') print(f'X_train.shape {X_train.shape}') gc.collect() CAT = list( set(X_train.columns) & set(loader.category()) ) COL = X_train.columns.tolist() X_test = loader.test()[COL] # ============================================================================= # training # ============================================================================= dtrain = lgb.Dataset(X_train, y_train, categorical_feature=CAT ) models = [] for i in range(LOOP): print(f'LOOP: {i}') gc.collect() param.update({'seed':np.random.randint(9999)}) model = lgb.train(param, dtrain, NROUND, categorical_feature=CAT) # model.save_model(f'lgb{i}.model') models.append(model) imp = ex.getImp(models) imp.to_csv(f'LOG/imp_{__file__}.csv', index=False) # ============================================================================= # predict # ============================================================================= sub = pd.read_pickle('../data/sub.p') gc.collect() label_name = 'TARGET' sub[label_name] = 0 for model in models: y_pred = model.predict(X_test) sub[label_name] += pd.Series(y_pred).rank() sub[label_name] /= LOOP sub[label_name] /= sub[label_name].max() sub['SK_ID_CURR'] = sub['SK_ID_CURR'].map(int) sub.to_csv(SUBMIT_FILE_PATH, index=False, compression='gzip') print(sub[label_name].describe()) # ============================================================================= # submission # ============================================================================= if EXE_SUBMIT: print('submit') utils.submit(SUBMIT_FILE_PATH, COMMENT) #============================================================================== utils.end(__file__) utils.stop_instance()
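
The blending step above averages per-model prediction ranks rather than raw probabilities and then rescales to the unit interval. A small standalone sketch of that step; the scores below are made up for illustration.

import pandas as pd

# Toy predicted probabilities from three models for five rows.
preds = [
    pd.Series([0.10, 0.40, 0.35, 0.90, 0.05]),
    pd.Series([0.20, 0.30, 0.50, 0.80, 0.10]),
    pd.Series([0.15, 0.45, 0.30, 0.95, 0.02]),
]

blend = sum(p.rank() for p in preds) / len(preds)  # average rank per row
blend = blend / blend.max()                        # rescale so the top-ranked row scores 1.0
print(blend.tolist())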
py
b417a7a05b8a6e4e82a60d001cb3591ebcc3a071
#!/usr/bin/env python3 # Copyright (c) 2014-2015 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from decimal import Decimal from test_framework.authproxy import JSONRPCException from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_greater_than, connect_nodes # Create one-input, one-output, no-fee transaction: class RawTransactionsTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 4 self.extra_args = [['-usehd=1']] * self.num_nodes def skip_test_if_missing_module(self): self.skip_if_no_wallet() def setup_network(self): super().setup_network() connect_nodes(self.nodes[0],1) connect_nodes(self.nodes[1],2) connect_nodes(self.nodes[0],2) connect_nodes(self.nodes[0],3) def run_test(self): self.log.info("Mining blocks...") min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee'] # This test is not meant to test fee estimation and we'd like # to be sure all txs are sent at a consistent desired feerate for node in self.nodes: node.settxfee(min_relay_tx_fee) # if the fee's positive delta is higher than this value tests will fail, # neg. delta always fail the tests. # The size of the signature of every input may be at most 2 bytes larger # than a minimum sized signature. # = 2 bytes * minRelayTxFeePerByte feeTolerance = 2 * min_relay_tx_fee/1000 self.nodes[2].generate(1) self.sync_all() self.nodes[0].generate(121) self.sync_all() watchonly_address = self.nodes[0].getnewaddress() watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"] watchonly_amount = Decimal(2000) self.nodes[3].importpubkey(watchonly_pubkey, "", True) watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount) self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50) self.sync_all() self.nodes[0].generate(1) self.sync_all() ############### # simple test # ############### inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 10 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert len(dec_tx['vin']) > 0 #test if we have enough inputs ############################## # simple test with two coins # ############################## inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 22 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert len(dec_tx['vin']) > 0 #test if we have enough inputs ############################## # simple test with two coins # ############################## inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 26 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert len(dec_tx['vin']) > 0 
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') ################################ # simple test with two outputs # ################################ inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: totalOut += out['value'] assert len(dec_tx['vin']) > 0 assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') ######################################################################### # test a fundrawtransaction with a VIN greater than the required amount # ######################################################################### utx = False listunspent = self.nodes[2].listunspent() for aUtx in listunspent: if aUtx['amount'] == 50: utx = aUtx break assert utx!=False inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] outputs = { self.nodes[0].getnewaddress() : 10 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: totalOut += out['value'] assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee ##################################################################### # test a fundrawtransaction with which will not get a change output # ##################################################################### utx = False listunspent = self.nodes[2].listunspent() for aUtx in listunspent: if aUtx['amount'] == 50: utx = aUtx break assert utx!=False inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: totalOut += out['value'] assert_equal(rawtxfund['changepos'], -1) assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee ######################################################################### # test a fundrawtransaction with a VIN smaller than the required amount # ######################################################################### utx = False listunspent = self.nodes[2].listunspent() for aUtx in listunspent: if aUtx['amount'] == 10: utx = aUtx break assert utx!=False inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] outputs = { self.nodes[0].getnewaddress() : 10 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) # 4-byte version + 1-byte vin count + 36-byte prevout then script_len rawtx = rawtx[:82] + "0100" + rawtx[84:] dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 for i, out in enumerate(dec_tx['vout']): totalOut += 
out['value'] if out['scriptPubKey']['addresses'][0] in outputs: matchingOuts+=1 else: assert_equal(i, rawtxfund['changepos']) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) assert_equal(matchingOuts, 1) assert_equal(len(dec_tx['vout']), 2) ########################################### # test a fundrawtransaction with two VINs # ########################################### utx = False utx2 = False listunspent = self.nodes[2].listunspent() for aUtx in listunspent: if aUtx['amount'] == 10: utx = aUtx if aUtx['amount'] == 50: utx2 = aUtx assert utx!=False inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ] outputs = { self.nodes[0].getnewaddress() : 60 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 for out in dec_tx['vout']: totalOut += out['value'] if out['scriptPubKey']['addresses'][0] in outputs: matchingOuts+=1 assert_equal(matchingOuts, 1) assert_equal(len(dec_tx['vout']), 2) matchingIns = 0 for vinOut in dec_tx['vin']: for vinIn in inputs: if vinIn['txid'] == vinOut['txid']: matchingIns+=1 assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params ######################################################### # test a fundrawtransaction with two VINs and two vOUTs # ######################################################### utx = False utx2 = False listunspent = self.nodes[2].listunspent() for aUtx in listunspent: if aUtx['amount'] == 10: utx = aUtx if aUtx['amount'] == 50: utx2 = aUtx assert utx!=False inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ] outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 for out in dec_tx['vout']: totalOut += out['value'] if out['scriptPubKey']['addresses'][0] in outputs: matchingOuts+=1 assert_equal(matchingOuts, 2) assert_equal(len(dec_tx['vout']), 3) ############################################## # test a fundrawtransaction with invalid vin # ############################################## listunspent = self.nodes[2].listunspent() inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin! 
outputs = { self.nodes[0].getnewaddress() : 10} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) try: rawtxfund = self.nodes[2].fundrawtransaction(rawtx) raise AssertionError("Spent more than available") except JSONRPCException as e: assert "Insufficient" in e.error['message'] ############################################################ #compare fee of a standard pubkeyhash transaction inputs = [] outputs = {self.nodes[1].getnewaddress():11} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) #create same transaction over sendtoaddress txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert feeDelta >= 0 and feeDelta <= feeTolerance ############################################################ ############################################################ #compare fee of a standard pubkeyhash transaction with multiple outputs inputs = [] outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) #create same transaction over sendtoaddress txId = self.nodes[0].sendmany("", outputs) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert feeDelta >= 0 and feeDelta <= feeTolerance ############################################################ ############################################################ #compare fee of a 2of2 multisig p2sh transaction # create 2of2 addr addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[1].getnewaddress() addr1Obj = self.nodes[1].getaddressinfo(addr1) addr2Obj = self.nodes[1].getaddressinfo(addr2) mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] inputs = [] outputs = {mSigObj:11} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) #create same transaction over sendtoaddress txId = self.nodes[0].sendtoaddress(mSigObj, 11) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert feeDelta >= 0 and feeDelta <= feeTolerance ############################################################ ############################################################ #compare fee of a standard pubkeyhash transaction # create 4of5 addr addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[1].getnewaddress() addr3 = self.nodes[1].getnewaddress() addr4 = self.nodes[1].getnewaddress() addr5 = self.nodes[1].getnewaddress() addr1Obj = self.nodes[1].getaddressinfo(addr1) addr2Obj = self.nodes[1].getaddressinfo(addr2) addr3Obj = self.nodes[1].getaddressinfo(addr3) addr4Obj = self.nodes[1].getaddressinfo(addr4) addr5Obj = self.nodes[1].getaddressinfo(addr5) mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address'] inputs = [] outputs = {mSigObj:11} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) #create same transaction over sendtoaddress txId = 
self.nodes[0].sendtoaddress(mSigObj, 11) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert feeDelta >= 0 and feeDelta <= feeTolerance ############################################################ ############################################################ # spend a 2of2 multisig transaction over fundraw # create 2of2 addr addr1 = self.nodes[2].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[2].getaddressinfo(addr1) addr2Obj = self.nodes[2].getaddressinfo(addr2) mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] # send 12 DASH to msig addr txId = self.nodes[0].sendtoaddress(mSigObj, 12) self.sync_all() self.nodes[1].generate(1) self.sync_all() oldBalance = self.nodes[1].getbalance() inputs = [] outputs = {self.nodes[1].getnewaddress():11} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) fundedTx = self.nodes[2].fundrawtransaction(rawTx) signedTx = self.nodes[2].signrawtransactionwithwallet(fundedTx['hex']) txId = self.nodes[2].sendrawtransaction(signedTx['hex']) self.sync_all() self.nodes[1].generate(1) self.sync_all() # make sure funds are received at node1 assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance()) ############################################################ # locked wallet test self.nodes[1].encryptwallet("test") self.stop_nodes() self.start_nodes() # This test is not meant to test fee estimation and we'd like # to be sure all txs are sent at a consistent desired feerate for node in self.nodes: node.settxfee(min_relay_tx_fee) connect_nodes(self.nodes[0],1) connect_nodes(self.nodes[1],2) connect_nodes(self.nodes[0],2) connect_nodes(self.nodes[0],3) self.sync_all() # drain the keypool self.nodes[1].getnewaddress() self.nodes[1].getrawchangeaddress() inputs = [] outputs = {self.nodes[0].getnewaddress():1.1} rawTx = self.nodes[1].createrawtransaction(inputs, outputs) # fund a transaction that requires a new key for the change output # creating the key must be impossible because the wallet is locked try: fundedTx = self.nodes[1].fundrawtransaction(rawTx) raise AssertionError("Wallet unlocked without passphrase") except JSONRPCException as e: assert 'Keypool ran out' in e.error['message'] #refill the keypool self.nodes[1].walletpassphrase("test", 100) self.nodes[1].keypoolrefill(2) #need to refill the keypool to get an internal change address self.nodes[1].walletlock() try: self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 12) raise AssertionError("Wallet unlocked without passphrase") except JSONRPCException as e: assert 'walletpassphrase' in e.error['message'] oldBalance = self.nodes[0].getbalance() inputs = [] outputs = {self.nodes[0].getnewaddress():11} rawTx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawTx) #now we need to unlock self.nodes[1].walletpassphrase("test", 100) signedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex']) txId = self.nodes[1].sendrawtransaction(signedTx['hex']) self.sync_all() self.nodes[1].generate(1) self.sync_all() # make sure funds are received at node1 assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance()) ############################################### # multiple (~19) inputs tx test | Compare fee # ############################################### #empty node1, send some small coins from node0 to node1 self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 
self.nodes[1].getbalance(), "", "", True) self.sync_all() self.nodes[0].generate(1) self.sync_all() for i in range(0,20): self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) self.sync_all() self.nodes[0].generate(1) self.sync_all() #fund a tx with ~20 small inputs inputs = [] outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04} rawTx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawTx) #create same transaction over sendtoaddress txId = self.nodes[1].sendmany("", outputs) signedFee = self.nodes[1].getrawmempool(True)[txId]['fee'] #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert feeDelta >= 0 and feeDelta <= feeTolerance*19 #~19 inputs ############################################# # multiple (~19) inputs tx test | sign/send # ############################################# #again, empty node1, send some small coins from node0 to node1 self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True) self.sync_all() self.nodes[0].generate(1) self.sync_all() for i in range(0,20): self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) self.sync_all() self.nodes[0].generate(1) self.sync_all() #fund a tx with ~20 small inputs oldBalance = self.nodes[0].getbalance() inputs = [] outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04} rawTx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawTx) fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex']) txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward ##################################################### # test fundrawtransaction with OP_RETURN and no vin # ##################################################### rawtx = "0100000000010000000000000000066a047465737400000000" dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(len(dec_tx['vin']), 0) assert_equal(len(dec_tx['vout']), 1) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert_greater_than(len(dec_tx['vin']), 0) # at least one vin assert_equal(len(dec_tx['vout']), 2) # one change output added ################################################## # test a fundrawtransaction using only watchonly # ################################################## inputs = [] outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2} rawtx = self.nodes[3].createrawtransaction(inputs, outputs) result = self.nodes[3].fundrawtransaction(rawtx, True) res_dec = self.nodes[0].decoderawtransaction(result["hex"]) assert_equal(len(res_dec["vin"]), 1) assert_equal(res_dec["vin"][0]["txid"], watchonly_txid) assert "fee" in result.keys() assert_greater_than(result["changepos"], -1) ############################################################### # test fundrawtransaction using the entirety of watched funds # ############################################################### inputs = [] outputs = {self.nodes[2].getnewaddress() : watchonly_amount} rawtx = self.nodes[3].createrawtransaction(inputs, outputs) result = self.nodes[3].fundrawtransaction(rawtx, True) res_dec = self.nodes[0].decoderawtransaction(result["hex"]) assert_equal(len(res_dec["vin"]), 2) assert res_dec["vin"][0]["txid"] == watchonly_txid or 
res_dec["vin"][1]["txid"] == watchonly_txid assert_greater_than(result["fee"], 0) assert_greater_than(result["changepos"], -1) assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10) signedtx = self.nodes[3].signrawtransactionwithwallet(result["hex"]) assert not signedtx["complete"] signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"]) assert signedtx["complete"] self.nodes[0].sendrawtransaction(signedtx["hex"]) if __name__ == '__main__': RawTransactionsTest().main()
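
Every fee comparison in the test above follows the same pattern: the funded fee may exceed the wallet-built fee by at most a two-byte signature tolerance per input. A small standalone illustration with made-up numbers.

from decimal import Decimal

min_relay_tx_fee = Decimal("0.00001")        # illustrative relay fee per kB
fee_tolerance = 2 * min_relay_tx_fee / 1000  # two bytes' worth of extra signature data

funded_fee = Decimal("0.00000226")           # fee chosen by fundrawtransaction (made up)
signed_fee = Decimal("0.00000225")           # fee of the equivalent wallet-built tx (made up)

fee_delta = funded_fee - signed_fee
assert fee_delta >= 0 and fee_delta <= fee_tolerance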
py
b417a817ec9d5c485af50c591b79d9e38ffddcca
import asyncio import functools import pytest import http3 def threadpool(func): """ Our sync tests should run in seperate thread to the uvicorn server. """ @functools.wraps(func) async def wrapped(*args, **kwargs): nonlocal func loop = asyncio.get_event_loop() if kwargs: func = functools.partial(func, **kwargs) await loop.run_in_executor(None, func, *args) return pytest.mark.asyncio(wrapped) @threadpool def test_get(server): response = http3.get("http://127.0.0.1:8000/") assert response.status_code == 200 assert response.reason_phrase == "OK" assert response.text == "Hello, world!" @threadpool def test_post(server): response = http3.post("http://127.0.0.1:8000/", data=b"Hello, world!") assert response.status_code == 200 assert response.reason_phrase == "OK" @threadpool def test_post_byte_iterator(server): def data(): yield b"Hello" yield b", " yield b"world!" response = http3.post("http://127.0.0.1:8000/", data=data()) assert response.status_code == 200 assert response.reason_phrase == "OK" @threadpool def test_options(server): response = http3.options("http://127.0.0.1:8000/") assert response.status_code == 200 assert response.reason_phrase == "OK" @threadpool def test_head(server): response = http3.head("http://127.0.0.1:8000/") assert response.status_code == 200 assert response.reason_phrase == "OK" @threadpool def test_put(server): response = http3.put("http://127.0.0.1:8000/", data=b"Hello, world!") assert response.status_code == 200 assert response.reason_phrase == "OK" @threadpool def test_patch(server): response = http3.patch("http://127.0.0.1:8000/", data=b"Hello, world!") assert response.status_code == 200 assert response.reason_phrase == "OK" @threadpool def test_delete(server): response = http3.delete("http://127.0.0.1:8000/") assert response.status_code == 200 assert response.reason_phrase == "OK"
py
b417a88ca535c2b0110c532fe0769621a78c280e
# -*- coding: utf-8 -*- # # pymfe documentation build configuration file, created by # sphinx-quickstart on Wed Nov 11 12:35:11 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.todo', 'sphinx.ext.imgmath', 'sphinx.ext.viewcode', 'numpydoc' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'pymfe' copyright = u'2015, Michael J. Ireland' author = u'Michael J. Ireland' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1' # The full version, including alpha/beta/rc tags. release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. 
todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'nature' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. 
htmlhelp_basename = 'pymfedoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'pymfe.tex', u'pymfe Documentation', u'Michael J. Ireland', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'pymfe', u'pymfe Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'pymfe', u'pymfe Documentation', author, 'pymfe', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
py
b417a8c15d3c7414bfc9e53d8a3ea5c843956e4c
import pandas as pd
from sys import argv
import distutils
from distutils.util import strtobool

script, filepath = argv


# def create_df(filepath):
#     return True

# open a csv file as a data frame compare expected values to actual values
def grader(csv_filepath):
    with open(csv_filepath, newline='') as csv_file:
        df = pd.read_csv(csv_file, quoting=3)
        df.columns = df.columns.str.lstrip()
        expressions = df.Expressions.tolist()
        expected = df['Expected Values'].str.lstrip().tolist()
        correct = 0
        total = df.shape[0]
        boolean_list = []
        for i, expression in enumerate(expressions):
            if eval(expression) == eval(expected[i]):
                correct += 1
        grade = "{}%".format(correct / total * 100)
        return grade
        # print("{}, {}".format(eval(expression), eval(expected[i])))

    # if ('(' or ')') not in value:
    #     words = value.split(' ')
    #     print(f"Words: {words}")
    #     for i, element in enumerate(words):
    #         if element == "True" or element == "False":
    #             boolean_list += [words.pop(i)]
    #     print(f"Booleans: {boolean_list}")

    # if value:
    #     print(value)

    # if (df.Expressions.eq(df['Expected Values'])):


# def evaluate(expression):
#     print(distutils.util.strtobool(expression))


# grader(filepath)
print("You received {} on this assignment".format(grader(filepath)))
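
A hypothetical input file for the grader above; the column names ('Expressions' and 'Expected Values') come from the code, but the rows are made up for illustration.

import pandas as pd

# Each row pairs a Python expression with the value the student expects it to produce.
rows = [
    ("1 + 1", "2"),
    ("'a' * 3", "'aaa'"),
    ("len('abc')", "3"),
]
pd.DataFrame(rows, columns=["Expressions", "Expected Values"]).to_csv(
    "homework.csv", index=False
)
# grader("homework.csv") should then report 100.0% for these rows.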
py
b417a919d30fbf292cc0a3bfb1a8048b966bc924
import re import time import random from nosuch.oscutil import * from traceback import format_exc from time import sleep import sys global id id = 0 def mycallback(ev,d): o = ev.oscmsg x = 0.0 y = 0.0 # print "ev=",ev," d=",d," source_ip=",ev.source_ip for m in o: addr = m[0] # print " addr = ",addr," m2=",m[2] if addr == "/tuio/2Dcur" and m[2] == "set": print "2Dcur id=",m[3]," xy=%.4f,%.4f"%(m[4],m[5]),ev.source_ip x = m[4] y = m[5] z = 0.2 x = x * 2.0 - 1.0 y = (1.0-y) * 2.0 - 1.0 send("/square",["x=%f"%x,"y=%f"%y,"hue=*COLORLFO1","scalex=%f"%z,"scaley=%f"%z,"alpha=!ALPHAENV","lifetime=3.0"]) if addr == "/tuio/25Dcur" and m[2] == "set": print "25Dcur id=",m[3]," xy=%.4f,%.4f"%(m[4],m[5]),m[6],ev.source_ip x = m[4] y = m[5] z = m[6] global id id += 1 x = x * 2.0 - 1.0 y = (1.0-y) * 2.0 - 1.0 send("/square",["x=%f"%x,"y=%f"%y,"hue=*COLORLFO2","scalex=%f"%z,"scaley=%f"%z,"alpha=!ALPHAENV","lifetime=3.0"]) if addr == "/tuio/_ssif" and m[2] == "set": print "IGESTURE xy=",x,y," id=",m[4]," force=",m[5] def send(oscaddr,oscmsg): # print "Sending oscaddr=",oscaddr," oscmsg=",oscmsg try: global universe, universe_host, universe_port universe.sendosc(oscaddr,oscmsg) except: global universe, universe_host, universe_port print "SEND EXCEPTION !!! =",format_exc() universe = OscRecipient(universe_host,universe_port,proto="tcp") initstuff() print "TRYING AGAIN!" universe.sendosc(oscaddr,oscmsg) def initstuff(): send("/clear",[]) send("/run",[]) send("/lfo",["name=COLORLFO1","center=0.5","amp=1.0","freq=0.002", "tag=mytag"]) send("/lfo",["name=COLORLFO2","center=0.5","amp=1.0","freq=0.02", "tag=mytag"]) send("/event",["target=COLORLFO1","type=bang"]) send("/event",["target=COLORLFO2","type=bang"]) send("/env",["name=ENV1","start=0.5","end=0.0","dur=3.0"]) send("/env",["name=ENV1","start=0.5","end=0.0","dur=0.5"]) send("/env",["name=ALPHAENV","start=0.3","end=0.01","dur=3.0"]) send("/env",["name=SCALE_ENV1","start=0.5","end=0.1","dur=0.5"]) # send("/event",["target=LFO3","type=bang"]) # send("/event",["target=ENV1","type=bang"]) # send("/square",["x=!LFO3","y=0.0","hue=0.5","scalex=0.2","scaley=0.2","alpha=0.5","lifetime=50.0"]) sleep(1.0) send("/square",["x=*ENV1","y=0.3","hue=0.7","scalex=0.2","scaley=0.2","alpha=!ALPHAENV","lifetime=10.0"]) send("/event",["target=ENV1","type=bang"]) if __name__ == '__main__': global universe, universe_port, universe_host universe_port=5555 universe_host="localhost" universe = OscRecipient(universe_host,universe_port,proto="tcp") input_name = "[email protected]" input_name = "[email protected]" port = re.compile(".*@").search(input_name).group()[:-1] host = re.compile("@.*").search(input_name).group()[1:] initstuff() oscmon = OscMonitor(host,port,proto="udp") oscmon.setcallback(mycallback,"") sleep(100000) sys.exit(0) time.sleep(20.0) send("/env",["name=ENV1","start=0.9","end=0.1","dur=2.0"]) send("/env",["name=A2","start=0.9","end=0.1","dur=2.0"]) send("/square",["name=SQ1","x=!A2","alpha=!A2"]) time.sleep(14.0) send("/list",[]) # send("/square",["x=-0.99","y=-0.99","hue=0.0","scalex=1.98","scaley=1.98","handlex=0.0","handley=0.0","alpha=0.2"]) k=0 i=0 # lfoname= "LFO%04d%04d"%(k,i) # send("/lfo",["name="+lfoname,"center=0.0","amp=1.0","freq=%f"%(0.1*random.random())]) # send("/event",["target="+lfoname,"type=bang"]) envname= "ENV%04d%04d"%(k,i) send("/env",["name="+envname,"start=1.0","end=0.0","dur=3.0"]) send("/square",["x=0.0","y=0.0","hue=%f"%random.random(),"scalex=0.2","scaley=0.2","handlex=0.0","handley=0.0","alpha=!"+envname]) # 
send("/event",["target="+envname,"type=bang"]) time.sleep(10.0) for k in range(0,10): print "k=",k for i in range(0,10): lfoname= "LFO%04d%04d"%(k,i) send("/lfo",["name="+lfoname,"center=0.0","amp=1.0","freq=%f"%(0.1*random.random())]) send("/event",["target="+lfoname,"type=bang"]) envname= "ENV%04d%04d"%(k,i) send("/env",["name="+envname,"start=1.0","end=0.0","dur=10.0"]) send("/square",["x=!"+lfoname,"y=%f"%(2*random.random()-1.0),"hue=%f"%random.random(),"scalex=0.2","scaley=0.2","handlex=0.0","handley=0.0","alpha=!"+envname,"rotation=!"+lfoname]) send("/event",["target="+envname,"type=bang"]) time.sleep(1.0) # $p /env name=EX1 start=0.0 end=0.5 dur=4.0 # $p /lfo name=LFO1 center=0.0 amp=0.5 freq=1.0 # $p /env name=EA1 start=1.0 end=0.0 dur=8.0 # # # $p /event target=EX1 type=bang at=5.0 # # $p /event target=LFO1 type=bang at=5.0 # # $p /event target=EA1 type=bang at=5.0 # # # $p /event target=Uniq0 type=bang at=5.0 # # $p /run # # sleep 2 # $p /square x=!EX1 y=0.5 hue=0.2 scalex=0.2 scaley=0.2 handlex=0.0 handley=0.0 alpha=1.0 # # # $p /square x=0.5 y=0.5 hue=0.2 scalex=0.2 scaley=0.2 handlex=0.0 handley=0.0 alpha=1.0 rotation=*LFO1 # # $p /square x=0.0 y=0.0 hue=0.2 scalex=0.2 scaley=0.2 handlex=0.5 handley=0.5 alpha=1.0 rotation=*LFO1 # # # sleep 2 # $p /stop
py
b417a9353070e111cc5fc80c0dc7837bf28ef4fe
# Benchmark function 11
# Griewank Function
# HW dimension: 30
# Min = 0 when X={0,0,0,....0}
# Range [-600,600]
# Reference: http://benchmarkfcns.xyz/benchmarkfcns/griewankfcn.html

import math
import numpy as np

name = "F11"
l_bound = -600
u_bound = 600
dim = 30
opt = 0


def func(X):
    agg = np.sum(X**2) / 4000
    index = np.arange(1, 1 + X.size)
    cos_term = np.cos(np.divide(X, index**0.5))
    multi = np.prod(cos_term)
    result = agg - multi + 1
    return result


if __name__ == '__main__':
    X = np.arange(30)
    X = X * 0.3
    print(X)
    result = func(X)
    print(result)
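
A quick sanity check of the documented minimum, reusing func and dim as defined above; the check itself is only an illustrative sketch.

import numpy as np

# Griewank is 0 at the origin and positive elsewhere in the search range.
assert np.isclose(func(np.zeros(dim)), 0.0)
assert func(np.full(dim, 0.3)) > 0.0
print("F11 optimum check passed")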
py
b417a9f90cafd4c14b66660420459167431183d9
import argparse import os import datetime import logging import time import torch import torch.nn as nn import torch.utils import torch.distributed from torch.utils.data import DataLoader from core.configs import cfg from core.datasets import build_dataset from core.models import build_feature_extractor, build_classifier from core.solver import adjust_learning_rate from core.utils.misc import mkdir from core.utils.logger import setup_logger from core.utils.metric_logger import MetricLogger from core.active.build import PixelSelection, RegionSelection from core.datasets.dataset_path_catalog import DatasetCatalog from core.loss.negative_learning_loss import NegativeLearningLoss from core.loss.local_consistent_loss import LocalConsistentLoss from core.utils.utils import set_random_seed import warnings warnings.filterwarnings('ignore') def train(cfg): logger = logging.getLogger("AL-RIPU.trainer") # create network device = torch.device(cfg.MODEL.DEVICE) feature_extractor = build_feature_extractor(cfg) feature_extractor.to(device) classifier = build_classifier(cfg) classifier.to(device) print(classifier) # init optimizer optimizer_fea = torch.optim.SGD(feature_extractor.parameters(), lr=cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM, weight_decay=cfg.SOLVER.WEIGHT_DECAY) optimizer_fea.zero_grad() optimizer_cls = torch.optim.SGD(classifier.parameters(), lr=cfg.SOLVER.BASE_LR * 10, momentum=cfg.SOLVER.MOMENTUM, weight_decay=cfg.SOLVER.WEIGHT_DECAY) optimizer_cls.zero_grad() # load checkpoint if cfg.resume: logger.info("Loading checkpoint from {}".format(cfg.resume)) checkpoint = torch.load(cfg.resume, map_location=torch.device('cpu')) feature_extractor.load_state_dict(checkpoint['feature_extractor']) classifier.load_state_dict(checkpoint['classifier']) # init mask for cityscape DatasetCatalog.initMask(cfg) # init data loader src_train_data = build_dataset(cfg, mode='train', is_source=True) tgt_train_data = build_dataset(cfg, mode='train', is_source=False) tgt_epoch_data = build_dataset(cfg, mode='active', is_source=False, epochwise=True) src_train_loader = DataLoader(src_train_data, batch_size=cfg.SOLVER.BATCH_SIZE, shuffle=True, num_workers=4, pin_memory=True, drop_last=True) tgt_train_loader = DataLoader(tgt_train_data, batch_size=cfg.SOLVER.BATCH_SIZE, shuffle=True, num_workers=4, pin_memory=True, drop_last=True) tgt_epoch_loader = DataLoader(tgt_epoch_data, batch_size=1, shuffle=False, num_workers=4, pin_memory=True, drop_last=False) # init loss sup_criterion = nn.CrossEntropyLoss(ignore_index=255) negative_criterion = NegativeLearningLoss(threshold=cfg.SOLVER.NEGATIVE_THRESHOLD) local_consistent_loss = LocalConsistentLoss(cfg.MODEL.NUM_CLASSES, cfg.SOLVER.LCR_TYPE).cuda() iteration = 0 start_training_time = time.time() end = time.time() max_iters = cfg.SOLVER.MAX_ITER meters = MetricLogger(delimiter=" ") logger.info(">>>>>>>>>>>>>>>> Start Training >>>>>>>>>>>>>>>>") feature_extractor.train() classifier.train() active_round = 1 for batch_index, (src_data, tgt_data) in enumerate(zip(src_train_loader, tgt_train_loader)): data_time = time.time() - end current_lr = adjust_learning_rate(cfg.SOLVER.LR_METHOD, cfg.SOLVER.BASE_LR, iteration, max_iters, power=cfg.SOLVER.LR_POWER) for index in range(len(optimizer_fea.param_groups)): optimizer_fea.param_groups[index]['lr'] = current_lr for index in range(len(optimizer_cls.param_groups)): optimizer_cls.param_groups[index]['lr'] = current_lr * 10 optimizer_fea.zero_grad() optimizer_cls.zero_grad() src_input, src_label = src_data['img'], 
src_data['label'] src_input = src_input.cuda(non_blocking=True) src_label = src_label.cuda(non_blocking=True) # target data # tgt_mask is active label, 255 means unlabeled data tgt_input, tgt_mask = tgt_data['img'], tgt_data['mask'] tgt_input = tgt_input.cuda(non_blocking=True) tgt_mask = tgt_mask.cuda(non_blocking=True) src_size = src_input.shape[-2:] src_out = classifier(feature_extractor(src_input), size=src_size) tgt_size = tgt_input.shape[-2:] tgt_out = classifier(feature_extractor(tgt_input), size=tgt_size) predict = torch.softmax(tgt_out, dim=1) # source supervision loss loss = torch.Tensor([0]).cuda() loss_sup = sup_criterion(src_out, src_label) meters.update(loss_sup=loss_sup.item()) loss += loss_sup # target active supervision loss if torch.sum((tgt_mask != 255)) != 0: # target has labeled pixels loss_sup_tgt = sup_criterion(tgt_out, tgt_mask) meters.update(loss_sup_tgt=loss_sup_tgt.item()) loss += loss_sup_tgt # source consistency regularization loss if cfg.SOLVER.CONSISTENT_LOSS > 0: consistent_loss = local_consistent_loss(src_out, src_label) * cfg.SOLVER.CONSISTENT_LOSS meters.update(cr_loss=consistent_loss.item()) loss += consistent_loss # target negative pseudo loss if cfg.SOLVER.NEGATIVE_LOSS > 0: negative_learning_loss = negative_criterion(predict) * cfg.SOLVER.NEGATIVE_LOSS meters.update(nl_loss=negative_learning_loss.item()) loss += negative_learning_loss loss.backward() optimizer_fea.step() optimizer_cls.step() batch_time = time.time() - end end = time.time() meters.update(time=batch_time, data=data_time) eta_seconds = meters.time.global_avg * (cfg.SOLVER.STOP_ITER - iteration) eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) iteration += 1 if iteration % 20 == 0 or iteration == max_iters: logger.info( meters.delimiter.join( [ "eta: {eta}", "iter: {iter}", "{meters}", "lr: {lr:.6f}", "max mem: {memory:.02f} GB" ] ).format( eta=eta_string, iter=iteration, meters=str(meters), lr=optimizer_fea.param_groups[0]["lr"], memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 / 1024.0 ) ) if iteration == cfg.SOLVER.MAX_ITER or iteration % cfg.SOLVER.CHECKPOINT_PERIOD == 0: filename = os.path.join(cfg.OUTPUT_DIR, "model_iter{:06d}.pth".format(iteration)) torch.save({'iteration': iteration, 'feature_extractor': feature_extractor.state_dict(), 'classifier': classifier.state_dict(), 'optimizer_fea': optimizer_fea.state_dict(), 'optimizer_cls': optimizer_cls.state_dict(), }, filename) # active learning if iteration in cfg.ACTIVE.SELECT_ITER or cfg.DEBUG: if cfg.ACTIVE.SETTING == "RA": RegionSelection(cfg=cfg, feature_extractor=feature_extractor, classifier=classifier, tgt_epoch_loader=tgt_epoch_loader) elif cfg.ACTIVE.SETTING == 'PA': PixelSelection(cfg=cfg, feature_extractor=feature_extractor, classifier=classifier, tgt_epoch_loader=tgt_epoch_loader) active_round += 1 if iteration == cfg.SOLVER.MAX_ITER: break if iteration == cfg.SOLVER.STOP_ITER: break total_training_time = time.time() - start_training_time total_time_str = str(datetime.timedelta(seconds=total_training_time)) logger.info( "Total training time: {} ({:.4f} s / it)".format( total_time_str, total_training_time / cfg.SOLVER.STOP_ITER ) ) def main(): parser = argparse.ArgumentParser(description="Active Domain Adaptive Semantic Segmentation Training") parser.add_argument("-cfg", "--config-file", default="", metavar="FILE", help="path to config file", type=str) parser.add_argument("--proctitle", type=str, default="AL-RIPU", help="allow a process to change its title",) parser.add_argument( "opts", 
help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER ) args = parser.parse_args() if args.opts is not None: args.opts[-1] = args.opts[-1].strip('\r\n') torch.backends.cudnn.benchmark = True cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() output_dir = cfg.OUTPUT_DIR if output_dir: mkdir(output_dir) logger = setup_logger("AL-RIPU", output_dir, 0) logger.info(args) logger.info("Loaded configuration file {}".format(args.config_file)) logger.info("Running with config:\n{}".format(cfg)) logger.info('Initializing Cityscapes label mask...') set_random_seed(cfg.SEED) train(cfg) if __name__ == '__main__': main()
py
b417ab49f7039182cd8aac3716e41c1188aead0f
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    if 'test' in sys.argv:
        os.environ['DJANGO_SETTINGS_MODULE'] = 'media_management_api.settings.test'
    else:
        os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'media_management_api.settings.aws')

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
py
b417ac19d53d09edaa98db2af24a9883caccad4f
import tempfile

from dffml.accuracy import MeanSquaredErrorAccuracy
from dffml import train, score, predict, Feature, Features, AsyncTestCase

from REPLACE_IMPORT_PACKAGE_NAME.myslr import MySLRModel

TRAIN_DATA = [
    [12.4, 11.2],
    [14.3, 12.5],
    [14.5, 12.7],
    [14.9, 13.1],
    [16.1, 14.1],
    [16.9, 14.8],
    [16.5, 14.4],
    [15.4, 13.4],
    [17.0, 14.9],
    [17.9, 15.6],
    [18.8, 16.4],
    [20.3, 17.7],
    [22.4, 19.6],
    [19.4, 16.9],
    [15.5, 14.0],
    [16.7, 14.6],
]

TEST_DATA = [
    [17.3, 15.1],
    [18.4, 16.1],
    [19.2, 16.8],
    [17.4, 15.2],
    [19.5, 17.0],
    [19.7, 17.2],
    [21.2, 18.6],
]


class TestMySLRModel(AsyncTestCase):
    @classmethod
    def setUpClass(cls):
        # Create a temporary directory to store the trained model
        cls.model_dir = tempfile.TemporaryDirectory()
        # Create an instance of the model
        cls.model = MySLRModel(
            features=Features(Feature("X", float, 1)),
            predict=Feature("Y", float, 1),
            location=cls.model_dir.name,
        )
        cls.scorer = MeanSquaredErrorAccuracy()

    @classmethod
    def tearDownClass(cls):
        # Remove the temporary directory where the model was stored to cleanup
        cls.model_dir.cleanup()

    async def test_00_train(self):
        # Train the model on the training data
        await train(self.model, *[{"X": x, "Y": y} for x, y in TRAIN_DATA])

    async def test_01_accuracy(self):
        # Use the test data to assess the model's accuracy
        res = await score(
            self.model,
            self.scorer,
            Feature("Y", float, 1),
            *[{"X": x, "Y": y} for x, y in TEST_DATA],
        )
        # Ensure the mean squared error is small (below 0.1)
        self.assertTrue(0.0 <= res < 0.1)

    async def test_02_predict(self):
        # Get the prediction for each piece of test data
        async for i, features, prediction in predict(
            self.model, *[{"X": x, "Y": y} for x, y in TEST_DATA]
        ):
            # Grab the correct value
            correct = features["Y"]
            # Grab the predicted value
            prediction = prediction["Y"]["value"]
            # Check that the prediction is within 10% error of the actual value
            acceptable = 0.1
            self.assertLess(prediction, correct * (1.0 + acceptable))
            self.assertGreater(prediction, correct * (1.0 - acceptable))
py
b417ac4c1e11abec73064d10940f4e7aa5ded3e8
# Run Grtrans with rrjet model # The rrjet model is defined in "fluid_model_rrjet.py" # NOTE -- currently the power law emissivity is very slow because paralleization is off # First make grtrans with 'make' # Then run this in python import numpy as np import grtrans_batch as gr import matplotlib.pyplot as plt import scipy.ndimage.filters as filt ang=20. name = 'rrjet'+str(ang) mu = np.cos(ang*np.pi/180.) size = 300. uout = 1./(10*size) npix = 100 ngeo = 5000 cmperMpc = 3.086e24 MBH = 6.7e9 DTOBH = 16.528*cmperMpc RADPERUAS = np.pi/180./3600./1.e6 psize_rg = 2*size/npix cmperrg = 147708.8 * MBH psize_cm = psize_rg * cmperrg psize_rad = psize_cm / DTOBH psize_uas = psize_rad / RADPERUAS pp= 2.001 RF = 43.e9 cfun = 'jet' cfun2 = 'seismic' RERUN = True FNAME = 'grtrans_jet_compare.txt' def main(): # run grtrans x=gr.grtrans() x.write_grtrans_inputs(name+'.in', oname=name+'.out', fname='RRJET',phi0=0., betaeconst=1.e-4, ximax=10., nfreq=1,fmin=RF,fmax=RF, gmin=10., gmax=1.e35, p2=pp, p1=pp, #ename='SYNCHPL', ename='POLSYNCHPL', nvals=4, fpositron=0, spin=0., standard=1, uout=uout, mbh=MBH, #epcoefindx=[1,1,1,1,1,1,1], #epcoefindx=[1,1,1,1,0,0,0], mdotmin=1.57e15,mdotmax=1.57e15,nmdot=1, nmu=1,mumin=mu,mumax=mu, gridvals=[-size,size,-size,size], nn=[npix,npix,ngeo], hindf=1,hnt=1, muval=1.) if RERUN: x.run_grtrans() # load image x.read_grtrans_output() x.convert_to_Jy(DTOBH) #grt_obj=x save_grtrans_image(x) display_grtrans_image(x) def save_grtrans_image(grt_obj): """quick save, not ehtim compatible""" I_im = grt_obj.ivals[:,0,0].reshape(npix,npix).flatten() Q_im = grt_obj.ivals[:,1,0].reshape(npix,npix).flatten() U_im = grt_obj.ivals[:,2,0].reshape(npix,npix).flatten() V_im = grt_obj.ivals[:,3,0].reshape(npix,npix).flatten() # convert to Tb factor = 3.254e13/(RF**2 * psize_rad**2) I_im *= factor Q_im *= factor U_im *= factor V_im *= factor x = np.array([[i for i in range(npix)] for j in range(npix)]).flatten() y = np.array([[j for i in range(npix)] for j in range(npix)]).flatten() x -= npix/2 y -= npix/2 x = x*psize_uas y = y*psize_uas outdat = np.vstack((x.T,y.T,I_im.T,Q_im.T,U_im.T,V_im.T)).T np.savetxt('../rrjet_and_riaf/'+FNAME,outdat) #np.savetxt('../rrjet_and_riaf/grtrans_jet_compare_positron_noconv.txt',outdat) return def display_grtrans_image(grt_obj,nvec=20,veccut=0.005,blur_kernel=1.25): plt.close('all') I_im = grt_obj.ivals[:,0,0].reshape(npix,npix) Q_im = grt_obj.ivals[:,1,0].reshape(npix,npix) U_im = grt_obj.ivals[:,2,0].reshape(npix,npix) V_im = grt_obj.ivals[:,3,0].reshape(npix,npix) I_im = filt.gaussian_filter(I_im, (blur_kernel, blur_kernel)) Q_im = filt.gaussian_filter(Q_im, (blur_kernel, blur_kernel)) U_im = filt.gaussian_filter(U_im, (blur_kernel, blur_kernel)) V_im = filt.gaussian_filter(V_im, (blur_kernel, blur_kernel)) # convert to Tb factor = 3.254e13/(RF**2 * psize_rad**2) I_im *= factor Q_im *= factor U_im *= factor V_im *= factor # Polarization Vectors P_im = np.abs(Q_im + 1j*U_im) m_im = P_im/I_im thin = npix//nvec mask = I_im > veccut * np.max(I_im) mask2 = mask[::thin, ::thin] m = m_im[::thin, ::thin][mask2] x = (np.array([[i for i in range(npix)] for j in range(npix)])[::thin, ::thin]) x = x[mask2] y = (np.array([[j for i in range(npix)] for j in range(npix)])[::thin, ::thin]) y = y[mask2] a = (-np.sin(np.angle(Q_im+1j*U_im)/2)[::thin, ::thin]) a = a[mask2] #a = m*a b = ( np.cos(np.angle(Q_im+1j*U_im)/2)[::thin, ::thin]) b = b[mask2] #b = m*b P_im[np.logical_not(mask)]=0. m_im[np.logical_not(mask)]=0. 
# ticks xticks = ticks(npix, 2*size/npix) yticks = ticks(npix, 2*size/npix) # display Stokes I plt.figure(0) im = plt.imshow(I_im, cmap=plt.get_cmap(cfun), interpolation='gaussian') cb = plt.colorbar(im, fraction=0.046, pad=0.04, orientation="vertical") cb.set_label('Tb (K)', fontsize=14) plt.title(("Stokes I, %.2f GHz " % (RF/1e9)), fontsize=16) plt.xticks(xticks[0], xticks[1]) plt.yticks(yticks[0], yticks[1]) plt.xlabel('x/rg') plt.ylabel('y/rg') # display Stokes Q plt.figure(1) im = plt.imshow(Q_im, cmap=plt.get_cmap(cfun2), interpolation='gaussian') cb = plt.colorbar(im, fraction=0.046, pad=0.04, orientation="vertical") cb.set_label('Tb (K)', fontsize=14) plt.title(("Stokes Q, %.2f GHz " % (RF/1e9)), fontsize=16) plt.xticks(xticks[0], xticks[1]) plt.yticks(yticks[0], yticks[1]) plt.xlabel('x/rg') plt.ylabel('y/rg') # display Stokes U plt.figure(2) im = plt.imshow(U_im, cmap=plt.get_cmap(cfun2), interpolation='gaussian') cb = plt.colorbar(im, fraction=0.046, pad=0.04, orientation="vertical") cb.set_label('Tb (K)', fontsize=14) plt.title(("Stokes U, %.2f GHz " % (RF/1e9)), fontsize=16) plt.xticks(xticks[0], xticks[1]) plt.yticks(yticks[0], yticks[1]) plt.xlabel('x/rg') plt.ylabel('y/rg') # display Stokes V plt.figure(3) im = plt.imshow(V_im, cmap=plt.get_cmap(cfun2), interpolation='gaussian') cb = plt.colorbar(im, fraction=0.046, pad=0.04, orientation="vertical") cb.set_label('Tb (K)', fontsize=14) plt.title(("Stokes V, %.2f GHz " % (RF/1e9)), fontsize=16) plt.xticks(xticks[0], xticks[1]) plt.yticks(yticks[0], yticks[1]) plt.xlabel('x/rg') plt.ylabel('y/rg') # display P # plt.figure(4) # im = plt.imshow(P_im, cmap=plt.get_cmap(cfun), interpolation='gaussian') # cb = plt.colorbar(im, fraction=0.046, pad=0.04, orientation="vertical") # cb.set_label('Tb (K)', fontsize=14) # plt.title(("P, %.2f GHz " % (RF/1e9)), fontsize=16) # plt.xticks(xticks[0], xticks[1]) # plt.yticks(yticks[0], yticks[1]) # plt.xlabel('x/rg') # plt.ylabel('y/rg') # # display m # plt.figure(5) # im = plt.imshow(m_im, cmap=plt.get_cmap('viridis'), interpolation='gaussian') # cb = plt.colorbar(im, fraction=0.046, pad=0.04, orientation="vertical") # cb.set_label('P/I', fontsize=14) # plt.title(("P/I, %.2f GHz " % (RF/1e9)), fontsize=16) # plt.xticks(xticks[0], xticks[1]) # plt.yticks(yticks[0], yticks[1]) # plt.xlabel('x/rg') # plt.ylabel('y/rg') # display I with pol ticks plt.figure(6) im = plt.imshow(I_im, cmap=plt.get_cmap(cfun), interpolation='gaussian') cb = plt.colorbar(im, fraction=0.046, pad=0.04, orientation="vertical") cb.set_label('Tb (K)', fontsize=14) plt.title(("I, %.2f GHz " % (RF/1e9)), fontsize=16) plt.xticks(xticks[0], xticks[1]) plt.yticks(yticks[0], yticks[1]) plt.xlabel('x/rg') plt.ylabel('y/rg') plt.quiver(x, y, a, b, headaxislength=20, headwidth=1, headlength=.01, minlength=0, minshaft=1, width=.01*npix, units='x', pivot='mid', color='k', angles='uv', scale=1.0/thin) plt.quiver(x, y, a, b, headaxislength=20, headwidth=1, headlength=.01, minlength=0, minshaft=1, width=.005*npix, units='x', pivot='mid', color='w', angles='uv', scale=1.1/thin) plt.show() def ticks(axisdim, psize, nticks=8): """Return a list of ticklocs and ticklabels psize should be in desired units """ axisdim = int(axisdim) nticks = int(nticks) if not axisdim % 2: axisdim += 1 if nticks % 2: nticks -= 1 tickspacing = float((axisdim-1))/nticks ticklocs = np.arange(0, axisdim+1, tickspacing) - 0.5 ticklabels= np.around(psize * np.arange((axisdim-1)/2.0, -(axisdim)/2.0, -tickspacing), decimals=1) return (ticklocs, ticklabels) if 
__name__=='__main__': main()
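
# Hedged sketch (editorial addition, not part of the original grtrans script):
# reading the six-column text file written by save_grtrans_image() back into
# 2-D Stokes maps. Column order (x, y, I, Q, U, V) follows the np.vstack call
# in save_grtrans_image(); the default path and npix reuse the globals above.
def load_saved_image(path='../rrjet_and_riaf/' + FNAME, n=npix):
    dat = np.loadtxt(path)
    x, y, I_im, Q_im, U_im, V_im = (dat[:, k].reshape(n, n) for k in range(6))
    return x, y, I_im, Q_im, U_im, V_im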
py
b417ac86a8d8d927b1ac7f151a903d83b9d86b24
import unittest import numpy as np from dragnet.features import _weninger class TestWeningerSxDx(unittest.TestCase): def test_weninger_sx_sdx(self): x = np.linspace(0, 10, 10) actual = _weninger.sx_sdx(x) expected = np.array( [[0.47448994, 2.22222222], [1.18661763, 2.22222222], [2.22759261, 2.22222222], [3.33348203, 2.22214787], [4.44444444, 2.21961138], [5.55555556, 2.18707981], [6.66651797, 2.02019401], [7.77240739, 1.63420945], [8.81338237, 1.14625352], [9.52551006, 0.79272618]]) self.assertTrue(np.allclose(actual, expected)) self.assertEqual(actual.shape, (10, 2)) if __name__ == "__main__": unittest.main()
py
b417ae4c770af33455a7f73eb6f438a3eb73f5ba
"""Plotting helper for MAPDL using pyvista""" import pyvista as pv import numpy as np from ansys.mapdl.reader.misc import unique_rows def general_plotter(title, meshes, points, labels, cpos=None, show_bounds=False, show_axes=True, background=None, off_screen=None, screenshot=False, window_size=None, notebook=None, # add_mesh kwargs: color='w', show_edges=None, edge_color=None, point_size=5.0, line_width=None, opacity=1.0, flip_scalars=False, lighting=None, n_colors=256, interpolate_before_map=True, cmap=None, render_points_as_spheres=False, render_lines_as_tubes=False, stitle=None, smooth_shading=False, # labels kwargs font_size=None, font_family=None, text_color=None): """General pyansys plotter for APDL geometry and meshes. Parameters ---------- cpos : list(tuple(floats)), str The camera position to use. You can either use a saved camera position or specify one of the following strings: - ``"xy"`` - ``"xz"`` - ``"yz"`` - ``"yx"`` - ``"zx"`` - ``"zy"`` - ``"iso"`` off_screen : bool, optional Renders off screen when ``True``. Useful for automated screenshots. window_size : list, optional Window size in pixels. Defaults to ``[1024, 768]`` notebook : bool, optional When True, the resulting plot is placed inline a jupyter notebook. Assumes a jupyter console is active. Automatically enables off_screen. show_bounds : bool, optional Shows mesh bounds when ``True``. show_axes : bool, optional Shows a vtk axes widget. Enabled by default. screenshot : str or bool, optional Saves screenshot to file when enabled. color : string or 3 item list, optional, defaults to white Use to make the entire mesh have a single solid color. Either a string, RGB list, or hex color string. For example: ``color='white'``, ``color='w'``, ``color=[1, 1, 1]``, or ``color='#FFFFFF'``. Color will be overridden if scalars are specified. show_edges : bool, optional Shows the edges of a mesh. Does not apply to a wireframe representation. edge_color : string or 3 item list, optional, defaults to black The solid color to give the edges when ``show_edges=True``. Either a string, RGB list, or hex color string. point_size : float, optional Point size of any nodes in the dataset plotted. Also applicable when style='points'. Default ``5.0`` line_width : float, optional Thickness of lines. Only valid for wireframe and surface representations. Default None. opacity : float, str, array-like Opacity of the mesh. If a single float value is given, it will be the global opacity of the mesh and uniformly applied everywhere - should be between 0 and 1. A string can also be specified to map the scalars range to a predefined opacity transfer function (options include: 'linear', 'linear_r', 'geom', 'geom_r'). A string could also be used to map a scalars array from the mesh to the opacity (must have same number of elements as the ``scalars`` argument). Or you can pass a custom made transfer function that is an array either ``n_colors`` in length or shorter. n_colors : int, optional Number of colors to use when displaying scalars. Defaults to 256. The scalar bar will also have this many colors. cmap : str, list, optional Name of the Matplotlib colormap to us when mapping the ``scalars``. See available Matplotlib colormaps. Only applicable for when displaying ``scalars``. Requires Matplotlib to be installed. ``colormap`` is also an accepted alias for this. If ``colorcet`` or ``cmocean`` are installed, their colormaps can be specified by name. You can also specify a list of colors to override an existing colormap with a custom one. 
For example, to create a three color colormap you might specify ``['green', 'red', 'blue']`` render_points_as_spheres : bool, optional Render points as spheres. render_lines_as_tubes : bool, optional Renders lines as tubes. smooth_shading : bool, optional Smoothly render curved surfaces when plotting. Not helpful for all meshes. """ if notebook: off_screen = True pl = pv.Plotter(off_screen=off_screen, notebook=notebook) if background: pl.set_background(background) for point in points: pl.add_points(point['points'], scalars=point.get('scalars', None), color=color, show_edges=show_edges, edge_color=edge_color, point_size=point_size, line_width=line_width, opacity=opacity, flip_scalars=flip_scalars, lighting=lighting, n_colors=n_colors, interpolate_before_map=interpolate_before_map, cmap=cmap, render_points_as_spheres=render_points_as_spheres, render_lines_as_tubes=render_lines_as_tubes) for mesh in meshes: pl.add_mesh(mesh['mesh'], scalars=mesh.get('scalars', None), stitle=mesh.get('stitle', None), color=mesh.get('color', color), show_edges=show_edges, edge_color=edge_color, smooth_shading=smooth_shading, point_size=point_size, line_width=line_width, opacity=opacity, flip_scalars=flip_scalars, lighting=lighting, n_colors=n_colors, interpolate_before_map=interpolate_before_map, cmap=cmap, render_points_as_spheres=render_points_as_spheres, render_lines_as_tubes=render_lines_as_tubes) for label in labels: # verify points are not duplicates points, idx, _ = unique_rows(np.array(label['points'])) labels = np.array(label['labels'])[idx].tolist() pl.add_point_labels(points, labels, show_points=False, shadow=False, font_size=font_size, font_family=font_family, text_color=text_color) if stitle is not None: pl.add_scalar_bar(title=stitle) if cpos: pl.camera_position = cpos if show_bounds: pl.show_bounds() if show_axes: pl.show_axes() if screenshot: pl.show(title=title, auto_close=False, window_size=window_size) pl.screenshot(screenshot) else: pl.show() return pl.camera_position
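

# Hedged usage sketch (editorial addition): a minimal call to general_plotter()
# with a single PyVista mesh and no point clouds or labels. The sphere and the
# plotting options below are illustrative only; any dataset passed as
# {'mesh': ...} is handled the same way by the loop above.
if __name__ == '__main__':
    demo_mesh = pv.Sphere()
    general_plotter('demo',
                    meshes=[{'mesh': demo_mesh, 'color': 'lightblue'}],
                    points=[],
                    labels=[],
                    cpos='iso',
                    show_edges=True)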
py
b417af59e5ef105b3a11a6c6fb3cc87112f1f2a6
#!/usr/bin/env python # -*- coding: utf-8 -*- # We have a few methods in here whose exact signature varies from class to class -- pylint: disable=arguments-differ # Also we access husker._value all over, the name starts with an underscores but that's ok, pylint: disable=protected-access #---------------------------------------------------------------------------------------------------------------------------------- # includes # 2+3 compat from __future__ import absolute_import, division, print_function, unicode_literals # standards from datetime import datetime from decimal import Decimal, InvalidOperation from functools import reduce, wraps import operator import re # alcazar from ..utils.compatibility import PY2, string_types, text_type from ..utils.text import normalize_spaces from .exceptions import HuskerLookupError, HuskerMismatch, HuskerMultipleSpecMatch, HuskerNotUnique, HuskerValueError #---------------------------------------------------------------------------------------------------------------------------------- # globals _builtin_int = int # pylint: disable=invalid-name _unspecified = object() # pylint: disable=invalid-name #---------------------------------------------------------------------------------------------------------------------------------- class SelectorMixin(object): # This could as well be part of `Husker`, it's separated only for readability and an aesthetic separation of concerns @property def id(self): return self.__class__.__name__ def __call__(self, *args, **kwargs): return self.one(*args, **kwargs) def selection(self, *spec): """ Runs a search for the given spec, and returns the results, as a ListHusker """ raise NotImplementedError def parts(self, **fields): for key, husker in fields.items(): if not isinstance(husker, Husker): husker = self.one(husker) yield key, husker def one(self, *spec): selected = self.selection(*spec) if len(selected) == 0: raise HuskerMismatch('%s found no matches for %s in %s' % (self.id, self.repr_spec(*spec), self.repr_value())) elif len(selected) > 1: raise HuskerNotUnique('%s expected 1 match for %s, found %d' % (self.id, self.repr_spec(*spec), len(selected))) else: return selected[0] def some(self, *spec): selected = self.selection(*spec) if len(selected) == 0: return NULL_HUSKER elif len(selected) > 1: raise HuskerNotUnique('%s expected 1 match for %s, found %d' % (self.id, self.repr_spec(*spec), len(selected))) else: return selected[0] def first(self, *spec): selected = self.selection(*spec) if len(selected) == 0: raise HuskerMismatch('%s found no matches for %s in %s' % (self.id, self.repr_spec(*spec), self.repr_value())) else: return selected[0] def last(self, *spec): selected = self.selection(*spec) if len(selected) == 0: raise HuskerMismatch('%s found no matches for %s in %s' % (self.id, self.repr_spec(*spec), self.repr_value())) else: return selected[-1] def any(self, *spec): selected = self.selection(*spec) if len(selected) == 0: return NULL_HUSKER else: return selected[0] def all(self, *spec): selected = self.selection(*spec) if not selected: raise HuskerMismatch('%s found no matches for %s in %s' % (self.id, self.repr_spec(*spec), self.repr_value())) return selected def one_of(self, *all_specs): match = self.some_of(*all_specs) if not match: raise HuskerMismatch("%s: none of the specified specs matched %r: %s" % ( self.id, self._value, ', '.join('"%s"' % spec for spec in all_specs), )) return match def some_of(self, *all_specs): match = matching_spec = None for spec in all_specs: if not isinstance(spec, (list, 
tuple)): spec = [spec] selected = self.some(*spec) if selected: if matching_spec is None: match, matching_spec = selected, spec else: raise HuskerMultipleSpecMatch('%s: both %s and %s matched' % ( self.id, self.repr_spec(*matching_spec), self.repr_spec(*spec), )) return match def first_of(self, *all_specs): for spec in all_specs: if not isinstance(spec, (list, tuple)): spec = [spec] selected = self.any(*spec) if selected: return selected raise HuskerMismatch("%s: none of the specified specs matched: %s" % ( self.id, ', '.join('"%s"' % spec for spec in all_specs), )) def any_of(self, *all_specs): for spec in all_specs: if not isinstance(spec, (list, tuple)): spec = [spec] selected = self.any(*spec) if selected: return selected return NULL_HUSKER def all_of(self, *all_specs): return ListHusker( element for spec in all_specs for element in self.all(spec) ) def selection_of(self, *all_specs): return ListHusker( element for spec in all_specs for element in self.selection(spec) ) #---------------------------------------------------------------------------------------------------------------------------------- # utils def _forward_to_value(operator_function, return_type): def method(self, other): raw = operator_function(self._value, other) if raw is NotImplemented: return NotImplemented return return_type(raw) return method def _value_errors_as_husker_errors(exception_class=ValueError): def make_wrapper(function): @wraps(function) def wrapped(*args, **kwargs): try: return function(*args, **kwargs) except exception_class as error: raise HuskerValueError(*error.args) return wrapped return make_wrapper #---------------------------------------------------------------------------------------------------------------------------------- class Husker(SelectorMixin): """ A Husker is used to extract from a raw document those bits of data that are of relevance to our scraper. It does not concern itself with cleaning or converting that data -- that's for the `Cleaner` to do. A Husker is just about locating the document node, or the text substring, that we're looking for. 
""" def __init__(self, value): self._value = value @property def text(self): return TextHusker(self.str) @property def multiline(self): """ Same as `text`, but preserves line breaks """ raise NotImplementedError(repr(self)) @property def str(self): raise NotImplementedError(repr(self)) @property @_value_errors_as_husker_errors() def int(self): return int(self.str) @property @_value_errors_as_husker_errors() def float(self): return float(self.str) @property @_value_errors_as_husker_errors(exception_class=InvalidOperation) def decimal(self): return Decimal(self.str) @_value_errors_as_husker_errors() def date(self, fmt='%Y-%m-%d'): return datetime.strptime(self.str, fmt).date() @_value_errors_as_husker_errors() def datetime(self, fmt='%Y-%m-%dT%H:%M:%S'): text = self.text.sub( r'(?:\.\d+|[\+\-]\d\d?(?::\d\d?)?|Z)*$', '', ) return datetime.strptime(text.str, fmt) # NB method `json` is moneky-patched into here from JmesPathHusker def map_raw(self, function): return function(self.raw) def map(self, function): return function(self) def filter(self, function): if function(self): return self else: return NULL_HUSKER def lookup(self, table, default=_unspecified): try: return table[self.str] except KeyError: if default is _unspecified: raise HuskerLookupError("%r not found in lookup table" % (self.raw,)) else: return default @property def raw(self): # In the default case, return ._value, but some subclasses override this return self._value def __bool__(self): """ A husker evaluates as truthy iff it holds a value at all, irrespective of what that value's truthiness is. """ return self._value is not None if PY2: # NB don't just say `__nonzero__ = __bool__` because `__bool__` is overriden in some subclasses def __nonzero__(self): return self.__bool__() def repr_spec(self, *spec): if len(spec) == 1: return repr(spec[0]) else: return repr(spec) def repr_value(self): return repr(self.text._value) def __str__(self): return self.text._value def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self._value) def __hash__(self): return hash(self._value) __eq__ = _forward_to_value(operator.eq, bool) __ne__ = _forward_to_value(operator.ne, bool) __lt__ = _forward_to_value(operator.lt, bool) __le__ = _forward_to_value(operator.le, bool) __gt__ = _forward_to_value(operator.gt, bool) __ge__ = _forward_to_value(operator.ge, bool) #---------------------------------------------------------------------------------------------------------------------------------- class ListHusker(Husker): def __init__(self, value): assert value is not None if not callable(getattr(value, '__len__', None)): value = list(value) super(ListHusker, self).__init__(value) def __iter__(self): return iter(self._value) def __len__(self): return len(self._value) def __getitem__(self, item): return self._value[item] def __bool__(self): return bool(self._value) def __add__(self, other): return ListHusker(self._value + other._value) def selection(self, test=None): if test is not None and not callable(test): spec = test test = lambda child: child.selection(spec) return ListHusker( child for child in self._value if test is None or test(child) ) def dedup(self, key=None): seen = set() deduped = [] for child in self._value: keyed = child if key is None else key(child) if keyed not in seen: seen.add(keyed) deduped.append(child) return ListHusker(deduped) def _mapped_property(name, cls=None): # pylint: disable=no-self-argument return property(lambda self: (cls or self.__class__)( getattr(child, name) for child in self._value )) def 
_mapped_operation(name, cls=None): # pylint: disable=no-self-argument def operation(self, *args, **kwargs): list_cls = cls or self.__class__ return list_cls( getattr(child, name)(*args, **kwargs) for child in self._value ) return operation text = _mapped_property('text') multiline = _mapped_property('multiline') js = _mapped_operation('js') json = _mapped_operation('json') sub = _mapped_operation('sub') attrib = _mapped_operation('attrib') raw = _mapped_property('raw', cls=list) str = _mapped_property('str', cls=list) int = _mapped_property('int', cls=list) float = _mapped_property('float', cls=list) decimal = _mapped_property('decimal', cls=list) date = _mapped_operation('date', cls=list) datetime = _mapped_operation('datetime', cls=list) def map_raw(self, function): return [function(element.raw) for element in self] def map(self, function): return [function(element) for element in self] def filter(self, function): return ListHusker( element for element in self if function(element) ) def join(self, sep): return TextHusker(sep.join(self.raw)) def __str__(self): return repr(self._value) EMPTY_LIST_HUSKER = ListHusker([]) #---------------------------------------------------------------------------------------------------------------------------------- class ScalarHusker(Husker): def __init__(self, value): assert value is not None super(ScalarHusker, self).__init__(value) def selection(self, *spec): return EMPTY_LIST_HUSKER def repr_spec(self, regex, flags=''): del flags return regex def __bool__(self): return bool(self._value) @property def str(self): return str(self._value) def __str__(self): return str(self._value) #---------------------------------------------------------------------------------------------------------------------------------- class TextHusker(Husker): def __init__(self, value): assert value is not None super(TextHusker, self).__init__(value) def selection(self, regex, flags=''): regex = self._compile(regex, flags) selected = regex.finditer(self._value) if regex.groups < 2: return ListHusker(map(_husk, ( m.group(regex.groups) for m in selected ))) else: return ListHusker( ListHusker(map(_husk, m.groups())) for m in selected ) def sub(self, regex, replacement, flags=''): return TextHusker( self._compile(regex, flags).sub( replacement, self._value, ) ) @property def text(self): return self @property def str(self): return self._value @property def multiline(self): return self @property def normalized(self): return TextHusker(normalize_spaces(self._value)) def lower(self): return TextHusker(self._value.lower()) def upper(self): return TextHusker(self._value.upper()) def repr_spec(self, regex, flags=''): return "%s%s" % ( re.sub(r'^u?[\'\"](.*)[\'\"]$', r'/\1/', regex), flags, ) def __add__(self, other): return TextHusker(self._value + other._value) def __bool__(self): return bool(self._value) def __str__(self): return self._value @staticmethod def _compile(regex, flags): if isinstance(regex, string_types): return re.compile( regex, reduce( operator.or_, (getattr(re, f.upper()) for f in flags), 0, ), ) elif flags == '': return regex else: raise ValueError((regex, flags)) #---------------------------------------------------------------------------------------------------------------------------------- class NullHusker(Husker): def __init__(self): super(NullHusker, self).__init__(None) _returns_null = lambda *args, **kwargs: NULL_HUSKER _returns_none = lambda *args, **kwargs: None selection = _returns_null one = _returns_null some = _returns_null first = _returns_null last = 
_returns_null any = _returns_null all = _returns_null one_of = _returns_null some_of = _returns_null first_of = _returns_null any_of = _returns_null all_of = _returns_null selection_of = _returns_null text = property(_returns_null) multiline = property(_returns_null) join = _returns_null list = _returns_null sub = _returns_null lower = _returns_null upper = _returns_null __getitem__ = _returns_null attrib = _returns_null form = _returns_none json = _returns_null str = property(_returns_none) int = property(_returns_none) float = property(_returns_none) decimal = property(_returns_none) date = _returns_none datetime = _returns_none strftime = _returns_none map = _returns_null map_raw = _returns_none filter = _returns_null lookup = _returns_none __iter__ = lambda self: iter([]) __eq__ = lambda self, other: other is None __ne__ = lambda self, other: other is not None __lt__ = lambda self, other: False __le__ = lambda self, other: False __gt__ = lambda self, other: False __ge__ = lambda self, other: False def __str__(self): return '<Null>' NULL_HUSKER = NullHusker() #---------------------------------------------------------------------------------------------------------------------------------- def _husk(value): if isinstance(value, text_type): return TextHusker(value) elif value is None: return NULL_HUSKER else: raise ValueError(repr(value)) #----------------------------------------------------------------------------------------------------------------------------------
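

# Hedged usage sketch (editorial addition): exercising TextHusker directly on an
# illustrative string. one() returns the single regex match as a TextHusker and
# some() returns NULL_HUSKER when nothing matches, as described in the Husker
# docstring above.
def _demo_text_husker():
    text = TextHusker('price: 12.50 USD')
    amount = text.one(r'(\d+\.\d+)')
    assert amount.decimal == Decimal('12.50')
    assert not text.some(r'EUR')
    return amount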
py
b417af7fdef08dd9b97bbbcee05838e029be8286
""" Money unittests as mixins for Money and subclasses """ import abc from decimal import Decimal import collections import unittest from money import Money, XMoney class ClassMixin(object): def test_new_instance_int_amount(self): self.assertIsInstance(self.MoneyClass(0, 'XXX'), self.MoneyClass) self.assertIsInstance(self.MoneyClass(12345, 'XXX'), self.MoneyClass) def test_new_instance_decimal_amount(self): self.assertIsInstance(self.MoneyClass(Decimal(12345), 'XXX'), self.MoneyClass) def test_new_instance_float_amount(self): self.assertIsInstance(self.MoneyClass(12345.12345, 'XXX'), self.MoneyClass) def test_new_instance_str_amount(self): self.assertIsInstance(self.MoneyClass('0', 'XXX'), self.MoneyClass) self.assertIsInstance(self.MoneyClass('12345.12345', 'XXX'), self.MoneyClass) def test_invalid_currency_none(self): with self.assertRaises(ValueError): money = self.MoneyClass('2.22', None) def test_invalid_currency_false(self): with self.assertRaises(ValueError): money = self.MoneyClass('2.22', False) def test_invalid_currency_empty(self): with self.assertRaises(ValueError): money = self.MoneyClass('2.22', '') def test_invalid_currency_code(self): with self.assertRaises(ValueError): money = self.MoneyClass('2.22', 'XX') with self.assertRaises(ValueError): money = self.MoneyClass('2.22', '123') with self.assertRaises(ValueError): money = self.MoneyClass('2.22', 'xxx') with self.assertRaises(ValueError): money = self.MoneyClass('2.22', '$') with self.assertRaises(ValueError): money = self.MoneyClass('2.22', 'US$') def test_invalid_amount(self): with self.assertRaises(ValueError): money = self.MoneyClass('twenty', 'XXX') def test_not_hashable(self): money = self.MoneyClass('2.22', 'XXX') self.assertFalse(isinstance(money, collections.Hashable)) class RepresentationsMixin(object): def test_repr(self): self.assertEqual(repr(self.MoneyClass('1234.567', 'XXX')), 'XXX 1234.567') def test_str(self): self.assertEqual(str(self.MoneyClass('1234.567', 'XXX')), 'XXX 1,234.57') class FormattingMixin(object): def setUp(self): self.money = self.MoneyClass('-1234.567', 'USD') def test_custom_format_padding(self): self.assertEqual(self.money.format('en_US', '¤000000.00'), '-$001234.57') def test_custom_format_custom_negative(self): self.assertEqual(self.money.format('en_US', '¤#,##0.00;<¤#,##0.00>'), '<$1,234.57>') def test_custom_format_grouping(self): self.assertEqual(self.money.format('en_US', '¤#,##0.00'), '-$1,234.57') self.assertEqual(self.money.format('de_DE', '#,##0.00 ¤'), '-1.234,57 $') self.assertEqual(self.money.format('en_US', '¤0.00'), '-$1234.57') self.assertEqual(self.money.format('de_DE', '0.00 ¤'), '-1234,57 $') def test_custom_format_decimals(self): self.assertEqual(self.money.format('en_US', '¤0.000'), '-$1234.567') self.assertEqual(self.money.format('en_US', '¤0'), '-$1235') def test_auto_format_locales(self): self.assertEqual(self.money.format('en_US'), '($1,234.57)') self.assertEqual(self.money.format('de_DE'), '-1.234,57\xa0$') self.assertEqual(self.money.format('es_CO'), '-1.234,57\xa0US$') def test_auto_format_locales_alias(self): self.assertEqual(self.money.format('en'), self.money.format('en_US')) self.assertEqual(self.money.format('de'), self.money.format('de_DE')) class ParserMixin(object): def test_loads_valid(self): self.assertEqual(self.MoneyClass.loads('XXX 2.22'), self.MoneyClass('2.22', 'XXX')) def test_loads_missing_currency(self): with self.assertRaises(ValueError): money = self.MoneyClass.loads('2.22') def test_loads_reversed_order(self): with 
self.assertRaises(ValueError): money = self.MoneyClass.loads('2.22 XXX') def test_loads_empty(self): with self.assertRaises(ValueError): money = self.MoneyClass.loads('') class NumericOperationsMixin(object): def test_lt_number(self): self.assertTrue(self.MoneyClass('2.22', 'XXX') < 3) self.assertTrue(self.MoneyClass('2.22', 'XXX') < 3.0) self.assertTrue(self.MoneyClass('2.22', 'XXX') < Decimal(3)) def test_lt_money(self): self.assertTrue(self.MoneyClass('2.219', 'XXX') < self.MoneyClass('2.22', 'XXX')) self.assertTrue(self.MoneyClass('-2.22', 'XXX') < self.MoneyClass('2.22', 'XXX')) def test_lt_none(self): with self.assertRaises(TypeError): self.MoneyClass(0, 'XXX') < None def test_le_number(self): self.assertTrue(self.MoneyClass('2.219', 'XXX') <= 3) self.assertTrue(self.MoneyClass('2.219', 'XXX') <= 3.0) self.assertTrue(self.MoneyClass('-2.22', 'XXX') <= Decimal('3')) def test_le_money(self): self.assertTrue(self.MoneyClass('2.219', 'XXX') <= self.MoneyClass('2.22', 'XXX')) self.assertTrue(self.MoneyClass('-2.22', 'XXX') <= self.MoneyClass('2.22', 'XXX')) self.assertTrue(self.MoneyClass('2.220', 'XXX') <= self.MoneyClass('2.22', 'XXX')) def test_le_none(self): with self.assertRaises(TypeError): self.MoneyClass(0, 'XXX') <= None def test_eq(self): self.assertEqual(self.MoneyClass('2', 'XXX'), self.MoneyClass('2', 'XXX')) self.assertEqual(self.MoneyClass('2.22000', 'XXX'), self.MoneyClass('2.22', 'XXX')) def test_ne(self): self.assertNotEqual(self.MoneyClass('0', 'XXX'), 0) self.assertNotEqual(self.MoneyClass('2', 'XXX'), 2) self.assertNotEqual(self.MoneyClass('2', 'XXX'), 'two') def test_ne_money(self): self.assertNotEqual(self.MoneyClass('2', 'XXX'), self.MoneyClass('3', 'XXX')) self.assertNotEqual(self.MoneyClass('2', 'XXX'), self.MoneyClass('2', 'YYY')) def test_ne_none(self): self.assertNotEqual(self.MoneyClass(0, 'XXX'), None) def test_gt_number(self): self.assertTrue(self.MoneyClass('2.22', 'XXX') > 2) self.assertTrue(self.MoneyClass('2.22', 'XXX') > Decimal('2')) def test_gt_money(self): self.assertTrue(self.MoneyClass('2.22', 'XXX') > self.MoneyClass('2.219', 'XXX')) self.assertTrue(self.MoneyClass('2.22', 'XXX') > self.MoneyClass('-2.22', 'XXX')) def test_gt_none(self): with self.assertRaises(TypeError): self.MoneyClass(0, 'XXX') > None def test_ge_number(self): self.assertTrue(self.MoneyClass('2', 'XXX') >= 1) self.assertTrue(self.MoneyClass('2', 'XXX') >= 2) self.assertTrue(self.MoneyClass('2', 'XXX') >= Decimal('1')) self.assertTrue(self.MoneyClass('2', 'XXX') >= Decimal('2')) def test_ge_money(self): self.assertTrue(self.MoneyClass('2.22', 'XXX') >= self.MoneyClass('2.219', 'XXX')) self.assertTrue(self.MoneyClass('2.22', 'XXX') >= self.MoneyClass('-2.22', 'XXX')) self.assertTrue(self.MoneyClass('2.22', 'XXX') >= self.MoneyClass('2.22', 'XXX')) def test_ge_none(self): with self.assertRaises(TypeError): self.MoneyClass(0, 'XXX') >= None def test_bool_true(self): self.assertTrue(self.MoneyClass('2.22', 'XXX')) self.assertTrue(self.MoneyClass('-1', 'XXX')) def test_bool_false(self): self.assertFalse(self.MoneyClass('0', 'XXX')) def test_add_int(self): result = self.MoneyClass('2', 'XXX') + 2 self.assertEqual(result, self.MoneyClass('4', 'XXX')) def test_add_decimal(self): result = self.MoneyClass('2', 'XXX') + Decimal('2') self.assertEqual(result, self.MoneyClass('4', 'XXX')) def test_add_money(self): result = self.MoneyClass('2', 'XXX') + self.MoneyClass('2', 'XXX') self.assertEqual(result, self.MoneyClass('4', 'XXX')) def test_add_none(self): with self.assertRaises(TypeError): 
self.MoneyClass(0, 'XXX') + None def test_radd_int(self): result = 2 + self.MoneyClass('2', 'XXX') self.assertEqual(result, self.MoneyClass('4', 'XXX')) def test_sub_int(self): result = self.MoneyClass('2', 'XXX') - 2 self.assertEqual(result, self.MoneyClass('0', 'XXX')) def test_sub_decimal(self): result = self.MoneyClass('2', 'XXX') - Decimal(2) self.assertEqual(result, self.MoneyClass('0', 'XXX')) def test_sub_money(self): result = self.MoneyClass('2', 'XXX') - self.MoneyClass('2', 'XXX') self.assertEqual(result, self.MoneyClass('0', 'XXX')) def test_sub_none(self): with self.assertRaises(TypeError): self.MoneyClass(0, 'XXX') - None def test_rsub_int(self): result = 2 - self.MoneyClass('2', 'XXX') self.assertEqual(result, self.MoneyClass('0', 'XXX')) def test_mul_int(self): result = self.MoneyClass('2', 'XXX') * 2 self.assertEqual(result, self.MoneyClass('4', 'XXX')) def test_mul_float(self): result = self.MoneyClass('2', 'XXX') * 2.0 self.assertEqual(result, self.MoneyClass('4', 'XXX')) def test_mul_decimal(self): result = self.MoneyClass('2', 'XXX') * Decimal(2) self.assertEqual(result, self.MoneyClass('4', 'XXX')) def test_mul_money(self): with self.assertRaises(TypeError): self.MoneyClass('2', 'XXX') * self.MoneyClass('2', 'XXX') def test_mul_none(self): with self.assertRaises(TypeError): self.MoneyClass(0, 'XXX') * None def test_rmul_int(self): result = 2 * self.MoneyClass('2', 'XXX') self.assertEqual(result, self.MoneyClass('4', 'XXX')) def test_truediv_int(self): result = self.MoneyClass('2.22', 'XXX') / 2 self.assertEqual(result, self.MoneyClass('1.11', 'XXX')) def test_truediv_decimal(self): result = self.MoneyClass('2.22', 'XXX') / Decimal(2) self.assertEqual(result, self.MoneyClass('1.11', 'XXX')) def test_truediv_money(self): result = self.MoneyClass('2', 'XXX') / self.MoneyClass('2', 'XXX') self.assertEqual(result, Decimal('1')) def test_truediv_none(self): with self.assertRaises(TypeError): self.MoneyClass(2, 'XXX') / None def test_truediv_zero(self): with self.assertRaises(ZeroDivisionError): self.MoneyClass(2, 'XXX') / 0 def test_floordiv_number(self): result = self.MoneyClass('2.22', 'XXX') // 2 self.assertEqual(result, self.MoneyClass('1', 'XXX')) def test_floordiv_money(self): result = self.MoneyClass('2.22', 'XXX') // self.MoneyClass('2', 'XXX') self.assertEqual(result, Decimal('1')) def test_floordiv_none(self): with self.assertRaises(TypeError): self.MoneyClass(2, 'XXX') // None def test_floordiv_zero(self): with self.assertRaises(ZeroDivisionError): self.MoneyClass(2, 'XXX') // 0 def test_mod_number(self): result = self.MoneyClass('2.22', 'XXX') % 2 self.assertEqual(result, self.MoneyClass('0.22', 'XXX')) def test_mod_money(self): with self.assertRaises(TypeError): self.MoneyClass('2.22', 'XXX') % self.MoneyClass('2', 'XXX') def test_mod_none(self): with self.assertRaises(TypeError): self.MoneyClass(2, 'XXX') % None def test_mod_zero(self): with self.assertRaises(ZeroDivisionError): self.MoneyClass(2, 'XXX') % 0 def test_divmod_number(self): whole, remainder = divmod(self.MoneyClass('2.22', 'XXX'), 2) self.assertEqual(whole, self.MoneyClass('1', 'XXX')) self.assertEqual(remainder, self.MoneyClass('0.22', 'XXX')) def test_divmod_money(self): whole, remainder = divmod(self.MoneyClass('2.22', 'XXX'), self.MoneyClass('2', 'XXX')) self.assertEqual(whole, Decimal('1')) self.assertEqual(remainder, Decimal('0.22')) def test_divmod_none(self): with self.assertRaises(TypeError): divmod(self.MoneyClass(2, 'XXX'), None) def test_divmod_zero(self): with 
self.assertRaises(ZeroDivisionError): divmod(self.MoneyClass(2, 'XXX'), 0) def test_pow_number(self): result = self.MoneyClass('3', 'XXX') ** 2 self.assertEqual(result, self.MoneyClass('9', 'XXX')) def test_pow_money(self): with self.assertRaises(TypeError): self.MoneyClass('3', 'XXX') ** self.MoneyClass('2', 'XXX') def test_pow_none(self): with self.assertRaises(TypeError): self.MoneyClass(0, 'XXX') ** None def test_neg(self): result = -self.MoneyClass('2.22', 'XXX') self.assertEqual(result, self.MoneyClass('-2.22', 'XXX')) def test_pos(self): result = +self.MoneyClass('2.22', 'XXX') self.assertEqual(result, self.MoneyClass('2.22', 'XXX')) def test_abs(self): result = abs(self.MoneyClass('-2.22', 'XXX')) self.assertEqual(result, self.MoneyClass('2.22', 'XXX')) def test_int(self): self.assertEqual(int(self.MoneyClass('-2.22', 'XXX')), -2) self.assertEqual(int(self.MoneyClass('2.22', 'XXX')), 2) def test_float(self): self.assertEqual(float(self.MoneyClass('-2.22', 'XXX')), -2.22) self.assertEqual(float(self.MoneyClass('2.22', 'XXX')), 2.22) def test_round(self): self.assertEqual(round(self.MoneyClass('-1.49', 'XXX')), self.MoneyClass('-1', 'XXX')) self.assertEqual(round(self.MoneyClass('1.50', 'XXX')), self.MoneyClass('2', 'XXX')) self.assertEqual(round(self.MoneyClass('1.234', 'XXX'), 2), self.MoneyClass('1.23', 'XXX')) class UnaryOperationsReturnNewMixin(object): def setUp(self): self.money = self.MoneyClass(2, 'XXX') def test_pos(self): self.assertIsNot(+self.money, self.money) def test_abs(self): self.assertIsNot(abs(self.money), self.money) def test_round(self): self.assertIsNot(round(self.money), self.money) class LeftmostTypePrevailsMixin(object): def setUp(self): if self.MoneyClass.__name__ == 'Money': self.OtherClass = XMoney if self.MoneyClass.__name__ == 'XMoney': self.OtherClass = Money self.home = self.MoneyClass(2, 'XXX') self.visitor = self.OtherClass(2, 'XXX') def test_add(self): result = self.home + self.visitor self.assertEqual(result.__class__, self.MoneyClass) def test_add_other(self): result = self.visitor + self.home self.assertEqual(result.__class__, self.OtherClass) def test_sub(self): result = self.home - self.visitor self.assertEqual(result.__class__, self.MoneyClass) def test_sub_other(self): result = self.visitor - self.home self.assertEqual(result.__class__, self.OtherClass)
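

# Hedged sketch (editorial addition): how these mixins are typically composed
# into concrete test cases. The class names below are illustrative; a concrete
# case only needs to mix in unittest.TestCase and pin MoneyClass.
class TestMoneyNumericOperations(NumericOperationsMixin, unittest.TestCase):
    MoneyClass = Money


class TestXMoneyNumericOperations(NumericOperationsMixin, unittest.TestCase):
    MoneyClass = XMoney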
py
b417b0986749dae483fac720f5a10e990839ab6a
#!/usr/bin/env python3 import os import json import logging import hashlib import requests import subprocess import multiprocessing import logging.handlers import inotify.adapters import inotify.constants def setup_dirs(basedir): if not(os.path.exists(basedir) and os.path.isdir(basedir)): os.mkdir(basedir) def setup_logging(basedir): log_path = os.path.expanduser(basedir + '/app.log') logger = logging.getLogger() handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=131072, backupCount=3) formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.DEBUG) def get_config(basedir): config = { 'paths': set(), 'extensions': set() } with open(os.path.expanduser(basedir + '/config.json')) as data: user_config = json.load(data) if 'vt_api_key' in user_config and type(user_config['vt_api_key']) is str: config['vt_api_key'] = user_config['vt_api_key'] else: raise Exception('Failed to find virustotal API key') if 'paths' in user_config and type(user_config['paths']) is list: for path in user_config['paths']: path = os.path.expanduser(path.encode()) if not os.path.isabs(path): logging.warn('"%s" is not an absolute path and was ignored' % path) elif not os.path.isdir(path): logging.warn('"%s" is not a directory and was ignored' % path) else: config['paths'].add(path) if not config['paths']: logging.info('Using default paths') config['paths'] = {os.path.expanduser('~/Downloads').encode()} if 'extensions' in user_config and type(user_config['extensions']) is list: for ext in user_config['extensions']: if ext[0] == '.': ext = ext[1:] if ext == '': continue config['extensions'].add(ext.lower()) if not config['extensions']: logging.info('Using default extensions') config['extensions'] = { 'exe', 'msi', 'dll', 'scr', 'cpl', 'apk', 'jar', 'swf', 'vbs', 'wsf', 'zip', 'rar', 'iso', 'pdf', 'doc', 'xls', 'ppt', 'docm', 'dotm', 'xlsm', 'xltm', 'xlam', 'pptm', 'potm', 'ppam', 'ppsm' } return config def notify(title, message, icon='', expires=15): subprocess.call(['notify-send', title, message, '-t', str(expires * 1000), '-i', icon]) def check_file(path, vt_api_key): hash = hashlib.sha256() try: with open(path, 'rb') as f: while True: data = f.read(4096) if not data: break hash.update(data) response = requests.get('https://www.virustotal.com/vtapi/v2/file/report', params={ 'apikey': vt_api_key, 'resource': hash.hexdigest() }) if response.status_code != 200: notify('Malware check failed', 'Failed to check %s, probably because the rate limits were exceeded' % path, 'dialog-information') return jsonresponse = response.json() if 'positives' in jsonresponse and 'total' in jsonresponse: positives = jsonresponse['positives'] total = jsonresponse['total'] for av, res in jsonresponse['scans'].items(): if res['detected']: virusname = res['result'] break if positives > 0: notify('Potential malware detected', 'File: %s\n' 'Malware family: %s (%s)\n' 'Detection Ratio: %i/%i (%.2f%%)' % (path, virusname, av, positives, total, 100 * positives/total), 'dialog-warning', 1200) except requests.exceptions.RequestException as e: logging.warn('Unable to check file "%s": %s' % (path, str(e))) except IOError as e: logging.warn('Unable to read file "%s": %s' % (path, str(e))) def monitor_dirs(paths, scan_exts, vt_api_key): watcher = inotify.adapters.InotifyTrees(paths=paths, mask=inotify.constants.IN_CLOSE_WRITE | inotify.constants.IN_MOVED_TO) try: for event in watcher.event_gen(): if event is not None and event[0].mask in [ 
inotify.constants.IN_CLOSE_WRITE, inotify.constants.IN_MOVED_TO ]: filename = event[3].decode('utf-8') ext = os.path.splitext(filename)[1][1:].lower() if ext in scan_exts: path = os.path.join(event[2].decode('utf-8'), filename) checker = multiprocessing.Process(target=check_file, args=(path, vt_api_key)) checker.start() finally: for path in paths: watcher.remove_watch(path) def main(): basedir = os.path.expanduser('~/.vtlivescan') setup_dirs(basedir) setup_logging(basedir) config = get_config(basedir) monitor_dirs(list(config['paths']), config['extensions'], config['vt_api_key']) if __name__ == '__main__': main()
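
# Hedged sketch (editorial addition): the shape of ~/.vtlivescan/config.json
# that get_config() above expects. All values below are placeholders.
EXAMPLE_CONFIG = '''{
    "vt_api_key": "YOUR-VIRUSTOTAL-API-KEY",
    "paths": ["~/Downloads", "~/Desktop"],
    "extensions": ["exe", "zip", "pdf"]
}'''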
py
b417b1bf5a5452e2a8a10aaebeda68db9dfe1702
import argparse import time from reskin_sensor import ReSkinProcess, ReSkinSettings if __name__ == '__main__': parser = argparse.ArgumentParser(description='Test code to run a ReSkin streaming process in the background. Allows data to be collected without code blocking') parser.add_argument('-p','--port', type=str, help='port to which the microcontroller is connected', required=True) parser.add_argument('-b','--baudrate', type=str, help='baudrate at which the microcontroller is streaming data', default=115200) parser.add_argument('-n','--num_mags', type=int, help='number of magentometers on the sensor board', default=5) args = parser.parse_args() test_settings = ReSkinSettings( num_mags=args.num_mags, port=args.port, baudrate=args.baudrate, burst_mode=True, device_id=1 ) # Create sensor stream sensor_stream = ReSkinProcess(test_settings) # Start sensor stream sensor_stream.start() time.sleep(0.1) # Buffer data for two seconds and return buffer if sensor_stream.is_alive(): sensor_stream.start_buffering() buffer_start = time.time() time.sleep(2.0) sensor_stream.pause_buffering() buffer_stop = time.time() # Get buffered data buffered_data = sensor_stream.get_buffer() if buffered_data is not None: print('Time elapsed: {}, Number of datapoints: {}'.format( buffer_stop - buffer_start, len(buffered_data))) # Get a specified number of samples test_samples = sensor_stream.get_data(num_samples=5) print('Columns: ', ', \t'.join( ['T{0}, \tBx{0}, \tBy{0}, \tBz{0}'.format(ind) for ind in range(test_settings.num_mags)])) for sid, sample in enumerate(test_samples): print('Sample {}: '.format(sid+1) + str(['{:.2f}'.format(d) for d in sample.data])) # Pause sensor stream sensor_stream.pause_streaming() sensor_stream.join()
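

# Hedged sketch (editorial addition): the same buffering workflow as above,
# wrapped in a helper so it can be reused without the argparse front end
# (e.g. from a notebook). The port passed in is a placeholder for your device.
def stream_for(port, seconds=2.0, num_mags=5, baudrate=115200):
    settings = ReSkinSettings(num_mags=num_mags, port=port, baudrate=baudrate,
                              burst_mode=True, device_id=1)
    sensor = ReSkinProcess(settings)
    sensor.start()
    time.sleep(0.1)
    sensor.start_buffering()
    time.sleep(seconds)
    sensor.pause_buffering()
    data = sensor.get_buffer()
    sensor.pause_streaming()
    sensor.join()
    return data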
py
b417b2fe7e17dd87dbfcbafd32c0745b96e77477
# see https://pythonhosted.org/PyDrive/quickstart.html
# see https://pythonhosted.org/PyDrive/oauth.html#sample-settings-yaml
# see https://github.com/googledrive/PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
import webbrowser
import datetime

# Create local webserver and automatically handle authentication.
# The standard webbrowser gave an error; JonB found the solution
if not hasattr(webbrowser, '_open'):
    webbrowser._open = webbrowser.open

    def wbopen(url, *args, **kwargs):
        return webbrowser._open(url)

    webbrowser.open = wbopen

gauth = GoogleAuth()
gauth.LocalWebserverAuth()

# upload
drive = GoogleDrive(gauth)

# Create GoogleDriveFile instance.
file1 = drive.CreateFile(
    {'title': 'shopyo__db_{}.db'.format(
        datetime.datetime.now().strftime("%d_%m_%Y__%H_%M_%S"))})
# Set the content of the file from the local test.db file.
file1.SetContentFile('test.db')
file1.Upload()
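
def download_latest_backup(local_name='restored_test.db'):
    """Hedged sketch (editorial addition): pull a previously uploaded backup
    back down with the same authenticated drive handle. The query string and
    local filename are illustrative; ListFile() and GetContentFile() are
    standard PyDrive calls (see the quickstart links above)."""
    backups = drive.ListFile(
        {'q': "title contains 'shopyo__db_' and trashed=false"}).GetList()
    if backups:
        backups[0].GetContentFile(local_name)
        return backups[0]['title']
    return None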
py
b417b3292e75b31ba55207ab5d918804101bf1b7
from __future__ import print_function import gdbremote_testcase import signal from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil class TestGdbRemoteAbort(gdbremote_testcase.GdbRemoteTestCaseBase): mydir = TestBase.compute_mydir(__file__) def inferior_abort_received(self): procs = self.prep_debug_monitor_and_inferior(inferior_args=["abort"]) self.assertIsNotNone(procs) self.test_sequence.add_log_lines(["read packet: $vCont;c#a8", {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$", "capture": {1: "hex_exit_code"}}, ], True) context = self.expect_gdbremote_sequence() self.assertIsNotNone(context) hex_exit_code = context.get("hex_exit_code") self.assertIsNotNone(hex_exit_code) self.assertEqual(int(hex_exit_code, 16), lldbutil.get_signal_number('SIGABRT')) @debugserver_test def test_inferior_abort_received_debugserver(self): self.init_debugserver_test() self.build() self.inferior_abort_received() @llgs_test # std::abort() on <= API 16 raises SIGSEGV - b.android.com/179836 @expectedFailureAndroid(api_levels=list(range(16 + 1))) def test_inferior_abort_received_llgs(self): self.init_llgs_test() self.build() self.inferior_abort_received()
py
b417b408adb3d7cfcd275b99e5fa9a61495a5a57
# -*- coding: utf-8 -*- """ Test module for actions @author: Charlie Lewis """ import logging from faucetconfgetsetter import get_sdn_connect from poseidon_core.helpers.actions import Actions from poseidon_core.helpers.config import Config from poseidon_core.helpers.endpoint import endpoint_factory logger = logging.getLogger('test') def test_actions(): """ Tests Actions """ endpoint = endpoint_factory('foo') endpoint.endpoint_data = { 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'} s = get_sdn_connect(logger) a = Actions(endpoint, s.sdnc) a.mirror_endpoint() a.unmirror_endpoint() a.coprocess_endpoint() a.uncoprocess_endpoint() def test_actions_nosdn(): """ Tests Actions with no SDN controller """ endpoint = endpoint_factory('foo') endpoint.endpoint_data = { 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'} s = get_sdn_connect(logger) s.sdnc = None a = Actions(endpoint, s.sdnc) a.mirror_endpoint() a.unmirror_endpoint() a.coprocess_endpoint() a.uncoprocess_endpoint()
py
b417b4a94d20549894006058b465e405ec184e5e
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 5/15/20 4:49 PM # @File : grover.py # qubit number=4 # total number=16 import cirq import cirq.google as cg from typing import Optional import sys from math import log2 import numpy as np #thatsNoCode from cirq.contrib.svg import SVGCircuit # Symbols for the rotation angles in the QAOA circuit. def make_circuit(n: int, input_qubit): c = cirq.Circuit() # circuit begin c.append(cirq.H.on(input_qubit[0])) # number=1 c.append(cirq.H.on(input_qubit[1])) # number=2 c.append(cirq.H.on(input_qubit[2])) # number=3 c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=9 c.append(cirq.X.on(input_qubit[2])) # number=10 c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=11 c.append(cirq.H.on(input_qubit[3])) # number=4 c.append(cirq.Y.on(input_qubit[3])) # number=5 c.append(cirq.CNOT.on(input_qubit[1],input_qubit[3])) # number=12 c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=7 c.append(cirq.H.on(input_qubit[0])) # number=13 c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=14 c.append(cirq.H.on(input_qubit[0])) # number=15 # circuit end c.append(cirq.measure(*input_qubit, key='result')) return c def bitstring(bits): return ''.join(str(int(b)) for b in bits) if __name__ == '__main__': qubit_count = 4 input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)] circuit = make_circuit(qubit_count,input_qubits) circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap') circuit_sample_count =2000 simulator = cirq.Simulator() result = simulator.run(circuit, repetitions=circuit_sample_count) frequencies = result.histogram(key='result', fold_func=bitstring) writefile = open("../data/startCirq492.csv","w+") print(format(frequencies),file=writefile) print("results end", file=writefile) print(circuit.__len__(), file=writefile) print(circuit,file=writefile) writefile.close()
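
# Hedged helper (editorial addition): build and return the circuit above without
# the Sycamore optimisation or simulation steps, e.g. to inspect gate counts
# while editing make_circuit(). The default of four qubits matches qubit_count.
def preview_circuit(n=4):
    qubits = [cirq.GridQubit(i, 0) for i in range(n)]
    return make_circuit(n, qubits)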
py
b417b6acadd66198a48ea4e48640c78d6889ba3b
# ============================================================================= # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Tests for decode_proto op.""" # Python3 preparedness imports. from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from google.protobuf import text_format from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.kernel_tests.proto import proto_op_test_base as test_base from tensorflow.python.kernel_tests.proto import test_example_pb2 class DecodeProtoOpTestBase(test_base.ProtoOpTestBase, parameterized.TestCase): """Base class for testing proto decoding ops.""" def __init__(self, decode_module, methodName='runTest'): # pylint: disable=invalid-name """DecodeProtoOpTestBase initializer. Args: decode_module: a module containing the `decode_proto_op` method methodName: the name of the test method (same as for test.TestCase) """ super(DecodeProtoOpTestBase, self).__init__(methodName) self._decode_module = decode_module def _compareValues(self, fd, vs, evs): """Compare lists/arrays of field values.""" if len(vs) != len(evs): self.fail('Field %s decoded %d outputs, expected %d' % (fd.name, len(vs), len(evs))) for i, ev in enumerate(evs): # Special case fuzzy match for float32. TensorFlow seems to mess with # MAX_FLT slightly and the test doesn't work otherwise. # TODO(nix): ask on TF list about why MAX_FLT doesn't pass through. if fd.cpp_type == fd.CPPTYPE_FLOAT: # Numpy isclose() is better than assertIsClose() which uses an absolute # value comparison. self.assertTrue( np.isclose(vs[i], ev), 'expected %r, actual %r' % (ev, vs[i])) elif fd.cpp_type == fd.CPPTYPE_STRING: # In Python3 string tensor values will be represented as bytes, so we # reencode the proto values to match that. self.assertEqual(vs[i], ev.encode('ascii')) else: # Doubles and other types pass through unscathed. self.assertEqual(vs[i], ev) def _compareProtos(self, batch_shape, sizes, fields, field_dict): """Compare protos of type TestValue. Args: batch_shape: the shape of the input tensor of serialized messages. sizes: int matrix of repeat counts returned by decode_proto fields: list of test_example_pb2.FieldSpec (types and expected values) field_dict: map from field names to decoded numpy tensors of values """ # Check that expected values match. for field in fields: values = field_dict[field.name] self.assertEqual(dtypes.as_dtype(values.dtype), field.dtype) if 'ext_value' in field.name: fd = test_example_pb2.PrimitiveValue() else: fd = field.value.DESCRIPTOR.fields_by_name[field.name] # Values has the same shape as the input plus an extra # dimension for repeats. self.assertEqual(list(values.shape)[:-1], batch_shape) # Nested messages are represented as TF strings, requiring # some special handling. 
if field.name == 'message_value' or 'ext_value' in field.name: vs = [] for buf in values.flat: msg = test_example_pb2.PrimitiveValue() msg.ParseFromString(buf) vs.append(msg) if 'ext_value' in field.name: evs = field.value.Extensions[test_example_pb2.ext_value] else: evs = getattr(field.value, field.name) if len(vs) != len(evs): self.fail('Field %s decoded %d outputs, expected %d' % (fd.name, len(vs), len(evs))) for v, ev in zip(vs, evs): self.assertEqual(v, ev) continue tf_type_to_primitive_value_field = { dtypes.bool: 'bool_value', dtypes.float32: 'float_value', dtypes.float64: 'double_value', dtypes.int8: 'int8_value', dtypes.int32: 'int32_value', dtypes.int64: 'int64_value', dtypes.string: 'string_value', dtypes.uint8: 'uint8_value', dtypes.uint32: 'uint32_value', dtypes.uint64: 'uint64_value', } tf_field_name = tf_type_to_primitive_value_field.get(field.dtype) if tf_field_name is None: self.fail('Unhandled tensorflow type %d' % field.dtype) self._compareValues(fd, values.flat, getattr(field.value, tf_field_name)) def _runDecodeProtoTests(self, fields, case_sizes, batch_shape, batch, message_type, message_format, sanitize, force_disordered=False): """Run decode tests on a batch of messages. Args: fields: list of test_example_pb2.FieldSpec (types and expected values) case_sizes: expected sizes array batch_shape: the shape of the input tensor of serialized messages batch: list of serialized messages message_type: descriptor name for messages message_format: format of messages, 'text' or 'binary' sanitize: whether to sanitize binary protobuf inputs force_disordered: whether to force fields encoded out of order. """ if force_disordered: # Exercise code path that handles out-of-order fields by prepending extra # fields with tag numbers higher than any real field. Note that this won't # work with sanitization because that forces reserialization using a # trusted decoder and encoder. assert not sanitize extra_fields = test_example_pb2.ExtraFields() extra_fields.string_value = 'IGNORE ME' extra_fields.bool_value = False extra_msg = extra_fields.SerializeToString() batch = [extra_msg + msg for msg in batch] # Numpy silently truncates the strings if you don't specify dtype=object. batch = np.array(batch, dtype=object) batch = np.reshape(batch, batch_shape) field_names = [f.name for f in fields] output_types = [f.dtype for f in fields] with self.cached_session() as sess: sizes, vtensor = self._decode_module.decode_proto( batch, message_type=message_type, field_names=field_names, output_types=output_types, message_format=message_format, sanitize=sanitize) vlist = sess.run([sizes] + vtensor) sizes = vlist[0] # Values is a list of tensors, one for each field. value_tensors = vlist[1:] # Check that the repeat sizes are correct. self.assertTrue( np.all(np.array(sizes.shape) == batch_shape + [len(field_names)])) # Check that the decoded sizes match the expected sizes. 
self.assertEqual(len(sizes.flat), len(case_sizes)) self.assertTrue( np.all(sizes.flat == np.array( case_sizes, dtype=np.int32))) field_dict = dict(zip(field_names, value_tensors)) self._compareProtos(batch_shape, sizes, fields, field_dict) @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters()) def testBinary(self, case): batch = [value.SerializeToString() for value in case.values] self._runDecodeProtoTests( case.fields, case.sizes, list(case.shapes), batch, 'tensorflow.contrib.proto.TestValue', 'binary', sanitize=False) @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters()) def testBinaryDisordered(self, case): batch = [value.SerializeToString() for value in case.values] self._runDecodeProtoTests( case.fields, case.sizes, list(case.shapes), batch, 'tensorflow.contrib.proto.TestValue', 'binary', sanitize=False, force_disordered=True) @parameterized.named_parameters( *test_base.ProtoOpTestBase.named_parameters(extension=False)) def testPacked(self, case): # Now try with the packed serialization. # # We test the packed representations by loading the same test case using # PackedTestValue instead of TestValue. To do this we rely on the text # format being the same for packed and unpacked fields, and reparse the # test message using the packed version of the proto. packed_batch = [ # Note: float_format='.17g' is necessary to ensure preservation of # doubles and floats in text format. text_format.Parse( text_format.MessageToString(value, float_format='.17g'), test_example_pb2.PackedTestValue()).SerializeToString() for value in case.values ] self._runDecodeProtoTests( case.fields, case.sizes, list(case.shapes), packed_batch, 'tensorflow.contrib.proto.PackedTestValue', 'binary', sanitize=False) @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters()) def testText(self, case): # Note: float_format='.17g' is necessary to ensure preservation of # doubles and floats in text format. text_batch = [ text_format.MessageToString( value, float_format='.17g') for value in case.values ] self._runDecodeProtoTests( case.fields, case.sizes, list(case.shapes), text_batch, 'tensorflow.contrib.proto.TestValue', 'text', sanitize=False) @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters()) def testSanitizerGood(self, case): batch = [value.SerializeToString() for value in case.values] self._runDecodeProtoTests( case.fields, case.sizes, list(case.shapes), batch, 'tensorflow.contrib.proto.TestValue', 'binary', sanitize=True) @parameterized.parameters((False), (True)) def testCorruptProtobuf(self, sanitize): corrupt_proto = 'This is not a binary protobuf' # Numpy silently truncates the strings if you don't specify dtype=object. batch = np.array(corrupt_proto, dtype=object) msg_type = 'tensorflow.contrib.proto.TestCase' field_names = ['sizes'] field_types = [dtypes.int32] with self.assertRaisesRegexp( errors.DataLossError, 'Unable to parse binary protobuf' '|Failed to consume entire buffer'): self.evaluate( self._decode_module.decode_proto( batch, message_type=msg_type, field_names=field_names, output_types=field_types, sanitize=sanitize))
py
b417b6ad2109145c7231140f4881dce16a1d80a3
import numpy as np def compute_errors(gt, pred): """Compute error metrics using predicted and ground truth depths. From https://github.com/mrharicot/monodepth/blob/master/utils/evaluation_utils.py """ thresh = np.maximum((gt / pred), (pred / gt)) a1 = (thresh < 1.25).mean() a2 = (thresh < 1.25 ** 2).mean() a3 = (thresh < 1.25 ** 3).mean() rmse = (gt - pred) ** 2 rmse = np.sqrt(rmse.mean()) rmse_log = (np.log(gt) - np.log(pred)) ** 2 rmse_log = np.sqrt(rmse_log.mean()) abs_rel = np.mean(np.abs(gt - pred) / gt) sq_rel = np.mean(((gt - pred) ** 2) / gt) return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 def compute_scale_and_shift(prediction, target, mask): """From https://gist.github.com/ranftlr/a1c7a24ebb24ce0e2f2ace5bce917022""" # system matrix: A = [[a_00, a_01], [a_10, a_11]] a_00 = np.sum(mask * prediction * prediction) a_01 = np.sum(mask * prediction) a_11 = np.sum(mask) # right hand side: b = [b_0, b_1] b_0 = np.sum(mask * prediction * target) b_1 = np.sum(mask * target) x_0 = np.zeros_like(b_0) x_1 = np.zeros_like(b_1) det = a_00 * a_11 - a_01 * a_01 # A needs to be a positive definite matrix. valid = det > 0 x_0[valid] = (a_11[valid] * b_0[valid] - a_01[valid] * b_1[valid]) / det[valid] x_1[valid] = (-a_01[valid] * b_0[valid] + a_00[valid] * b_1[valid]) / det[valid] return x_0, x_1
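

# Hedged sketch (editorial addition): the error metrics above on a small
# synthetic ground-truth/prediction pair; the numbers are illustrative only.
if __name__ == '__main__':
    gt = np.array([1.0, 2.0, 4.0, 8.0])
    pred = np.array([1.1, 1.9, 4.2, 7.5])
    abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 = compute_errors(gt, pred)
    print('abs_rel=%.3f  rmse=%.3f  delta<1.25=%.2f' % (abs_rel, rmse, a1))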
py
b417b6d7fb8dc3cbc2416888a58502771413d95b
# Copyright 2013 eBay Inc. # Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import mock import webob from cinder.api.contrib import qos_specs_manage from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_notifier def stub_qos_specs(id): res = dict(name='qos_specs_' + str(id)) res.update(dict(consumer='back-end')) res.update(dict(id=str(id))) specs = {"key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} res.update(dict(specs=specs)) return objects.QualityOfServiceSpecs(**res) def stub_qos_associates(id): return [{ 'association_type': 'volume_type', 'name': 'FakeVolTypeName', 'id': fake.VOLUME_TYPE_ID}] def return_qos_specs_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): return [ stub_qos_specs(fake.QOS_SPEC_ID), stub_qos_specs(fake.QOS_SPEC2_ID), stub_qos_specs(fake.QOS_SPEC3_ID), ] def return_qos_specs_get_qos_specs(context, id): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.QoSSpecsNotFound(specs_id=id) return stub_qos_specs(id) def return_qos_specs_delete(context, id, force): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.QoSSpecsNotFound(specs_id=id) elif id == fake.IN_USE_ID: raise exception.QoSSpecsInUse(specs_id=id) pass def return_qos_specs_delete_keys(context, id, keys): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.QoSSpecsNotFound(specs_id=id) if 'foo' in keys: raise exception.QoSSpecsKeyNotFound(specs_id=id, specs_key='foo') def return_qos_specs_update(context, id, specs): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.QoSSpecsNotFound(specs_id=id) elif id == fake.INVALID_ID: raise exception.InvalidQoSSpecs(reason=id) elif id == fake.UPDATE_FAILED_ID: raise exception.QoSSpecsUpdateFailed(specs_id=id, qos_specs=specs) pass def return_qos_specs_create(context, name, specs): if name == 'qos_spec_%s' % fake.ALREADY_EXISTS_ID: raise exception.QoSSpecsExists(specs_id=name) elif name == 'qos_spec_%s' % fake.ACTION_FAILED_ID: raise exception.QoSSpecsCreateFailed(name=id, qos_specs=specs) elif name == 'qos_spec_%s' % fake.INVALID_ID: raise exception.InvalidQoSSpecs(reason=name) return objects.QualityOfServiceSpecs(name=name, specs=specs, consumer='back-end', id=fake.QOS_SPEC_ID) def return_get_qos_associations(context, id): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.QoSSpecsNotFound(specs_id=id) elif id == fake.RAISE_ID: raise exception.CinderException() return stub_qos_associates(id) def return_associate_qos_specs(context, id, type_id): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.QoSSpecsNotFound(specs_id=id) elif id == fake.ACTION_FAILED_ID: raise exception.QoSSpecsAssociateFailed(specs_id=id, type_id=type_id) elif id == fake.ACTION2_FAILED_ID: raise exception.QoSSpecsDisassociateFailed(specs_id=id, 
type_id=type_id) if type_id == fake.WILL_NOT_BE_FOUND_ID: raise exception.VolumeTypeNotFound( volume_type_id=type_id) pass def return_disassociate_all(context, id): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.QoSSpecsNotFound(specs_id=id) elif id == fake.ACTION2_FAILED_ID: raise exception.QoSSpecsDisassociateFailed(specs_id=id, type_id=None) @ddt.ddt class QoSSpecManageApiTest(test.TestCase): def _create_qos_specs(self, name, values=None): """Create a transfer object.""" if values: specs = dict(name=name, qos_specs=values) else: specs = {'name': name, 'consumer': 'back-end', 'specs': { 'key1': 'value1', 'key2': 'value2'}} return db.qos_specs_create(self.ctxt, specs)['id'] def setUp(self): super(QoSSpecManageApiTest, self).setUp() self.flags(host='fake') self.controller = qos_specs_manage.QoSSpecsController() self.ctxt = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID, is_admin=True) self.user_ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) self.qos_id1 = self._create_qos_specs("Qos_test_1") self.qos_id2 = self._create_qos_specs("Qos_test_2") self.qos_id3 = self._create_qos_specs("Qos_test_3") self.qos_id4 = self._create_qos_specs("Qos_test_4") @mock.patch('cinder.volume.qos_specs.get_all_specs', side_effect=return_qos_specs_get_all) def test_index(self, mock_get_all_specs): req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID) res = self.controller.index(req) self.assertEqual(3, len(res['qos_specs'])) names = set() for item in res['qos_specs']: self.assertEqual('value1', item['specs']['key1']) names.add(item['name']) expected_names = ['qos_specs_%s' % fake.QOS_SPEC_ID, 'qos_specs_%s' % fake.QOS_SPEC2_ID, 'qos_specs_%s' % fake.QOS_SPEC3_ID] self.assertEqual(set(expected_names), names) def test_index_with_limit(self): url = '/v2/%s/qos-specs?limit=2' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(2, len(res['qos_specs'])) self.assertEqual(self.qos_id4, res['qos_specs'][0]['id']) self.assertEqual(self.qos_id3, res['qos_specs'][1]['id']) expect_next_link = ('http://localhost/v2/%s/qos-specs?limit' '=2&marker=%s') % ( fake.PROJECT_ID, res['qos_specs'][1]['id']) self.assertEqual(expect_next_link, res['qos_specs_links'][0]['href']) def test_index_with_offset(self): url = '/v2/%s/qos-specs?offset=1' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(3, len(res['qos_specs'])) def test_index_with_offset_out_of_range(self): url = '/v2/%s/qos-specs?offset=356576877698707' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_index_with_limit_and_offset(self): url = '/v2/%s/qos-specs?limit=2&offset=1' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(2, len(res['qos_specs'])) self.assertEqual(self.qos_id3, res['qos_specs'][0]['id']) self.assertEqual(self.qos_id2, res['qos_specs'][1]['id']) def test_index_with_marker(self): url = '/v2/%s/qos-specs?marker=%s' % (fake.PROJECT_ID, self.qos_id4) req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(3, len(res['qos_specs'])) def test_index_with_filter(self): url = '/v2/%s/qos-specs?id=%s' % (fake.PROJECT_ID, self.qos_id4) req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = 
self.controller.index(req) self.assertEqual(1, len(res['qos_specs'])) self.assertEqual(self.qos_id4, res['qos_specs'][0]['id']) def test_index_with_sort_keys(self): url = '/v2/%s/qos-specs?sort=id' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(4, len(res['qos_specs'])) expect_result = [self.qos_id1, self.qos_id2, self.qos_id3, self.qos_id4] expect_result.sort(reverse=True) self.assertEqual(expect_result[0], res['qos_specs'][0]['id']) self.assertEqual(expect_result[1], res['qos_specs'][1]['id']) self.assertEqual(expect_result[2], res['qos_specs'][2]['id']) self.assertEqual(expect_result[3], res['qos_specs'][3]['id']) def test_index_with_sort_keys_and_sort_dirs(self): url = '/v2/%s/qos-specs?sort=id:asc' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(4, len(res['qos_specs'])) expect_result = [self.qos_id1, self.qos_id2, self.qos_id3, self.qos_id4] expect_result.sort() self.assertEqual(expect_result[0], res['qos_specs'][0]['id']) self.assertEqual(expect_result[1], res['qos_specs'][1]['id']) self.assertEqual(expect_result[2], res['qos_specs'][2]['id']) self.assertEqual(expect_result[3], res['qos_specs'][3]['id']) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.delete', side_effect=return_qos_specs_delete) def test_qos_specs_delete(self, mock_qos_delete, mock_qos_get_specs): req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % ( fake.PROJECT_ID, fake.QOS_SPEC_ID)) notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.controller.delete(req, fake.QOS_SPEC_ID) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.delete', side_effect=return_qos_specs_delete) def test_qos_specs_delete_not_found(self, mock_qos_delete, mock_qos_get_specs): notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) self.assertRaises(exception.QoSSpecsNotFound, self.controller.delete, req, fake.WILL_NOT_BE_FOUND_ID) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.delete', side_effect=return_qos_specs_delete) def test_qos_specs_delete_inuse(self, mock_qos_delete, mock_qos_get_specs): req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % ( fake.PROJECT_ID, fake.IN_USE_ID)) notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, req, fake.IN_USE_ID) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.delete', side_effect=return_qos_specs_delete) def test_qos_specs_delete_inuse_force(self, mock_qos_delete, mock_qos_get_specs): req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s?force=True' % (fake.PROJECT_ID, fake.IN_USE_ID)) notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): 
self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.delete, req, fake.IN_USE_ID) self.assertEqual(1, notifier.get_notification_count()) def test_qos_specs_delete_with_invalid_force(self): invalid_force = "invalid_bool" req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/delete_keys?force=%s' % (fake.PROJECT_ID, fake.QOS_SPEC_ID, invalid_force)) self.assertRaises(exception.InvalidParameterValue, self.controller.delete, req, fake.QOS_SPEC_ID) @mock.patch('cinder.volume.qos_specs.delete_keys', side_effect=return_qos_specs_delete_keys) def test_qos_specs_delete_keys(self, mock_qos_delete_keys): body = {"keys": ['bar', 'zoo']} req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s/delete_keys' % (fake.PROJECT_ID, fake.IN_USE_ID)) notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.controller.delete_keys(req, fake.IN_USE_ID, body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.delete_keys', side_effect=return_qos_specs_delete_keys) def test_qos_specs_delete_keys_qos_notfound(self, mock_qos_specs_delete): body = {"keys": ['bar', 'zoo']} req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s/delete_keys' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.assertRaises(exception.QoSSpecsNotFound, self.controller.delete_keys, req, fake.WILL_NOT_BE_FOUND_ID, body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.delete_keys', side_effect=return_qos_specs_delete_keys) def test_qos_specs_delete_keys_badkey(self, mock_qos_specs_delete): req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s/delete_keys' % (fake.PROJECT_ID, fake.IN_USE_ID)) body = {"keys": ['foo', 'zoo']} notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.assertRaises(exception.QoSSpecsKeyNotFound, self.controller.delete_keys, req, fake.IN_USE_ID, body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.delete_keys', side_effect=return_qos_specs_delete_keys) def test_qos_specs_delete_keys_get_notifier(self, mock_qos_delete_keys): body = {"keys": ['bar', 'zoo']} req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s/delete_keys' % (fake.PROJECT_ID, fake.IN_USE_ID)) notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier, autospec=True) as mock_get_notifier: self.controller.delete_keys(req, fake.IN_USE_ID, body) mock_get_notifier.assert_called_once_with('QoSSpecs') @mock.patch('cinder.volume.qos_specs.create', side_effect=return_qos_specs_create) @mock.patch('cinder.utils.validate_dictionary_string_length') def test_create(self, mock_validate, mock_qos_spec_create): body = {"qos_specs": {"name": "qos_specs_%s" % fake.QOS_SPEC_ID, "key1": "value1"}} req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID) notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): res_dict = self.controller.create(req, body) self.assertEqual(1, notifier.get_notification_count()) self.assertEqual('qos_specs_%s' % fake.QOS_SPEC_ID, res_dict['qos_specs']['name']) self.assertTrue(mock_validate.called) @mock.patch('cinder.volume.qos_specs.create', side_effect=return_qos_specs_create) def test_create_invalid_input(self, mock_qos_get_specs): body = {"qos_specs": {"name": 
'qos_spec_%s' % fake.INVALID_ID, "consumer": "invalid_consumer"}} req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID) notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.create', side_effect=return_qos_specs_create) def test_create_conflict(self, mock_qos_spec_create): body = {"qos_specs": {"name": 'qos_spec_%s' % fake.ALREADY_EXISTS_ID, "key1": "value1"}} req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID) notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.assertRaises(webob.exc.HTTPConflict, self.controller.create, req, body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.create', side_effect=return_qos_specs_create) def test_create_failed(self, mock_qos_spec_create): body = {"qos_specs": {"name": 'qos_spec_%s' % fake.ACTION_FAILED_ID, "key1": "value1"}} req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID) notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.create, req, body) self.assertEqual(1, notifier.get_notification_count()) @ddt.data({'foo': {'a': 'b'}}, {'qos_specs': {'a': 'b'}}, {'qos_specs': 'string'}, None) def test_create_invalid_body_bad_request(self, body): req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID, use_admin_context=True) req.method = 'POST' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) @ddt.data({'name': 'fake_name', 'a' * 256: 'a'}, {'name': 'fake_name', 'a': 'a' * 256}, {'name': 'fake_name', '': 'a'}) def test_create_qos_with_invalid_specs(self, value): body = {'qos_specs': value} req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID, use_admin_context=True) req.method = 'POST' self.assertRaises(exception.InvalidInput, self.controller.create, req, body) @ddt.data({'name': None}, {'name': 'n' * 256}, {'name': ''}, {'name': ' '}) def test_create_qos_with_invalid_spec_name(self, value): body = {'qos_specs': value} req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID, use_admin_context=True) req.method = 'POST' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) @mock.patch('cinder.volume.qos_specs.update', side_effect=return_qos_specs_update) def test_update(self, mock_qos_update): notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % (fake.PROJECT_ID, fake.QOS_SPEC_ID)) body = {'qos_specs': {'key1': 'value1', 'key2': 'value2'}} res = self.controller.update(req, fake.QOS_SPEC_ID, body) self.assertDictEqual(body, res) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.update', side_effect=return_qos_specs_update) def test_update_not_found(self, mock_qos_update): notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) body = {'qos_specs': {'key1': 'value1', 'key2': 'value2'}} self.assertRaises(exception.QoSSpecsNotFound, self.controller.update, req, 
fake.WILL_NOT_BE_FOUND_ID, body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.update', side_effect=return_qos_specs_update) def test_update_invalid_input(self, mock_qos_update): notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % (fake.PROJECT_ID, fake.INVALID_ID)) body = {'qos_specs': {'key1': 'value1', 'key2': 'value2'}} self.assertRaises(exception.InvalidQoSSpecs, self.controller.update, req, fake.INVALID_ID, body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.update', side_effect=return_qos_specs_update) def test_update_failed(self, mock_qos_update): notifier = fake_notifier.get_fake_notifier() with mock.patch('cinder.rpc.get_notifier', return_value=notifier): req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % (fake.PROJECT_ID, fake.UPDATE_FAILED_ID)) body = {'qos_specs': {'key1': 'value1', 'key2': 'value2'}} self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.update, req, fake.UPDATE_FAILED_ID, body) self.assertEqual(1, notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) def test_show(self, mock_get_qos_specs): req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % ( fake.PROJECT_ID, fake.QOS_SPEC_ID)) res_dict = self.controller.show(req, fake.QOS_SPEC_ID) self.assertEqual(fake.QOS_SPEC_ID, res_dict['qos_specs']['id']) self.assertEqual('qos_specs_%s' % fake.QOS_SPEC_ID, res_dict['qos_specs']['name']) @mock.patch('cinder.volume.qos_specs.get_associations', side_effect=return_get_qos_associations) def test_get_associations(self, mock_get_assciations): req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/associations' % ( fake.PROJECT_ID, fake.QOS_SPEC_ID)) res = self.controller.associations(req, fake.QOS_SPEC_ID) self.assertEqual('FakeVolTypeName', res['qos_associations'][0]['name']) self.assertEqual(fake.VOLUME_TYPE_ID, res['qos_associations'][0]['id']) @mock.patch('cinder.volume.qos_specs.get_associations', side_effect=return_get_qos_associations) def test_get_associations_not_found(self, mock_get_assciations): req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/associations' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) self.assertRaises(exception.QoSSpecsNotFound, self.controller.associations, req, fake.WILL_NOT_BE_FOUND_ID) @mock.patch('cinder.volume.qos_specs.get_associations', side_effect=return_get_qos_associations) def test_get_associations_failed(self, mock_get_associations): req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/associations' % ( fake.PROJECT_ID, fake.RAISE_ID)) self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.associations, req, fake.RAISE_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', side_effect=return_associate_qos_specs) def test_associate(self, mock_associate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/associate?vol_type_id=%s' % (fake.PROJECT_ID, fake.QOS_SPEC_ID, fake.VOLUME_TYPE_ID)) res = self.controller.associate(req, fake.QOS_SPEC_ID) self.assertEqual(202, res.status_int) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', side_effect=return_associate_qos_specs) def 
test_associate_no_type(self, mock_associate, mock_get_qos): req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s/associate' % (fake.PROJECT_ID, fake.QOS_SPEC_ID)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.associate, req, fake.QOS_SPEC_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', side_effect=return_associate_qos_specs) def test_associate_not_found(self, mock_associate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/associate?vol_type_id=%s' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID, fake.VOLUME_TYPE_ID)) self.assertRaises(exception.QoSSpecsNotFound, self.controller.associate, req, fake.WILL_NOT_BE_FOUND_ID) req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/associate?vol_type_id=%s' % (fake.PROJECT_ID, fake.QOS_SPEC_ID, fake.WILL_NOT_BE_FOUND_ID)) self.assertRaises(exception.VolumeTypeNotFound, self.controller.associate, req, fake.QOS_SPEC_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', side_effect=return_associate_qos_specs) def test_associate_fail(self, mock_associate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/associate?vol_type_id=%s' % (fake.PROJECT_ID, fake.ACTION_FAILED_ID, fake.VOLUME_TYPE_ID)) self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.associate, req, fake.ACTION_FAILED_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', side_effect=return_associate_qos_specs) def test_disassociate(self, mock_disassociate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/disassociate?vol_type_id=%s' % ( fake.PROJECT_ID, fake.QOS_SPEC_ID, fake.VOLUME_TYPE_ID)) res = self.controller.disassociate(req, fake.QOS_SPEC_ID) self.assertEqual(202, res.status_int) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', side_effect=return_associate_qos_specs) def test_disassociate_no_type(self, mock_disassociate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/disassociate' % ( fake.PROJECT_ID, fake.QOS_SPEC_ID)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.disassociate, req, fake.QOS_SPEC_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', side_effect=return_associate_qos_specs) def test_disassociate_not_found(self, mock_disassociate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/disassociate?vol_type_id=%s' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID, fake.VOLUME_TYPE_ID)) self.assertRaises(exception.QoSSpecsNotFound, self.controller.disassociate, req, fake.WILL_NOT_BE_FOUND_ID) req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/disassociate?vol_type_id=%s' % (fake.PROJECT_ID, fake.VOLUME_TYPE_ID, fake.WILL_NOT_BE_FOUND_ID)) self.assertRaises(exception.VolumeTypeNotFound, self.controller.disassociate, req, fake.VOLUME_TYPE_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', side_effect=return_associate_qos_specs) def test_disassociate_failed(self, mock_disassociate, mock_get_qos): req 
= fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/disassociate?vol_type_id=%s' % ( fake.PROJECT_ID, fake.ACTION2_FAILED_ID, fake.VOLUME_TYPE_ID)) self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.disassociate, req, fake.ACTION2_FAILED_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_all', side_effect=return_disassociate_all) def test_disassociate_all(self, mock_disassociate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/disassociate_all' % ( fake.PROJECT_ID, fake.QOS_SPEC_ID)) res = self.controller.disassociate_all(req, fake.QOS_SPEC_ID) self.assertEqual(202, res.status_int) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_all', side_effect=return_disassociate_all) def test_disassociate_all_not_found(self, mock_disassociate, mock_get): req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/disassociate_all' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) self.assertRaises(exception.QoSSpecsNotFound, self.controller.disassociate_all, req, fake.WILL_NOT_BE_FOUND_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_all', side_effect=return_disassociate_all) def test_disassociate_all_failed(self, mock_disassociate, mock_get): req = fakes.HTTPRequest.blank( '/v2/%s/qos-specs/%s/disassociate_all' % ( fake.PROJECT_ID, fake.ACTION2_FAILED_ID)) self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.disassociate_all, req, fake.ACTION2_FAILED_ID)
py
b417b7eef5eb8d40cf7ec6fa0b2601b8018e7654
#! /usr/bin/env python # coding=utf-8 #================================================================ # Copyright (C) 2019 * Ltd. All rights reserved. # # Editor : VIM # File name : train.py # Author : YunYang1994 # Created date: 2019-02-28 17:50:26 # Description : # #================================================================ import os import time import shutil import numpy as np import tensorflow as tf import core.utils as utils from tqdm import tqdm from core.dataset import Dataset from core.yolov3 import YOLOV3 from core.config import cfg from tensorflow.python import pywrap_tensorflow class YoloTrain(object): def __init__(self): self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE self.classes = utils.read_class_names(cfg.YOLO.CLASSES) self.num_classes = len(self.classes) self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time())) self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY self.max_bbox_per_scale = 150 self.train_logdir = "./data/log/train" self.trainset = Dataset('train') self.testset = Dataset('test') self.steps_per_period = len(self.trainset) self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) with tf.name_scope('define_input'): self.input_rgb = tf.placeholder(dtype=tf.float32, name='input_rgb') self.input_lwir = tf.placeholder(dtype=tf.float32, name='input_lwir') self.label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox') self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox') self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox') self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes') self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes') self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes') self.trainable = tf.placeholder(dtype=tf.bool, name='training') with tf.name_scope("define_loss"): self.model = YOLOV3(self.input_rgb,self.input_lwir, self.trainable) self.net_var = tf.global_variables() self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss( self.label_sbbox, self.label_mbbox, self.label_lbbox, self.true_sbboxes, self.true_mbboxes, self.true_lbboxes) self.loss = self.giou_loss + self.conf_loss + self.prob_loss with tf.name_scope('learn_rate'): self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step') warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period, dtype=tf.float64, name='warmup_steps') train_steps = tf.constant( (self.first_stage_epochs + self.second_stage_epochs)* self.steps_per_period, dtype=tf.float64, name='train_steps') self.learn_rate = tf.cond( pred=self.global_step < warmup_steps, true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init, false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) * (1 + tf.cos( (self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi)) ) global_step_update = tf.assign_add(self.global_step, 1.0) with tf.name_scope("define_weight_decay"): moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables()) with tf.name_scope("define_first_stage_train"): self.first_stage_trainable_var_list = [] for var in tf.trainable_variables(): var_name = 
var.op.name var_name_mess = str(var_name).split('/') if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']: self.first_stage_trainable_var_list.append(var) first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss, var_list=self.first_stage_trainable_var_list) with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): with tf.control_dependencies([first_stage_optimizer, global_step_update]): with tf.control_dependencies([moving_ave]): self.train_op_with_frozen_variables = tf.no_op() with tf.name_scope("define_second_stage_train"): second_stage_trainable_var_list = tf.trainable_variables() second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss, var_list=second_stage_trainable_var_list) with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): with tf.control_dependencies([second_stage_optimizer, global_step_update]): with tf.control_dependencies([moving_ave]): self.train_op_with_all_variables = tf.no_op() with tf.name_scope('loader_and_saver'): # self.loader = tf.train.Saver(self.net_var) self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10) with tf.name_scope('summary'): tf.summary.scalar("learn_rate", self.learn_rate) tf.summary.scalar("giou_loss", self.giou_loss) tf.summary.scalar("conf_loss", self.conf_loss) tf.summary.scalar("prob_loss", self.prob_loss) tf.summary.scalar("total_loss", self.loss) logdir = "./data/log/" if os.path.exists(logdir): shutil.rmtree(logdir) os.mkdir(logdir) self.write_op = tf.summary.merge_all() self.summary_writer = tf.summary.FileWriter(logdir, graph=self.sess.graph) def get_variables_in_checkpoint_file(self, file_name): try: reader = pywrap_tensorflow.NewCheckpointReader(file_name) var_to_shape_map = reader.get_variable_to_shape_map() return var_to_shape_map except Exception as e: # pylint: disable=broad-except print(str(e)) if "corrupted compressed block contents" in str(e): print("It's likely that your checkpoint file has been compressed " "with SNAPPY.") def get_variables_to_restore(self, variables, var_keep_dic): variables_to_restore = [] for v in variables: if v.name.split(':')[0] in var_keep_dic and v.name.split('/')[0] not in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']: print('Variables restored: %s' % v.name) variables_to_restore.append(v) return variables_to_restore def train(self): self.sess.run(tf.global_variables_initializer()) try: print('=> Restoring weights from: %s ... ' % self.initial_weight) variables = tf.global_variables() self.sess.run(tf.variables_initializer(variables, name='init')) var_keep_dic = self.get_variables_in_checkpoint_file(r"D:\DoubleImage\交接1(福邦),2(又俊)\yolov3融合1\checkpoint\yolov3_RGB_0910_loss=59.2860.ckpt-50") # Get the variables to restore, ignorizing the variables to fix variables_to_restore = self.get_variables_to_restore(variables, var_keep_dic) loader = tf.train.Saver(variables_to_restore) loader.restore(self.sess, r"D:\DoubleImage\交接1(福邦),2(又俊)\yolov3融合1\checkpoint\yolov3_RGB_0910_loss=59.2860.ckpt-50") print("load model sucessful !") except: print('=> %s does not exist !!!' 
                  % self.initial_weight)
            print('=> Now it starts to train YOLOV3 from scratch ...')
            self.first_stage_epochs = 0

        for epoch in range(1, 1 + self.first_stage_epochs + self.second_stage_epochs):
            if epoch <= self.first_stage_epochs:
                train_op = self.train_op_with_frozen_variables
            else:
                train_op = self.train_op_with_all_variables

            pbar = tqdm(self.trainset)
            qbar = tqdm(self.testset)
            train_epoch_loss, test_epoch_loss = [], []

            for train_data in pbar:
                _, summary, train_step_loss, global_step_val = self.sess.run(
                    [train_op, self.write_op, self.loss, self.global_step], feed_dict={
                        self.input_rgb: train_data[0],
                        self.input_lwir: train_data[1],
                        self.label_sbbox: train_data[2],
                        self.label_mbbox: train_data[3],
                        self.label_lbbox: train_data[4],
                        self.true_sbboxes: train_data[5],
                        self.true_mbboxes: train_data[6],
                        self.true_lbboxes: train_data[7],
                        self.trainable: True,
                    })

                train_epoch_loss.append(train_step_loss)
                self.summary_writer.add_summary(summary, global_step_val)
                pbar.set_description("train loss: %.2f" % train_step_loss)

            for test_data in qbar:
                # Evaluate the loss on the current test batch.
                test_step_loss = self.sess.run(
                    self.loss, feed_dict={
                        self.input_rgb: test_data[0],
                        self.input_lwir: test_data[1],
                        self.label_sbbox: test_data[2],
                        self.label_mbbox: test_data[3],
                        self.label_lbbox: test_data[4],
                        self.true_sbboxes: test_data[5],
                        self.true_mbboxes: test_data[6],
                        self.true_lbboxes: test_data[7],
                        self.trainable: False,
                    })

                test_epoch_loss.append(test_step_loss)
                qbar.set_description("test loss: %.2f" % test_step_loss)

            train_epoch_loss, test_epoch_loss = np.mean(train_epoch_loss), np.mean(test_epoch_loss)
            ckpt_file = "./checkpoint/yolov3_RGB_0910_loss=%.4f.ckpt" % test_epoch_loss
            log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            print("=> Epoch: %2d Time: %s Train loss: %.2f Test loss: %.2f Saving %s ..."
                  % (epoch, log_time, train_epoch_loss, test_epoch_loss, ckpt_file))
            self.saver.save(self.sess, ckpt_file, global_step=epoch)


if __name__ == '__main__':
    YoloTrain().train()
py
b417b890f542c24fa1ed31db6bc9710a6525bfbe
# -*- encoding: UTF-8 -*- import re import os import codecs from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) def read(*parts): # intentionally *not* adding an encoding option to open # see here: https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690 return codecs.open(os.path.join(here, *parts), 'r').read() def find_version(*file_paths): version_file = read(*file_paths) version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M) if version_match: return version_match.group(1) raise RuntimeError("Unable to find version string.") # Get the long description from the README file with codecs.open(os.path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name='Socketer', # Versions should comply with PEP440. For a discussion on single-sourcing # the version across setup.py and the project code, see # https://packaging.python.org/en/latest/single_source_version.html version=find_version('Socketer', '__init__.py'), description='', long_description=long_description, # The project's main homepage. url='https://github.com/mingotang/Socketer', # Author details author='Mingo Tang', author_email='[email protected]', # Choose your license license='MIT License', # See https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ # How mature is this project? Common values are # 3 - Alpha # 4 - Beta # 5 - Production/Stable 'Development Status :: 3 - Alpha', # Indicate who your project is intended for 'Intended Audience :: Developers', 'Topic :: Software Development :: Libraries :: Python Modules', # Pick your license as you wish (should match "license" above) 'License :: OSI Approved :: Apache Software License', # Specify the Python versions you support here. In particular, ensure # that you indicate whether you support Python 2, Python 3 or both. # 'Programming Language :: Python :: 2', # 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', ], # What does your project relate to? keywords='socketer service', # You can just specify the packages manually here if your project is # simple. Or you can use find_packages(). packages=find_packages(exclude=['samples']), # Alternatively, if you want to distribute just a my_module.py, uncomment # this: # py_modules=["my_module"], # List run-time dependencies here. These will be installed by pip when # your project is installed. For an analysis of "install_requires" vs pip's # requirements files see: # https://packaging.python.org/en/latest/requirements.html install_requires=[ 'docopt>=0.6.2', ], # List additional groups of dependencies here (e.g. development # dependencies). You can install these using the following syntax, # for example: # $ pip install -e .[dev,test] # extras_require={ # 'data': ['check-manifest'], # 'test': ['tests_require'], # }, # If there are data files included in your packages that need to be # installed, specify them here. If using Python 2.6 or less, then these # have to be included in MANIFEST.in as well. # package_data={ # 'data': ['*.csv', 'DeveloperDefinedAdjustment.txt'], # }, # Although 'package_data' is the preferred approach, in some case you may # need to place data files outside of your packages. 
See: # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa # In this case, 'data_file' will be installed into '<sys.prefix>/my_data' data_files=[ # ('data', [ # 'data/DeveloperDefinedAdjustment.txt', # 'data/PackageDefinedFirmTypeWhitelist.xlsx', # 'data/PackageDefinedKeywordBlacklist.xlsx', # 'data/PackageDefinedPartitionExpression.xlsx', # 'data/PackageDefinedServiceTypeWhitelist.xlsx', # 'data/ToponymInfomation.xlsx' # ]), ('.', [ 'LICENSE', 'README.md', ]) ], # To provide executable scripts, use entry points in preference to the # "scripts" keyword. Entry points provide cross-platform support and allow # pip to create the appropriate form of executable for the target platform. # entry_points=dict(console_scripts=['pip=pip:main', # 'pip%s=pip:main' % sys.version[:1], # 'pip%s=pip:main' % sys.version[:3]]), entry_points={ 'console_scripts': [ 'socketer=Socketer.cmd:cli', ], }, )
py
b417b90d5d7481cdc9f57e626c224f20bc889749
""" Plotting constant longitude slices ================================== This example shows how to plot slices of constant longitude from a MAS model output. """ ############################################################################### # First, load the required modules. import matplotlib.pyplot as plt from psipy.data import sample_data from psipy.model import MASOutput ############################################################################### # Next, load a set of MAS output files. You will need to change this line to # point to a folder with MAS files in them. mas_path = sample_data.mas_sample_data() model = MASOutput(mas_path) ############################################################################### # Each MAS model contains a number of variables. The variable names can be # accessed using the ``.variables`` attribute. print(model.variables) ############################################################################### # Set parameters for plotting. The first line will give us a horizontal # errorbar underneath the plots. The second line is the index to select for the # longitude slice. cbar_kwargs = {'orientation': 'horizontal'} phi_idx = 40 ############################################################################### # Plot the slices # # Note that for density (rho) and pressure (p) we first normalise the data # relative to a power law decrease, to make it easer to see spatial variations. fig = plt.figure() axs = [plt.subplot(1, 2, i + 1, projection='polar') for i in range(2)] ax = axs[0] model['vr'].plot_phi_cut(phi_idx, ax=ax, cbar_kwargs=cbar_kwargs) ax = axs[1] rho = model['rho'] rho_r2 = rho.radial_normalized(2) rho_r2.plot_phi_cut(phi_idx, ax=ax, cbar_kwargs=cbar_kwargs) # Add a contour of br = 0 (the heliopsheric current sheet) to all the axes for ax in axs: model['br'].contour_phi_cut(phi_idx, levels=[0], ax=ax, colors='white', linestyles='--', linewidths=1) plt.show()
py
b417b95b8421a4978ea8d6e1a4e490e608ab8f35
# Copyright (c) 2013 The SAYCBridge Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import itertools import logging import multiprocessing import sys import traceback import unittest2 from core.call import Call from core.callhistory import CallHistory, Vulnerability from core.hand import Hand from factory import BidderFactory from third_party import outputcapture from tests import test_sayc _log = logging.getLogger(__name__) def expectation_line(hand, call_history, expected_call=None): vulnerability = call_history.vulnerability.name optional_vulernability_string = "" if vulnerability == "None" else ", '%s'" % vulnerability expected_call_string = expected_call.name if expected_call else "?" return "['%s', '%s', '%s'%s]," % ( hand.cdhs_dot_string(), expected_call_string, call_history.calls_string(), optional_vulernability_string, ) class CompiledTest(object): def __init__(self, group, hand, call_history, expected_call, parent_test=None): self.group = group self.hand = hand self.call_history = call_history self.expected_call = expected_call self.parent_test = parent_test @classmethod def from_expectation_tuple_in_group(cls, expectation, test_group): hand_string = expectation[0] assert '.' in hand_string, "_split_expectation expectes C.D.H.S formatted hands, missing '.': %s" % hand_string expected_call = Call.from_string(expectation[1]) history_string = expectation[2] if len(expectation) > 2 else "" vulnerability_string = expectation[3] if len(expectation) > 3 else None hand = Hand.from_cdhs_string(hand_string) call_history = CallHistory.from_string(history_string, vulnerability_string=vulnerability_string) return cls(test_group, hand, call_history, expected_call) @property def identifier(self): # FIXME: Our "have we run this" check would be more powerful if we used a combinatics based identifier for the hands. return "%s-%s" % (self.hand.cdhs_dot_string(), self.call_history.identifier) @property def subtest_string(self): if self.parent_test: return " (subtest of %s)" % self.parent_test.call_history.calls_string() return "" @property def test_string(self): return "%s, history: %s%s" % (self.hand.pretty_one_line(), self.call_history.calls_string(), self.subtest_string) @property def subtests(self): subtests = [] partial_history = self.call_history while len(partial_history.calls) >= 4: expected_call = partial_history.calls[-4] partial_history = partial_history.copy_with_partial_history(-4) subtests.append(CompiledTest(self.group, self.hand, partial_history, expected_call, self)) return subtests class TestGroup(object): def __init__(self, name): self.name = name self._seen_expectations = {} self.tests = [] def add_test(self, test): # Sanity check to make sure we're not running a test twice. test_identifier = test.identifier previous_call = self._seen_expectations.get(test_identifier) if previous_call: if previous_call != test.expected_call: _log.error("Conflicting expectations for %s, %s != %s" % (test_identifier, previous_call, test.expected_call)) elif not test.parent_test: _log.debug("%s is an explicit duplicate of an earlier test." 
% test_identifier) else: _log.debug("Ignoring dupliate subtest %s" % test_identifier) return self._seen_expectations[test_identifier] = test.expected_call self.tests.append(test) def add_expectation_line(self, expectation): try: test = CompiledTest.from_expectation_tuple_in_group(expectation, self) except: print "Exception compiling: %s in group %s" % (expectation, self.name) raise self.add_test(test) for test in test.subtests: self.add_test(test) def add_expectation_lines(self, expectation_lines): for expectation in expectation_lines: self.add_expectation_line(expectation) class ResultsAggregator(object): def __init__(self, groups): self.groups = groups self._results_count_by_group = { group.name: 0 for group in self.groups } self._results_by_identifier = {} self._group_has_printed = [False for group in self.groups] self._total_failures = 0 def _is_complete(self, group): return self._results_count_by_group.get(group.name) == len(group.tests) def _print_completed_groups(self): for index, has_printed in enumerate(self._group_has_printed): if has_printed == True: continue group = self.groups[index] if not self._is_complete(group): return self._print_group_summary(group) self._group_has_printed[index] = True def _print_group_summary(self, group): fail_count = 0 print "%s:" % group.name for test in group.tests: result = self._results_by_identifier[test.identifier] result.print_captured_logs() if result.exc_str: _log.error("Exception during find_call_for %s %s: %s" % (test.hand.pretty_one_line(), test.call_history.calls_string(), result.call)) _log.error(result.exc_str) raise StopIteration if result.call and result.call == test.expected_call: _log.info("PASS: %s for %s" % (test.expected_call, test.test_string)) else: fail_count += 1 print "FAIL: %s (expected %s) for %s" % (result.call, test.expected_call, test.test_string) # FIXME: We don't need to update _total_failures here. self._total_failures += fail_count print "Pass %s of %s hands" % (len(group.tests) - fail_count, len(group.tests)) print def add_results_callback(self, results): for result in results: # FIXME: Instead of warning, we should assert here, and we should fix # duplicated detection to be global, instead of per TestGroup. # existing_result = self._results_by_identifier.get(result.test.identifier) # if existing_result: # print "WARNING: Got duplicate result (%s, %s) %s" % (existing_result.call, result.call, result.test.test_string) self._results_by_identifier[result.test.identifier] = result self._results_count_by_group[result.test.group.name] += 1 self._print_completed_groups() # These were explicitly tested and matched some hand. @property def called_rule_names(self): rule_names = map(lambda result: result.rule_name, self._results_by_identifier.values()) return set(map(str, filter(None, rule_names))) # These were tested via interpretation of a call_history. @property def interpreted_rule_names(self): rule_name_tuples = map(lambda result: result.last_three_rule_names, self._results_by_identifier.values()) # result.last_three_rule_names can be None. 
rule_name_tuples = filter(None, rule_name_tuples) rule_names = itertools.chain.from_iterable(rule_name_tuples) return set(map(str, filter(None, rule_names))) def print_summary(self): total_tests = len(self._results_by_identifier) total_pass = total_tests - self._total_failures percent = 100.0 * total_pass / total_tests if total_tests else 0 print "Pass %s (%.1f%%) of %s total hands" % (total_pass, percent, total_tests) # Pickle gets mad at us if we make this a member or even static function. # This call is executed in a different process when running tests in parallel. def _run_test(test): # FIXME: There is no need to lookup the bidder every time. bidder = BidderFactory.default_bidder() result = TestResult() result.test = test # FIXME: OutputCapture captures logging channels as well which is probably a waste. output = outputcapture.OutputCapture() stdout, stderr = output.capture_output() try: call_selection = bidder.call_selection_for(test.hand, test.call_history) if call_selection: result.call = call_selection.call result.rule_name = str(call_selection.rule) result.fill_last_three_rule_names(call_selection) if result.last_three_rule_names and result.last_three_rule_names[-2] is None: print "WARNING: Failed to interpret partner's last bid: %s" % test.call_history.copy_with_partial_history(-2) except Exception: result.exc_str = ''.join(traceback.format_exception(*sys.exc_info())) output.restore_output() result.save_captured_logs(stdout, stderr) return result class TestHarness(unittest2.TestCase): use_multi_process = True test_shard_size = 10 def __init__(self, *args, **kwargs): super(TestHarness, self).__init__(*args, **kwargs) self.groups = [] self.results = None def collect_test_groups(self): # sorted happens to "just work" here since tuples are compared in item order. # Since we know we don't have any duplicate keys, the values will never be compared. for group_name, expectations_list in sorted(test_sayc.sayc_expectations.items()): group = TestGroup(group_name) group.add_expectation_lines(expectations_list) self.groups.append(group) def run_tests_single_process(self): # This follows the same logic-flow as the multi-process code, yet stays single threaded. all_tests = list(itertools.chain.from_iterable(group.tests for group in self.groups)) for x in range(0, len(all_tests), self.test_shard_size): shard = all_tests[x : x + self.test_shard_size] results = map(_run_test, shard) self.results.add_results_callback(results) def run_tests_multi_process(self): all_tests = list(itertools.chain.from_iterable(group.tests for group in self.groups)) pool = multiprocessing.Pool() # FIXME: outstanding_jobs + map_async is a workaround for http://bugs.python.org/issue8296 (only fixed in python 3) outstanding_jobs = [] for x in range(0, len(all_tests), self.test_shard_size): results = pool.map_async( _run_test, all_tests[x : x + self.test_shard_size], self.test_shard_size, self.results.add_results_callback ) outstanding_jobs.append(results) pool.close() # Calling pool.join() won't handle KeyboardInterrupts, so we do a timed wait # on each individual results object. while outstanding_jobs: outstanding_jobs.pop(0).wait(0xFFFF) pool.terminate() def _print_coverage_summary(self): # FIXME: This need not depend on z3 specifically. try: all_rules = BidderFactory.default_bidder().system.rules except: print "Ignoring coverage summary, failed to find rules." return all_rule_names = set(map(str, all_rules)) # Don't expect to see rules which are marked "requires_planning". 
non_planned_rules = filter(lambda rule: not rule.requires_planning, all_rules) non_planned_rule_names = set(map(str, non_planned_rules)) called_rule_names = self.results.called_rule_names planned_rule_count = len(all_rule_names) - len(non_planned_rule_names) print "Tested call generation of %s rules of %s total (excluding %s requires_planning rules)." % (len(called_rule_names), len(non_planned_rule_names), planned_rule_count) uncalled_rule_names = non_planned_rule_names - called_rule_names if uncalled_rule_names: print "Never selected call from:" print "\n".join(sorted(uncalled_rule_names)) interpreted_rule_names = self.results.interpreted_rule_names print "\nTested interpretation of %s rules of %s total." % (len(interpreted_rule_names), len(all_rule_names)) uninterpreted_rule_names = all_rule_names - interpreted_rule_names # FIXME: We should print these, but we have too many right now! # if uninterpreted_rule_names: # print "Never interpreted call with:" # print "\n".join(sorted(uninterpreted_rule_names)) never_tested_rule_names = uncalled_rule_names & uninterpreted_rule_names if uninterpreted_rule_names: print "\n%s rules were never used for either bidding or interpretation:" % len(never_tested_rule_names) print "\n".join(sorted(never_tested_rule_names)) def test_main(self): self.collect_test_groups() self.results = ResultsAggregator(self.groups) if self.use_multi_process: self.run_tests_multi_process() else: self.run_tests_single_process() self.results.print_summary() print self._print_coverage_summary() class TestResult(object): def __init__(self): self.test = None self.call = None self.rule_name = None # We only bother to store the last 3, as the subtest system will have handled all calls before that. self.last_three_rule_names = None self.exc_str = None self.stdout = None self.stderr = None def fill_last_three_rule_names(self, call_selection): # FIXME: This is kinda an ugly z3b-dependant hack. if not hasattr(call_selection, "rule_selector"): return from z3b.model import positions # These are in call-order, so we'd access partner's via names[-2]. last_three_positions = (positions.LHO, positions.Partner, positions.RHO) # This history is prior to the call_selection's call. history = call_selection.rule_selector.history self.last_three_rule_names = map(str, map(history.rule_for_last_call, last_three_positions)) def save_captured_logs(self, stdout, stderr): self.stdout = stdout.getvalue() self.stderr = stderr.getvalue() def print_captured_logs(self): if self.stderr: sys.stderr.write(self.stderr) if self.stdout: sys.stdout.write(self.stdout)
py
b417bae9b224ef11dd91910c9c928f6d172f51bf
# Python Program To Use Addition Operator To Add The Contents Of Two Objects

'''
Function Name : Use Addition Operator To Add Contents Of 2 Objects
Function Date : 20 Sep 2020
Function Author : Prasad Dangare
Input : Integer page counts
Output : Integer total pages
'''

class BookX:
    def __init__(self, pages):
        self.pages = pages

    # b1 + b2 calls BookX.__add__ with the BookY instance as `other`
    def __add__(self, other):
        return self.pages + other.pages

class BookY:
    def __init__(self, pages):
        self.pages = pages

b1 = BookX(100)
b2 = BookY(150)
print('Total pages = ', b1 + b2)
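# Illustrative sketch, not from the original file above: the example only works in
# one direction, because BookY defines neither __add__ nor __radd__, so `b2 + b1`
# raises TypeError. BookYWithRadd is a hypothetical variant that can also appear on
# the right-hand side of the operator.
class BookYWithRadd:
    def __init__(self, pages):
        self.pages = pages

    def __radd__(self, other):
        # Called for `other + self` when the left operand's __add__ gives up.
        return other + self.pages

b3 = BookYWithRadd(200)
print('Total pages = ', 50 + b3)          # int.__add__ returns NotImplemented, __radd__ runs -> 250
print('Total pages = ', BookX(100) + b3)  # BookX.__add__ reads b3.pages -> 300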
py
b417bbe43402376e0ba3a25db9a362948c985592
#from .voc0712 import VOCDetection, AnnotationTransform, detection_collate, VOC_CLASSES
from .ucf24 import UCF24Detection, AnnotationTransform, detection_collate, CLASSES
from .config import *
import cv2
import numpy as np


def base_transform(image, size, mean):
    x = cv2.resize(image, (size, size)).astype(np.float32)
    # x = cv2.resize(np.array(image), (size, size)).astype(np.float32)
    x -= mean
    x = x.astype(np.float32)
    return x


class BaseTransform:
    def __init__(self, size, mean):
        self.size = size
        self.mean = np.array(mean, dtype=np.float32)

    def __call__(self, image, boxes=None, labels=None):
        return base_transform(image, self.size, self.mean), boxes, labels
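# Illustrative sketch, not from the original file above: applying BaseTransform to a
# dummy frame. The size and mean values are placeholders rather than values taken
# from this project's config; np and cv2 are the modules imported at the top.
dummy_frame = np.zeros((240, 320, 3), dtype=np.uint8)
transform = BaseTransform(size=300, mean=(104, 117, 123))
resized, boxes, labels = transform(dummy_frame)
print(resized.shape)  # (300, 300, 3), float32, mean-subtracted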
py
b417bc9f072645179c22eeb55d77a743121b0057
import logging

from io import BytesIO

import boto3
import pandas as pd

from botocore import UNSIGNED
from botocore.client import Config

LOGGER = logging.getLogger(__name__)


def get_bucket(bucket_name):
    resource = boto3.resource('s3', config=Config(signature_version=UNSIGNED))
    return resource.Bucket(bucket_name)


def clean_dataset(data):
    numerical_col = []
    for column in data.columns:
        if (data[column].astype(int) == data[column]).all():
            numerical_col.append(column)

    data.drop(data.columns[numerical_col], axis=1, inplace=True)
    data.columns = range(data.shape[1])
    return data


def load_dataset(obj):
    body = obj.get()['Body'].read()
    data = pd.read_csv(BytesIO(body), header=None)
    return clean_dataset(data)


def get_dataset(bucket_name, dataset_name):
    bucket = get_bucket(bucket_name)
    dataset = bucket.Object(key=dataset_name)
    return load_dataset(dataset)


def get_datasets(bucket_name, limit=None):
    bucket = get_bucket(bucket_name)

    datasets = dict()
    for obj in list(bucket.objects.all()):
        dataset = load_dataset(obj)
        if not dataset.empty:
            datasets[obj.key] = dataset

        # Only stop early when an explicit limit was requested; comparing against
        # the default of None would raise a TypeError.
        if limit is not None and len(datasets) >= limit:
            break

    return datasets
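# Usage sketch, not from the original file above; the bucket and key names are
# hypothetical placeholders for a public S3 bucket readable with unsigned requests.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    one = get_dataset('example-public-bucket', 'datasets/example.csv')
    LOGGER.info('Loaded dataset with shape %s', one.shape)

    some = get_datasets('example-public-bucket', limit=5)
    LOGGER.info('Loaded %d datasets', len(some))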
py
b417bd5cea5cde856e00e1309a39c6e53e377c8e
import gym
from gym.envs.registration import registry, make, spec

from .RC_env import RC_env


def register(id, *args, **kvargs):
    if id in registry.env_specs:
        return
    else:
        return gym.envs.registration.register(id, *args, **kvargs)


print('Trying to register custom envs')

register(
    id='RC_env-v1',
    entry_point=RC_env,
    # max_episode_steps=2000,
    # reward_threshold=2000.0,
)
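# Usage sketch, not from the original file above. `my_envs` stands in for whatever
# package contains this __init__; importing it runs the register() call, after which
# the custom id resolves through gym.make (old gym step/reset API, matching the code
# above).
import gym
import my_envs  # noqa: F401

env = gym.make('RC_env-v1')
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())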
py
b417be48ba3f585db552087fcca86296bb2e2f24
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Identity v3 Trust action implementations""" import datetime import six from eclcli.common import command from eclcli.common import utils from eclcli.identity import common class CreateTrust(command.ShowOne): """Create new trust""" def get_parser(self, prog_name): parser = super(CreateTrust, self).get_parser(prog_name) parser.add_argument( 'trustor', metavar='<trustor-user>', help='User that is delegating authorization (name or ID)', ) parser.add_argument( 'trustee', metavar='<trustee-user>', help='User that is assuming authorization (name or ID)', ) parser.add_argument( '--project', metavar='<project>', required=True, help='Project being delegated (name or ID) (required)', ) parser.add_argument( '--role', metavar='<role>', action='append', default=[], help='Roles to authorize (name or ID) ' '(repeat to set multiple values) (required)', required=True ) parser.add_argument( '--impersonate', dest='impersonate', action='store_true', default=False, help='Tokens generated from the trust will represent <trustor>' ' (defaults to False)', ) parser.add_argument( '--expiration', metavar='<expiration>', help='Sets an expiration date for the trust' ' (format of YYYY-mm-ddTHH:MM:SS)', ) common.add_project_domain_option_to_parser(parser) parser.add_argument( '--trustor-domain', metavar='<trustor-domain>', help='Domain that contains <trustor> (name or ID)', ) parser.add_argument( '--trustee-domain', metavar='<trustee-domain>', help='Domain that contains <trustee> (name or ID)', ) return parser def take_action(self, parsed_args): identity_client = self.app.client_manager.identity # NOTE(stevemar): Find the two users, project and roles that # are necessary for making a trust usable, the API dictates that # trustee, project and role are optional, but that makes the trust # pointless, and trusts are immutable, so let's enforce it at the # client level. 
trustor_id = common.find_user(identity_client, parsed_args.trustor, parsed_args.trustor_domain).id trustee_id = common.find_user(identity_client, parsed_args.trustee, parsed_args.trustee_domain).id project_id = common.find_project(identity_client, parsed_args.project, parsed_args.project_domain).id role_names = [] for role in parsed_args.role: role_name = utils.find_resource( identity_client.roles, role, ).name role_names.append(role_name) expires_at = None if parsed_args.expiration: expires_at = datetime.datetime.strptime(parsed_args.expiration, '%Y-%m-%dT%H:%M:%S') trust = identity_client.trusts.create( trustee_id, trustor_id, impersonation=parsed_args.impersonate, project=project_id, role_names=role_names, expires_at=expires_at, ) trust._info.pop('roles_links', None) trust._info.pop('links', None) # Format roles into something sensible roles = trust._info.pop('roles') msg = ' '.join(r['name'] for r in roles) trust._info['roles'] = msg return zip(*sorted(six.iteritems(trust._info))) class DeleteTrust(command.Command): """Delete trust(s)""" def get_parser(self, prog_name): parser = super(DeleteTrust, self).get_parser(prog_name) parser.add_argument( 'trust', metavar='<trust>', help='Trust(s) to delete', nargs="+", ) return parser def take_action(self, parsed_args): identity_client = self.app.client_manager.identity for t in parsed_args.trust: trust_obj = utils.find_resource(identity_client.trusts, t) identity_client.trusts.delete(trust_obj.id) class ListTrust(command.Lister): """List trusts""" def take_action(self, parsed_args): columns = ('ID', 'Expires At', 'Impersonation', 'Project ID', 'Trustee User ID', 'Trustor User ID') data = self.app.client_manager.identity.trusts.list() return (columns, (utils.get_item_properties( s, columns, formatters={}, ) for s in data)) class ShowTrust(command.ShowOne): """Display trust details""" def get_parser(self, prog_name): parser = super(ShowTrust, self).get_parser(prog_name) parser.add_argument( 'trust', metavar='<trust>', help='Trust to display', ) return parser def take_action(self, parsed_args): identity_client = self.app.client_manager.identity trust = utils.find_resource(identity_client.trusts, parsed_args.trust) trust._info.pop('roles_links', None) trust._info.pop('links', None) # Format roles into something sensible roles = trust._info.pop('roles') msg = ' '.join(r['name'] for r in roles) trust._info['roles'] = msg return zip(*sorted(six.iteritems(trust._info)))
py
b417bea2150fa728330c0090b6ad0e6c6259432a
import unittest from test import support import sys import random import math import array # Used for lazy formatting of failure messages class Frm(object): def __init__(self, format, *args): self.format = format self.args = args def __str__(self): return self.format % self.args # SHIFT should match the value in longintrepr.h for best testing. SHIFT = sys.int_info.bits_per_digit BASE = 2 ** SHIFT MASK = BASE - 1 KARATSUBA_CUTOFF = 70 # from longobject.c # Max number of base BASE digits to use in test cases. Doubling # this will more than double the runtime. MAXDIGITS = 15 # build some special values special = [0, 1, 2, BASE, BASE >> 1, 0x5555555555555555, 0xaaaaaaaaaaaaaaaa] # some solid strings of one bits p2 = 4 # 0 and 1 already added for i in range(2*SHIFT): special.append(p2 - 1) p2 = p2 << 1 del p2 # add complements & negations special += [~x for x in special] + [-x for x in special] DBL_MAX = sys.float_info.max DBL_MAX_EXP = sys.float_info.max_exp DBL_MIN_EXP = sys.float_info.min_exp DBL_MANT_DIG = sys.float_info.mant_dig DBL_MIN_OVERFLOW = 2**DBL_MAX_EXP - 2**(DBL_MAX_EXP - DBL_MANT_DIG - 1) # Pure Python version of correctly-rounded integer-to-float conversion. def int_to_float(n): """ Correctly-rounded integer-to-float conversion. """ # Constants, depending only on the floating-point format in use. # We use an extra 2 bits of precision for rounding purposes. PRECISION = sys.float_info.mant_dig + 2 SHIFT_MAX = sys.float_info.max_exp - PRECISION Q_MAX = 1 << PRECISION ROUND_HALF_TO_EVEN_CORRECTION = [0, -1, -2, 1, 0, -1, 2, 1] # Reduce to the case where n is positive. if n == 0: return 0.0 elif n < 0: return -int_to_float(-n) # Convert n to a 'floating-point' number q * 2**shift, where q is an # integer with 'PRECISION' significant bits. When shifting n to create q, # the least significant bit of q is treated as 'sticky'. That is, the # least significant bit of q is set if either the corresponding bit of n # was already set, or any one of the bits of n lost in the shift was set. shift = n.bit_length() - PRECISION q = n << -shift if shift < 0 else (n >> shift) | bool(n & ~(-1 << shift)) # Round half to even (actually rounds to the nearest multiple of 4, # rounding ties to a multiple of 8). q += ROUND_HALF_TO_EVEN_CORRECTION[q & 7] # Detect overflow. if shift + (q == Q_MAX) > SHIFT_MAX: raise OverflowError("integer too large to convert to float") # Checks: q is exactly representable, and q**2**shift doesn't overflow. assert q % 4 == 0 and q // 4 <= 2**(sys.float_info.mant_dig) assert q * 2**shift <= sys.float_info.max # Some circularity here, since float(q) is doing an int-to-float # conversion. But here q is of bounded size, and is exactly representable # as a float. In a low-level C-like language, this operation would be a # simple cast (e.g., from unsigned long long to double). 
return math.ldexp(float(q), shift) # pure Python version of correctly-rounded true division def truediv(a, b): """Correctly-rounded true division for integers.""" negative = a^b < 0 a, b = abs(a), abs(b) # exceptions: division by zero, overflow if not b: raise ZeroDivisionError("division by zero") if a >= DBL_MIN_OVERFLOW * b: raise OverflowError("int/int too large to represent as a float") # find integer d satisfying 2**(d - 1) <= a/b < 2**d d = a.bit_length() - b.bit_length() if d >= 0 and a >= 2**d * b or d < 0 and a * 2**-d >= b: d += 1 # compute 2**-exp * a / b for suitable exp exp = max(d, DBL_MIN_EXP) - DBL_MANT_DIG a, b = a << max(-exp, 0), b << max(exp, 0) q, r = divmod(a, b) # round-half-to-even: fractional part is r/b, which is > 0.5 iff # 2*r > b, and == 0.5 iff 2*r == b. if 2*r > b or 2*r == b and q % 2 == 1: q += 1 result = math.ldexp(q, exp) return -result if negative else result class LongTest(unittest.TestCase): # Get quasi-random long consisting of ndigits digits (in base BASE). # quasi == the most-significant digit will not be 0, and the number # is constructed to contain long strings of 0 and 1 bits. These are # more likely than random bits to provoke digit-boundary errors. # The sign of the number is also random. def getran(self, ndigits): self.assertTrue(ndigits > 0) nbits_hi = ndigits * SHIFT nbits_lo = nbits_hi - SHIFT + 1 answer = 0 nbits = 0 r = int(random.random() * (SHIFT * 2)) | 1 # force 1 bits to start while nbits < nbits_lo: bits = (r >> 1) + 1 bits = min(bits, nbits_hi - nbits) self.assertTrue(1 <= bits <= SHIFT) nbits = nbits + bits answer = answer << bits if r & 1: answer = answer | ((1 << bits) - 1) r = int(random.random() * (SHIFT * 2)) self.assertTrue(nbits_lo <= nbits <= nbits_hi) if random.random() < 0.5: answer = -answer return answer # Get random long consisting of ndigits random digits (relative to base # BASE). The sign bit is also random. 
def getran2(ndigits): answer = 0 for i in range(ndigits): answer = (answer << SHIFT) | random.randint(0, MASK) if random.random() < 0.5: answer = -answer return answer def check_division(self, x, y): eq = self.assertEqual q, r = divmod(x, y) q2, r2 = x//y, x%y pab, pba = x*y, y*x eq(pab, pba, Frm("multiplication does not commute for %r and %r", x, y)) eq(q, q2, Frm("divmod returns different quotient than / for %r and %r", x, y)) eq(r, r2, Frm("divmod returns different mod than %% for %r and %r", x, y)) eq(x, q*y + r, Frm("x != q*y + r after divmod on x=%r, y=%r", x, y)) if y > 0: self.assertTrue(0 <= r < y, Frm("bad mod from divmod on %r and %r", x, y)) else: self.assertTrue(y < r <= 0, Frm("bad mod from divmod on %r and %r", x, y)) def test_division(self): digits = list(range(1, MAXDIGITS+1)) + list(range(KARATSUBA_CUTOFF, KARATSUBA_CUTOFF + 14)) digits.append(KARATSUBA_CUTOFF * 3) for lenx in digits: x = self.getran(lenx) for leny in digits: y = self.getran(leny) or 1 self.check_division(x, y) # specific numbers chosen to exercise corner cases of the # current long division implementation # 30-bit cases involving a quotient digit estimate of BASE+1 self.check_division(1231948412290879395966702881, 1147341367131428698) self.check_division(815427756481275430342312021515587883, 707270836069027745) self.check_division(627976073697012820849443363563599041, 643588798496057020) self.check_division(1115141373653752303710932756325578065, 1038556335171453937726882627) # 30-bit cases that require the post-subtraction correction step self.check_division(922498905405436751940989320930368494, 949985870686786135626943396) self.check_division(768235853328091167204009652174031844, 1091555541180371554426545266) # 15-bit cases involving a quotient digit estimate of BASE+1 self.check_division(20172188947443, 615611397) self.check_division(1020908530270155025, 950795710) self.check_division(128589565723112408, 736393718) self.check_division(609919780285761575, 18613274546784) # 15-bit cases that require the post-subtraction correction step self.check_division(710031681576388032, 26769404391308) self.check_division(1933622614268221, 30212853348836) def test_karatsuba(self): digits = list(range(1, 5)) + list(range(KARATSUBA_CUTOFF, KARATSUBA_CUTOFF + 10)) digits.extend([KARATSUBA_CUTOFF * 10, KARATSUBA_CUTOFF * 100]) bits = [digit * SHIFT for digit in digits] # Test products of long strings of 1 bits -- (2**x-1)*(2**y-1) == # 2**(x+y) - 2**x - 2**y + 1, so the proper result is easy to check. 
for abits in bits: a = (1 << abits) - 1 for bbits in bits: if bbits < abits: continue b = (1 << bbits) - 1 x = a * b y = ((1 << (abits + bbits)) - (1 << abits) - (1 << bbits) + 1) self.assertEqual(x, y, Frm("bad result for a*b: a=%r, b=%r, x=%r, y=%r", a, b, x, y)) def check_bitop_identities_1(self, x): eq = self.assertEqual eq(x & 0, 0, Frm("x & 0 != 0 for x=%r", x)) eq(x | 0, x, Frm("x | 0 != x for x=%r", x)) eq(x ^ 0, x, Frm("x ^ 0 != x for x=%r", x)) eq(x & -1, x, Frm("x & -1 != x for x=%r", x)) eq(x | -1, -1, Frm("x | -1 != -1 for x=%r", x)) eq(x ^ -1, ~x, Frm("x ^ -1 != ~x for x=%r", x)) eq(x, ~~x, Frm("x != ~~x for x=%r", x)) eq(x & x, x, Frm("x & x != x for x=%r", x)) eq(x | x, x, Frm("x | x != x for x=%r", x)) eq(x ^ x, 0, Frm("x ^ x != 0 for x=%r", x)) eq(x & ~x, 0, Frm("x & ~x != 0 for x=%r", x)) eq(x | ~x, -1, Frm("x | ~x != -1 for x=%r", x)) eq(x ^ ~x, -1, Frm("x ^ ~x != -1 for x=%r", x)) eq(-x, 1 + ~x, Frm("not -x == 1 + ~x for x=%r", x)) eq(-x, ~(x-1), Frm("not -x == ~(x-1) forx =%r", x)) for n in range(2*SHIFT): p2 = 2 ** n eq(x << n >> n, x, Frm("x << n >> n != x for x=%r, n=%r", (x, n))) eq(x // p2, x >> n, Frm("x // p2 != x >> n for x=%r n=%r p2=%r", (x, n, p2))) eq(x * p2, x << n, Frm("x * p2 != x << n for x=%r n=%r p2=%r", (x, n, p2))) eq(x & -p2, x >> n << n, Frm("not x & -p2 == x >> n << n for x=%r n=%r p2=%r", (x, n, p2))) eq(x & -p2, x & ~(p2 - 1), Frm("not x & -p2 == x & ~(p2 - 1) for x=%r n=%r p2=%r", (x, n, p2))) def check_bitop_identities_2(self, x, y): eq = self.assertEqual eq(x & y, y & x, Frm("x & y != y & x for x=%r, y=%r", (x, y))) eq(x | y, y | x, Frm("x | y != y | x for x=%r, y=%r", (x, y))) eq(x ^ y, y ^ x, Frm("x ^ y != y ^ x for x=%r, y=%r", (x, y))) eq(x ^ y ^ x, y, Frm("x ^ y ^ x != y for x=%r, y=%r", (x, y))) eq(x & y, ~(~x | ~y), Frm("x & y != ~(~x | ~y) for x=%r, y=%r", (x, y))) eq(x | y, ~(~x & ~y), Frm("x | y != ~(~x & ~y) for x=%r, y=%r", (x, y))) eq(x ^ y, (x | y) & ~(x & y), Frm("x ^ y != (x | y) & ~(x & y) for x=%r, y=%r", (x, y))) eq(x ^ y, (x & ~y) | (~x & y), Frm("x ^ y == (x & ~y) | (~x & y) for x=%r, y=%r", (x, y))) eq(x ^ y, (x | y) & (~x | ~y), Frm("x ^ y == (x | y) & (~x | ~y) for x=%r, y=%r", (x, y))) def check_bitop_identities_3(self, x, y, z): eq = self.assertEqual eq((x & y) & z, x & (y & z), Frm("(x & y) & z != x & (y & z) for x=%r, y=%r, z=%r", (x, y, z))) eq((x | y) | z, x | (y | z), Frm("(x | y) | z != x | (y | z) for x=%r, y=%r, z=%r", (x, y, z))) eq((x ^ y) ^ z, x ^ (y ^ z), Frm("(x ^ y) ^ z != x ^ (y ^ z) for x=%r, y=%r, z=%r", (x, y, z))) eq(x & (y | z), (x & y) | (x & z), Frm("x & (y | z) != (x & y) | (x & z) for x=%r, y=%r, z=%r", (x, y, z))) eq(x | (y & z), (x | y) & (x | z), Frm("x | (y & z) != (x | y) & (x | z) for x=%r, y=%r, z=%r", (x, y, z))) def test_bitop_identities(self): for x in special: self.check_bitop_identities_1(x) digits = range(1, MAXDIGITS+1) for lenx in digits: x = self.getran(lenx) self.check_bitop_identities_1(x) for leny in digits: y = self.getran(leny) self.check_bitop_identities_2(x, y) self.check_bitop_identities_3(x, y, self.getran((lenx + leny)//2)) def slow_format(self, x, base): digits = [] sign = 0 if x < 0: sign, x = 1, -x while x: x, r = divmod(x, base) digits.append(int(r)) digits.reverse() digits = digits or [0] return '-'[:sign] + \ {2: '0b', 8: '0o', 10: '', 16: '0x'}[base] + \ "".join("0123456789abcdef"[i] for i in digits) def check_format_1(self, x): for base, mapper in (8, oct), (10, repr), (16, hex): got = mapper(x) expected = self.slow_format(x, base) msg = Frm("%s returned %r 
but expected %r for %r", mapper.__name__, got, expected, x) self.assertEqual(got, expected, msg) self.assertEqual(int(got, 0), x, Frm('int("%s", 0) != %r', got, x)) # str() has to be checked a little differently since there's no # trailing "L" got = str(x) expected = self.slow_format(x, 10) msg = Frm("%s returned %r but expected %r for %r", mapper.__name__, got, expected, x) self.assertEqual(got, expected, msg) def test_format(self): for x in special: self.check_format_1(x) for i in range(10): for lenx in range(1, MAXDIGITS+1): x = self.getran(lenx) self.check_format_1(x) def test_long(self): # Check conversions from string LL = [ ('1' + '0'*20, 10**20), ('1' + '0'*100, 10**100) ] for s, v in LL: for sign in "", "+", "-": for prefix in "", " ", "\t", " \t\t ": ss = prefix + sign + s vv = v if sign == "-" and v is not ValueError: vv = -v try: self.assertEqual(int(ss), vv) except ValueError: pass # trailing L should no longer be accepted... self.assertRaises(ValueError, int, '123L') self.assertRaises(ValueError, int, '123l') self.assertRaises(ValueError, int, '0L') self.assertRaises(ValueError, int, '-37L') self.assertRaises(ValueError, int, '0x32L', 16) self.assertRaises(ValueError, int, '1L', 21) # ... but it's just a normal digit if base >= 22 self.assertEqual(int('1L', 22), 43) # tests with base 0 self.assertEqual(int('000', 0), 0) self.assertEqual(int('0o123', 0), 83) self.assertEqual(int('0x123', 0), 291) self.assertEqual(int('0b100', 0), 4) self.assertEqual(int(' 0O123 ', 0), 83) self.assertEqual(int(' 0X123 ', 0), 291) self.assertEqual(int(' 0B100 ', 0), 4) self.assertEqual(int('0', 0), 0) self.assertEqual(int('+0', 0), 0) self.assertEqual(int('-0', 0), 0) self.assertEqual(int('00', 0), 0) self.assertRaises(ValueError, int, '08', 0) self.assertRaises(ValueError, int, '-012395', 0) # invalid bases invalid_bases = [-909, 2**31-1, 2**31, -2**31, -2**31-1, 2**63-1, 2**63, -2**63, -2**63-1, 2**100, -2**100, ] for base in invalid_bases: self.assertRaises(ValueError, int, '42', base) def test_conversion(self): class JustLong: # test that __long__ no longer used in 3.x def __long__(self): return 42 self.assertRaises(TypeError, int, JustLong()) class LongTrunc: # __long__ should be ignored in 3.x def __long__(self): return 42 def __trunc__(self): return 1729 self.assertEqual(int(LongTrunc()), 1729) def check_float_conversion(self, n): # Check that int -> float conversion behaviour matches # that of the pure Python version above. try: actual = float(n) except OverflowError: actual = 'overflow' try: expected = int_to_float(n) except OverflowError: expected = 'overflow' msg = ("Error in conversion of integer {} to float. 
" "Got {}, expected {}.".format(n, actual, expected)) self.assertEqual(actual, expected, msg) @support.requires_IEEE_754 def test_float_conversion(self): exact_values = [0, 1, 2, 2**53-3, 2**53-2, 2**53-1, 2**53, 2**53+2, 2**54-4, 2**54-2, 2**54, 2**54+4] for x in exact_values: self.assertEqual(float(x), x) self.assertEqual(float(-x), -x) # test round-half-even for x, y in [(1, 0), (2, 2), (3, 4), (4, 4), (5, 4), (6, 6), (7, 8)]: for p in range(15): self.assertEqual(int(float(2**p*(2**53+x))), 2**p*(2**53+y)) for x, y in [(0, 0), (1, 0), (2, 0), (3, 4), (4, 4), (5, 4), (6, 8), (7, 8), (8, 8), (9, 8), (10, 8), (11, 12), (12, 12), (13, 12), (14, 16), (15, 16)]: for p in range(15): self.assertEqual(int(float(2**p*(2**54+x))), 2**p*(2**54+y)) # behaviour near extremes of floating-point range int_dbl_max = int(DBL_MAX) top_power = 2**DBL_MAX_EXP halfway = (int_dbl_max + top_power)//2 self.assertEqual(float(int_dbl_max), DBL_MAX) self.assertEqual(float(int_dbl_max+1), DBL_MAX) self.assertEqual(float(halfway-1), DBL_MAX) self.assertRaises(OverflowError, float, halfway) self.assertEqual(float(1-halfway), -DBL_MAX) self.assertRaises(OverflowError, float, -halfway) self.assertRaises(OverflowError, float, top_power-1) self.assertRaises(OverflowError, float, top_power) self.assertRaises(OverflowError, float, top_power+1) self.assertRaises(OverflowError, float, 2*top_power-1) self.assertRaises(OverflowError, float, 2*top_power) self.assertRaises(OverflowError, float, top_power*top_power) for p in range(100): x = 2**p * (2**53 + 1) + 1 y = 2**p * (2**53 + 2) self.assertEqual(int(float(x)), y) x = 2**p * (2**53 + 1) y = 2**p * 2**53 self.assertEqual(int(float(x)), y) # Compare builtin float conversion with pure Python int_to_float # function above. test_values = [ int_dbl_max-1, int_dbl_max, int_dbl_max+1, halfway-1, halfway, halfway + 1, top_power-1, top_power, top_power+1, 2*top_power-1, 2*top_power, top_power*top_power, ] test_values.extend(exact_values) for p in range(-4, 8): for x in range(-128, 128): test_values.append(2**(p+53) + x) for value in test_values: self.check_float_conversion(value) self.check_float_conversion(-value) def test_float_overflow(self): for x in -2.0, -1.0, 0.0, 1.0, 2.0: self.assertEqual(float(int(x)), x) shuge = '12345' * 120 huge = 1 << 30000 mhuge = -huge namespace = {'huge': huge, 'mhuge': mhuge, 'shuge': shuge, 'math': math} for test in ["float(huge)", "float(mhuge)", "complex(huge)", "complex(mhuge)", "complex(huge, 1)", "complex(mhuge, 1)", "complex(1, huge)", "complex(1, mhuge)", "1. + huge", "huge + 1.", "1. + mhuge", "mhuge + 1.", "1. - huge", "huge - 1.", "1. - mhuge", "mhuge - 1.", "1. * huge", "huge * 1.", "1. * mhuge", "mhuge * 1.", "1. // huge", "huge // 1.", "1. // mhuge", "mhuge // 1.", "1. / huge", "huge / 1.", "1. / mhuge", "mhuge / 1.", "1. ** huge", "huge ** 1.", "1. ** mhuge", "mhuge ** 1.", "math.sin(huge)", "math.sin(mhuge)", "math.sqrt(huge)", "math.sqrt(mhuge)", # should do better # math.floor() of an int returns an int now ##"math.floor(huge)", "math.floor(mhuge)", ]: self.assertRaises(OverflowError, eval, test, namespace) # XXX Perhaps float(shuge) can raise OverflowError on some box? # The comparison should not. 
self.assertNotEqual(float(shuge), int(shuge), "float(shuge) should not equal int(shuge)") def test_logs(self): LOG10E = math.log10(math.e) for exp in list(range(10)) + [100, 1000, 10000]: value = 10 ** exp log10 = math.log10(value) self.assertAlmostEqual(log10, exp) # log10(value) == exp, so log(value) == log10(value)/log10(e) == # exp/LOG10E expected = exp / LOG10E log = math.log(value) self.assertAlmostEqual(log, expected) for bad in -(1 << 10000), -2, 0: self.assertRaises(ValueError, math.log, bad) self.assertRaises(ValueError, math.log10, bad) def test_mixed_compares(self): eq = self.assertEqual # We're mostly concerned with that mixing floats and longs does the # right stuff, even when longs are too large to fit in a float. # The safest way to check the results is to use an entirely different # method, which we do here via a skeletal rational class (which # represents all Python ints, longs and floats exactly). class Rat: def __init__(self, value): if isinstance(value, int): self.n = value self.d = 1 elif isinstance(value, float): # Convert to exact rational equivalent. f, e = math.frexp(abs(value)) assert f == 0 or 0.5 <= f < 1.0 # |value| = f * 2**e exactly # Suck up CHUNK bits at a time; 28 is enough so that we suck # up all bits in 2 iterations for all known binary double- # precision formats, and small enough to fit in an int. CHUNK = 28 top = 0 # invariant: |value| = (top + f) * 2**e exactly while f: f = math.ldexp(f, CHUNK) digit = int(f) assert digit >> CHUNK == 0 top = (top << CHUNK) | digit f -= digit assert 0.0 <= f < 1.0 e -= CHUNK # Now |value| = top * 2**e exactly. if e >= 0: n = top << e d = 1 else: n = top d = 1 << -e if value < 0: n = -n self.n = n self.d = d assert float(n) / float(d) == value else: raise TypeError("can't deal with %r" % value) def _cmp__(self, other): if not isinstance(other, Rat): other = Rat(other) x, y = self.n * other.d, self.d * other.n return (x > y) - (x < y) def __eq__(self, other): return self._cmp__(other) == 0 def __ne__(self, other): return self._cmp__(other) != 0 def __ge__(self, other): return self._cmp__(other) >= 0 def __gt__(self, other): return self._cmp__(other) > 0 def __le__(self, other): return self._cmp__(other) <= 0 def __lt__(self, other): return self._cmp__(other) < 0 cases = [0, 0.001, 0.99, 1.0, 1.5, 1e20, 1e200] # 2**48 is an important boundary in the internals. 2**53 is an # important boundary for IEEE double precision. for t in 2.0**48, 2.0**50, 2.0**53: cases.extend([t - 1.0, t - 0.3, t, t + 0.3, t + 1.0, int(t-1), int(t), int(t+1)]) cases.extend([0, 1, 2, sys.maxsize, float(sys.maxsize)]) # 1 << 20000 should exceed all double formats. int(1e200) is to # check that we get equality with 1e200 above. 
t = int(1e200) cases.extend([0, 1, 2, 1 << 20000, t-1, t, t+1]) cases.extend([-x for x in cases]) for x in cases: Rx = Rat(x) for y in cases: Ry = Rat(y) Rcmp = (Rx > Ry) - (Rx < Ry) xycmp = (x > y) - (x < y) eq(Rcmp, xycmp, Frm("%r %r %d %d", x, y, Rcmp, xycmp)) eq(x == y, Rcmp == 0, Frm("%r == %r %d", x, y, Rcmp)) eq(x != y, Rcmp != 0, Frm("%r != %r %d", x, y, Rcmp)) eq(x < y, Rcmp < 0, Frm("%r < %r %d", x, y, Rcmp)) eq(x <= y, Rcmp <= 0, Frm("%r <= %r %d", x, y, Rcmp)) eq(x > y, Rcmp > 0, Frm("%r > %r %d", x, y, Rcmp)) eq(x >= y, Rcmp >= 0, Frm("%r >= %r %d", x, y, Rcmp)) def test__format__(self): self.assertEqual(format(123456789, 'd'), '123456789') self.assertEqual(format(123456789, 'd'), '123456789') # sign and aligning are interdependent self.assertEqual(format(1, "-"), '1') self.assertEqual(format(-1, "-"), '-1') self.assertEqual(format(1, "-3"), ' 1') self.assertEqual(format(-1, "-3"), ' -1') self.assertEqual(format(1, "+3"), ' +1') self.assertEqual(format(-1, "+3"), ' -1') self.assertEqual(format(1, " 3"), ' 1') self.assertEqual(format(-1, " 3"), ' -1') self.assertEqual(format(1, " "), ' 1') self.assertEqual(format(-1, " "), '-1') # hex self.assertEqual(format(3, "x"), "3") self.assertEqual(format(3, "X"), "3") self.assertEqual(format(1234, "x"), "4d2") self.assertEqual(format(-1234, "x"), "-4d2") self.assertEqual(format(1234, "8x"), " 4d2") self.assertEqual(format(-1234, "8x"), " -4d2") self.assertEqual(format(1234, "x"), "4d2") self.assertEqual(format(-1234, "x"), "-4d2") self.assertEqual(format(-3, "x"), "-3") self.assertEqual(format(-3, "X"), "-3") self.assertEqual(format(int('be', 16), "x"), "be") self.assertEqual(format(int('be', 16), "X"), "BE") self.assertEqual(format(-int('be', 16), "x"), "-be") self.assertEqual(format(-int('be', 16), "X"), "-BE") # octal self.assertEqual(format(3, "b"), "11") self.assertEqual(format(-3, "b"), "-11") self.assertEqual(format(1234, "b"), "10011010010") self.assertEqual(format(-1234, "b"), "-10011010010") self.assertEqual(format(1234, "-b"), "10011010010") self.assertEqual(format(-1234, "-b"), "-10011010010") self.assertEqual(format(1234, " b"), " 10011010010") self.assertEqual(format(-1234, " b"), "-10011010010") self.assertEqual(format(1234, "+b"), "+10011010010") self.assertEqual(format(-1234, "+b"), "-10011010010") # make sure these are errors self.assertRaises(ValueError, format, 3, "1.3") # precision disallowed self.assertRaises(ValueError, format, 3, "+c") # sign not allowed # with 'c' # ensure that only int and float type specifiers work for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] + [chr(x) for x in range(ord('A'), ord('Z')+1)]): if not format_spec in 'bcdoxXeEfFgGn%': self.assertRaises(ValueError, format, 0, format_spec) self.assertRaises(ValueError, format, 1, format_spec) self.assertRaises(ValueError, format, -1, format_spec) self.assertRaises(ValueError, format, 2**100, format_spec) self.assertRaises(ValueError, format, -(2**100), format_spec) # ensure that float type specifiers work; format converts # the int to a float for format_spec in 'eEfFgG%': for value in [0, 1, -1, 100, -100, 1234567890, -1234567890]: self.assertEqual(format(value, format_spec), format(float(value), format_spec)) def test_nan_inf(self): self.assertRaises(OverflowError, int, float('inf')) self.assertRaises(OverflowError, int, float('-inf')) self.assertRaises(ValueError, int, float('nan')) def test_true_division(self): huge = 1 << 40000 mhuge = -huge self.assertEqual(huge / huge, 1.0) self.assertEqual(mhuge / mhuge, 1.0) 
self.assertEqual(huge / mhuge, -1.0) self.assertEqual(mhuge / huge, -1.0) self.assertEqual(1 / huge, 0.0) self.assertEqual(1 / huge, 0.0) self.assertEqual(1 / mhuge, 0.0) self.assertEqual(1 / mhuge, 0.0) self.assertEqual((666 * huge + (huge >> 1)) / huge, 666.5) self.assertEqual((666 * mhuge + (mhuge >> 1)) / mhuge, 666.5) self.assertEqual((666 * huge + (huge >> 1)) / mhuge, -666.5) self.assertEqual((666 * mhuge + (mhuge >> 1)) / huge, -666.5) self.assertEqual(huge / (huge << 1), 0.5) self.assertEqual((1000000 * huge) / huge, 1000000) namespace = {'huge': huge, 'mhuge': mhuge} for overflow in ["float(huge)", "float(mhuge)", "huge / 1", "huge / 2", "huge / -1", "huge / -2", "mhuge / 100", "mhuge / 200"]: self.assertRaises(OverflowError, eval, overflow, namespace) for underflow in ["1 / huge", "2 / huge", "-1 / huge", "-2 / huge", "100 / mhuge", "200 / mhuge"]: result = eval(underflow, namespace) self.assertEqual(result, 0.0, "expected underflow to 0 from %r" % underflow) for zero in ["huge / 0", "mhuge / 0"]: self.assertRaises(ZeroDivisionError, eval, zero, namespace) def check_truediv(self, a, b, skip_small=True): """Verify that the result of a/b is correctly rounded, by comparing it with a pure Python implementation of correctly rounded division. b should be nonzero.""" # skip check for small a and b: in this case, the current # implementation converts the arguments to float directly and # then applies a float division. This can give doubly-rounded # results on x87-using machines (particularly 32-bit Linux). if skip_small and max(abs(a), abs(b)) < 2**DBL_MANT_DIG: return try: # use repr so that we can distinguish between -0.0 and 0.0 expected = repr(truediv(a, b)) except OverflowError: expected = 'overflow' except ZeroDivisionError: expected = 'zerodivision' try: got = repr(a / b) except OverflowError: got = 'overflow' except ZeroDivisionError: got = 'zerodivision' self.assertEqual(expected, got, "Incorrectly rounded division {}/{}: " "expected {}, got {}".format(a, b, expected, got)) @support.requires_IEEE_754 def test_correctly_rounded_true_division(self): # more stringent tests than those above, checking that the # result of true division of ints is always correctly rounded. # This test should probably be considered CPython-specific. # Exercise all the code paths not involving Gb-sized ints. # ... divisions involving zero self.check_truediv(123, 0) self.check_truediv(-456, 0) self.check_truediv(0, 3) self.check_truediv(0, -3) self.check_truediv(0, 0) # ... overflow or underflow by large margin self.check_truediv(671 * 12345 * 2**DBL_MAX_EXP, 12345) self.check_truediv(12345, 345678 * 2**(DBL_MANT_DIG - DBL_MIN_EXP)) # ... a much larger or smaller than b self.check_truediv(12345*2**100, 98765) self.check_truediv(12345*2**30, 98765*7**81) # ... 
a / b near a boundary: one of 1, 2**DBL_MANT_DIG, 2**DBL_MIN_EXP, # 2**DBL_MAX_EXP, 2**(DBL_MIN_EXP-DBL_MANT_DIG) bases = (0, DBL_MANT_DIG, DBL_MIN_EXP, DBL_MAX_EXP, DBL_MIN_EXP - DBL_MANT_DIG) for base in bases: for exp in range(base - 15, base + 15): self.check_truediv(75312*2**max(exp, 0), 69187*2**max(-exp, 0)) self.check_truediv(69187*2**max(exp, 0), 75312*2**max(-exp, 0)) # overflow corner case for m in [1, 2, 7, 17, 12345, 7**100, -1, -2, -5, -23, -67891, -41**50]: for n in range(-10, 10): self.check_truediv(m*DBL_MIN_OVERFLOW + n, m) self.check_truediv(m*DBL_MIN_OVERFLOW + n, -m) # check detection of inexactness in shifting stage for n in range(250): # (2**DBL_MANT_DIG+1)/(2**DBL_MANT_DIG) lies halfway # between two representable floats, and would usually be # rounded down under round-half-to-even. The tiniest of # additions to the numerator should cause it to be rounded # up instead. self.check_truediv((2**DBL_MANT_DIG + 1)*12345*2**200 + 2**n, 2**DBL_MANT_DIG*12345) # 1/2731 is one of the smallest division cases that's subject # to double rounding on IEEE 754 machines working internally with # 64-bit precision. On such machines, the next check would fail, # were it not explicitly skipped in check_truediv. self.check_truediv(1, 2731) # a particularly bad case for the old algorithm: gives an # error of close to 3.5 ulps. self.check_truediv(295147931372582273023, 295147932265116303360) for i in range(1000): self.check_truediv(10**(i+1), 10**i) self.check_truediv(10**i, 10**(i+1)) # test round-half-to-even behaviour, normal result for m in [1, 2, 4, 7, 8, 16, 17, 32, 12345, 7**100, -1, -2, -5, -23, -67891, -41**50]: for n in range(-10, 10): self.check_truediv(2**DBL_MANT_DIG*m + n, m) # test round-half-to-even, subnormal result for n in range(-20, 20): self.check_truediv(n, 2**1076) # largeish random divisions: a/b where |a| <= |b| <= # 2*|a|; |ans| is between 0.5 and 1.0, so error should # always be bounded by 2**-54 with equality possible only # if the least significant bit of q=ans*2**53 is zero. 
for M in [10**10, 10**100, 10**1000]: for i in range(1000): a = random.randrange(1, M) b = random.randrange(a, 2*a+1) self.check_truediv(a, b) self.check_truediv(-a, b) self.check_truediv(a, -b) self.check_truediv(-a, -b) # and some (genuinely) random tests for _ in range(10000): a_bits = random.randrange(1000) b_bits = random.randrange(1, 1000) x = random.randrange(2**a_bits) y = random.randrange(1, 2**b_bits) self.check_truediv(x, y) self.check_truediv(x, -y) self.check_truediv(-x, y) self.check_truediv(-x, -y) def test_small_ints(self): for i in range(-5, 257): self.assertTrue(i is i + 0) self.assertTrue(i is i * 1) self.assertTrue(i is i - 0) self.assertTrue(i is i // 1) self.assertTrue(i is i & -1) self.assertTrue(i is i | 0) self.assertTrue(i is i ^ 0) self.assertTrue(i is ~~i) self.assertTrue(i is i**1) self.assertTrue(i is int(str(i))) self.assertTrue(i is i<<2>>2, str(i)) # corner cases i = 1 << 70 self.assertTrue(i - i is 0) self.assertTrue(0 * i is 0) def test_bit_length(self): tiny = 1e-10 for x in range(-65000, 65000): k = x.bit_length() # Check equivalence with Python version self.assertEqual(k, len(bin(x).lstrip('-0b'))) # Behaviour as specified in the docs if x != 0: self.assertTrue(2**(k-1) <= abs(x) < 2**k) else: self.assertEqual(k, 0) # Alternative definition: x.bit_length() == 1 + floor(log_2(x)) if x != 0: # When x is an exact power of 2, numeric errors can # cause floor(log(x)/log(2)) to be one too small; for # small x this can be fixed by adding a small quantity # to the quotient before taking the floor. self.assertEqual(k, 1 + math.floor( math.log(abs(x))/math.log(2) + tiny)) self.assertEqual((0).bit_length(), 0) self.assertEqual((1).bit_length(), 1) self.assertEqual((-1).bit_length(), 1) self.assertEqual((2).bit_length(), 2) self.assertEqual((-2).bit_length(), 2) for i in [2, 3, 15, 16, 17, 31, 32, 33, 63, 64, 234]: a = 2**i self.assertEqual((a-1).bit_length(), i) self.assertEqual((1-a).bit_length(), i) self.assertEqual((a).bit_length(), i+1) self.assertEqual((-a).bit_length(), i+1) self.assertEqual((a+1).bit_length(), i+1) self.assertEqual((-a-1).bit_length(), i+1) def test_round(self): # check round-half-even algorithm. 
For round to nearest ten; # rounding map is invariant under adding multiples of 20 test_dict = {0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:10, 7:10, 8:10, 9:10, 10:10, 11:10, 12:10, 13:10, 14:10, 15:20, 16:20, 17:20, 18:20, 19:20} for offset in range(-520, 520, 20): for k, v in test_dict.items(): got = round(k+offset, -1) expected = v+offset self.assertEqual(got, expected) self.assertTrue(type(got) is int) # larger second argument self.assertEqual(round(-150, -2), -200) self.assertEqual(round(-149, -2), -100) self.assertEqual(round(-51, -2), -100) self.assertEqual(round(-50, -2), 0) self.assertEqual(round(-49, -2), 0) self.assertEqual(round(-1, -2), 0) self.assertEqual(round(0, -2), 0) self.assertEqual(round(1, -2), 0) self.assertEqual(round(49, -2), 0) self.assertEqual(round(50, -2), 0) self.assertEqual(round(51, -2), 100) self.assertEqual(round(149, -2), 100) self.assertEqual(round(150, -2), 200) self.assertEqual(round(250, -2), 200) self.assertEqual(round(251, -2), 300) self.assertEqual(round(172500, -3), 172000) self.assertEqual(round(173500, -3), 174000) self.assertEqual(round(31415926535, -1), 31415926540) self.assertEqual(round(31415926535, -2), 31415926500) self.assertEqual(round(31415926535, -3), 31415927000) self.assertEqual(round(31415926535, -4), 31415930000) self.assertEqual(round(31415926535, -5), 31415900000) self.assertEqual(round(31415926535, -6), 31416000000) self.assertEqual(round(31415926535, -7), 31420000000) self.assertEqual(round(31415926535, -8), 31400000000) self.assertEqual(round(31415926535, -9), 31000000000) self.assertEqual(round(31415926535, -10), 30000000000) self.assertEqual(round(31415926535, -11), 0) self.assertEqual(round(31415926535, -12), 0) self.assertEqual(round(31415926535, -999), 0) # should get correct results even for huge inputs for k in range(10, 100): got = round(10**k + 324678, -3) expect = 10**k + 325000 self.assertEqual(got, expect) self.assertTrue(type(got) is int) # nonnegative second argument: round(x, n) should just return x for n in range(5): for i in range(100): x = random.randrange(-10000, 10000) got = round(x, n) self.assertEqual(got, x) self.assertTrue(type(got) is int) for huge_n in 2**31-1, 2**31, 2**63-1, 2**63, 2**100, 10**100: self.assertEqual(round(8979323, huge_n), 8979323) # omitted second argument for i in range(100): x = random.randrange(-10000, 10000) got = round(x) self.assertEqual(got, x) self.assertTrue(type(got) is int) # bad second argument bad_exponents = ('brian', 2.0, 0j, None) for e in bad_exponents: self.assertRaises(TypeError, round, 3, e) def test_to_bytes(self): def check(tests, byteorder, signed=False): for test, expected in tests.items(): try: self.assertEqual( test.to_bytes(len(expected), byteorder, signed=signed), expected) except Exception as err: raise AssertionError( "failed to convert {0} with byteorder={1} and signed={2}" .format(test, byteorder, signed)) from err # Convert integers to signed big-endian byte arrays. tests1 = { 0: b'\x00', 1: b'\x01', -1: b'\xff', -127: b'\x81', -128: b'\x80', -129: b'\xff\x7f', 127: b'\x7f', 129: b'\x00\x81', -255: b'\xff\x01', -256: b'\xff\x00', 255: b'\x00\xff', 256: b'\x01\x00', 32767: b'\x7f\xff', -32768: b'\xff\x80\x00', 65535: b'\x00\xff\xff', -65536: b'\xff\x00\x00', -8388608: b'\x80\x00\x00' } check(tests1, 'big', signed=True) # Convert integers to signed little-endian byte arrays. 
tests2 = { 0: b'\x00', 1: b'\x01', -1: b'\xff', -127: b'\x81', -128: b'\x80', -129: b'\x7f\xff', 127: b'\x7f', 129: b'\x81\x00', -255: b'\x01\xff', -256: b'\x00\xff', 255: b'\xff\x00', 256: b'\x00\x01', 32767: b'\xff\x7f', -32768: b'\x00\x80', 65535: b'\xff\xff\x00', -65536: b'\x00\x00\xff', -8388608: b'\x00\x00\x80' } check(tests2, 'little', signed=True) # Convert integers to unsigned big-endian byte arrays. tests3 = { 0: b'\x00', 1: b'\x01', 127: b'\x7f', 128: b'\x80', 255: b'\xff', 256: b'\x01\x00', 32767: b'\x7f\xff', 32768: b'\x80\x00', 65535: b'\xff\xff', 65536: b'\x01\x00\x00' } check(tests3, 'big', signed=False) # Convert integers to unsigned little-endian byte arrays. tests4 = { 0: b'\x00', 1: b'\x01', 127: b'\x7f', 128: b'\x80', 255: b'\xff', 256: b'\x00\x01', 32767: b'\xff\x7f', 32768: b'\x00\x80', 65535: b'\xff\xff', 65536: b'\x00\x00\x01' } check(tests4, 'little', signed=False) self.assertRaises(OverflowError, (256).to_bytes, 1, 'big', signed=False) self.assertRaises(OverflowError, (256).to_bytes, 1, 'big', signed=True) self.assertRaises(OverflowError, (256).to_bytes, 1, 'little', signed=False) self.assertRaises(OverflowError, (256).to_bytes, 1, 'little', signed=True) self.assertRaises(OverflowError, (-1).to_bytes, 2, 'big', signed=False), self.assertRaises(OverflowError, (-1).to_bytes, 2, 'little', signed=False) self.assertEqual((0).to_bytes(0, 'big'), b'') self.assertEqual((1).to_bytes(5, 'big'), b'\x00\x00\x00\x00\x01') self.assertEqual((0).to_bytes(5, 'big'), b'\x00\x00\x00\x00\x00') self.assertEqual((-1).to_bytes(5, 'big', signed=True), b'\xff\xff\xff\xff\xff') self.assertRaises(OverflowError, (1).to_bytes, 0, 'big') def test_from_bytes(self): def check(tests, byteorder, signed=False): for test, expected in tests.items(): try: self.assertEqual( int.from_bytes(test, byteorder, signed=signed), expected) except Exception as err: raise AssertionError( "failed to convert {0} with byteorder={1!r} and signed={2}" .format(test, byteorder, signed)) from err # Convert signed big-endian byte arrays to integers. tests1 = { b'': 0, b'\x00': 0, b'\x00\x00': 0, b'\x01': 1, b'\x00\x01': 1, b'\xff': -1, b'\xff\xff': -1, b'\x81': -127, b'\x80': -128, b'\xff\x7f': -129, b'\x7f': 127, b'\x00\x81': 129, b'\xff\x01': -255, b'\xff\x00': -256, b'\x00\xff': 255, b'\x01\x00': 256, b'\x7f\xff': 32767, b'\x80\x00': -32768, b'\x00\xff\xff': 65535, b'\xff\x00\x00': -65536, b'\x80\x00\x00': -8388608 } check(tests1, 'big', signed=True) # Convert signed little-endian byte arrays to integers. tests2 = { b'': 0, b'\x00': 0, b'\x00\x00': 0, b'\x01': 1, b'\x00\x01': 256, b'\xff': -1, b'\xff\xff': -1, b'\x81': -127, b'\x80': -128, b'\x7f\xff': -129, b'\x7f': 127, b'\x81\x00': 129, b'\x01\xff': -255, b'\x00\xff': -256, b'\xff\x00': 255, b'\x00\x01': 256, b'\xff\x7f': 32767, b'\x00\x80': -32768, b'\xff\xff\x00': 65535, b'\x00\x00\xff': -65536, b'\x00\x00\x80': -8388608 } check(tests2, 'little', signed=True) # Convert unsigned big-endian byte arrays to integers. tests3 = { b'': 0, b'\x00': 0, b'\x01': 1, b'\x7f': 127, b'\x80': 128, b'\xff': 255, b'\x01\x00': 256, b'\x7f\xff': 32767, b'\x80\x00': 32768, b'\xff\xff': 65535, b'\x01\x00\x00': 65536, } check(tests3, 'big', signed=False) # Convert integers to unsigned little-endian byte arrays. 
tests4 = { b'': 0, b'\x00': 0, b'\x01': 1, b'\x7f': 127, b'\x80': 128, b'\xff': 255, b'\x00\x01': 256, b'\xff\x7f': 32767, b'\x00\x80': 32768, b'\xff\xff': 65535, b'\x00\x00\x01': 65536, } check(tests4, 'little', signed=False) class myint(int): pass self.assertTrue(type(myint.from_bytes(b'\x00', 'big')) is myint) self.assertEqual(myint.from_bytes(b'\x01', 'big'), 1) self.assertTrue( type(myint.from_bytes(b'\x00', 'big', signed=False)) is myint) self.assertEqual(myint.from_bytes(b'\x01', 'big', signed=False), 1) self.assertTrue(type(myint.from_bytes(b'\x00', 'little')) is myint) self.assertEqual(myint.from_bytes(b'\x01', 'little'), 1) self.assertTrue(type(myint.from_bytes( b'\x00', 'little', signed=False)) is myint) self.assertEqual(myint.from_bytes(b'\x01', 'little', signed=False), 1) self.assertEqual( int.from_bytes([255, 0, 0], 'big', signed=True), -65536) self.assertEqual( int.from_bytes((255, 0, 0), 'big', signed=True), -65536) self.assertEqual(int.from_bytes( bytearray(b'\xff\x00\x00'), 'big', signed=True), -65536) self.assertEqual(int.from_bytes( bytearray(b'\xff\x00\x00'), 'big', signed=True), -65536) self.assertEqual(int.from_bytes( array.array('B', b'\xff\x00\x00'), 'big', signed=True), -65536) self.assertEqual(int.from_bytes( memoryview(b'\xff\x00\x00'), 'big', signed=True), -65536) self.assertRaises(ValueError, int.from_bytes, [256], 'big') self.assertRaises(ValueError, int.from_bytes, [0], 'big\x00') self.assertRaises(ValueError, int.from_bytes, [0], 'little\x00') self.assertRaises(TypeError, int.from_bytes, "", 'big') self.assertRaises(TypeError, int.from_bytes, "\x00", 'big') self.assertRaises(TypeError, int.from_bytes, 0, 'big') self.assertRaises(TypeError, int.from_bytes, 0, 'big', True) self.assertRaises(TypeError, myint.from_bytes, "", 'big') self.assertRaises(TypeError, myint.from_bytes, "\x00", 'big') self.assertRaises(TypeError, myint.from_bytes, 0, 'big') self.assertRaises(TypeError, int.from_bytes, 0, 'big', True) def test_access_to_nonexistent_digit_0(self): # http://bugs.python.org/issue14630: A bug in _PyLong_Copy meant that # ob_digit[0] was being incorrectly accessed for instances of a # subclass of int, with value 0. class Integer(int): def __new__(cls, value=0): self = int.__new__(cls, value) self.foo = 'foo' return self integers = [Integer(0) for i in range(1000)] for n in map(int, integers): self.assertEqual(n, 0) def test_main(): support.run_unittest(LongTest) if __name__ == "__main__": test_main()
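

# Illustrative sketch (not part of the original test module): a minimal sanity
# check that the pure-Python reference helpers defined above agree with the
# builtin conversions on large operands. Like the tests themselves, this
# assumes an IEEE-754 double format (see support.requires_IEEE_754).
def _demo_reference_helpers():
    # 2**53 + 1 lies exactly halfway between two representable doubles and
    # must round down to 2**53 under round-half-to-even.
    assert int_to_float(2**53 + 1) == float(2**53 + 1) == float(2**53)
    # Correctly-rounded true division on operands too large for exact floats.
    assert truediv(2**70 + 3, 7) == (2**70 + 3) / 7
    assert truediv(10**23, 10**22 + 1) == 10**23 / (10**22 + 1)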
py
b417c0d9fd6bda0744470a052bae9619ec0b049f
from alembic import op import sqlalchemy as sa """empty message Revision ID: eaf653f36fc8 Revises: f32ba62f1e77 Create Date: 2017-09-18 18:27:49.542019 """ # revision identifiers, used by Alembic. revision = 'eaf653f36fc8' down_revision = 'f32ba62f1e77' def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column( 'communication_requests', sa.Column( 'lr_uuid', sa.Text(), nullable=False)) op.add_column( 'communication_requests', sa.Column( 'notify_post_qb_start', sa.Text(), nullable=False)) op.add_column( 'communication_requests', sa.Column( 'qb_iteration', sa.Integer(), nullable=True)) op.drop_constraint( u'_communication_request_qb_days', 'communication_requests', type_='unique') op.create_unique_constraint( '_communication_request_qb_days', 'communication_requests', [ 'questionnaire_bank_id', 'notify_post_qb_start', 'qb_iteration']) op.drop_column('communication_requests', 'notify_days_after_event') # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column( 'communication_requests', sa.Column( 'notify_days_after_event', sa.INTEGER(), autoincrement=False, nullable=False)) op.drop_constraint( '_communication_request_qb_days', 'communication_requests', type_='unique') op.create_unique_constraint( u'_communication_request_qb_days', 'communication_requests', [ 'questionnaire_bank_id', 'notify_days_after_event']) op.drop_column('communication_requests', 'qb_iteration') op.drop_column('communication_requests', 'notify_post_qb_start') op.drop_column('communication_requests', 'lr_uuid') # ### end Alembic commands ###
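

# Illustrative note (not part of the generated migration): this revision is
# applied and rolled back with Alembic's command line (or whatever wrapper the
# project uses around it), referencing the identifiers declared above, e.g.
#
#     alembic upgrade eaf653f36fc8
#     alembic downgrade f32ba62f1e77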
py
b417c128913dcc7e615107f83555a5ec8ab109fe
"""Support for Minut Point binary sensors.""" import logging from homeassistant.components.binary_sensor import DOMAIN, BinarySensorDevice from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from . import MinutPointEntity from .const import DOMAIN as POINT_DOMAIN, POINT_DISCOVERY_NEW, SIGNAL_WEBHOOK _LOGGER = logging.getLogger(__name__) EVENTS = { "battery": ("battery_low", ""), # On means low, Off means normal "button_press": ( # On means the button was pressed, Off means normal "short_button_press", "", ), "cold": ( # On means cold, Off means normal "temperature_low", "temperature_risen_normal", ), "connectivity": ( # On means connected, Off means disconnected "device_online", "device_offline", ), "dry": ( # On means too dry, Off means normal "humidity_low", "humidity_risen_normal", ), "heat": ( # On means hot, Off means normal "temperature_high", "temperature_dropped_normal", ), "moisture": ( # On means wet, Off means dry "humidity_high", "humidity_dropped_normal", ), "sound": ( # On means sound detected, Off means no sound (clear) "avg_sound_high", "sound_level_dropped_normal", ), "tamper": ("tamper", ""), # On means the point was removed or attached } async def async_setup_entry(hass, config_entry, async_add_entities): """Set up a Point's binary sensors based on a config entry.""" async def async_discover_sensor(device_id): """Discover and add a discovered sensor.""" client = hass.data[POINT_DOMAIN][config_entry.entry_id] async_add_entities( ( MinutPointBinarySensor(client, device_id, device_class) for device_class in EVENTS ), True, ) async_dispatcher_connect( hass, POINT_DISCOVERY_NEW.format(DOMAIN, POINT_DOMAIN), async_discover_sensor ) class MinutPointBinarySensor(MinutPointEntity, BinarySensorDevice): """The platform class required by Home Assistant.""" def __init__(self, point_client, device_id, device_class): """Initialize the binary sensor.""" super().__init__(point_client, device_id, device_class) self._async_unsub_hook_dispatcher_connect = None self._events = EVENTS[device_class] self._is_on = None async def async_added_to_hass(self): """Call when entity is added to HOme Assistant.""" await super().async_added_to_hass() self._async_unsub_hook_dispatcher_connect = async_dispatcher_connect( self.hass, SIGNAL_WEBHOOK, self._webhook_event ) async def async_will_remove_from_hass(self): """Disconnect dispatcher listener when removed.""" await super().async_will_remove_from_hass() if self._async_unsub_hook_dispatcher_connect: self._async_unsub_hook_dispatcher_connect() async def _update_callback(self): """Update the value of the sensor.""" if not self.is_updated: return if self._events[0] in self.device.ongoing_events: self._is_on = True else: self._is_on = None self.async_write_ha_state() @callback def _webhook_event(self, data, webhook): """Process new event from the webhook.""" if self.device.webhook != webhook: return _type = data.get("event", {}).get("type") _device_id = data.get("event", {}).get("device_id") if _type not in self._events or _device_id != self.device.device_id: return _LOGGER.debug("Received webhook: %s", _type) if _type == self._events[0]: self._is_on = True if _type == self._events[1]: self._is_on = None self.async_write_ha_state() @property def is_on(self): """Return the state of the binary sensor.""" if self.device_class == "connectivity": # connectivity is the other way around. return not self._is_on return self._is_on
py
b417c1f004008d3013b81820ca67641d2fe29ded
class Solution1:
    """Binary search on the fraction value.

    @param A: a sorted list of integers (1 and primes)
    @param K: an integer
    @return: return two integers, the numerator and denominator of the K-th
             smallest fraction A[i] / A[j] with i < j
    """
    def kthSmallestPrimeFraction(self, A, K):
        from fractions import Fraction

        def under(x):
            # Two-pointer sweep: count fractions A[left] / A[right] strictly
            # below x and remember the largest such fraction.
            count = res = left = 0
            for right in range(1, len(A)):
                while A[left] < x * A[right]:
                    left += 1
                count += left
                if left > 0:
                    res = max(res, Fraction(A[left - 1], A[right]))
            return count, res

        # Shrink [low, high] until the largest fraction below the threshold
        # is the K-th smallest overall.
        low, high = 0.0, 1.0
        while high - low > 1e-8:
            mid = (low + high) / 2
            count, res = under(mid)
            if count < K:
                low = mid
            else:
                ans = res
                high = mid
        return ans.numerator, ans.denominator


class Solution2:
    """Heap-based selection, popping the K - 1 smallest fractions.

    @param A: a sorted list of integers (1 and primes)
    @param K: an integer
    @return: return two integers, the numerator and denominator of the K-th
             smallest fraction A[i] / A[j] with i < j
    """
    def kthSmallestPrimeFraction(self, A, K):
        import heapq

        # Seed the heap with the smallest fraction for each denominator: A[0] / A[q].
        pq = [(A[0] / A[i], 0, i) for i in range(len(A) - 1, 0, -1)]
        for _ in range(K - 1):
            frac, p, q = heapq.heappop(pq)
            p += 1
            if p < q:
                heapq.heappush(pq, (A[p] / A[q], p, q))
        # After K - 1 pops, the K-th smallest fraction sits at the top of the heap.
        return A[pq[0][1]], A[pq[0][2]]
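

# Illustrative usage sketch (not part of the original solutions): both
# approaches agree on a small example. For A = [1, 2, 3, 5] the fractions
# A[i] / A[j] with i < j are, in increasing order,
# 1/5, 1/3, 2/5, 1/2, 3/5, 2/3, so the 3rd smallest is 2/5.
if __name__ == "__main__":
    A = [1, 2, 3, 5]
    K = 3
    print(Solution1().kthSmallestPrimeFraction(A, K))  # (2, 5)
    print(Solution2().kthSmallestPrimeFraction(A, K))  # (2, 5)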
py
b417c2157275324e7d55a3df3796781de8c93c2b
import matplotlib.pyplot as plt
import numpy as np

mu = 100
sigma = 15
x = mu + sigma * np.random.randn(200)
num_bins = 25
plt.figure(figsize=(9, 6), dpi=100)
n, bins, patches = plt.hist(x, num_bins, color='w', edgecolor='k', hatch=r'ooo', density=1, label='频率')
y = (1 / (np.sqrt(2 * np.pi) * sigma)) * np.exp(-0.5 * (1 / sigma * (bins - mu)) ** 2)
plt.plot(bins, y, '--', label='概率密度函数')
plt.rcParams['font.sans-serif'] = ['SimHei']  # use a CJK-capable font so the Chinese labels render
plt.rcParams['axes.unicode_minus'] = False  # display the minus sign correctly with that font
plt.xlabel('聪明度')
plt.ylabel('概率密度')
plt.title(r'IQ直方图: $\mu=100$,$\sigma=15$')
plt.legend()
plt.show()
py
b417c367dcfc2507b78d7baeae950f4ccfedbb09
"""Runtime helpers""" # pylint: disable=invalid-name,too-many-arguments,too-many-instance-attributes import os from os.path import join import numpy as np def convert_arg(arg): """Convert string to type""" # pylint: disable=broad-except if arg.lower() == 'none': arg = None elif arg.lower() == 'false': arg = False elif arg.lower() == 'true': arg = True elif '.' in arg: try: arg = float(arg) except Exception: pass else: try: arg = int(arg) except Exception: pass return arg def build_kwargs(args): """Build a kwargs dict from a list of key-value pairs""" kwargs = {} if not args: return kwargs assert len(args) % 2 == 0, "argument list %r does not appear to have key, value pairs" % args while args: k = args.pop(0) v = args.pop(0) if ':' in v: v = tuple(convert_arg(a) for a in v.split(':')) else: v = convert_arg(v) kwargs[str(k)] = v return kwargs def compute_ncorrect(p, y): """Accuracy over a tensor of predictions""" _, p = p.max(1) correct = (p == y).sum().item() return correct def compute_auc(x): """Compute AUC (composite trapezoidal rule)""" T = len(x) v = 0 for i in range(1, T): v += ((x[i] - x[i-1]) / 2 + x[i-1]) / T return v def unlink(path): """Unlink logfiles""" for f in os.listdir(path): f = os.path.join(path, f) if f.endswith('.log'): os.unlink(f) ############################################################################### def write(step, meta_loss, loss, accuracy, losses, accuracies, f): """Write results data to file""" lstr = "" for l in losses: lstr += "{:f};".format(l) astr = "" for a in accuracies: astr += "{:f};".format(a) msg = "{:d},{:f},{:f},{:f},{:s},{:s}\n".format( step, meta_loss, loss, accuracy, lstr, astr) with open(f, 'a') as fo: fo.write(msg) def log_status(results, idx, time): """Print status""" #pylint: disable=unbalanced-tuple-unpacking,too-many-star-expressions print("[{:9s}] time:{:3.3f} " "train: outer={:0.4f} inner={:0.4f} acc={:2.2f} " "val: outer={:0.4f} inner={:0.4f} acc={:2.2f}".format( str(idx), time, results.train_meta_loss, results.train_loss, results.train_acc, results.val_meta_loss, results.val_loss, results.val_acc) ) def write_train_res(results, step, log_dir): """Write results from a meta-train step to file""" write(step, results.train_meta_loss, results.train_loss, results.train_acc, results.train_losses, results.train_accs, join(log_dir, 'results_train_train.log')) write(step, results.val_meta_loss, results.val_loss, results.val_acc, results.val_losses, results.val_accs, join(log_dir, 'results_train_val.log')) def write_val_res(results, step, case, log_dir): """Write task results data to file""" for task_id, res in enumerate(results): write(step, res.train_meta_loss, res.train_loss, res.train_acc, res.train_losses, res.train_accs, join(log_dir, 'results_{}_{}_train.log'.format(task_id, case))) write(step, res.val_meta_loss, res.val_loss, res.val_acc, res.val_losses, res.val_accs, join(log_dir, 'results_{}_{}_val.log'.format(task_id, case))) ############################################################################### class Res: """Results container Attributes: losses (list): list of losses over batch iterator accs (list): list of accs over batch iterator meta_loss (float): auc over losses loss (float): mean loss over losses. Call ``aggregate`` to compute. acc (float): mean acc over accs. Call ``aggregate`` to compute. 
""" def __init__(self): self.losses = [] self.accs = [] self.ncorrects = [] self.nsamples = [] self.meta_loss = 0 self.loss = 0 self.acc = 0 def log(self, loss, pred, target): """Log loss and accuracies""" nsamples = target.size(0) ncorr = compute_ncorrect(pred.data, target.data) accuracy = ncorr / target.size(0) self.losses.append(loss) self.ncorrects.append(ncorr) self.nsamples.append(nsamples) self.accs.append(accuracy) def aggregate(self): """Compute aggregate statistics""" self.accs = np.array(self.accs) self.losses = np.array(self.losses) self.nsamples = np.array(self.nsamples) self.ncorrects = np.array(self.ncorrects) self.loss = self.losses.mean() self.meta_loss = compute_auc(self.losses) self.acc = self.ncorrects.sum() / self.nsamples.sum() class AggRes: """Results aggregation container Aggregates results over a mini-batch of tasks """ def __init__(self, results): self.train_res, self.val_res = zip(*results) self.aggregate_train() self.aggregate_val() def aggregate_train(self): """Aggregate train results""" (self.train_meta_loss, self.train_loss, self.train_acc, self.train_losses, self.train_accs) = self.aggregate(self.train_res) def aggregate_val(self): """Aggregate val results""" (self.val_meta_loss, self.val_loss, self.val_acc, self.val_losses, self.val_accs) = self.aggregate(self.val_res) @staticmethod def aggregate(results): """Aggregate losses and accs across Res instances""" agg_losses = np.stack([res.losses for res in results], axis=1) agg_ncorrects = np.stack([res.ncorrects for res in results], axis=1) agg_nsamples = np.stack([res.nsamples for res in results], axis=1) mean_loss = agg_losses.mean() mean_losses = agg_losses.mean(axis=1) mean_meta_loss = compute_auc(mean_losses) mean_acc = agg_ncorrects.sum() / agg_nsamples.sum() mean_accs = agg_ncorrects.sum(axis=1) / agg_nsamples.sum(axis=1) return mean_meta_loss, mean_loss, mean_acc, mean_losses, mean_accs def consolidate(agg_res): """Merge a list of agg_res into one agg_res""" results = [sum((r.train_res, r.val_res), ()) for r in agg_res] return AggRes(results)
py
b417c5209fac59713df6826979d3c224fbcafec7
#!/usr/bin/env python import json import unittest import app import app_config class IndexTestCase(unittest.TestCase): """ Test the index page. """ def setUp(self): app.app.config['TESTING'] = True self.client = app.app.test_client() def test_index_exists(self): response = self.client.get('/') assert app_config.PROJECT_SLUG in response.data class AppConfigTestCase(unittest.TestCase): """ Testing dynamic conversion of Python app_config into Javascript. """ def setUp(self): app.app.config['TESTING'] = True self.client = app.app.test_client() def parse_data(self, response): """ Trim leading variable declaration and load JSON data. """ return json.loads(response.data[20:]) def test_app_config_staging(self): response = self.client.get('/js/app_config.js') data = self.parse_data(response) assert data['DEBUG'] == True def test_app_config_production(self): app_config.configure_targets('production') response = self.client.get('/js/app_config.js') data = self.parse_data(response) assert data['DEBUG'] == False app_config.configure_targets('staging') if __name__ == '__main__': unittest.main()
py
b417c5e9cbc2591207e26db8ea642d256fbcea55
"""This module exports the PythonLinter subclass of Linter.""" from functools import lru_cache import logging import os import re import sublime from .. import linter, util logger = logging.getLogger(__name__) class PythonLinter(linter.Linter): """ This Linter subclass provides Python-specific functionality. Linters that check python should inherit from this class. By doing so, they automatically get the following features: - Automatic discovery of virtual environments using `pipenv` - Support for a "python" setting. - Support for a "executable" setting. """ def context_sensitive_executable_path(self, cmd): """Try to find an executable for a given cmd.""" # The default implementation will look for a user defined `executable` # setting. success, executable = super().context_sensitive_executable_path(cmd) if success: return success, executable settings = self.get_view_settings() # `python` can be number or a string. If it is a string it should # point to a python environment, NOT a python binary. python = settings.get('python', None) logger.info( "{}: wanted python is '{}'".format(self.name, python) ) cmd_name = cmd[0] if isinstance(cmd, (list, tuple)) else cmd if python: python = str(python) if VERSION_RE.match(python): python_bin = find_python_version(python) if python_bin is None: logger.error( "{} deactivated, cannot locate '{}' " "for given python '{}'" .format(self.name, cmd_name, python) ) # Do not fallback, user specified something we didn't find return True, None logger.info( "{}: Using '{}' for given python '{}'" .format(self.name, python_bin, python) ) return True, [python_bin, '-m', cmd_name] else: if not os.path.exists(python): logger.error( "{} deactivated, cannot locate '{}'" .format(self.name, python) ) # Do not fallback, user specified something we didn't find return True, None return True, [python, '-m', cmd_name] # If we're here the user didn't specify anything. This is the default # experience. So we kick in some 'magic' cwd = self.get_working_dir(settings) executable = ask_pipenv(cmd_name, cwd) if executable: logger.info( "{}: Using {} according to 'pipenv'" .format(self.name, executable) ) return True, executable # Should we try a `pyenv which` as well? Problem: I don't have it, # it's MacOS only. logger.info( "{}: trying to use globally installed {}" .format(self.name, cmd_name) ) # fallback, similiar to a which(cmd) executable = util.which(cmd_name) if executable is None: logger.warning( "cannot locate '{}'. Fill in the 'python' or " "'executable' setting." 
.format(self.name) ) return True, executable def find_python_version(version): # type: (str) -> str """Return python binaries on PATH matching a specific version.""" requested_version = extract_major_minor_version(version) for python in util.find_executables('python'): python_version = get_python_version(python) if version_fulfills_request(python_version, requested_version): return python return None def find_script_by_python_env(python_env_path, script): """Return full path to a script, given a python environment base dir.""" posix = sublime.platform() in ('osx', 'linux') if posix: full_path = os.path.join(python_env_path, 'bin', script) else: full_path = os.path.join(python_env_path, 'Scripts', script + '.exe') logger.info("trying {}".format(full_path)) if os.path.exists(full_path): return full_path return None def ask_pipenv(linter_name, cwd): """Ask pipenv for a virtual environment and maybe resolve the linter.""" # Some pre-checks bc `pipenv` is super slow if cwd is None: return pipfile = os.path.join(cwd, 'Pipfile') if not os.path.exists(pipfile): return try: venv = ask_pipenv_for_venv(linter_name, cwd) except Exception: return return find_script_by_python_env(venv, linter_name) @lru_cache(maxsize=None) def ask_pipenv_for_venv(linter_name, cwd): cmd = ['pipenv', '--venv'] return util.check_output(cmd, cwd=cwd).strip().split('\n')[-1] VERSION_RE = re.compile(r'(?P<major>\d+)(?:\.(?P<minor>\d+))?') @lru_cache(maxsize=None) def get_python_version(path): """Return a dict with the major/minor version of the python at path.""" try: output = util.check_output([path, '-V']) except Exception: output = '' return extract_major_minor_version(output.split(' ')[-1]) def extract_major_minor_version(version): """Extract and return major and minor versions from a string version.""" match = VERSION_RE.match(version) if match: return {key: int(value) if value is not None else None for key, value in match.groupdict().items()} else: return {'major': None, 'minor': None} def version_fulfills_request(available_version, requested_version): """ Return whether available_version fulfills requested_version. Both are dicts with 'major' and 'minor' items. """ # No requested major version is fulfilled by anything if requested_version['major'] is None: return True # If major version is requested, that at least must match if requested_version['major'] != available_version['major']: return False # Major version matches, if no requested minor version it's a match if requested_version['minor'] is None: return True # If a minor version is requested, the available minor version must be >= return ( available_version['minor'] is not None and available_version['minor'] >= requested_version['minor'] )
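

# Illustrative sketch (not part of the plugin): the version helpers above
# implement "the major version must match, and the available minor version
# must be at least the requested one".
def _demo_version_matching():
    available = extract_major_minor_version('3.6')
    assert version_fulfills_request(available, extract_major_minor_version('3'))
    assert version_fulfills_request(available, extract_major_minor_version('3.5'))
    assert not version_fulfills_request(available, extract_major_minor_version('3.7'))
    assert not version_fulfills_request(available, extract_major_minor_version('2.7'))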
py
b417c878d8e1279e4dd13fef48d61ce7e51d850b
# Copyright (c) 2012-2013, 2015 ARM Limited # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Copyright (c) 2005-2007 The Regents of The University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Nathan Binkert # Andreas Hansson from m5.params import * from m5.proxy import * from MemObject import MemObject from Prefetcher import BasePrefetcher from Tags import * class BaseCache(MemObject): type = 'BaseCache' abstract = True cxx_header = "mem/cache/base.hh" size = Param.MemorySize("Capacity") assoc = Param.Unsigned("Associativity") hit_latency = Param.Cycles("Hit latency") response_latency = Param.Cycles("Latency for the return path on a miss"); #AMHM Start write_latency = Param.Cycles("The write latency for this cache") outdir = Param.String("gem5 output directory") address_lookup_latency = Param.Cycles("Approx table to TLB address lookup latency for this cache") sttmram = Param.String("STT-MRAM parameters and config files path") #AMHM End max_miss_count = Param.Counter(0, "Number of misses to handle before calling exit") mshrs = Param.Unsigned("Number of MSHRs (max outstanding requests)") demand_mshr_reserve = Param.Unsigned(1, "MSHRs reserved for demand access") tgts_per_mshr = Param.Unsigned("Max number of accesses per MSHR") write_buffers = Param.Unsigned(8, "Number of write buffers") is_read_only = Param.Bool(False, "Is this cache read only (e.g. 
inst)") prefetcher = Param.BasePrefetcher(NULL,"Prefetcher attached to cache") prefetch_on_access = Param.Bool(False, "Notify the hardware prefetcher on every access (not just misses)") tags = Param.BaseTags(LRU(), "Tag store (replacement policy)") sequential_access = Param.Bool(False, "Whether to access tags and data sequentially") cpu_side = SlavePort("Upstream port closer to the CPU and/or device") mem_side = MasterPort("Downstream port closer to memory") addr_ranges = VectorParam.AddrRange([AllMemory], "Address range for the CPU-side port (to allow striping)") system = Param.System(Parent.any, "System we belong to") # Enum for cache clusivity, currently mostly inclusive or mostly # exclusive. class Clusivity(Enum): vals = ['mostly_incl', 'mostly_excl'] class Cache(BaseCache): type = 'Cache' cxx_header = 'mem/cache/cache.hh' # Control whether this cache should be mostly inclusive or mostly # exclusive with respect to upstream caches. The behaviour on a # fill is determined accordingly. For a mostly inclusive cache, # blocks are allocated on all fill operations. Thus, L1 caches # should be set as mostly inclusive even if they have no upstream # caches. In the case of a mostly exclusive cache, fills are not # allocating unless they came directly from a non-caching source, # e.g. a table walker. Additionally, on a hit from an upstream # cache a line is dropped for a mostly exclusive cache. clusivity = Param.Clusivity('mostly_incl', "Clusivity with upstream cache") # Determine if this cache sends out writebacks for clean lines, or # simply clean evicts. In cases where a downstream cache is mostly # exclusive with respect to this cache (acting as a victim cache), # the clean writebacks are essential for performance. In general # this should be set to True for anything but the last-level # cache. writeback_clean = Param.Bool(False, "Writeback clean lines")
py
b417c9ca0dd84277ae18b61bfb6fb9cbf569358e
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Libevent(AutotoolsPackage): """The libevent API provides a mechanism to execute a callback function when a specific event occurs on a file descriptor or after a timeout has been reached. Furthermore, libevent also support callbacks due to signals or regular timeouts. """ homepage = "http://libevent.org" url = "https://github.com/libevent/libevent/releases/download/release-2.1.8-stable/libevent-2.1.8-stable.tar.gz" list_url = "http://libevent.org/old-releases.html" version('2.1.8', '965cc5a8bb46ce4199a47e9b2c9e1cae3b137e8356ffdad6d94d3b9069b71dc2') version('2.0.22', '71c2c49f0adadacfdbe6332a372c38cf9c8b7895bb73dabeaa53cdcc1d4e1fa3') version('2.0.21', 'b2405cc9ebf264aa47ff615d9de527a2') version('2.0.20', '94270cdee32c0cd0aa9f4ee6ede27e8e') version('2.0.19', '91111579769f46055b0a438f5cc59572') version('2.0.18', 'aa1ce9bc0dee7b8084f6855765f2c86a') version('2.0.17', 'dad64aaaaff16b5fbec25160c06fee9a') version('2.0.16', '899efcffccdb3d5111419df76e7dc8df') version('2.0.15', '2643abe7ba242df15c08b2cc14ec8759') version('2.0.14', 'cac0f379da35d3b98f83ac16fcfe1df4') version('2.0.13', 'af786b4b3f790c9d3279792edf7867fc') version('2.0.12', '42986228baf95e325778ed328a93e070') variant('openssl', default=True, description="Build with encryption enabled at the libevent level.") # Versions before 2.1 do not build with OpenSSL 1.1 depends_on('openssl@:1.0', when='@:2.0.99+openssl') depends_on('openssl', when='+openssl') def url_for_version(self, version): if version >= Version('2.0.22'): url = "https://github.com/libevent/libevent/releases/download/release-{0}-stable/libevent-{0}-stable.tar.gz" else: url = "https://github.com/downloads/libevent/libevent/libevent-{0}-stable.tar.gz" return url.format(version) def configure_args(self): spec = self.spec configure_args = [] if '+openssl' in spec: configure_args.append('--enable-openssl') else: configure_args.append('--disable-openssl') return configure_args
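

# Hypothetical command-line usage of this recipe, for illustration only:
#
#   spack install libevent              # default variant: +openssl
#   spack install libevent ~openssl     # skip libevent-level encryption
#   spack install [email protected] +openssl
#
# url_for_version() above selects the GitHub "releases" URL for 2.0.22 and
# newer and the older "downloads" URL for earlier versions.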
py
b417cafbf76d7ea65546ff92d28cc7c9c65317ac
import numpy
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier, BallTree
from sklearn.datasets import fetch_openml
from sklearn.metrics import f1_score

import warnings
warnings.simplefilter('ignore')

numpy.random.seed(7)

# The score comes out to 0.9725 for this example and for RandomForestClassifier. I tried every parameter combination (come on, I'm a mathematician), but could not get past that threshold.
# SVC, which the others did get working, runs for more than 4 hours on my machine (I never waited for it to finish), despite nearly better hardware and dropping Jupyter. I don't know what the problem is; this implementation finishes in about a minute.

# Load data from https://www.openml.org/d/554
X, Y = fetch_openml('mnist_784', return_X_y=True)
print(f"shape of X is {X.shape}")

test_shuffle = numpy.random.permutation(X.shape[0])
X_test, X_train = X[test_shuffle[:10000]], X[test_shuffle[10000:]]
Y_test, Y_train = Y[test_shuffle[:10000]], Y[test_shuffle[10000:]]
print(f"train size: {X_train.shape[0]}")
print(f"test size: {X_test.shape[0]}")

model = KNeighborsClassifier(3, algorithm='kd_tree', n_jobs=-1)
model.fit(X_train, Y_train)
y_pred = model.predict(X_test)

print(f"test score is {f1_score(Y_test, y_pred, average='micro')}")
py
b417cb0ea4d7860472a3f49915be3456332ca881
import numpy as np import pandas as pd from scipy import stats from typing import List, Union from moonstone.utils.pandas.series import SeriesBinning import logging logger = logging.getLogger(__name__) DEFAULT_STATS_TEST = "mann_whitney_u" def _preprocess_groups_comparison( series: pd.Series, group_series: pd.Series, stat_test: str ): groups = list(group_series.unique()) groups.sort() list_of_series = [] new_groups = [] for i in groups: groupi = series[group_series[group_series == i].index].dropna() if groupi.size < 1: logger.warning( f"No observations for group {i} in data. Group dropped." ) elif groupi.size < 20 and stat_test == "mann_whitney_u": logger.warning(f"Less than 20 observations for group {i} in data.") list_of_series += [groupi] new_groups += [i] elif groupi.size < 10 and stat_test == "chi2_contingency": logger.warning( f"Not enough observations (<10) for group {i} in data. Group dropped." ) else: list_of_series += [groupi] new_groups += [i] if len(new_groups) == 0: raise RuntimeError("All groups have been dropped: not enough observations by group.") return new_groups, list_of_series def statistical_test_groups_comparison( series: pd.Series, group_series: pd.Series, stat_test: str, output: str = "dataframe", sym: str = True, **kwargs, ): """ :param output: {'series', 'dataframe'} :param sym: whether generated dataframe (or MultiIndexed series) is symetric or half-full In kwargs, you can pass argument for statistical test, like : :param equal_var: For ttest_ind, set to True if your samples have the same variance and you wish to perform a Student's t-test rather than a Welch's t-test (here default is False) :param alternative: {None, 'two-sided', 'less', 'greater'} For Mann - Whitney-U, you can define the alternative hypothesis. :param bins: For chi2_contingency, you can define bins. 
""" method = ["mann_whitney_u", "ttest_independence", "chi2_contingency"] if stat_test not in method: logger.warning( "%s not a available mode, set to default (%s)", stat_test, DEFAULT_STATS_TEST, ) stat_test = DEFAULT_STATS_TEST # raise NotImplementedError("Method %s not implemented" % stat_test) # split dataframe by group + warn and/or drop groups not respecting minimum number of observations groups, list_of_series = _preprocess_groups_comparison( series, group_series, stat_test ) # if no bins defined, compute automatically bins that will be used to bin every series of group if stat_test == "chi2_contingency" and "bins" not in kwargs.keys(): kwargs["bins"] = _compute_best_bins_values(list_of_series) if output == "series": dic_df = {} for i in range(len(groups)): for j in range(i + 1, len(groups)): if stat_test == "mann_whitney_u": pval = mann_whitney_u( list_of_series[i], list_of_series[j], **kwargs )[1] elif stat_test == "ttest_independence": pval = ttest_independence( list_of_series[i], list_of_series[j], **kwargs )[1] elif stat_test == "chi2_contingency": pval = chi2_contingency( list_of_series[i], list_of_series[j], **kwargs )[1] dic_df[(groups[i], groups[j])] = pval if sym: dic_df[(groups[j], groups[i])] = pval pvalue_df = pd.Series(dic_df) pvalue_df.index = pd.MultiIndex.from_tuples( pvalue_df.index, names=["Group", "Group"] ) return pvalue_df tab = [[np.nan] * len(groups) for _ in range(len(groups))] for i in range(len(groups)): for j in range(i + 1, len(groups)): if stat_test == "mann_whitney_u": pval = mann_whitney_u(list_of_series[i], list_of_series[j], **kwargs)[1] elif stat_test == "ttest_independence": pval = ttest_independence( list_of_series[i], list_of_series[j], **kwargs )[1] if stat_test == "chi2_contingency": pval = chi2_contingency(list_of_series[i], list_of_series[j], **kwargs )[1] tab[i][j] = pval if sym: tab[j][i] = pval return pd.DataFrame(tab, index=groups, columns=groups) def mann_whitney_u( series1: pd.Series, series2: pd.Series, alternative: str = "two-sided", **kwargs ): # alternative = 'two-sided' is the default but using None gives a warning return stats.mannwhitneyu(series1, series2, alternative=alternative) def ttest_independence( series1: pd.Series, series2: pd.Series, equal_var: bool = False, **kwargs ): # equal_var = False is Welch's t-test return stats.ttest_ind(series1, series2, equal_var=equal_var) def _compute_best_bins_values(list_of_series): max_cat = 99 max = -float("inf") min = float("inf") for series in list_of_series: tmp = int(series.size / 5) # maybe add other criterium to lower the max_cat if tmp < max_cat: max_cat = tmp tmp = series.min() if tmp < min: min = tmp tmp = series.max() if tmp > max: max = tmp nb_cat = max_cat bins = None ind = 0 ind_validated = [] for series in list_of_series: if bins is not None: s_binning = SeriesBinning(series) s_binning.bins_values = bins if ( s_binning.binned_data[s_binning.binned_data >= 5].size == s_binning.binned_data.size ): ind_validated += [ind] ind += 1 continue bins = None nb_cat -= 1 while nb_cat > 1: s_binning = SeriesBinning(series) bins_values = s_binning.compute_homogeneous_bins( min=min, max=max, nb_bins=nb_cat ) s_binning.bins_values = bins_values if ( s_binning.binned_data[s_binning.binned_data >= 5].size == s_binning.binned_data.size ): bins = ( s_binning.bins_values ) # since bins_values has changed, we need ... 
list_of_series += [ list_of_series[i] for i in ind_validated ] # to check new bins with previous series ind_validated = [ind] break nb_cat -= 1 if nb_cat < 1: raise RuntimeError( "moonstone wasn't able to compute a contingency table of at least 2 x 2 with the data." ) ind += 1 return s_binning.bins_values def chi2_contingency( series1: pd.Series, series2: pd.Series, retbins: bool = False, bins: List[Union[int, float]] = None, **kwargs, ): """ NB : Cells with 0 raise an error in the scipy.stats.chi2_contingency test. Furthermore, they recommand to use the test only if the observed and expected frequencies in each cell are at least 5. :param retbins: Whether to return the bins used to make the Chi2 contingency table """ if series1.size < 10 or series2.size < 10: logger.warning( "Data have less than 10 observations by groups. \ Another statistical test would be more appropriate to compare the 2 groups." ) return (np.nan, np.nan) if bins is None: bins = _compute_best_bins_values([series1, series2]) s1_binning = SeriesBinning(series1) s2_binning = SeriesBinning(series2) s1_binning.bins_values = bins s2_binning.bins_values = bins merged_df = pd.concat( [s1_binning.binned_data, s2_binning.binned_data], axis=1, names=["series1", "series2"], ) merged_df = merged_df.fillna(0) to_return = list(stats.chi2_contingency(merged_df)) to_return += s1_binning.bins_values return tuple(to_return)
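

if __name__ == "__main__":
    # Minimal self-contained sketch of statistical_test_groups_comparison on
    # synthetic data; the values and group labels are made up, and the group
    # sizes (20 each) are chosen only to keep the small-sample warnings quiet.
    rng = np.random.default_rng(0)
    obs = pd.Series(rng.normal(size=40), index=[f"sample_{i}" for i in range(40)])
    grp = pd.Series(["control"] * 20 + ["treated"] * 20, index=obs.index)
    print(statistical_test_groups_comparison(obs, grp, "mann_whitney_u"))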
py
b417cb24bcf19cea829c73313baa0cbd161360ba
from __future__ import absolute_import from past.builtins import basestring import os import tempfile import stat import time import logging import errno import json import re from tqdm import tqdm from zipfile import ZipFile, BadZipfile import os.path as op import shutil from arcana.utils import JSON_ENCODING from arcana.utils import makedirs from arcana.data import Fileset, Field from arcana.repository.base import Repository from arcana.exceptions import ( ArcanaError, ArcanaUsageError, ArcanaFileFormatError, ArcanaWrongRepositoryError) from arcana.pipeline.provenance import Record from arcana.utils import dir_modtime, get_class_info, parse_value import xnat from .dataset import Dataset logger = logging.getLogger('arcana') special_char_re = re.compile(r'[^a-zA-Z_0-9]') tag_parse_re = re.compile(r'\((\d+),(\d+)\)') RELEVANT_DICOM_TAG_TYPES = set(('UI', 'CS', 'DA', 'TM', 'SH', 'LO', 'PN', 'ST', 'AS')) class XnatRepo(Repository): """ An 'Repository' class for XNAT repositories Parameters ---------- server : str (URI) URI of XNAT server to connect to project_id : str The ID of the project in the XNAT repository cache_dir : str (path) Path to local directory to cache remote data in user : str Username with which to connect to XNAT with password : str Password to connect to the XNAT repository with check_md5 : bool Whether to check the MD5 digest of cached files before using. This checks for updates on the server since the file was cached race_cond_delay : int The amount of time to wait before checking that the required fileset has been downloaded to cache by another process has completed if they are attempting to download the same fileset session_filter : str A regular expression that is used to prefilter the discovered sessions to avoid having to retrieve metadata for them, and potentially speeding up the initialisation of the Analysis. Note that if the processing relies on summary derivatives (i.e. of 'per_visit/subject/analysis' frequency) then the filter should match all sessions in the Analysis's subject_ids and visit_ids. 
""" type = 'xnat' SUMMARY_NAME = 'ALL' MD5_SUFFIX = '.__md5__.json' DERIVED_FROM_FIELD = '__derived_from__' PROV_SCAN = '__prov__' PROV_RESOURCE = 'PROV' depth = 2 def __init__(self, server, cache_dir, user=None, password=None, check_md5=True, race_cond_delay=30, session_filter=None): super().__init__() if not isinstance(server, basestring): raise ArcanaUsageError( "Invalid server url {}".format(server)) self._server = server self._cache_dir = cache_dir makedirs(self._cache_dir, exist_ok=True) self._user = user self._password = password self._race_cond_delay = race_cond_delay self._check_md5 = check_md5 self._session_filter = session_filter self._login = None def __hash__(self): return (hash(self.server) ^ hash(self.cache_dir) ^ hash(self._race_cond_delay) ^ hash(self._check_md5)) def __repr__(self): return ("{}(server={}, cache_dir={})" .format(type(self).__name__, self.server, self._cache_dir)) def __eq__(self, other): try: return (self.server == other.server and self._cache_dir == other._cache_dir and self.cache_dir == other.cache_dir and self._race_cond_delay == other._race_cond_delay and self._check_md5 == other._check_md5) except AttributeError: return False # For comparison with other types def __getstate__(self): dct = self.__dict__.copy() del dct['_login'] del dct['_connection_depth'] return dct def __setstate__(self, state): self.__dict__.update(state) self._login = None self._connection_depth = 0 @property def prov(self): return { 'type': get_class_info(type(self)), 'server': self.server} @property def login(self): if self._login is None: raise ArcanaError("XNAT repository has been disconnected before " "exiting outer context") return self._login @property def server(self): return self._server @property def cache_dir(self): return self._cache_dir def dataset_cache_dir(self, dataset_name): return op.join(self.cache_dir, dataset_name) @property def check_md5(self): return self._check_md5 @property def session_filter(self): return (re.compile(self._session_filter) if self._session_filter is not None else None) def connect(self): """ Parameters ---------- prev_login : xnat.XNATSession An XNAT login that has been opened in the code that calls the method that calls login. It is wrapped in a NoExitWrapper so the returned connection can be used in a "with" statement in the method. """ sess_kwargs = {} if self._user is not None: sess_kwargs['user'] = self._user if self._password is not None: sess_kwargs['password'] = self._password self._login = xnat.connect(server=self._server, **sess_kwargs) def disconnect(self): self._login.disconnect() self._login = None def dataset(self, name, **kwargs): """ Returns a dataset from the XNAT repository Parameters ---------- name : str The name, path or ID of the dataset within the repository subject_ids : list[str] The list of subjects to include in the dataset visit_ids : list[str] The list of visits to include in the dataset """ return Dataset(name, repository=self, depth=2, **kwargs) def get_fileset(self, fileset): """ Caches a single fileset (if the 'path' attribute is accessed and it has not been previously cached for example Parameters ---------- fileset : Fileset The fileset to cache prev_login : xnat.XNATSession An XNATSession object to use for the connection. 
A new one is created if one isn't provided Returns ------- primary_path : str The path of the primary file once it has been cached aux_paths : dict[str, str] A dictionary containing a mapping of auxiliary file names to paths """ if fileset.format is None: raise ArcanaUsageError( "Attempting to download {}, which has not been assigned a " "file format (see Fileset.formatted)".format(fileset)) self._check_repository(fileset) with self: # Connect to the XNAT repository if haven't already xsession = self.get_xsession(fileset) xscan = xsession.scans[fileset.name] # Set URI so we can retrieve checksums if required fileset.uri = xscan.uri fileset.id = xscan.id cache_path = self._cache_path(fileset) need_to_download = True if op.exists(cache_path): if self._check_md5: md5_path = cache_path + XnatRepo.MD5_SUFFIX try: with open(md5_path, 'r') as f: cached_checksums = json.load(f) if cached_checksums == fileset.checksums: need_to_download = False except IOError: pass else: need_to_download = False if need_to_download: xresource = xscan.resources[fileset._resource_name] # The path to the directory which the files will be # downloaded to. tmp_dir = cache_path + '.download' try: # Attempt to make tmp download directory. This will # fail if another process (or previous attempt) has # already created it. In that case this process will # wait to see if that download finishes successfully, # and if so use the cached version. os.mkdir(tmp_dir) except OSError as e: if e.errno == errno.EEXIST: # Another process may be concurrently downloading # the same file to the cache. Wait for # 'race_cond_delay' seconds and then check that it # has been completed or assume interrupted and # redownload. self._delayed_download( tmp_dir, xresource, xscan, fileset, xsession.label, cache_path, delay=self._race_cond_delay) else: raise else: self.download_fileset( tmp_dir, xresource, xscan, fileset, xsession.label, cache_path) shutil.rmtree(tmp_dir) if not fileset.format.directory: (primary_path, aux_paths) = fileset.format.assort_files( op.join(cache_path, f) for f in os.listdir(cache_path)) else: primary_path = cache_path aux_paths = None return primary_path, aux_paths def get_field(self, field): self._check_repository(field) with self: xsession = self.get_xsession(field) val = xsession.fields[field.name] val = val.replace('&quot;', '"') val = parse_value(val) return val def put_fileset(self, fileset): if fileset.format is None: raise ArcanaFileFormatError( "Format of {} needs to be set before it is uploaded to {}" .format(fileset, self)) self._check_repository(fileset) # Open XNAT session with self: # Add session for derived scans if not present xsession = self.get_xsession(fileset) cache_path = self._cache_path(fileset) # Make session cache dir cache_path_dir = (op.dirname(cache_path) if fileset.format.directory else cache_path) if os.path.exists(cache_path_dir): shutil.rmtree(cache_path_dir) os.makedirs(cache_path_dir, stat.S_IRWXU | stat.S_IRWXG) if fileset.format.directory: shutil.copytree(fileset.path, cache_path) else: # Copy primary file shutil.copyfile(fileset.path, op.join(cache_path, fileset.fname)) # Copy auxiliaries for sc_fname, sc_path in fileset.aux_file_fnames_and_paths: shutil.copyfile(sc_path, op.join(cache_path, sc_fname)) with open(cache_path + XnatRepo.MD5_SUFFIX, 'w', **JSON_ENCODING) as f: json.dump(fileset.calculate_checksums(), f, indent=2) # Upload to XNAT xscan = self._login.classes.MrScanData( id=fileset.id, type=fileset.basename, parent=xsession) fileset.uri = xscan.uri # Select the first 
xnat_resource name to use to upload the data to resource_name = fileset.format.resource_names(self.type)[0] try: xresource = xscan.resources[resource_name] except KeyError: pass else: # Delete existing resource # TODO: probably should have check to see if we want to # override it xresource.delete() xresource = xscan.create_resource(resource_name) if fileset.format.directory: for fname in os.listdir(fileset.path): xresource.upload(op.join(fileset.path, fname), fname) else: xresource.upload(fileset.path, fileset.fname) for sc_fname, sc_path in fileset.aux_file_fnames_and_paths: xresource.upload(sc_path, sc_fname) def put_field(self, field): self._check_repository(field) val = field.value if field.array: if field.dtype is str: val = ['"{}"'.format(v) for v in val] val = '[' + ','.join(str(v) for v in val) + ']' if field.dtype is str: val = '"{}"'.format(val) with self: xsession = self.get_xsession(field) xsession.fields[field.name] = val def put_record(self, record, dataset): base_cache_path = self._cache_path( record, name=self.PROV_SCAN, dataset=dataset) if not op.exists(base_cache_path): os.mkdir(base_cache_path) else: if not op.isdir(base_cache_path): raise ArcanaError( "Base provenance cache path ('{}') should be a directory" .format(base_cache_path)) cache_path = op.join(base_cache_path, record.pipeline_name + '.json') record.save(cache_path) # TODO: Should also save digest of prov.json to check to see if it # has been altered remotely xsession = self.get_xsession(record, dataset=dataset) xprov = self._login.classes.MrScanData( id=self.PROV_SCAN, type=self.PROV_SCAN, parent=xsession) # Delete existing provenance if present try: xresource = xprov.resources[record.pipeline_name] except KeyError: pass else: xresource.delete() # FIXME: should reuse the same resource for all provenance jsons xresource = xprov.create_resource(record.pipeline_name) xresource.upload(cache_path, op.basename(cache_path)) def get_checksums(self, fileset): """ Downloads the MD5 digests associated with the files in the file-set. These are saved with the downloaded files in the cache and used to check if the files have been updated on the server Parameters ---------- resource : xnat.ResourceCatalog The xnat resource file_format : FileFormat The format of the fileset to get the checksums for. Used to determine the primary file within the resource and change the corresponding key in the checksums dictionary to '.' to match the way it is generated locally by Arcana. """ if fileset.uri is None: raise ArcanaUsageError( "Can't retrieve checksums as URI has not been set for {}" .format(fileset)) with self: checksums = {r['Name']: r['digest'] for r in self.login.get_json(fileset.uri + '/files')[ 'ResultSet']['Result']} if not fileset.format.directory: # Replace the key corresponding to the primary file with '.' to # match the way that checksums are created by Arcana primary = fileset.format.assort_files(checksums.keys())[0] checksums['.'] = checksums.pop(primary) return checksums def find_data(self, dataset, subject_ids=None, visit_ids=None, **kwargs): """ Find all filesets, fields and provenance records within an XNAT project Parameters ---------- subject_ids : list(str) List of subject IDs with which to filter the tree with. If None all are returned visit_ids : list(str) List of visit IDs with which to filter the tree with. 
If None all are returned Returns ------- filesets : list[Fileset] All the filesets found in the repository fields : list[Field] All the fields found in the repository records : list[Record] The provenance records found in the repository """ subject_ids = self.convert_subject_ids(subject_ids) # Add derived visit IDs to list of visit ids to filter all_filesets = [] all_fields = [] all_records = [] project_id = dataset.name # Note we prefer the use of raw REST API calls here for performance # reasons over using XnatPy's data structures. with self: # Get map of internal subject IDs to subject labels in project subject_xids_to_labels = { s['ID']: s['label'] for s in self._login.get_json( '/data/projects/{}/subjects'.format(project_id))[ 'ResultSet']['Result']} # Get list of all sessions within project session_xids = [ s['ID'] for s in self._login.get_json( '/data/projects/{}/experiments'.format(project_id))[ 'ResultSet']['Result'] if (self.session_filter is None or self.session_filter.match(s['label']))] for session_xid in tqdm(session_xids, "Scanning sessions in '{}' project" .format(project_id)): session_json = self._login.get_json( '/data/projects/{}/experiments/{}'.format( project_id, session_xid))['items'][0] subject_xid = session_json['data_fields']['subject_ID'] subject_id = subject_xids_to_labels[subject_xid] session_label = session_json['data_fields']['label'] session_uri = ( '/data/archive/projects/{}/subjects/{}/experiments/{}' .format(project_id, subject_xid, session_xid)) # Get field values. We do this first so we can check for the # DERIVED_FROM_FIELD to determine the correct session label and # analysis name field_values = {} try: fields_json = next( c['items'] for c in session_json['children'] if c['field'] == 'fields/field') except StopIteration: pass else: for js in fields_json: try: value = js['data_fields']['field'] except KeyError: pass else: field_values[js['data_fields']['name']] = value # Extract analysis name and derived-from session if self.DERIVED_FROM_FIELD in field_values: df_sess_label = field_values.pop(self.DERIVED_FROM_FIELD) from_analysis = session_label[len(df_sess_label) + 1:] session_label = df_sess_label else: from_analysis = None # Strip subject ID from session label if required if session_label.startswith(subject_id + '_'): visit_id = session_label[len(subject_id) + 1:] else: visit_id = session_label # Strip project ID from subject ID if required if subject_id.startswith(project_id + '_'): subject_id = subject_id[len(project_id) + 1:] # Check subject is summary or not and whether it is to be # filtered if subject_id == XnatRepo.SUMMARY_NAME: subject_id = None elif not (subject_ids is None or subject_id in subject_ids): continue # Check visit is summary or not and whether it is to be # filtered if visit_id == XnatRepo.SUMMARY_NAME: visit_id = None elif not (visit_ids is None or visit_id in visit_ids): continue # Determine frequency if (subject_id, visit_id) == (None, None): frequency = 'per_dataset' elif visit_id is None: frequency = 'per_subject' elif subject_id is None: frequency = 'per_visit' else: frequency = 'per_session' # Append fields for name, value in field_values.items(): value = value.replace('&quot;', '"') all_fields.append(Field( name=name, value=value, dataset=dataset, frequency=frequency, subject_id=subject_id, visit_id=visit_id, from_analysis=from_analysis, **kwargs)) # Extract part of JSON relating to files try: scans_json = next( c['items'] for c in session_json['children'] if c['field'] == 'scans/scan') except StopIteration: scans_json 
= [] for scan_json in scans_json: scan_id = scan_json['data_fields']['ID'] scan_type = scan_json['data_fields'].get('type', '') scan_quality = scan_json['data_fields'].get('quality', None) scan_uri = '{}/scans/{}'.format(session_uri, scan_id) try: resources_json = next( c['items'] for c in scan_json['children'] if c['field'] == 'file') except StopIteration: resources = {} else: resources = {js['data_fields']['label']: js['data_fields'].get('format', None) for js in resources_json} # Remove auto-generated snapshots directory resources.pop('SNAPSHOTS', None) if scan_type == self.PROV_SCAN: # Download provenance JSON files and parse into # records temp_dir = tempfile.mkdtemp() try: with tempfile.TemporaryFile() as temp_zip: self._login.download_stream( scan_uri + '/files', temp_zip, format='zip') with ZipFile(temp_zip) as zip_file: zip_file.extractall(temp_dir) for base_dir, _, fnames in os.walk(temp_dir): for fname in fnames: if fname.endswith('.json'): pipeline_name = fname[:-len('.json')] json_path = op.join(base_dir, fname) all_records.append( Record.load( pipeline_name, frequency, subject_id, visit_id, from_analysis, json_path)) finally: shutil.rmtree(temp_dir, ignore_errors=True) else: for resource in resources: all_filesets.append(Fileset( scan_type, id=scan_id, uri=scan_uri, dataset=dataset, frequency=frequency, subject_id=subject_id, visit_id=visit_id, from_analysis=from_analysis, quality=scan_quality, resource_name=resource, **kwargs)) logger.debug("Found node {}:{} on {}:{}".format( subject_id, visit_id, self.server, project_id)) return all_filesets, all_fields, all_records def convert_subject_ids(self, subject_ids): """ Convert subject ids to strings if they are integers """ # TODO: need to make this generalisable via a # splitting+mapping function passed to the repository if subject_ids is not None: subject_ids = set( ('{:03d}'.format(s) if isinstance(s, int) else s) for s in subject_ids) return subject_ids def extract_subject_id(self, xsubject_label): """ This assumes that the subject ID is prepended with the project ID. 
""" return xsubject_label.split('_')[1] def extract_visit_id(self, xsession_label): """ This assumes that the session ID is preprended """ return '_'.join(xsession_label.split('_')[2:]) def dicom_header(self, fileset): def convert(val, code): if code == 'TM': try: val = float(val) except ValueError: pass elif code == 'CS': val = val.split('\\') return val with self: response = self._login.get( '/REST/services/dicomdump?src=' + fileset.uri[len('/data'):]).json()['ResultSet']['Result'] hdr = {tag_parse_re.match(t['tag1']).groups(): convert(t['value'], t['vr']) for t in response if (tag_parse_re.match(t['tag1']) and t['vr'] in RELEVANT_DICOM_TAG_TYPES)} return hdr def download_fileset(self, tmp_dir, xresource, xscan, fileset, session_label, cache_path): # Download resource to zip file zip_path = op.join(tmp_dir, 'download.zip') with open(zip_path, 'wb') as f: xresource.xnat_session.download_stream( xresource.uri + '/files', f, format='zip', verbose=True) checksums = self.get_checksums(fileset) # Extract downloaded zip file expanded_dir = op.join(tmp_dir, 'expanded') try: with ZipFile(zip_path) as zip_file: zip_file.extractall(expanded_dir) except BadZipfile as e: raise ArcanaError( "Could not unzip file '{}' ({})" .format(xresource.id, e)) data_path = op.join( expanded_dir, session_label, 'scans', (xscan.id + '-' + special_char_re.sub('_', xscan.type)), 'resources', xresource.label, 'files') # Remove existing cache if present try: shutil.rmtree(cache_path) except OSError as e: if e.errno != errno.ENOENT: raise e shutil.move(data_path, cache_path) with open(cache_path + XnatRepo.MD5_SUFFIX, 'w', **JSON_ENCODING) as f: json.dump(checksums, f, indent=2) def _delayed_download(self, tmp_dir, xresource, xscan, fileset, session_label, cache_path, delay): logger.info("Waiting {} seconds for incomplete download of '{}' " "initiated another process to finish" .format(delay, cache_path)) initial_mod_time = dir_modtime(tmp_dir) time.sleep(delay) if op.exists(cache_path): logger.info("The download of '{}' has completed " "successfully in the other process, continuing" .format(cache_path)) return elif initial_mod_time != dir_modtime(tmp_dir): logger.info( "The download of '{}' hasn't completed yet, but it has" " been updated. Waiting another {} seconds before " "checking again.".format(cache_path, delay)) self._delayed_download(tmp_dir, xresource, xscan, fileset, session_label, cache_path, delay) else: logger.warning( "The download of '{}' hasn't updated in {} " "seconds, assuming that it was interrupted and " "restarting download".format(cache_path, delay)) shutil.rmtree(tmp_dir) os.mkdir(tmp_dir) self.download_fileset( tmp_dir, xresource, xscan, fileset, session_label, cache_path) def get_xsession(self, item, dataset=None): """ Returns the XNAT session and cache dir corresponding to the item. 
""" if dataset is None: dataset = item.dataset subj_label, sess_label = self._get_item_labels(item, dataset=dataset) with self: xproject = self._login.projects[dataset.name] try: xsubject = xproject.subjects[subj_label] except KeyError: xsubject = self._login.classes.SubjectData( label=subj_label, parent=xproject) try: xsession = xsubject.experiments[sess_label] except KeyError: xsession = self._login.classes.MrSessionData( label=sess_label, parent=xsubject) if item.derived: xsession.fields[ self.DERIVED_FROM_FIELD] = self._get_item_labels( item, dataset=dataset, no_from_analysis=True)[1] return xsession def _get_item_labels(self, item, no_from_analysis=False, dataset=None): """ Returns the labels for the XNAT subject and sessions given the frequency and provided IDs. """ if dataset is None: dataset = item.dataset subject_id = dataset.inv_map_subject_id(item.subject_id) visit_id = dataset.inv_map_visit_id(item.visit_id) subj_label, sess_label = self._get_labels( dataset.name, item.frequency, subject_id, visit_id) if not no_from_analysis and item.from_analysis is not None: sess_label += '_' + item.from_analysis return (subj_label, sess_label) def _get_labels(self, project_id, frequency, subject_id=None, visit_id=None): """ Returns the labels for the XNAT subject and sessions given the frequency and provided IDs. """ # FIXME: Move this logic into the dataset map IDs and make them # default arguments for 'dataset' method if frequency == 'per_session': subj_label = '{}_{}'.format(project_id, subject_id) sess_label = '{}_{}_{}'.format(project_id, subject_id, visit_id) elif frequency == 'per_subject': subj_label = '{}_{}'.format(project_id, subject_id) sess_label = '{}_{}_{}'.format(project_id, subject_id, self.SUMMARY_NAME) elif frequency == 'per_visit': subj_label = '{}_{}'.format(project_id, self.SUMMARY_NAME) sess_label = '{}_{}_{}'.format(project_id, self.SUMMARY_NAME, visit_id) elif frequency == 'per_dataset': subj_label = '{}_{}'.format(project_id, self.SUMMARY_NAME) sess_label = '{}_{}_{}'.format(project_id, self.SUMMARY_NAME, self.SUMMARY_NAME) else: assert False return (subj_label, sess_label) def _cache_path(self, fileset, name=None, dataset=None): if dataset is None: dataset = fileset.dataset subj_dir, sess_dir = self._get_item_labels(fileset, dataset=dataset) cache_dir = op.join(self._cache_dir, dataset.name, subj_dir, sess_dir) makedirs(cache_dir, exist_ok=True) if name is None: name = '{}-{}'.format(fileset.id, special_char_re.sub('_', fileset.name)) return op.join(cache_dir, name) def _check_repository(self, item): if item.dataset.repository is not self: raise ArcanaWrongRepositoryError( "{} is from {} instead of {}".format( item, item.dataset.repository, self))
py
b417cc3d76346e2724290efc5707f11c54f98d25
import sys, time import xmconst import json, os, subprocess, socket from struct import pack, unpack from pprint import pprint, pformat if sys.version_info[0] == 2: from threading import _Timer as Timer else: from threading import Timer class RepeatingTimer(Timer): def run(self): while not self.finished.is_set(): self.function(*self.args, **self.kwargs) self.finished.wait(self.interval) class XMCam: instance = None main_socket = None socket_timeout = 20 sid = 0 sequence = 0 ip = '' port = 0 username = password = '' keepalive_timer = None def __init__(self, ip, port, username, password, sid=0, autoconnect=True, instance=None): self.ip = ip self.port = port self.username = username self.password = password self.sid = sid self.instance = instance if autoconnect: self.connect() def __del__(self): try: self.disconnect() except: pass def is_sub_connection(self): return self.instance != None def connect(self): try: self.main_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.main_socket.settimeout(self.socket_timeout) self.main_socket.connect((self.ip, self.port)) except Exception as e: print(e) return False return True def disconnect(self): try: self.main_socket.close() self._stop_keepalive_interval() except: pass @staticmethod def prettify(json_data): data_dict = json.loads(json_data) return pformat(data_dict) @staticmethod def to_dict(json_data): data_dict = json.loads(json_data) return data_dict def _generic_command_head(self, msgid, params): pkt = params if msgid != xmconst.LOGIN_REQ2 and type(params) != bytes: pkt['SessionID'] = self._build_packet_sid() cmd_data = self._build_packet(msgid, pkt) self.main_socket.send(cmd_data) if type(params) == bytes: return cmd_data response_head = self._get_response_head() return response_head def _generic_command(self, msgid, params): response_head = self._generic_command_head(msgid, params) out = self._get_response_data(response_head) if msgid == xmconst.LOGIN_REQ2 and 'SessionID' in response_head: self.sid = response_head['SessionID'] if out: return out return None def _generic_command_download(self, msgid, params, file): reply_head = self._generic_command_head(msgid, params) out = self._get_response_data(reply_head) if out: with open(file, 'wb') as f: f.write(out) return True return False def _get_response_head(self): data = self.main_socket.recv(4) head_flag, version, _, _ = unpack('BBBB', data) data = self.main_socket.recv(8) sid, seq = unpack('ii', data) data = self.main_socket.recv(8) channel, endflag, msgid, size = unpack('BBHI', data) reply_head = { 'Version': version, 'SessionID': sid, 'Sequence': seq, 'MessageId': msgid, 'Content_Length': size } self.sequence = seq return reply_head def _get_response_data(self, reply_head): reply = reply_head length = reply['Content_Length'] out = '' for i in range(0, length): data = self.main_socket.recv(1) out += data.decode('utf-8') return out.rstrip('\x00') def _build_packet_sid(self): return '0x%08x' % self.sid def _build_packet(self, ptype, data): pkt_type = ptype pkt_prefix_1 = (0xff, 0x01, 0x00, 0x00) pkt_prefix_2 = (0x00, 0x00, 0x00, 0x00) header = pack('B'*len(pkt_prefix_1), *pkt_prefix_1) header += pack('I', self.sid) header += pack('B'*len(pkt_prefix_2), *pkt_prefix_2) header += pack('H', 0) + pack('H', pkt_type) # If data is bytes, designed for sending stream bytes to server if type(data) == bytes: pkt_data = data pkt_data = header + pack('I', len(pkt_data)) + pkt_data else: pkt_data = json.dumps(data) pkt_data = header + pack('I', len(pkt_data)) + bytes(pkt_data.encode('ascii')) 
return pkt_data def _start_keepalive_interval(self): self.keepalive_timer = RepeatingTimer(20.0, self._interval_keepalive) self.keepalive_timer.start() def _stop_keepalive_interval(self): if self.keepalive_timer != None: self.keepalive_timer.cancel() def _interval_keepalive(self): pkt = { "Name" : "KeepAlive" } response = self._generic_command(xmconst.KEEPALIVE_REQ, pkt) print(response) def create_sub_connection(self, autoconnect=False): subconn = XMCam(self.ip, self.port, self.username, self.password, sid=self.sid, instance=self, autoconnect=autoconnect) return subconn def cmd_login(self): pkt = { 'EncryptType': 'MD5', 'LoginType': 'DVRIP-Web', 'PassWord': self.password, 'UserName': self.username } response = self._generic_command(xmconst.LOGIN_REQ2, pkt) respdict = self.to_dict(response) if not self.is_sub_connection() and respdict != None and 'Ret' in respdict and respdict['Ret'] == 100: self._start_keepalive_interval() else: print(__name__, 'Cannot start keepalive') return response def cmd_system_function(self): pkt = { 'Name': 'SystemFunction' } response = self._generic_command(xmconst.ABILITY_GET, pkt) return self.prettify(response) def cmd_system_info(self): pkt = { 'Name': 'SystemInfo' } response = self._generic_command(xmconst.SYSINFO_REQ, pkt) return self.prettify(response) def cmd_keep_alive(self): pkt = { 'Name': 'KeepAlive' } return self._generic_command(xmconst.KEEPALIVE_REQ, pkt) def cmd_channel_title(self): pkt = { 'Name': 'ChannelTitle' } response = self._generic_command(xmconst.CONFIG_CHANNELTILE_GET, pkt) return self.prettify(response) def cmd_OEM_info(self): pkt = { 'Name': 'OEMInfo' } response = self._generic_command(xmconst.SYSINFO_REQ, pkt) return self.prettify(response) def cmd_storage_info(self): pkt = { 'Name': 'StorageInfo' } response = self._generic_command(xmconst.SYSINFO_REQ, pkt) return self.prettify(response) def cmd_sync_time(self, noRTC = False): cmd = 'OPTimeSetting' pkt_type = xmconst.SYSMANAGER_REQ if noRTC: cmd += 'NoRTC' pkt_type = xmconst.SYNC_TIME_REQ pkt = { 'Name': cmd, cmd: time.strftime('%Y-%m-%d %H:%M:%S') } response = self._generic_command(pkt_type, pkt) return response def cmd_get_time(self): pkt = { 'Name': 'OPTimeQuery' } response = self._generic_command(xmconst.TIMEQUERY_REQ, pkt) return response def cmd_users(self): pkt = { } response = self._generic_command(xmconst.USERS_GET, pkt) return self.prettify(response) def cmd_ptz_control(self, direction, stop=False): pkt = { "Name": "OPPTZControl", "OPPTZControl": { "Command": direction, #DirectionLeft, DirectionRight, DirectionUp, DirectionDown "Parameter": { "AUX": { "Number": 0, "Status": "On" }, "Channel": 0, "MenuOpts": "Enter", "POINT": { "bottom": 0, "left": 0, "right": 0, "top": 0 }, "Pattern": "Start", #""SetBegin", "Preset": -1 if stop else 65535, "Step": 30, "Tour": 0 } } } response = self._generic_command(xmconst.PTZ_REQ, pkt) return self.prettify(response) def cmd_photo(self, file): pkt = { } reply = self._generic_command_download(xmconst.PHOTO_GET_REQ, pkt, file) return reply def cmd_config_export(self, file): pkt = { 'Name': '' } reply = self._generic_command_download(xmconst.CONFIG_EXPORT_REQ, pkt, file) return reply # Just because no snap command supported, we need external program to capture from RTSP stream # using avconv or ffmpeg @staticmethod def cmd_external_snap(snap_file, app='/usr/bin/avconv', rtsp='rtsp://192.168.1.10/user=admin&password=admin&channel=1&stream=0.sdp', args=('-y', '-f', 'image2', '-vframes', '1', '-pix_fmt', 'yuvj420p')): if not os.path.exists(app): 
return False # Add executable fullargs = [app] # Make silent except errors fullargs.append('-loglevel') fullargs.append('panic') # Append input arg fullargs.append('-i') fullargs.append(rtsp) # Append other args [fullargs.append(a) for a in args] # Lastly, append output arg fullargs.append(snap_file) # child = subprocess.Popen(process, stdout=subprocess.PIPE) child = subprocess.Popen(fullargs) child.communicate() return child.returncode == 0 # True if 0 @staticmethod def cmd_external_record(video_file, app='/usr/bin/avconv', rtsp='rtsp://192.168.1.10/user=admin&password=admin&channel=1&stream=0.sdp', args=('-vcodec', 'copy', '-f', 'mp4', '-y', '-an'), time_limit=5 ): if not os.path.exists(app): return False # Add executable fullargs = [app] # Make silent except errors fullargs.append('-loglevel') fullargs.append('panic') # Append input arg fullargs.append('-i') fullargs.append(rtsp) # Append other args [fullargs.append(a) for a in args] # Append record time limit in secs fullargs.append('-t') fullargs.append(str(time_limit) if time_limit > 0 else '5') # Append output arg fullargs.append(video_file) # child = subprocess.Popen(process, stdout=subprocess.PIPE) child = subprocess.Popen(fullargs) child.communicate() return child.returncode == 0 # True if 0 @staticmethod def cmd_snap(snap_file): retval = XMCam.cmd_external_snap(snap_file) return retval def cmd_talk_claim(self): assert self.is_sub_connection(), 'cmd_talk_claim need run on a sub connection' pkt = { "Name": "OPTalk", "OPTalk": { "Action": "Claim", "AudioFormat": { "BitRate": 0, "EncodeType": "G711_ALAW", "SampleBit": 8, "SampleRate": 8 } } } response = self._generic_command(xmconst.TALK_CLAIM, pkt) return response def cmd_talk_send_stream(self, data): #assert type(data) == bytes, 'Data should be a PCM bytes type' # final_data = bytes.fromhex('000001fa0e024001') + data final_data = b'\x00\x00\x01\xfa\x0e\x02\x40\x01' + data sent = self._generic_command_head(xmconst.TALK_CU_PU_DATA, final_data) return sent def cmd_talk_start(self): pkt = { "Name" : "OPTalk", "OPTalk" : { "Action" : "Start", "AudioFormat" : { "BitRate" : 128, "EncodeType" : "G711_ALAW", "SampleBit" : 8, "SampleRate" : 8000 } } } response = self._generic_command(xmconst.TALK_REQ, pkt) return response def cmd_talk_stop(self): pkt = { "Name" : "OPTalk", "OPTalk" : { "Action" : "Stop", "AudioFormat" : { "BitRate" : 128, "EncodeType" : "G711_ALAW", "SampleBit" : 8, "SampleRate" : 8000 } } } response = self._generic_command(xmconst.TALK_REQ, pkt) return response @staticmethod def talk_convert_to_pcm(src, volume=1.0, app='/usr/bin/avconv', args=( '-y', '-f', 'alaw', '-ar', '8000', '-ac', '1', )): if not os.path.exists(app): return (False, None) if not os.path.exists(src): return (False, None) dst_final = src + '.pcm' fullargs = [app] fullargs.append('-loglevel') fullargs.append('panic') fullargs.append('-i') fullargs.append(src) [fullargs.append(a) for a in args] if volume != 1.0: fullargs.append('-filter:a') fullargs.append('volume={}'.format(volume)) fullargs.append(dst_final) child = subprocess.Popen(fullargs) child.communicate() return (child.returncode == 0, dst_final) # True if 0 @staticmethod def talk_get_chunks(pcmfile): retdata = None try: pcmdata = open(pcmfile, 'rb').read() data = [pcmdata[i:i+320] for i in range(0, len(pcmdata), 320)] retdata = data except: print('Got an exception on talk_get_chunks') return retdata
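

# Hypothetical usage sketch; the IP address, port and credentials are
# placeholders for a camera reachable on the local network:
#
#   cam = XMCam('192.168.1.10', 34567, 'admin', 'admin')
#   print(cam.cmd_login())
#   print(cam.cmd_system_info())
#   print(cam.cmd_ptz_control('DirectionLeft'))        # start panning left
#   print(cam.cmd_ptz_control('DirectionLeft', True))  # stop
#   cam.disconnect()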
py
b417ccb7c14d732d51b9dc5c22954fb4ab7f4df6
# To 500HZ import numpy as np import pandas as pd from scipy.fft import dct, idct from tqdm import tqdm def normalize(wave: np.array, wanted_std: float): std = wave.std() if std == 0: return wave else: return (wave / wave.std()) * wanted_std def padding(wave: np.array, wanted_pt_length: int): return np.concatenate([ wave, np.zeros((len(wave), wanted_pt_length - wave.shape[1])) ], axis=1) def cg_hz(wave: np.array, wanted_pt_length: int): if wave.shape[1] == wanted_pt_length: return wave.astype(np.float32) elif wave.shape[1] > wanted_pt_length: return normalize(idct(dct(wave, axis=1)[:, :wanted_pt_length], axis=1), wave.std()) else: return normalize(idct(padding(dct(wave, axis=1), wanted_pt_length), axis=1), wave.std()) # %% def cinc2020_loader(file_path: str, idx: int, meta_df: pd.DataFrame): fp2 = np.memmap(file_path, np.int32, mode='r', shape=tuple(meta_df['shapes'][idx]), offset=4 * meta_df['offsets'][idx]) return np.copy(fp2) meta_df = pd.read_pickle('cinc2020_meta_info.pkl') wanted_wave_length = ((500 / meta_df['freq']) * meta_df['raw_wave_length']).to_numpy() for el in wanted_wave_length: if el != int(el): print(el) wanted_wave_length = np.asarray([round(el) for el in wanted_wave_length]) for el in wanted_wave_length: if el != int(el): print(el) all_num = sum(wanted_wave_length) * 12 fp = np.memmap('cinc2020_500.npy', dtype=np.float32, mode='w+', shape=(all_num,)) offsets = [] offset = 0 for i, el in tqdm(enumerate(wanted_wave_length)): original_data = cinc2020_loader('cinc2020.npy', i, meta_df).astype(np.float32) new_data = cg_hz(original_data, el) tmp = new_data.flatten().astype(np.float32) offsets.append(offset) fp[offset:offset + len(tmp)] = tmp offset += len(tmp) offsets = np.asarray(offsets) meta_df['new_raw_wave_length'] = wanted_wave_length meta_df['new_offsets'] = offsets meta_df.to_pickle('cinc2020_meta_info.pkl') def cinc2020_500_loader(file_path: str, idx: int, meta_df: pd.DataFrame): fp2 = np.memmap(file_path, np.float32, mode='r', shape=(12, meta_df['new_raw_wave_length'][idx]), offset=4 * meta_df['new_offsets'][idx]) return np.copy(fp2) # %% def ptb_xl_loader(file_path: str, idx: int, meta_df: pd.DataFrame): fp2 = np.memmap(file_path, np.float64, mode='r', shape=tuple(meta_df['shapes'][idx]), offset=8 * meta_df['offsets'][idx]) return np.copy(fp2) meta_df = pd.read_pickle('ptb_xl_meta_info.pkl') wanted_wave_length = ((500 / meta_df['freq']) * meta_df['raw_wave_length']).to_numpy() for el in wanted_wave_length: if el != int(el): print(el) wanted_wave_length = np.asarray([round(el) for el in wanted_wave_length]) for el in wanted_wave_length: if el != int(el): print(el) all_num = sum(wanted_wave_length) * 12 fp = np.memmap('ptb_xl_500.npy', dtype=np.float32, mode='w+', shape=(all_num,)) offsets = [] offset = 0 for i, el in tqdm(enumerate(wanted_wave_length)): original_data = ptb_xl_loader('ptb_xl.npy', i, meta_df).astype(np.float32) new_data = cg_hz(original_data, el) tmp = new_data.flatten().astype(np.float32) offsets.append(offset) fp[offset:offset + len(tmp)] = tmp offset += len(tmp) offsets = np.asarray(offsets) meta_df['new_raw_wave_length'] = wanted_wave_length meta_df['new_offsets'] = offsets meta_df.to_pickle('ptb_xl_meta_info.pkl') def ptb_xl_500_loader(file_path: str, idx: int, meta_df: pd.DataFrame): fp2 = np.memmap(file_path, np.float32, mode='r', shape=(12, meta_df['new_raw_wave_length'][idx]), offset=4 * meta_df['new_offsets'][idx]) return np.copy(fp2) # %% def ribeiro2020_train_loader(file_path: str, idx: int, meta_df: pd.DataFrame): fp2 = 
np.memmap(file_path, np.float32, mode='r', shape=tuple(meta_df['shapes'][idx]), offset=4 * meta_df['offsets'][idx]) return np.copy(fp2) meta_df = pd.read_pickle('ribeiro2020_train_meta_info.pkl') wanted_wave_length = ((500 / meta_df['freq']) * meta_df['raw_wave_length']).to_numpy() for el in wanted_wave_length: if el != int(el): print(el) wanted_wave_length = np.asarray([round(el) for el in wanted_wave_length]) for el in wanted_wave_length: if el != int(el): print(el) all_num = sum(wanted_wave_length) * 12 fp = np.memmap('ribeiro2020_train_500.npy', dtype=np.float32, mode='w+', shape=(all_num,)) offsets = [] offset = 0 # for i, el in tqdm(enumerate(wanted_wave_length)): original_data = ribeiro2020_train_loader('ribeiro2020_train.npy', i, meta_df).astype(np.float32) new_data = cg_hz(original_data, el) tmp = new_data.flatten().astype(np.float32) offsets.append(offset) fp[offset:offset + len(tmp)] = tmp offset += len(tmp) offsets = np.asarray(offsets) meta_df['new_raw_wave_length'] = wanted_wave_length meta_df['new_offsets'] = offsets meta_df.to_pickle('ribeiro2020_train_meta_info.pkl') def ribeiro2020_train_500_loader(file_path: str, idx: int, meta_df: pd.DataFrame): fp2 = np.memmap(file_path, np.float32, mode='r', shape=(12, meta_df['new_raw_wave_length'][idx]), offset=4 * meta_df['new_offsets'][idx]) return np.copy(fp2) # %%k def ribeiro2020_test_loader(file_path: str, idx: int, meta_df: pd.DataFrame): fp2 = np.memmap(file_path, np.float64, mode='r', shape=tuple(meta_df['shapes'][idx]), offset=8 * meta_df['offsets'][idx]) return np.copy(fp2) meta_df = pd.read_pickle('ribeiro2020_test_meta_info.pkl') wanted_wave_length = ((500 / meta_df['freq']) * meta_df['raw_wave_length']).to_numpy() for el in wanted_wave_length: if el != int(el): print(el) wanted_wave_length = np.asarray([round(el) for el in wanted_wave_length]) for el in wanted_wave_length: if el != int(el): print(el) all_num = sum(wanted_wave_length) * 12 fp = np.memmap('ribeiro2020_test_500.npy', dtype=np.float32, mode='w+', shape=(all_num,)) offsets = [] offset = 0 # for i, el in tqdm(enumerate(wanted_wave_length)): original_data = ribeiro2020_test_loader('ribeiro2020_test.npy', i, meta_df).astype(np.float32) new_data = cg_hz(original_data, el) tmp = new_data.flatten().astype(np.float32) offsets.append(offset) fp[offset:offset + len(tmp)] = tmp offset += len(tmp) offsets = np.asarray(offsets) meta_df['new_raw_wave_length'] = wanted_wave_length meta_df['new_offsets'] = offsets meta_df.to_pickle('ribeiro2020_test_meta_info.pkl') def ribeiro2020_test_500_loader(file_path: str, idx: int, meta_df: pd.DataFrame): fp2 = np.memmap(file_path, np.float32, mode='r', shape=(12, meta_df['new_raw_wave_length'][idx]), offset=4 * meta_df['new_offsets'][idx]) return np.copy(fp2) # %% def zheng2020_loader(file_path: str, idx: int, meta_df: pd.DataFrame): fp2 = np.memmap(file_path, np.float64, mode='r', shape=tuple(meta_df['shapes'][idx]), offset=8 * meta_df['offsets'][idx]) return np.copy(fp2) meta_df = pd.read_pickle('zheng2020_meta_info.pkl') wanted_wave_length = ((500 / meta_df['freq']) * meta_df['raw_wave_length']).to_numpy() for el in wanted_wave_length: if el != int(el): print(el) wanted_wave_length = np.asarray([round(el) for el in wanted_wave_length]) for el in wanted_wave_length: if el != int(el): print(el) all_num = sum(wanted_wave_length) * 12 fp = np.memmap('zheng2020_500.npy', dtype=np.float32, mode='w+', shape=(all_num,)) offsets = [] offset = 0 # for i, el in tqdm(enumerate(wanted_wave_length)): original_data = 
zheng2020_loader('zheng2020.npy', i, meta_df).astype(np.float32) new_data = cg_hz(original_data, el) tmp = new_data.flatten().astype(np.float32) offsets.append(offset) fp[offset:offset + len(tmp)] = tmp offset += len(tmp) offsets = np.asarray(offsets) meta_df['new_raw_wave_length'] = wanted_wave_length meta_df['new_offsets'] = offsets meta_df.to_pickle('zheng2020_meta_info.pkl') def zheng2020_500_loader(file_path: str, idx: int, meta_df: pd.DataFrame): fp2 = np.memmap(file_path, np.float32, mode='r', shape=(12, meta_df['new_raw_wave_length'][idx]), offset=4 * meta_df['new_offsets'][idx]) return np.copy(fp2)
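

# Illustrative check of the cg_hz() resampling helper defined above, kept in
# comments because this script's module-level code expects the large .npy and
# .pkl files to exist; the demo array is synthetic:
#
#   demo = np.zeros((12, 400), dtype=np.float32)
#   demo[:, ::2] = 1.0                  # arbitrary non-constant 12-lead signal
#   cg_hz(demo, 400).shape              # -> (12, 400), returned unchanged
#   cg_hz(demo, 1000).shape             # -> (12, 1000), DCT coefficients zero-padded
#   cg_hz(demo, 200).shape              # -> (12, 200), DCT coefficients truncated
#
# In the resampled cases normalize() rescales the inverse DCT so the output
# keeps the input's standard deviation.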
py
b417ccf1f74e0f81bc4a73a38fa9dd5848a94987
#!/usr/bin/env python3

import time
import colorsys
import sys

import ST7735

try:
    # Transitional fix for breaking change in LTR559
    from ltr559 import LTR559
    ltr559 = LTR559()
except ImportError:
    import ltr559

from bme280 import BME280
from pms5003 import PMS5003, ReadTimeoutError as pmsReadTimeoutError, SerialTimeoutError
from enviroplus import gas
from subprocess import PIPE, Popen
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from fonts.ttf import RobotoMedium as UserFont
import logging

logging.basicConfig(
    format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
    level=logging.INFO,
    datefmt='%Y-%m-%d %H:%M:%S')

logging.info("""all-in-one.py - Displays readings from all of Enviro plus' sensors

Press Ctrl+C to exit!
""")

# BME280 temperature/pressure/humidity sensor
bme280 = BME280()

# PMS5003 particulate sensor
pms5003 = PMS5003()
time.sleep(1.0)

# Create ST7735 LCD display class
st7735 = ST7735.ST7735(
    port=0,
    cs=1,
    dc=9,
    backlight=12,
    rotation=270,
    spi_speed_hz=10000000
)

# Initialize display
st7735.begin()

WIDTH = st7735.width
HEIGHT = st7735.height

# Set up canvas and font
img = Image.new('RGB', (WIDTH, HEIGHT), color=(0, 0, 0))
draw = ImageDraw.Draw(img)
font_size_small = 10
font_size_large = 20
font = ImageFont.truetype(UserFont, font_size_large)
smallfont = ImageFont.truetype(UserFont, font_size_small)
x_offset = 2
y_offset = 2

message = ""

# The position of the top bar
top_pos = 25

# Create a values dict to store the data
variables = ["temperature", "pressure", "humidity", "light", "oxidised", "reduced", "nh3", "pm1", "pm25", "pm10"]

units = ["C", "hPa", "%", "Lux", "kO", "kO", "kO", "ug/m3", "ug/m3", "ug/m3"]

# Define your own warning limits
# The limits definition follows the order of the variables array
# Example limits explanation for temperature:
# [4,18,28,35] means
# [-273.15 .. 4] -> Dangerously Low
# (4 .. 18]      -> Low
# (18 .. 28]     -> Normal
# (28 .. 35]     -> High
# (35 .. MAX]    -> Dangerously High
# DISCLAIMER: The limits provided here are just examples and come
# with NO WARRANTY. The authors of this example code claim
# NO RESPONSIBILITY if reliance on the following values or this
# code in general leads to ANY DAMAGES or DEATH.
limits = [[4, 18, 28, 35],
          [250, 650, 1013.25, 1015],
          [20, 30, 60, 70],
          [-1, -1, 30000, 100000],
          [-1, -1, 40, 50],
          [-1, -1, 450, 550],
          [-1, -1, 200, 300],
          [-1, -1, 50, 100],
          [-1, -1, 50, 100],
          [-1, -1, 50, 100]]

# RGB palette for values on the combined screen
palette = [(0, 0, 255),    # Dangerously Low
           (0, 255, 255),  # Low
           (0, 255, 0),    # Normal
           (255, 255, 0),  # High
           (255, 0, 0)]    # Dangerously High

values = {}


# Displays data and text on the 0.96" LCD
def display_text(variable, data, unit):
    # Maintain length of list
    values[variable] = values[variable][1:] + [data]
    # Scale the values for the variable between 0 and 1
    vmin = min(values[variable])
    vmax = max(values[variable])
    colours = [(v - vmin + 1) / (vmax - vmin + 1) for v in values[variable]]
    # Format the variable name and value
    message = "{}: {:.1f} {}".format(variable[:4], data, unit)
    logging.info(message)
    draw.rectangle((0, 0, WIDTH, HEIGHT), (255, 255, 255))
    for i in range(len(colours)):
        # Convert the values to colours from red to blue
        colour = (1.0 - colours[i]) * 0.6
        r, g, b = [int(x * 255.0) for x in colorsys.hsv_to_rgb(colour, 1.0, 1.0)]
        # Draw a 1-pixel wide rectangle of colour
        draw.rectangle((i, top_pos, i + 1, HEIGHT), (r, g, b))
        # Draw a line graph in black
        line_y = HEIGHT - (top_pos + (colours[i] * (HEIGHT - top_pos))) + top_pos
        draw.rectangle((i, line_y, i + 1, line_y + 1), (0, 0, 0))
    # Write the text at the top in black
    draw.text((0, 0), message, font=font, fill=(0, 0, 0))
    st7735.display(img)


# Saves the data to be used in the graphs later and prints to the log
def save_data(idx, data):
    variable = variables[idx]
    # Maintain length of list
    values[variable] = values[variable][1:] + [data]
    unit = units[idx]
    message = "{}: {:.1f} {}".format(variable[:4], data, unit)
    logging.info(message)


# Displays all the text on the 0.96" LCD
def display_everything():
    draw.rectangle((0, 0, WIDTH, HEIGHT), (0, 0, 0))
    column_count = 2
    row_count = (len(variables) / column_count)
    for i in range(len(variables)):
        variable = variables[i]
        data_value = values[variable][-1]
        unit = units[i]
        x = x_offset + ((WIDTH / column_count) * (i / row_count))
        y = y_offset + ((HEIGHT / row_count) * (i % row_count))
        message = "{}: {:.1f} {}".format(variable[:4], data_value, unit)
        lim = limits[i]
        rgb = palette[0]
        for j in range(len(lim)):
            if data_value > lim[j]:
                rgb = palette[j + 1]
        draw.text((x, y), message, font=smallfont, fill=rgb)
    st7735.display(img)


# Get the temperature of the CPU for compensation
def get_cpu_temperature():
    process = Popen(['vcgencmd', 'measure_temp'], stdout=PIPE, universal_newlines=True)
    output, _error = process.communicate()
    return float(output[output.index('=') + 1:output.rindex("'")])


def main():
    # Tuning factor for compensation. Decrease this number to adjust the
    # temperature down, and increase to adjust up
    factor = 2.25

    cpu_temps = [get_cpu_temperature()] * 5

    delay = 0.5  # Debounce the proximity tap
    mode = 10    # The starting mode
    last_page = 0

    for v in variables:
        values[v] = [1] * WIDTH

    # The main loop
    try:
        while True:
            proximity = ltr559.get_proximity()

            # If the proximity crosses the threshold, toggle the mode
            if proximity > 1500 and time.time() - last_page > delay:
                mode += 1
                mode %= (len(variables) + 1)
                last_page = time.time()

            # One mode for each variable
            if mode == 0:
                # variable = "temperature"
                unit = "C"
                cpu_temp = get_cpu_temperature()
                # Smooth out with some averaging to decrease jitter
                cpu_temps = cpu_temps[1:] + [cpu_temp]
                avg_cpu_temp = sum(cpu_temps) / float(len(cpu_temps))
                raw_temp = bme280.get_temperature()
                data = raw_temp - ((avg_cpu_temp - raw_temp) / factor)
                display_text(variables[mode], data, unit)

            if mode == 1:
                # variable = "pressure"
                unit = "hPa"
                data = bme280.get_pressure()
                display_text(variables[mode], data, unit)

            if mode == 2:
                # variable = "humidity"
                unit = "%"
                data = bme280.get_humidity()
                display_text(variables[mode], data, unit)

            if mode == 3:
                # variable = "light"
                unit = "Lux"
                if proximity < 10:
                    data = ltr559.get_lux()
                else:
                    data = 1
                display_text(variables[mode], data, unit)

            if mode == 4:
                # variable = "oxidised"
                unit = "kO"
                data = gas.read_all()
                data = data.oxidising / 1000
                display_text(variables[mode], data, unit)

            if mode == 5:
                # variable = "reduced"
                unit = "kO"
                data = gas.read_all()
                data = data.reducing / 1000
                display_text(variables[mode], data, unit)

            if mode == 6:
                # variable = "nh3"
                unit = "kO"
                data = gas.read_all()
                data = data.nh3 / 1000
                display_text(variables[mode], data, unit)

            if mode == 7:
                # variable = "pm1"
                unit = "ug/m3"
                try:
                    data = pms5003.read()
                except pmsReadTimeoutError:
                    logging.warning("Failed to read PMS5003")
                else:
                    data = float(data.pm_ug_per_m3(1.0))
                    display_text(variables[mode], data, unit)

            if mode == 8:
                # variable = "pm25"
                unit = "ug/m3"
                try:
                    data = pms5003.read()
                except pmsReadTimeoutError:
                    logging.warning("Failed to read PMS5003")
                else:
                    data = float(data.pm_ug_per_m3(2.5))
                    display_text(variables[mode], data, unit)

            if mode == 9:
                # variable = "pm10"
                unit = "ug/m3"
                try:
                    data = pms5003.read()
                except pmsReadTimeoutError:
                    logging.warning("Failed to read PMS5003")
                else:
                    data = float(data.pm_ug_per_m3(10))
                    display_text(variables[mode], data, unit)

            if mode == 10:
                # Everything on one screen
                cpu_temp = get_cpu_temperature()
                # Smooth out with some averaging to decrease jitter
                cpu_temps = cpu_temps[1:] + [cpu_temp]
                avg_cpu_temp = sum(cpu_temps) / float(len(cpu_temps))
                raw_temp = bme280.get_temperature()
                raw_data = raw_temp - ((avg_cpu_temp - raw_temp) / factor)
                save_data(0, raw_data)
                display_everything()
                raw_data = bme280.get_pressure()
                save_data(1, raw_data)
                display_everything()
                raw_data = bme280.get_humidity()
                save_data(2, raw_data)
                if proximity < 10:
                    raw_data = ltr559.get_lux()
                else:
                    raw_data = 1
                save_data(3, raw_data)
                display_everything()
                gas_data = gas.read_all()
                save_data(4, gas_data.oxidising / 1000)
                save_data(5, gas_data.reducing / 1000)
                save_data(6, gas_data.nh3 / 1000)
                display_everything()
                pms_data = None
                try:
                    pms_data = pms5003.read()
                except (SerialTimeoutError, pmsReadTimeoutError):
                    logging.warning("Failed to read PMS5003")
                else:
                    save_data(7, float(pms_data.pm_ug_per_m3(1.0)))
                    save_data(8, float(pms_data.pm_ug_per_m3(2.5)))
                    save_data(9, float(pms_data.pm_ug_per_m3(10)))
                    display_everything()

    # Exit cleanly
    except KeyboardInterrupt:
        sys.exit(0)


if __name__ == "__main__":
    main()
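# Worked example of the compensation used in main() (numbers are illustrative
# only): with factor = 2.25, avg_cpu_temp = 40.0 and raw_temp = 25.0,
#   data = 25.0 - ((40.0 - 25.0) / 2.25) ~= 25.0 - 6.7 ~= 18.3
# i.e. the reported temperature is pulled down in proportion to how much the
# CPU is estimated to be heating the BME280 sensor.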
py
b417cd99fc5c9ea98433504861a7f9e4ad7ef13a
#!/usr/bin/env python3
# Copyright (c) 2013-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import biplist
from ds_store import DSStore
from mac_alias import Alias

import sys

output_file = sys.argv[1]
package_name_ns = sys.argv[2]

ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
    'ShowStatusBar': False,
    'WindowBounds': '{{300, 280}, {500, 343}}',
    'ContainerShowSidebar': False,
    'SidebarWidth': 0,
    'ShowTabView': False,
    'PreviewPaneVisibility': False,
    'ShowToolbar': False,
    'ShowSidebar': False,
    'ShowPathbar': True
}

icvp = {
    'gridOffsetX': 0.0,
    'textSize': 12.0,
    'viewOptionsVersion': 1,
    'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
    'backgroundColorBlue': 1.0,
    'iconSize': 96.0,
    'backgroundColorGreen': 1.0,
    'arrangeBy': 'none',
    'showIconPreview': True,
    'gridSpacing': 100.0,
    'gridOffsetY': 0.0,
    'showItemInfo': False,
    'labelOnBottom': True,
    'backgroundType': 2,
    'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])

alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00pfandcoinuser:\x00Documents:\x00pfandcoin:\x00pfandcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/pfandcoinuser/Documents/pfandcoin/pfandcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp

ds['.']['vSrn'] = ('long', 1)

ds['Applications']['Iloc'] = (370, 156)
ds['Pfandcoin-Qt.app']['Iloc'] = (128, 156)

ds.flush()
ds.close()
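# Usage sketch (inferred from the sys.argv reads above; the script name is
# whatever this file is saved as):
#   python3 <this_script.py> <output .DS_Store path> <package/volume name>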
py
b417d0027832c9b1a02be55460469e8912a75427
# coding: utf-8

import gym
import argparse
from dynamics import *
from controller import *
from utils import *
from quanser_robots.common import GentlyTerminating
import time

parser = argparse.ArgumentParser(description='Specify the configuration file path')
parser.add_argument('--path', required=False, type=str, default='config.yml',
                    help='Specify the configuration file path')
args = parser.parse_args()

config_path = args.path  # "config.yml"
config = load_config(config_path)
print_config(config_path)

env_id = "DoublePendulum-v0"
env = GentlyTerminating(gym.make(env_id))

model = DynamicModel(config)
data_fac = DatasetFactory(env, config)
data_fac.collect_random_dataset()

loss = model.train(data_fac.random_trainset, data_fac.random_testset)

mpc = MPC(env, config)
rewards_list = []
for itr in range(config["dataset_config"]["n_mpc_itrs"]):
    t = time.time()
    print("**********************************************")
    print("The reinforce process [%s], collecting data ..." % itr)
    rewards = data_fac.collect_mpc_dataset(mpc, model)
    trainset, testset = data_fac.make_dataset()
    rewards_list += rewards

    plt.close("all")
    plt.figure(figsize=(12, 5))
    plt.title('Reward Trend with %s iteration' % itr)
    plt.plot(rewards_list)
    plt.savefig("storage/reward-" + str(model.exp_number) + ".png")
    print("Consume %s s in this iteration" % (time.time() - t))
    loss = model.train(trainset, testset)
py
b417d08572b484637c0828aff196dcbaa3cc927e
'''Setup: algorithms.'''

## External modules.
import numpy as np

## Internal modules.
from mml.algos.gd import GD_ERM


###############################################################################


## Simple parser for algorithm objects.
## Note that step size is modulated here by dimension.

def get_algo(name, model, loss, **kwargs):
    if name == "SGD":
        return GD_ERM(step_coef=kwargs["step_size"],
                      model=model, loss=loss)
    else:
        raise ValueError("Please pass a valid algorithm name.")


###############################################################################
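## Usage sketch (illustrative): `model` and `loss` are assumed to be objects
## constructed elsewhere with the mml package; only the "SGD" name and the
## step_size keyword come from get_algo() above.
##
##     algo = get_algo("SGD", model, loss, step_size=0.01)
##
## Any other name raises ValueError("Please pass a valid algorithm name.").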
py
b417d0dcd4947f601a35db485e81ecc3fe568189
def isUgly(self, num):
    return num > 0 == 30**32 % num


# Don't worry about the runtime, it's mostly judge overhead. Check this out:

def isUgly(self, num):
    return [num > 0 == 30**32 % num for _ in range(1000)][-1]

# That does it 1000 times and gets accepted in about 420 ms.
# Meaning the actual solution takes only about 0.4 ms and the rest of the
# ~60 ms is judge overhead, and it varies considerably.

# https://discuss.leetcode.com/topic/42589/python-1-line-solution
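# Why the one-liner works (sketch): 30 == 2 * 3 * 5, so 30**32 == 2**32 * 3**32 * 5**32.
# Any ugly number fits in a 32-bit signed int, so each of its prime exponents is
# well below 32 and the number divides 30**32 exactly; a number with any other
# prime factor cannot. The chained comparison `num > 0 == 30**32 % num` reads as
# `(num > 0) and (0 == 30**32 % num)`.
#
# Illustrative brute-force cross-check (not from the original post):
def _is_ugly_reference(num):
    if num <= 0:
        return False
    for p in (2, 3, 5):
        while num % p == 0:
            num //= p
    return num == 1


assert all((n > 0 == 30**32 % n) == _is_ugly_reference(n) for n in range(-5, 1000))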
py
b417d0f8a140fe2599d20f4146a4174c526b2ce9
from itertools import zip_longest


class Solution:
    def leafSimilar(self, root1, root2) -> bool:
        # yields leaf nodes recursively.
        def yield_leafs(node):
            if not node.left and not node.right:
                yield node.val
            if node.left:
                yield from yield_leafs(node.left)
            if node.right:
                yield from yield_leafs(node.right)

        # check that they are all equal.
        yl = yield_leafs
        return all(i == j for i, j in zip_longest(yl(root1), yl(root2)))
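# Note on the zip_longest choice above: if one tree yields more leaf values
# than the other, the extra leaves are paired with None, so the all(...) check
# correctly returns False instead of silently truncating the way zip() would.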
py
b417d1d810b9de26004b5ccef10bde48966420f2
import sys
import os
import uuid
import shutil

sys.path.append("..")
from config import DEFAULT_TABLE, TOP_K
from logs import LOGGER
from frame_extract import FrameExtract


def get_object_vector(model, path):
    images = os.listdir(path)
    images.sort()
    vectors = []
    times = []
    time = 0
    for image in images:
        obj_vecs = model.yolo(path + '/' + image)
        for vec in obj_vecs:
            vectors.append(vec)
        time = time + 1
        new_time = '%010d' % (time)
        for _ in range(len(obj_vecs)):
            times.append(new_time)
    return vectors, times


def do_search(table_name, video_path, model, milvus_client, mysql_cli):
    try:
        if not table_name:
            table_name = DEFAULT_TABLE
        fe = FrameExtract()
        obj_path, _ = fe.extract_frame(video_path)
        paths = []
        objects = []
        vecs, times = get_object_vector(model, obj_path)
        #print(len(vecs))
        results = milvus_client.search_vectors(collection_name=table_name, vectors=vecs, top_k=TOP_K)
        ids = []
        distances = []
        for result in results:
            ids.append(result[0].id)
            distances.append(result[0].distance)
        paths, objects = mysql_cli.search_by_milvus_ids(ids, table_name)
        shutil.rmtree(obj_path)
        return paths, objects, distances, times
    except Exception as e:
        LOGGER.error(f"Error with search : {e}")
        sys.exit(1)
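# Usage note (sketch, based only on the code above): do_search() returns four
# parallel lists - the paths and objects looked up in MySQL for the matched
# Milvus ids, the corresponding Milvus distances, and the zero-padded per-frame
# counters from get_object_vector() - with one entry per detected object.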
py
b417d500a2c02223468545b4d9bd5ff0e30baebd
from kapteyn import maputils
from matplotlib import pyplot as plt
import numpy

# This script shows that you can plot shapes that cross the pole.
# A shape is plotted with respect to its center and the border points
# are derived in a way that distance and angle are correct for a sphere.
# This makes it impossible to have objects centered at the pole because at
# the pole, longitudes are undefined. To avoid this problem, one can shift
# the center of such shapes a little as we have done with pcra and
# pcdec below.
# The try/excepts in this program are to catch problems with special
# projections (e.g. NCP where dec > 0)

delta = 0.0001
pcra = delta
pcdec = 90. - delta


def shapes(proj, fig, plnr, crval2=0.0, **pv):
    naxis1 = 800; naxis2 = 800
    header = {'NAXIS': 2, 'NAXIS1': naxis1, 'NAXIS2': naxis2,
              'CRPIX1': naxis1/2.0, 'CRPIX2': naxis2/2.0,
              'CRVAL1': 0.0, 'CRVAL2': crval2,
              'CDELT1': -0.5, 'CDELT2': 0.5,
              'CUNIT1': 'deg', 'CUNIT2': 'deg',
              'CTYPE1': 'RA---%s'%proj, 'CTYPE2': 'DEC--%s'%proj}
    if len(pv):
        header.update(pv)
    X = numpy.arange(0, 390.0, 30.0); Y = numpy.arange(-30, 91, 30.0)
    f = maputils.FITSimage(externalheader=header)
    frame = fig.add_subplot(2, 2, plnr)
    annim = f.Annotatedimage(frame)
    grat = annim.Graticule(axnum=(1, 2), wylim=(-30.0, 90.0), wxlim=(-180, 180),
                           startx=X, starty=Y)
    grat.setp_gratline(color='0.75')
    if plnr in [1, 2]:
        grat.setp_axislabel(plotaxis='bottom', visible=False)
    print("Projection %d is %s" % (plnr, proj))

    # Ellipse centered on crossing of two graticule lines
    try:
        annim.Skypolygon("ellipse", cpos="5h00m 20d0m", major=50, minor=30,
                         pa=-30.0, fill=False)
        print("Plotted ellipse with cpos='5h00m 20d0m', major=50, minor=30, pa=-30.0, fill=False")
    except:
        print("Failed to plot ellipse")

    # Ellipse at given pixel coordinates
    try:
        cpos = "%f %f" % (naxis1/2.0+20, naxis2/2.0+10)
        annim.Skypolygon("ellipse", cpos=cpos, major=40, minor=10, pa=0.0, fc='m')
        print("Plotted ellipse major=40, minor=10, pa=-30.0, fc='m'")
    except:
        print("Failed to plot ellipse")

    # Circle with radius in arc minutes
    try:
        annim.Skypolygon("ellipse", xc=pcra, yc=pcdec,
                         #cpos="0 deg 60 deg",
                         major=30, minor=30, fc='g', alpha=0.3, lw=3, ec='r')
        print("Plotted red circle, green with red border transparent")
    except:
        print("Failed to plot circle")

    # Rectangle at the projection center
    try:
        annim.Skypolygon("rectangle", xc=pcra, yc=pcdec, major=50, minor=20,
                         pa=30.0, ec='g', fc='b', alpha=0.3)
        print("Plotted blue rectangle at projection center")
    except:
        print("Failed to plot blue rectangle at projection center")

    # Square centered at 315 deg -45 deg and with size equal
    # to distance on sphere between 300,-30 and 330,-30 deg (=25.9)
    try:
        annim.Skypolygon("rectangle", cpos="315 deg -45 deg", major=25.9, minor=25.9,
                         pa=0.0, ec='g', fc='#ff33dd', alpha=0.8)
        print("Plotted square with color #ff33dd")
    except:
        print("Failed to plot square")

    # Regular polygon with 6 angles at some position in galactic coordinates
    try:
        annim.Skypolygon("npoly", cpos="ga 102d11m35.239s ga 59d50m25.734",
                         major=20, nangles=6, ec='g', fc='y', alpha=0.3)
        print("Plotted npoly in yellow")
    except:
        print("Failed to plot regular polygon")

    # Regular polygon as a triangle
    try:
        annim.Skypolygon("npolygon", cpos="ga 0 ga 90", major=70, nangles=3,
                         ec='g', fc='c', alpha=0.7)
        print("Plotted npoly triangle in cyan")
    except:
        print("Failed to plot triangle")

    # Set of (absolute) coordinates, no prescription
    lons = [270, 240, 240, 270]
    lats = [-30, -30, 0, 0]
    try:
        annim.Skypolygon(prescription=None, lons=lons, lats=lats, fc='r', alpha=0.9)
        print("Plotted polygon without prescription")
    except:
        print("Failed to plot set of coordinates as polygon")

    grat.Insidelabels(wcsaxis=0, world=list(range(0, 360, 30)), constval=0,
                      fmt='Hms', color='b', fontsize=5)
    grat.Insidelabels(wcsaxis=1, world=[-60, -30, 30, 60], constval=0,
                      fmt='Dms', color='b', fontsize=5)
    annim.interact_toolbarinfo()
    annim.interact_writepos(wcsfmt="%f", zfmt=None, pixfmt=None, hmsdms=False)
    frame.set_title(proj, y=0.8)
    annim.plot()


fig = plt.figure()
fig.subplots_adjust(left=0.03, bottom=0.05, right=0.97, top=0.97,
                    wspace=0.02, hspace=0.02)
shapes("STG", fig, 1, crval2=90)
shapes("ARC", fig, 2, crval2=90)
pvkwargs = {'PV2_0': 0.05, 'PV2_1': 0.975, 'PV2_2': -0.807, 'PV2_3': 0.337,
            'PV2_4': -0.065, 'PV2_5': 0.01, 'PV2_6': 0.003, 'PV2_7': -0.001}
shapes("ZPN", fig, 3, crval2=90, **pvkwargs)
shapes("NCP", fig, 4, crval2=90)
#xi = -1/numpy.sqrt(6); eta = 1/numpy.sqrt(6)
#shapes("SIN", fig, 4, crval2=90, PV2_1=xi, PV2_2=eta)
plt.show()
py
b417d5c3aef369e5295e7bc39bc168325db3b754
# -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2019-12-20 15:06
from __future__ import unicode_literals

import wagtail.core.blocks
import wagtail.core.fields
import wagtail.documents.blocks
import wagtail.embeds.blocks
import wagtail.images.blocks
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ("pages", "0011_svg_image_block"),
    ]

    operations = [
        migrations.AlterField(
            model_name="contentpage",
            name="body",
            field=wagtail.core.fields.StreamField([
                ("paragraph", wagtail.core.blocks.RichTextBlock(
                    features=["h2", "h3", "bold", "italic", "link", "ol", "ul", "hr", "blockquote", "document"])),
                ("captioned_image", wagtail.core.blocks.StructBlock([
                    ("image", wagtail.images.blocks.ImageChooserBlock()),
                    ("alternative_text", wagtail.core.blocks.TextBlock(
                        help_text="Alternative text for visually impaired users to\nbriefly communicate the intended message of the image in this context.",
                        required=True)),
                    ("caption", wagtail.core.blocks.RichTextBlock(
                        features=["bold", "italic", "link"], required=False)),
                    ("style", wagtail.core.blocks.ChoiceBlock(
                        choices=[("full", "Full Width"), ("left", "Floated Left"), ("right", "Floated Right")],
                        help_text="Controls how other content flows around the image. Note that this will only take effect on larger screens. Float consecutive images in opposite directions for side-by-side display.")),
                ], label="image")),
                ("svg_image", wagtail.core.blocks.StructBlock([
                    ("image", wagtail.documents.blocks.DocumentChooserBlock()),
                    ("alternative_text", wagtail.core.blocks.TextBlock(
                        help_text="Alternative text for visually impaired users to\nbriefly communicate the intended message of the image in this context.",
                        required=True)),
                    ("caption", wagtail.core.blocks.RichTextBlock(
                        features=["bold", "italic", "link"], required=False)),
                    ("extended_description", wagtail.core.blocks.RichTextBlock(
                        features=["p"],
                        help_text="This text will only be read to non-sighted users and should describe the major insights or takeaways from the graphic. Multiple paragraphs are allowed.",
                        required=False)),
                ])),
                ("footnotes", wagtail.core.blocks.RichTextBlock(
                    classname="footnotes", features=["ol", "ul", "bold", "italic", "link"])),
                ("document", wagtail.documents.blocks.DocumentChooserBlock()),
                ("linkable_section", wagtail.core.blocks.StructBlock([
                    ("title", wagtail.core.blocks.CharBlock()),
                    ("anchor_text", wagtail.core.blocks.CharBlock(help_text="Short label for anchor link")),
                    ("body", wagtail.core.blocks.RichTextBlock()),
                ])),
                ("embed", wagtail.embeds.blocks.EmbedBlock()),
            ]),
        ),
        migrations.AlterField(
            model_name="contributorpage",
            name="body",
            field=wagtail.core.fields.StreamField([
                ("paragraph", wagtail.core.blocks.RichTextBlock(
                    features=["h2", "h3", "bold", "italic", "link", "ol", "ul", "hr", "blockquote", "document"])),
                ("captioned_image", wagtail.core.blocks.StructBlock([
                    ("image", wagtail.images.blocks.ImageChooserBlock()),
                    ("alternative_text", wagtail.core.blocks.TextBlock(
                        help_text="Alternative text for visually impaired users to\nbriefly communicate the intended message of the image in this context.",
                        required=True)),
                    ("caption", wagtail.core.blocks.RichTextBlock(
                        features=["bold", "italic", "link"], required=False)),
                    ("style", wagtail.core.blocks.ChoiceBlock(
                        choices=[("full", "Full Width"), ("left", "Floated Left"), ("right", "Floated Right")],
                        help_text="Controls how other content flows around the image. Note that this will only take effect on larger screens. Float consecutive images in opposite directions for side-by-side display.")),
                ], label="image")),
                ("svg_image", wagtail.core.blocks.StructBlock([
                    ("image", wagtail.documents.blocks.DocumentChooserBlock()),
                    ("alternative_text", wagtail.core.blocks.TextBlock(
                        help_text="Alternative text for visually impaired users to\nbriefly communicate the intended message of the image in this context.",
                        required=True)),
                    ("caption", wagtail.core.blocks.RichTextBlock(
                        features=["bold", "italic", "link"], required=False)),
                    ("extended_description", wagtail.core.blocks.RichTextBlock(
                        features=["p"],
                        help_text="This text will only be read to non-sighted users and should describe the major insights or takeaways from the graphic. Multiple paragraphs are allowed.",
                        required=False)),
                ])),
                ("footnotes", wagtail.core.blocks.RichTextBlock(
                    classname="footnotes", features=["ol", "ul", "bold", "italic", "link"])),
                ("document", wagtail.documents.blocks.DocumentChooserBlock()),
                ("linkable_section", wagtail.core.blocks.StructBlock([
                    ("title", wagtail.core.blocks.CharBlock()),
                    ("anchor_text", wagtail.core.blocks.CharBlock(help_text="Short label for anchor link")),
                    ("body", wagtail.core.blocks.RichTextBlock()),
                ])),
                ("embed", wagtail.embeds.blocks.EmbedBlock()),
            ], blank=True),
        ),
    ]